annotate VMS.c @ 48:054006c26b92

Added instrumentation to measure master time, master lock time and create time
author Me
date Tue, 26 Oct 2010 18:18:30 -0700
parents 72373405c816
children 984f7d78bfdf
rev   line source
Me@0 1 /*
Me@38 2 * Copyright 2010 OpenSourceStewardshipFoundation
Me@0 3 *
Me@0 4 * Licensed under BSD
Me@0 5 */
Me@0 6
Me@0 7 #include <stdio.h>
Me@0 8 #include <stdlib.h>
Me@0 9 #include <malloc.h>
Me@0 10
Me@0 11 #include "VMS.h"
Me@0 12 #include "Queue_impl/BlockingQueue.h"
Me@38 13 #include "Histogram/Histogram.h"
Me@0 14
Me@0 15
//pthread_create attribute set: NULL = default attributes
#define thdAttrs NULL

//===========================================================================
//Forward declarations of file-local setup helpers (defined below)
void
shutdownFn( void *dummy, VirtProcr *dummy2 );

SchedSlot **
create_sched_slots();

void
create_masterEnv();

void
create_the_coreLoop_OS_threads();

//Lock + condition used to hold the core-loop threads suspended until
// setup finishes -- broadcast in VMS__start_the_work_then_wait_until_done
pthread_mutex_t suspendLock = PTHREAD_MUTEX_INITIALIZER;
pthread_cond_t suspend_cond = PTHREAD_COND_INITIALIZER;
Me@22 34 //===========================================================================
Me@22 35
Me@0 36 /*Setup has two phases:
Me@0 37 * 1) Semantic layer first calls init_VMS, which creates masterEnv, and puts
Me@8 38 * the master virt procr into the work-queue, ready for first "call"
Me@8 39 * 2) Semantic layer then does its own init, which creates the seed virt
Me@8 40 * procr inside the semantic layer, ready to schedule it when
Me@0 41 * asked by the first run of the masterLoop.
Me@0 42 *
Me@0 43 *This part is a bit weird because VMS really wants to be "always there", and
Me@0 44 * have applications attach and detach.. for now, this VMS is part of
Me@0 45 * the app, so the VMS system starts up as part of running the app.
Me@0 46 *
Me@8 47 *The semantic layer is isolated from the VMS internals by making the
Me@8 48 * semantic layer do setup to a state that it's ready with its
Me@8 49 * initial virt procrs, ready to schedule them to slots when the masterLoop
Me@0 50 * asks. Without this pattern, the semantic layer's setup would
Me@8 51 * have to modify slots directly to assign the initial virt-procrs, and put
Me@31 52 * them into the readyToAnimateQ itself, breaking the isolation completely.
Me@0 53 *
Me@0 54 *
Me@8 55 *The semantic layer creates the initial virt procr(s), and adds its
Me@8 56 * own environment to masterEnv, and fills in the pointers to
Me@0 57 * the requestHandler and slaveScheduler plug-in functions
Me@8 58 */
Me@8 59
Me@8 60 /*This allocates VMS data structures, populates the master VMSProc,
Me@0 61 * and master environment, and returns the master environment to the semantic
Me@0 62 * layer.
Me@0 63 */
void
VMS__init()
{
   create_masterEnv();                 //phase 1: master env + per-core structs
   create_the_coreLoop_OS_threads();   //then the pinned core-loop OS threads
}
Me@28 70
/*To initialize the sequential version, just don't create the threads
 */
void
VMS__init_Seq()
{
   create_masterEnv();  //single-threaded: coreLoop_Seq runs in main thread later
}
Me@28 78
Me@28 79 void
Me@28 80 create_masterEnv()
Me@31 81 { MasterEnv *masterEnv;
Me@47 82 VMSQueueStruc **readyToAnimateQs;
Me@31 83 int coreIdx;
Me@31 84 VirtProcr **masterVPs;
Me@31 85 SchedSlot ***allSchedSlots; //ptr to array of ptrs
Me@31 86
Me@31 87 //Make the master env, which holds everything else
Me@1 88 _VMSMasterEnv = malloc( sizeof(MasterEnv) );
Me@1 89 masterEnv = _VMSMasterEnv;
Me@31 90 //Need to set start pt here 'cause used by seed procr, which is created
Me@31 91 // before the first core loop starts up. -- not sure how yet..
Me@31 92 // masterEnv->coreLoopStartPt = ;
Me@31 93 // masterEnv->coreLoopEndPt = ;
Me@31 94
Me@31 95 //Make a readyToAnimateQ for each core loop
Me@47 96 readyToAnimateQs = malloc( NUM_CORES * sizeof(VMSQueueStruc *) );
Me@31 97 masterVPs = malloc( NUM_CORES * sizeof(VirtProcr *) );
Me@0 98
Me@31 99 //One array for each core, 3 in array, core's masterVP scheds all
Me@31 100 allSchedSlots = malloc( NUM_CORES * sizeof(SchedSlot *) );
Me@0 101
Me@31 102 for( coreIdx = 0; coreIdx < NUM_CORES; coreIdx++ )
Me@31 103 {
Me@31 104 readyToAnimateQs[ coreIdx ] = makeSRSWQ();
Me@31 105
Me@31 106 //Q: should give masterVP core-specific into as its init data?
Me@31 107 masterVPs[ coreIdx ] = VMS__create_procr( &masterLoop, masterEnv );
Me@31 108 masterVPs[ coreIdx ]->coreAnimatedBy = coreIdx;
Me@31 109 allSchedSlots[ coreIdx ] = create_sched_slots(); //makes for one core
Me@31 110 }
Me@31 111 _VMSMasterEnv->readyToAnimateQs = readyToAnimateQs;
Me@31 112 _VMSMasterEnv->masterVPs = masterVPs;
Me@31 113 _VMSMasterEnv->allSchedSlots = allSchedSlots;
Me@0 114
Me@48 115 //============================= MEASUREMENT STUFF ========================
Me@48 116 #ifdef MEAS__TIME_MASTER
Me@28 117
Me@48 118 _VMSMasterEnv->stats->masterTimeHist = makeHistogram( 25, 500, 800 );
Me@48 119 _VMSMasterEnv->stats->masterLockHist = makeHistogram( 25, 0, 100000 );
Me@48 120 _VMSMasterEnv->stats->createHist = makeHistogram( 25, 0, 5000 );
Me@48 121 #endif
Me@48 122 //========================================================================
Me@12 123
Me@31 124 //Aug 19, 2010: no longer need to place initial masterVP into queue
Me@31 125 // because coreLoop now controls -- animates its masterVP when no work
Me@31 126
Me@30 127
Me@30 128 //==================== malloc substitute ========================
Me@30 129 //
Me@30 130 //Testing whether malloc is using thread-local storage and therefore
Me@30 131 // causing unreliable behavior.
Me@30 132 //Just allocate a massive chunk of memory and roll own malloc/free and
Me@30 133 // make app use VMS__malloc_to, which will suspend and perform malloc
Me@30 134 // in the master, taking from this massive chunk.
Me@30 135
Me@30 136 // initFreeList();
Me@38 137
Me@0 138 }
Me@0 139
Me@30 140 /*
Me@30 141 void
Me@30 142 initMasterMalloc()
Me@30 143 {
Me@30 144 _VMSMasterEnv->mallocChunk = malloc( MASSIVE_MALLOC_SIZE );
Me@30 145
Me@30 146 //The free-list element is the first several locations of an
Me@30 147 // allocated chunk -- the address given to the application is pre-
Me@30 148 // pended with both the ownership structure and the free-list struc.
Me@30 149 //So, write the values of these into the first locations of
Me@30 150 // mallocChunk -- which marks it as free & puts in its size.
Me@30 151 listElem = (FreeListElem *)_VMSMasterEnv->mallocChunk;
Me@30 152 listElem->size = MASSIVE_MALLOC_SIZE - NUM_PREPEND_BYTES
Me@30 153 listElem->next = NULL;
Me@30 154 }
Me@30 155
Me@30 156 void
Me@30 157 dissipateMasterMalloc()
Me@30 158 {
Me@30 159 //Just foo code -- to get going -- doing as if free list were link-list
Me@30 160 currElem = _VMSMasterEnv->freeList;
Me@30 161 while( currElem != NULL )
Me@30 162 {
Me@30 163 nextElem = currElem->next;
Me@30 164 masterFree( currElem );
Me@30 165 currElem = nextElem;
Me@30 166 }
Me@30 167 free( _VMSMasterEnv->freeList );
Me@30 168 }
Me@30 169 */
Me@30 170
Me@31 171 SchedSlot **
Me@31 172 create_sched_slots()
Me@31 173 { SchedSlot **schedSlots;
Me@0 174 int i;
Me@0 175
Me@8 176 schedSlots = malloc( NUM_SCHED_SLOTS * sizeof(SchedSlot *) );
Me@8 177
Me@1 178 for( i = 0; i < NUM_SCHED_SLOTS; i++ )
Me@0 179 {
Me@8 180 schedSlots[i] = malloc( sizeof(SchedSlot) );
Me@8 181
Me@1 182 //Set state to mean "handling requests done, slot needs filling"
Me@8 183 schedSlots[i]->workIsDone = FALSE;
Me@8 184 schedSlots[i]->needsProcrAssigned = TRUE;
Me@0 185 }
Me@31 186 return schedSlots;
Me@31 187 }
Me@31 188
Me@31 189
Me@31 190 void
Me@31 191 freeSchedSlots( SchedSlot **schedSlots )
Me@31 192 { int i;
Me@31 193 for( i = 0; i < NUM_SCHED_SLOTS; i++ )
Me@31 194 {
Me@31 195 free( schedSlots[i] );
Me@31 196 }
Me@31 197 free( schedSlots );
Me@0 198 }
Me@0 199
Me@8 200
Me@28 201 void
Me@28 202 create_the_coreLoop_OS_threads()
Me@28 203 {
Me@28 204 //========================================================================
Me@28 205 // Create the Threads
Me@47 206 int coreIdx, retCode, i;
Me@47 207
Me@47 208 //create the arrays used to measure TSC offsets between cores
Me@47 209 pongNums = malloc( NUM_CORES * sizeof( int ) );
Me@47 210 pingTimes = malloc( NUM_CORES * NUM_TSC_ROUND_TRIPS * sizeof( TSCount ) );
Me@47 211 pongTimes = malloc( NUM_CORES * NUM_TSC_ROUND_TRIPS * sizeof( TSCount ) );
Me@47 212
Me@47 213 for( coreIdx = 0; coreIdx < NUM_CORES; coreIdx++ )
Me@47 214 {
Me@47 215 pongNums[ coreIdx ] = 0;
Me@47 216 for( i = 0; i < NUM_TSC_ROUND_TRIPS; i++ )
Me@47 217 {
Me@47 218 pingTimes[ coreIdx * NUM_TSC_ROUND_TRIPS + i ] = (TSCount) 0;
Me@47 219 pingTimes[ coreIdx * NUM_TSC_ROUND_TRIPS + i ] = (TSCount) 0;
Me@47 220 }
Me@47 221 }
Me@28 222
Me@28 223 //Need the threads to be created suspended, and wait for a signal
Me@28 224 // before proceeding -- gives time after creating to initialize other
Me@28 225 // stuff before the coreLoops set off.
Me@28 226 _VMSMasterEnv->setupComplete = 0;
Me@28 227
Me@28 228 //Make the threads that animate the core loops
Me@28 229 for( coreIdx=0; coreIdx < NUM_CORES; coreIdx++ )
Me@28 230 { coreLoopThdParams[coreIdx] = malloc( sizeof(ThdParams) );
Me@28 231 coreLoopThdParams[coreIdx]->coreNum = coreIdx;
Me@28 232
Me@28 233 retCode =
Me@28 234 pthread_create( &(coreLoopThdHandles[coreIdx]),
Me@28 235 thdAttrs,
Me@28 236 &coreLoop,
Me@28 237 (void *)(coreLoopThdParams[coreIdx]) );
Me@28 238 if(retCode){printf("ERROR creating thread: %d\n", retCode); exit(0);}
Me@28 239 }
Me@28 240 }
Me@28 241
Me@0 242 /*Semantic layer calls this when it want the system to start running..
Me@0 243 *
Me@24 244 *This starts the core loops running then waits for them to exit.
Me@0 245 */
Me@12 246 void
Me@24 247 VMS__start_the_work_then_wait_until_done()
Me@12 248 { int coreIdx;
Me@24 249 //Start the core loops running
Me@24 250 //===========================================================================
Me@25 251 TSCount startCount, endCount;
Me@24 252 unsigned long long count = 0, freq = 0;
Me@25 253 double runTime;
Me@0 254
Me@47 255 startCount = getTSC();
Me@25 256
Me@25 257 //tell the core loop threads that setup is complete
Me@25 258 //get lock, to lock out any threads still starting up -- they'll see
Me@25 259 // that setupComplete is true before entering while loop, and so never
Me@25 260 // wait on the condition
Me@26 261 pthread_mutex_lock( &suspendLock );
Me@25 262 _VMSMasterEnv->setupComplete = 1;
Me@26 263 pthread_mutex_unlock( &suspendLock );
Me@26 264 pthread_cond_broadcast( &suspend_cond );
Me@25 265
Me@25 266
Me@24 267 //wait for all to complete
Me@8 268 for( coreIdx=0; coreIdx < NUM_CORES; coreIdx++ )
Me@8 269 {
Me@25 270 pthread_join( coreLoopThdHandles[coreIdx], NULL );
Me@24 271 }
Me@25 272
Me@24 273 //NOTE: do not clean up VMS env here -- semantic layer has to have
Me@24 274 // a chance to clean up its environment first, then do a call to free
Me@24 275 // the Master env and rest of VMS locations
Me@24 276
Me@24 277
Me@47 278 endCount = getTSC();
Me@25 279 count = endCount - startCount;
Me@24 280
Me@25 281 runTime = (double)count / (double)TSCOUNT_FREQ;
Me@25 282
Me@25 283 printf("\n Time startup to shutdown: %f\n", runTime); fflush( stdin );
Me@8 284 }
Me@0 285
/*Only difference between version with an OS thread pinned to each core and
 * the sequential version of VMS is VMS__init_Seq, this, and coreLoop_Seq.
 */
void
VMS__start_the_work_then_wait_until_done_Seq()
{
   //Instead of un-suspending threads, just call the one and only
   // core loop (sequential version), in the main thread.
   coreLoop_Seq( NULL );   //returns only when all the work is done

}
Me@28 297
Me@0 298
Me@0 299
Me@8 300 /*Create stack, then create __cdecl structure on it and put initialData and
Me@8 301 * pointer to the new structure instance into the parameter positions on
Me@8 302 * the stack
Me@8 303 *Then put function pointer into nextInstrPt -- the stack is setup in std
Me@8 304 * call structure, so jumping to function ptr is same as a GCC generated
Me@8 305 * function call
Me@8 306 *No need to save registers on old stack frame, because there's no old
Me@8 307 * animator state to return to --
Me@8 308 *
Me@8 309 */
Me@8 310 VirtProcr *
Me@8 311 VMS__create_procr( VirtProcrFnPtr fnPtr, void *initialData )
Me@8 312 { VirtProcr *newPr;
Me@8 313 char *stackLocs, *stackPtr;
Me@8 314
Me@48 315 //============================= MEASUREMENT STUFF ========================
Me@48 316 #ifdef MEAS__TIME_MASTER
Me@48 317 int32 startStamp;
Me@48 318 saveLowTimeStampCountInto( startStamp );
Me@48 319 #endif
Me@48 320 //========================================================================
Me@48 321
Me@8 322 newPr = malloc( sizeof(VirtProcr) );
Me@12 323 newPr->procrID = numProcrsCreated++;
Me@8 324 newPr->nextInstrPt = fnPtr;
Me@8 325 newPr->initialData = initialData;
Me@31 326 newPr->requests = NULL;
Me@38 327 newPr->schedSlot = NULL;
Me@31 328 // newPr->coreLoopStartPt = _VMSMasterEnv->coreLoopStartPt;
Me@8 329
Me@14 330 //fnPtr takes two params -- void *initData & void *animProcr
Me@8 331 //alloc stack locations, make stackPtr be the highest addr minus room
Me@14 332 // for 2 params + return addr. Return addr (NULL) is in loc pointed to
Me@14 333 // by stackPtr, initData at stackPtr + 4 bytes, animatingPr just above
Me@22 334 stackLocs = malloc( VIRT_PROCR_STACK_SIZE );
Me@47 335 if(stackLocs == 0) {perror("error: malloc stack"); exit(1);}
Me@22 336 newPr->startOfStack = stackLocs;
Me@22 337 stackPtr = ( (char *)stackLocs + VIRT_PROCR_STACK_SIZE - 0x10 );
Me@8 338 //setup __cdecl on stack -- coreloop will switch to stackPtr before jmp
Me@22 339 *( (int *)stackPtr + 2 ) = (int) newPr; //rightmost param -- 32bit pointer
Me@14 340 *( (int *)stackPtr + 1 ) = (int) initialData; //next param to left
Me@8 341 newPr->stackPtr = stackPtr; //core loop will switch to this, then
Me@8 342 newPr->framePtr = stackPtr; //suspend loop will save new stack & frame ptr
Me@8 343
Me@48 344 //============================= MEASUREMENT STUFF ========================
Me@48 345 #ifdef MEAS__TIME_MASTER
Me@48 346 int32 endStamp;
Me@48 347 saveLowTimeStampCountInto( endStamp );
Me@48 348 addIntervalToHist( startStamp, endStamp,
Me@48 349 _VMSMasterEnv->stats->createHist );
Me@48 350 #endif
Me@48 351 //========================================================================
Me@48 352
Me@8 353 return newPr;
Me@8 354 }
Me@8 355
Me@8 356
/*there is a label inside this function -- save the addr of this label in
 * the callingPr struc, as the pick-up point from which to start the next
 * work-unit for that procr. If turns out have to save registers, then
 * save them in the procr struc too. Then do assembly jump to the CoreLoop's
 * "done with work-unit" label. The procr struc is in the request in the
 * slave that animated the just-ended work-unit, so all the state is saved
 * there, and will get passed along, inside the request handler, to the
 * next work-unit for that procr.
 *
 *NOTE: 32-bit x86 + GCC only (inline asm, "labels as values" extension)
 */
void
VMS__suspend_procr( VirtProcr *animatingPr )
{ void *jmpPt, *stackPtrAddr, *framePtrAddr, *coreLoopStackPtr;
  void *coreLoopFramePtr;

   //The request to master will cause this suspended virt procr to get
   // scheduled again at some future point -- to resume, core loop jumps
   // to the resume point (below), which causes restore of saved regs and
   // "return" from this call.
   animatingPr->nextInstrPt = &&ResumePt;  //GCC &&label = address of label

   //return ownership of the virt procr and sched slot to Master virt pr
   animatingPr->schedSlot->workIsDone = TRUE;
// coreIdx = callingPr->coreAnimatedBy;

   //addresses the asm below stores the live esp/ebp through
   stackPtrAddr = &(animatingPr->stackPtr);
   framePtrAddr = &(animatingPr->framePtr);

   jmpPt = _VMSMasterEnv->coreLoopStartPt;
   coreLoopFramePtr = animatingPr->coreLoopFramePtr;//need this only
   coreLoopStackPtr = animatingPr->coreLoopStackPtr;//safety

   //Save the virt procr's stack and frame ptrs,
   //NOTE(review): %0/%1 are READ by the asm yet declared as outputs ("=g")
   // with no inputs -- works when GCC assigns memory operands, but the
   // constraint direction looks inverted; confirm across GCC versions
   asm volatile("movl %0, %%eax; \
                 movl %%esp, (%%eax); \
                 movl %1, %%eax; \
                 movl %%ebp, (%%eax) "\
   /* outputs */ : "=g" (stackPtrAddr), "=g" (framePtrAddr) \
   /* inputs */ : \
   /* clobber */ : "%eax" \
               );

   //=========================== Measurement stuff ========================
   #ifdef MEAS__TIME_STAMP_SUSP
   //record time stamp: compare to time-stamp recorded below
   saveLowTimeStampCountInto( animatingPr->preSuspTSCLow );
   #endif
   //=======================================================================

   //restore coreloop's frame ptr, then jump back to "start" of core loop
   //Note, GCC compiles to assembly that saves esp and ebp in the stack
   // frame -- so have to explicitly do assembly that saves to memory
   asm volatile("movl %0, %%eax; \
                 movl %1, %%esp; \
                 movl %2, %%ebp; \
                 jmp %%eax " \
   /* outputs */ : \
   /* inputs */ : "m" (jmpPt), "m"(coreLoopStackPtr), "m"(coreLoopFramePtr)\
   /* clobber */ : "memory", "%eax", "%ebx", "%ecx", "%edx", "%edi","%esi" \
               ); //list everything as clobbered to force GCC to save all
                  // live vars that are in regs on stack before this
                  // assembly, so that stack pointer is correct, before jmp

 ResumePt:  //core loop jumps here (via saved nextInstrPt) to resume procr
   #ifdef MEAS__TIME_STAMP_SUSP
   //NOTE: only take low part of count -- do sanity check when take diff
   saveLowTimeStampCountInto( animatingPr->postSuspTSCLow );
   #endif

   return;
}
Me@0 427
Me@22 428
Me@22 429
Me@22 430
Me@38 431 /*
Me@22 432 *This adds a request to dissipate, then suspends the processor so that the
Me@22 433 * request handler will receive the request. The request handler is what
Me@22 434 * does the work of freeing memory and removing the processor from the
Me@22 435 * semantic environment's data structures.
Me@22 436 *The request handler also is what figures out when to shutdown the VMS
Me@22 437 * system -- which causes all the core loop threads to die, and returns from
Me@22 438 * the call that started up VMS to perform the work.
Me@22 439 *
Me@22 440 *This form is a bit misleading to understand if one is trying to figure out
Me@22 441 * how VMS works -- it looks like a normal function call, but inside it
Me@22 442 * sends a request to the request handler and suspends the processor, which
Me@22 443 * jumps out of the VMS__dissipate_procr function, and out of all nestings
Me@22 444 * above it, transferring the work of dissipating to the request handler,
Me@22 445 * which then does the actual work -- causing the processor that animated
Me@22 446 * the call of this function to disappear and the "hanging" state of this
Me@22 447 * function to just poof into thin air -- the virtual processor's trace
Me@22 448 * never returns from this call, but instead the virtual processor's trace
Me@22 449 * gets suspended in this call and all the virt processor's state disap-
Me@22 450 * pears -- making that suspend the last thing in the virt procr's trace.
Me@8 451 */
Me@8 452 void
Me@22 453 VMS__dissipate_procr( VirtProcr *procrToDissipate )
Me@22 454 { VMSReqst *req;
Me@22 455
Me@22 456 req = malloc( sizeof(VMSReqst) );
Me@22 457 // req->virtProcrFrom = callingPr;
Me@22 458 req->reqType = dissipate;
Me@22 459 req->nextReqst = procrToDissipate->requests;
Me@22 460 procrToDissipate->requests = req;
Me@22 461
Me@22 462 VMS__suspend_procr( procrToDissipate );
Me@22 463 }
Me@22 464
Me@22 465
Me@22 466 /*This inserts the semantic-layer's request data into standard VMS carrier
Me@22 467 */
Me@22 468 inline void
Me@24 469 VMS__add_sem_request( void *semReqData, VirtProcr *callingPr )
Me@22 470 { VMSReqst *req;
Me@22 471
Me@22 472 req = malloc( sizeof(VMSReqst) );
Me@22 473 // req->virtProcrFrom = callingPr;
Me@22 474 req->reqType = semantic;
Me@22 475 req->semReqData = semReqData;
Me@22 476 req->nextReqst = callingPr->requests;
Me@22 477 callingPr->requests = req;
Me@22 478 }
Me@22 479
Me@22 480
Me@38 481 /*Use this to get first request before starting request handler's loop
Me@38 482 */
Me@24 483 VMSReqst *
Me@24 484 VMS__take_top_request_from( VirtProcr *procrWithReq )
Me@24 485 { VMSReqst *req;
Me@24 486
Me@24 487 req = procrWithReq->requests;
Me@24 488 if( req == NULL ) return req;
Me@31 489
Me@24 490 procrWithReq->requests = procrWithReq->requests->nextReqst;
Me@24 491 return req;
Me@24 492 }
Me@24 493
Me@38 494 /*A subtle bug due to freeing then accessing "next" after freed caused this
Me@38 495 * form of call to be put in -- so call this at end of request handler loop
Me@38 496 * that iterates through the requests.
Me@38 497 */
Me@31 498 VMSReqst *
Me@31 499 VMS__free_top_and_give_next_request_from( VirtProcr *procrWithReq )
Me@31 500 { VMSReqst *req;
Me@31 501
Me@31 502 req = procrWithReq->requests;
Me@38 503 if( req == NULL ) return NULL;
Me@31 504
Me@31 505 procrWithReq->requests = procrWithReq->requests->nextReqst;
Me@31 506 VMS__free_request( req );
Me@31 507 return procrWithReq->requests;
Me@31 508 }
Me@31 509
Me@38 510
//TODO: add a semantic-layer supplied "freer" for the semantic-data portion
// of a request -- IE call with both a virt procr and a fn-ptr to request
// freer (also maybe put sem request freer as a field in virt procr?)
//MeasVMS relies right now on this only freeing VMS layer of request -- the
// semantic portion of request is alloc'd and freed by request handler
void
VMS__free_request( VMSReqst *req )
{
   free( req );   //frees ONLY the VMS carrier, never req->semReqData
}
Me@38 521
Me@38 522
Me@38 523
//TRUE (non-zero) when the request carries semantic-layer data
inline int
VMS__isSemanticReqst( VMSReqst *req )
{
   return ( req->reqType == semantic );
}
Me@22 529
Me@24 530
//Extracts the semantic layer's payload from a VMS request carrier
inline void *
VMS__take_sem_reqst_from( VMSReqst *req )
{
   return req->semReqData;
}
Me@24 536
//TRUE (non-zero) when the request asks to dissipate a processor
inline int
VMS__isDissipateReqst( VMSReqst *req )
{
   return ( req->reqType == dissipate );
}
Me@24 542
//TRUE (non-zero) when the request registers a newly created processor
inline int
VMS__isCreateReqst( VMSReqst *req )
{
   return ( req->reqType == regCreated );
}
Me@24 548
Me@24 549 void
Me@38 550 VMS__send_req_to_register_new_procr(VirtProcr *newPr, VirtProcr *reqstingPr)
Me@24 551 { VMSReqst *req;
Me@24 552
Me@24 553 req = malloc( sizeof(VMSReqst) );
Me@24 554 req->reqType = regCreated;
Me@24 555 req->semReqData = newPr;
Me@24 556 req->nextReqst = reqstingPr->requests;
Me@24 557 reqstingPr->requests = req;
Me@24 558
Me@24 559 VMS__suspend_procr( reqstingPr );
Me@22 560 }
Me@22 561
Me@22 562
Me@22 563
Me@24 564 /*This must be called by the request handler plugin -- it cannot be called
Me@24 565 * from the semantic library "dissipate processor" function -- instead, the
Me@24 566 * semantic layer has to generate a request for the plug-in to call this
Me@24 567 * function.
Me@24 568 *The reason is that this frees the virtual processor's stack -- which is
Me@24 569 * still in use inside semantic library calls!
Me@24 570 *
Me@24 571 *This frees or recycles all the state owned by and comprising the VMS
Me@24 572 * portion of the animating virtual procr. The request handler must first
Me@24 573 * free any semantic data created for the processor that didn't use the
Me@24 574 * VMS_malloc mechanism. Then it calls this, which first asks the malloc
Me@24 575 * system to disown any state that did use VMS_malloc, and then frees the
Me@24 576 * statck and the processor-struct itself.
Me@24 577 *If the dissipated processor is the sole (remaining) owner of VMS__malloc'd
Me@24 578 * state, then that state gets freed (or sent to recycling) as a side-effect
Me@24 579 * of dis-owning it.
Me@24 580 */
Me@24 581 void
Me@29 582 VMS__handle_dissipate_reqst( VirtProcr *animatingPr )
Me@24 583 {
Me@24 584 //dis-own all locations owned by this processor, causing to be freed
Me@24 585 // any locations that it is (was) sole owner of
Me@29 586 //TODO: implement VMS__malloc system, including "give up ownership"
Me@24 587
Me@24 588 //The dissipate request might still be attached, so remove and free it
Me@38 589 VMS__free_top_and_give_next_request_from( animatingPr );
Me@24 590
Me@24 591 //NOTE: initialData was given to the processor, so should either have
Me@24 592 // been alloc'd with VMS__malloc, or freed by the level above animPr.
Me@24 593 //So, all that's left to free here is the stack and the VirtProcr struc
Me@24 594 // itself
Me@24 595 free( animatingPr->startOfStack );
Me@24 596 free( animatingPr );
Me@24 597 }
Me@24 598
Me@24 599
Me@29 600 //TODO: re-architect so that have clean separation between request handler
Me@29 601 // and master loop, for dissipate, create, shutdown, and other non-semantic
Me@29 602 // requests. Issue is chain: one removes requests from AppVP, one dispatches
Me@29 603 // on type of request, and one handles each type.. but some types require
Me@29 604 // action from both request handler and master loop -- maybe just give the
Me@29 605 // request handler calls like: VMS__handle_X_request_type
Me@24 606
Me@29 607 void
Me@29 608 endOSThreadFn( void *initData, VirtProcr *animatingPr );
Me@29 609
Me@29 610 /*This is called by the semantic layer's request handler when it decides its
Me@29 611 * time to shut down the VMS system. Calling this causes the core loop OS
Me@29 612 * threads to exit, which unblocks the entry-point function that started up
Me@29 613 * VMS, and allows it to grab the result and return to the original single-
Me@29 614 * threaded application.
Me@22 615 *
Me@29 616 *The _VMSMasterEnv is needed by this shut down function, so the create-seed-
Me@29 617 * and-wait function has to free a bunch of stuff after it detects the
Me@29 618 * threads have all died: the masterEnv, the thread-related locations,
Me@29 619 * masterVP any AppVPs that might still be allocated and sitting in the
Me@29 620 * semantic environment, or have been orphaned in the _VMSWorkQ.
Me@29 621 *
Me@29 622 *NOTE: the semantic plug-in is expected to use VMS__malloc_to to get all the
Me@29 623 * locations it needs, and give ownership to masterVP. Then, they will be
Me@29 624 * automatically freed when the masterVP is dissipated. (This happens after
Me@29 625 * the core loop threads have all exited)
Me@22 626 *
Me@29 627 *In here,create one core-loop shut-down processor for each core loop and put
Me@31 628 * them all directly into the readyToAnimateQ.
Me@29 629 *Note, this function can ONLY be called after the semantic environment no
Me@29 630 * longer cares if AppVPs get animated after the point this is called. In
Me@29 631 * other words, this can be used as an abort, or else it should only be
Me@29 632 * called when all AppVPs have finished dissipate requests -- only at that
Me@29 633 * point is it sure that all results have completed.
Me@22 634 */
Me@22 635 void
Me@29 636 VMS__handle_shutdown_reqst( void *dummy, VirtProcr *animatingPr )
Me@8 637 { int coreIdx;
Me@14 638 VirtProcr *shutDownPr;
Me@22 639
Me@29 640 //create the shutdown processors, one for each core loop -- put them
Me@31 641 // directly into the Q -- each core will die when gets one
Me@8 642 for( coreIdx=0; coreIdx < NUM_CORES; coreIdx++ )
Me@8 643 {
Me@29 644 shutDownPr = VMS__create_procr( &endOSThreadFn, NULL );
Me@31 645 writeSRSWQ( shutDownPr, _VMSMasterEnv->readyToAnimateQs[coreIdx] );
Me@8 646 }
Me@22 647
Me@12 648 }
Me@12 649
Me@12 650
/*Am trying to be cute, avoiding IF statement in coreLoop that checks for
 * a special shutdown procr. Ended up with extra-complex shutdown sequence.
 *This function has the sole purpose of setting the stack and framePtr
 * to the coreLoop's stack and framePtr.. it does that then jumps to the
 * core loop's shutdown point -- might be able to just call Pthread_exit
 * from here, but am going back to the pthread's stack and setting everything
 * up just as if it never jumped out, before calling pthread_exit.
 *The end-point of core loop will free the stack and so forth of the
 * processor that animates this function, (this fn is transfering the
 * animator of the AppVP that is in turn animating this function over
 * to core loop function -- note that this slices out a level of virtual
 * processors).
 *
 *NOTE: 32-bit x86 + GCC only; never returns -- the asm jumps away
 */
void
endOSThreadFn( void *initData, VirtProcr *animatingPr )
{ void *jmpPt, *coreLoopStackPtr, *coreLoopFramePtr;

   jmpPt = _VMSMasterEnv->coreLoopEndPt;   //core loop's shutdown label
   coreLoopStackPtr = animatingPr->coreLoopStackPtr;
   coreLoopFramePtr = animatingPr->coreLoopFramePtr;

   //switch esp/ebp back to the pthread's own stack, then jump to the core
   // loop's end point -- as if the core loop had never jumped out
   asm volatile("movl %0, %%eax; \
                 movl %1, %%esp; \
                 movl %2, %%ebp; \
                 jmp %%eax " \
   /* outputs */ : \
   /* inputs */ : "m" (jmpPt), "m"(coreLoopStackPtr), "m"(coreLoopFramePtr)\
   /* clobber */ : "memory", "%eax", "%ebx", "%ecx", "%edx", "%edi","%esi" \
               );
}
Me@29 682
Me@29 683
Me@31 684 /*This is called after the threads have shut down and control has returned
Me@30 685 * to the semantic layer, in the entry point function in the main thread.
Me@30 686 * It has to free anything allocated during VMS_init, and any other alloc'd
Me@24 687 * locations that might be left over.
Me@24 688 */
Me@24 689 void
Me@29 690 VMS__cleanup_after_shutdown()
Me@31 691 {
Me@47 692 VMSQueueStruc **readyToAnimateQs;
Me@31 693 int coreIdx;
Me@31 694 VirtProcr **masterVPs;
Me@31 695 SchedSlot ***allSchedSlots; //ptr to array of ptrs
Me@31 696
Me@31 697 readyToAnimateQs = _VMSMasterEnv->readyToAnimateQs;
Me@31 698 masterVPs = _VMSMasterEnv->masterVPs;
Me@31 699 allSchedSlots = _VMSMasterEnv->allSchedSlots;
Me@31 700
Me@31 701 for( coreIdx = 0; coreIdx < NUM_CORES; coreIdx++ )
Me@24 702 {
Me@31 703 freeSRSWQ( readyToAnimateQs[ coreIdx ] );
Me@31 704
Me@31 705 VMS__handle_dissipate_reqst( masterVPs[ coreIdx ] );
Me@31 706
Me@31 707 freeSchedSlots( allSchedSlots[ coreIdx ] );
Me@24 708 }
Me@31 709
Me@31 710 free( _VMSMasterEnv->readyToAnimateQs );
Me@31 711 free( _VMSMasterEnv->masterVPs );
Me@31 712 free( _VMSMasterEnv->allSchedSlots );
Me@24 713
Me@24 714 free( _VMSMasterEnv );
Me@24 715 }
Me@24 716
Me@24 717
Me@24 718 //===========================================================================
Me@12 719
Me@47 720 inline TSCount getTSC()
Me@12 721 { unsigned int low, high;
Me@12 722 TSCount out;
Me@12 723
Me@12 724 saveTimeStampCountInto( low, high );
Me@12 725 out = high;
Me@12 726 out = (out << 32) + low;
Me@12 727 return out;
Me@12 728 }
Me@12 729