annotate VMS.c @ 200:6db9e4898978

VMS name chgs -- added "WL" "PI" and "int" and split vms.h up
author Me@portablequad
date Sun, 12 Feb 2012 01:49:33 -0800
parents ad8213a8e916
children
rev   line source
Me@0 1 /*
Me@38 2 * Copyright 2010 OpenSourceStewardshipFoundation
Me@0 3 *
Me@0 4 * Licensed under BSD
Me@0 5 */
Me@0 6
Me@0 7 #include <stdio.h>
Me@0 8 #include <stdlib.h>
Me@50 9 #include <string.h>
Me@0 10 #include <malloc.h>
msach@76 11 #include <inttypes.h>
Me@50 12 #include <sys/time.h>
Me@0 13
Me@0 14 #include "VMS.h"
msach@77 15 #include "ProcrContext.h"
Me@0 16
Me@0 17
//Default pthread attributes (NULL == system defaults) for the core-loop threads
#define thdAttrs NULL

//===========================================================================
// Forward declarations of file-local helpers
void
shutdownFn( void *dummy, SlaveVP *dummy2 );

SchedSlot **
create_sched_slots();

void
create_masterEnv();

void
create_the_coreLoop_OS_threads();

MallocProlog *
create_free_list();

void
endOSThreadFn( void *initData, SlaveVP *animatingPr );

//Guard the _VMSMasterEnv->setupComplete handshake: core-loop threads wait
// on suspend_cond until the starter flips setupComplete under suspendLock.
pthread_mutex_t suspendLock = PTHREAD_MUTEX_INITIALIZER;
pthread_cond_t suspend_cond = PTHREAD_COND_INITIALIZER;
Me@26 41
Me@22 42 //===========================================================================
Me@22 43
Me@0 44 /*Setup has two phases:
Me@0 45 * 1) Semantic layer first calls init_VMS, which creates masterEnv, and puts
Me@8 46 * the master virt procr into the work-queue, ready for first "call"
Me@8 47 * 2) Semantic layer then does its own init, which creates the seed virt
Me@8 48 * procr inside the semantic layer, ready to schedule it when
Me@0 49 * asked by the first run of the masterLoop.
Me@0 50 *
Me@0 51 *This part is bit weird because VMS really wants to be "always there", and
Me@0 52 * have applications attach and detach.. for now, this VMS is part of
Me@0 53 * the app, so the VMS system starts up as part of running the app.
Me@0 54 *
Me@8 55 *The semantic layer is isolated from the VMS internals by making the
Me@8 56 * semantic layer do setup to a state that it's ready with its
Me@8 57 * initial virt procrs, ready to schedule them to slots when the masterLoop
Me@0 58 * asks. Without this pattern, the semantic layer's setup would
Me@8 59 * have to modify slots directly to assign the initial virt-procrs, and put
Me@31 60 * them into the readyToAnimateQ itself, breaking the isolation completely.
Me@0 61 *
Me@0 62 *
Me@8 63 *The semantic layer creates the initial virt procr(s), and adds its
Me@8 64 * own environment to masterEnv, and fills in the pointers to
Me@0 65 * the requestHandler and slaveScheduler plug-in functions
Me@8 66 */
Me@8 67
Me@8 68 /*This allocates VMS data structures, populates the master VMSProc,
Me@0 69 * and master environment, and returns the master environment to the semantic
Me@0 70 * layer.
Me@0 71 */
/*Entry point for bringing up the threaded VMS runtime.  Builds the master
 * environment first (so the core loops have something to read), then spawns
 * one OS thread per core to animate the core loops.  Called once by the
 * semantic layer before its own setup.
 */
void
VMS_int__init()
{
   create_masterEnv();
   create_the_coreLoop_OS_threads();
}
Me@28 78
msach@71 79 #ifdef SEQUENTIAL
msach@71 80
Me@28 81 /*To initialize the sequential version, just don't create the threads
Me@28 82 */
/*Sequential-build variant of VMS_int__init: identical except that no OS
 * threads are created -- the single core loop is later run directly on the
 * main thread by the _Seq start function.
 */
void
VMS_int__init_Seq()
{
   create_masterEnv();
}
Me@28 88
msach@71 89 #endif
msach@71 90
/*Builds the global _VMSMasterEnv: seeds the VMS free-list with one big
 * chunk of system memory, then (using only VMS_int__malloc from that point
 * on) creates, for each core, a readyToAnimate queue, a master VP running
 * masterLoop, and a set of scheduler slots.  Also initializes the master
 * and work-stealing locks.  Measurement histograms and probe tables are
 * created only under the MEAS__ / STATS__ compile flags.
 */
void
create_masterEnv()
 { MasterEnv *masterEnv;
   VMSQueueStruc **readyToAnimateQs;
   int coreIdx;
   SlaveVP **masterVPs;
   SchedSlot ***allSchedSlots; //ptr to array of ptrs


      //Make the master env, which holds everything else
      //NOTE(review): malloc result is unchecked -- NULL here crashes on
      // the next statement; confirm abort-on-OOM is the intended policy
   _VMSMasterEnv = malloc( sizeof(MasterEnv) );

      //Very first thing put into the master env is the free-list, seeded
      // with a massive initial chunk of memory.
      //After this, all other mallocs are VMS__malloc.
   _VMSMasterEnv->freeListHead = VMS_ext__create_free_list();


//============================= MEASUREMENT STUFF ========================
#ifdef MEAS__TIME_MALLOC
      //histograms of time spent inside the VMS malloc/free implementations
   _VMSMasterEnv->mallocTimeHist = makeFixedBinHistExt( 100, 0, 30,
                                       "malloc_time_hist");
   _VMSMasterEnv->freeTimeHist = makeFixedBinHistExt( 100, 0, 30,
                                       "free_time_hist");
#endif
#ifdef MEAS__TIME_PLUGIN
      //histograms of time spent inside the language plugin's request handler
   _VMSMasterEnv->reqHdlrLowTimeHist = makeFixedBinHistExt( 100, 0, 200,
                                       "plugin_low_time_hist");
   _VMSMasterEnv->reqHdlrHighTimeHist = makeFixedBinHistExt( 100, 0, 200,
                                       "plugin_high_time_hist");
#endif
//========================================================================

//===================== Only VMS__malloc after this ====================
   masterEnv = (MasterEnv*)_VMSMasterEnv;

      //Make a readyToAnimateQ for each core loop
   readyToAnimateQs = VMS_int__malloc( NUM_CORES * sizeof(VMSQueueStruc *) );
   masterVPs = VMS_int__malloc( NUM_CORES * sizeof(SlaveVP *) );

      //One array for each core, 3 in array, core's masterVP scheds all
      //NOTE(review): element type is SchedSlot ** but sizeof uses
      // SchedSlot * -- works only because all data pointers are one size
   allSchedSlots = VMS_int__malloc( NUM_CORES * sizeof(SchedSlot *) );

   _VMSMasterEnv->numVPsCreated = 0; //used by create procr
   for( coreIdx = 0; coreIdx < NUM_CORES; coreIdx++ )
    {
      readyToAnimateQs[ coreIdx ] = makeVMSQ();

         //Q: should give masterVP core-specific info as its init data?
      masterVPs[ coreIdx ] = VMS_int__create_procr( (VirtProcrFnPtr)&masterLoop, (void*)masterEnv );
      masterVPs[ coreIdx ]->coreAnimatedBy = coreIdx;
      allSchedSlots[ coreIdx ] = create_sched_slots(); //makes for one core
      _VMSMasterEnv->numMasterInARow[ coreIdx ] = 0;
      _VMSMasterEnv->workStealingGates[ coreIdx ] = NULL;
    }
   _VMSMasterEnv->readyToAnimateQs = readyToAnimateQs;
   _VMSMasterEnv->masterVPs = masterVPs;
   _VMSMasterEnv->masterLock = UNLOCKED;
   _VMSMasterEnv->allSchedSlots = allSchedSlots;
   _VMSMasterEnv->workStealingLock = UNLOCKED;


      //Aug 19, 2010: no longer need to place initial masterVP into queue
      // because coreLoop now controls -- animates its masterVP when no work


//============================= MEASUREMENT STUFF ========================
#ifdef STATS__TURN_ON_PROBES
   _VMSMasterEnv->dynIntervalProbesInfo =
       makePrivDynArrayOfSize( (void***)&(_VMSMasterEnv->intervalProbes), 200);

   _VMSMasterEnv->probeNameHashTbl = makeHashTable( 1000, &VMS_int__free );

      //put creation time directly into master env, for fast retrieval
   struct timeval timeStamp;
   gettimeofday( &(timeStamp), NULL);
   _VMSMasterEnv->createPtInSecs =
       timeStamp.tv_sec +(timeStamp.tv_usec/1000000.0);
#endif
#ifdef MEAS__TIME_MASTER_LOCK
   _VMSMasterEnv->masterLockLowTimeHist = makeFixedBinHist( 50, 0, 2,
                                       "master lock low time hist");
   _VMSMasterEnv->masterLockHighTimeHist = makeFixedBinHist( 50, 0, 100,
                                       "master lock high time hist");
#endif

   MakeTheMeasHists();
//========================================================================

 }
Me@0 181
Me@31 182 SchedSlot **
Me@31 183 create_sched_slots()
Me@31 184 { SchedSlot **schedSlots;
Me@0 185 int i;
Me@0 186
Me@200 187 schedSlots = VMS_int__malloc( NUM_SCHED_SLOTS * sizeof(SchedSlot *) );
Me@8 188
Me@1 189 for( i = 0; i < NUM_SCHED_SLOTS; i++ )
Me@0 190 {
Me@200 191 schedSlots[i] = VMS_int__malloc( sizeof(SchedSlot) );
Me@8 192
Me@1 193 //Set state to mean "handling requests done, slot needs filling"
Me@8 194 schedSlots[i]->workIsDone = FALSE;
Me@8 195 schedSlots[i]->needsProcrAssigned = TRUE;
Me@0 196 }
Me@31 197 return schedSlots;
Me@31 198 }
Me@31 199
Me@31 200
Me@31 201 void
Me@31 202 freeSchedSlots( SchedSlot **schedSlots )
Me@31 203 { int i;
Me@31 204 for( i = 0; i < NUM_SCHED_SLOTS; i++ )
Me@31 205 {
Me@200 206 VMS_int__free( schedSlots[i] );
Me@31 207 }
Me@200 208 VMS_int__free( schedSlots );
Me@0 209 }
Me@0 210
Me@8 211
Me@28 212 void
Me@28 213 create_the_coreLoop_OS_threads()
Me@28 214 {
Me@28 215 //========================================================================
Me@28 216 // Create the Threads
Me@28 217 int coreIdx, retCode;
Me@28 218
Me@28 219 //Need the threads to be created suspended, and wait for a signal
Me@28 220 // before proceeding -- gives time after creating to initialize other
Me@28 221 // stuff before the coreLoops set off.
Me@28 222 _VMSMasterEnv->setupComplete = 0;
Me@28 223
Me@28 224 //Make the threads that animate the core loops
Me@28 225 for( coreIdx=0; coreIdx < NUM_CORES; coreIdx++ )
Me@200 226 { coreLoopThdParams[coreIdx] = VMS_int__malloc( sizeof(ThdParams) );
Me@28 227 coreLoopThdParams[coreIdx]->coreNum = coreIdx;
Me@28 228
Me@28 229 retCode =
Me@28 230 pthread_create( &(coreLoopThdHandles[coreIdx]),
Me@28 231 thdAttrs,
Me@28 232 &coreLoop,
Me@28 233 (void *)(coreLoopThdParams[coreIdx]) );
Me@50 234 if(retCode){printf("ERROR creating thread: %d\n", retCode); exit(1);}
Me@28 235 }
Me@28 236 }
Me@28 237
Me@0 238 /*Semantic layer calls this when it want the system to start running..
Me@0 239 *
Me@24 240 *This starts the core loops running then waits for them to exit.
Me@0 241 */
Me@12 242 void
Me@200 243 VMS_WL__start_the_work_then_wait_until_done()
Me@12 244 { int coreIdx;
Me@24 245 //Start the core loops running
Me@25 246
Me@25 247 //tell the core loop threads that setup is complete
Me@25 248 //get lock, to lock out any threads still starting up -- they'll see
Me@25 249 // that setupComplete is true before entering while loop, and so never
Me@25 250 // wait on the condition
Me@26 251 pthread_mutex_lock( &suspendLock );
Me@25 252 _VMSMasterEnv->setupComplete = 1;
Me@26 253 pthread_mutex_unlock( &suspendLock );
Me@26 254 pthread_cond_broadcast( &suspend_cond );
Me@25 255
Me@25 256
Me@24 257 //wait for all to complete
Me@8 258 for( coreIdx=0; coreIdx < NUM_CORES; coreIdx++ )
Me@8 259 {
Me@25 260 pthread_join( coreLoopThdHandles[coreIdx], NULL );
Me@24 261 }
Me@25 262
Me@24 263 //NOTE: do not clean up VMS env here -- semantic layer has to have
Me@24 264 // a chance to clean up its environment first, then do a call to free
Me@24 265 // the Master env and rest of VMS locations
Me@8 266 }
Me@0 267
msach@70 268 #ifdef SEQUENTIAL
Me@28 269 /*Only difference between version with an OS thread pinned to each core and
Me@28 270 * the sequential version of VMS is VMS__init_Seq, this, and coreLoop_Seq.
Me@28 271 */
Me@28 272 void
Me@200 273 VMS_WL__start_the_work_then_wait_until_done_Seq()
Me@28 274 {
Me@28 275 //Instead of un-suspending threads, just call the one and only
Me@28 276 // core loop (sequential version), in the main thread.
Me@28 277 coreLoop_Seq( NULL );
msach@75 278 flushRegisters();
Me@28 279
Me@28 280 }
msach@70 281 #endif
Me@28 282
Me@200 283 inline SlaveVP *
Me@200 284 VMS_int__create_procr( VirtProcrFnPtr fnPtr, void *initialData )
Me@200 285 { SlaveVP *newPr;
msach@76 286 void *stackLocs;
Me@50 287
Me@200 288 newPr = VMS_int__malloc( sizeof(SlaveVP) );
Me@200 289 stackLocs = VMS_int__malloc( VIRT_PROCR_STACK_SIZE );
Me@50 290 if( stackLocs == 0 )
Me@50 291 { perror("VMS__malloc stack"); exit(1); }
Me@50 292
msach@69 293 return create_procr_helper( newPr, fnPtr, initialData, stackLocs );
Me@50 294 }
Me@50 295
Me@50 296 /* "ext" designates that it's for use outside the VMS system -- should only
Me@50 297 * be called from main thread or other thread -- never from code animated by
Me@50 298 * a VMS virtual processor.
Me@50 299 */
Me@200 300 inline SlaveVP *
Me@50 301 VMS_ext__create_procr( VirtProcrFnPtr fnPtr, void *initialData )
Me@200 302 { SlaveVP *newPr;
Me@50 303 char *stackLocs;
Me@50 304
Me@200 305 newPr = malloc( sizeof(SlaveVP) );
Me@50 306 stackLocs = malloc( VIRT_PROCR_STACK_SIZE );
Me@50 307 if( stackLocs == 0 )
Me@50 308 { perror("malloc stack"); exit(1); }
Me@50 309
msach@69 310 return create_procr_helper( newPr, fnPtr, initialData, stackLocs );
Me@50 311 }
Me@50 312
Me@8 313
Me@64 314 /*Anticipating multi-tasking
Me@64 315 */
Me@64 316 void *
Me@200 317 VMS_WL__give_sem_env_for( SlaveVP *animPr )
Me@64 318 {
Me@64 319 return _VMSMasterEnv->semanticEnv;
Me@64 320 }
Me@64 321 //===========================================================================
Me@26 322 /*there is a label inside this function -- save the addr of this label in
Me@0 323 * the callingPr struc, as the pick-up point from which to start the next
Me@0 324 * work-unit for that procr. If turns out have to save registers, then
Me@0 325 * save them in the procr struc too. Then do assembly jump to the CoreLoop's
Me@0 326 * "done with work-unit" label. The procr struc is in the request in the
Me@0 327 * slave that animated the just-ended work-unit, so all the state is saved
Me@0 328 * there, and will get passed along, inside the request handler, to the
Me@0 329 * next work-unit for that procr.
Me@0 330 */
/*Suspends the animating slave VP and transfers control back to the core
 * loop so the master can handle any requests queued on the VP.
 *Marking workIsDone returns ownership of the VP and its sched slot to the
 * master; the assembly switchToCoreLoop call then saves this VP's state
 * and jumps onto the core loop's stack.  When the VP is later rescheduled,
 * execution resumes after that call and this function simply returns --
 * so statement order here is load-bearing.
 */
void
VMS_int__suspend_procr( SlaveVP *animatingPr )
 {

   //The request to master will cause this suspended virt procr to get
   // scheduled again at some future point -- to resume, core loop jumps
   // to the resume point (below), which causes restore of saved regs and
   // "return" from this call.
   //animatingPr->nextInstrPt = &&ResumePt;

   //return ownership of the virt procr and sched slot to Master virt pr
   animatingPr->schedSlot->workIsDone = TRUE;

//=========================== Measurement stuff ========================
#ifdef MEAS__TIME_STAMP_SUSP
   //record time stamp: compare to time-stamp recorded below
   saveLowTimeStampCountInto( animatingPr->preSuspTSCLow );
#endif
//=======================================================================

   //assembly: save this VP's context and jump to the core loop; control
   // comes back here only when the VP is next animated
   switchToCoreLoop(animatingPr);
   flushRegisters();

//=======================================================================

#ifdef MEAS__TIME_STAMP_SUSP
   //NOTE: only take low part of count -- do sanity check when take diff
   saveLowTimeStampCountInto( animatingPr->postSuspTSCLow );
#endif

   return;
 }
Me@0 363
Me@22 364
Me@22 365
Me@50 366 /*For this implementation of VMS, it may not make much sense to have the
Me@50 367 * system of requests for creating a new processor done this way.. but over
Me@50 368 * the scope of single-master, multi-master, mult-tasking, OS-implementing,
Me@50 369 * distributed-memory, and so on, this gives VMS implementation a chance to
Me@50 370 * do stuff before suspend, in the AppVP, and in the Master before the plugin
Me@50 371 * is called, as well as in the lang-lib before this is called, and in the
Me@50 372 * plugin. So, this gives both VMS and language implementations a chance to
Me@50 373 * intercept at various points and do order-dependent stuff.
Me@50 374 *Having a standard VMSNewPrReqData struc allows the language to create and
Me@50 375 * free the struc, while VMS knows how to get the newPr if it wants it, and
Me@50 376 * it lets the lang have lang-specific data related to creation transported
Me@50 377 * to the plugin.
Me@50 378 */
Me@50 379 void
Me@200 380 VMS_WL__send_create_procr_req( void *semReqData, SlaveVP *reqstingPr )
Me@50 381 { VMSReqst req;
Me@50 382
Me@50 383 req.reqType = createReq;
Me@50 384 req.semReqData = semReqData;
Me@50 385 req.nextReqst = reqstingPr->requests;
Me@50 386 reqstingPr->requests = &req;
Me@50 387
Me@200 388 VMS_int__suspend_procr( reqstingPr );
Me@50 389 }
Me@50 390
Me@22 391
Me@38 392 /*
Me@22 393 *This adds a request to dissipate, then suspends the processor so that the
Me@22 394 * request handler will receive the request. The request handler is what
Me@22 395 * does the work of freeing memory and removing the processor from the
Me@22 396 * semantic environment's data structures.
Me@22 397 *The request handler also is what figures out when to shutdown the VMS
Me@22 398 * system -- which causes all the core loop threads to die, and returns from
Me@22 399 * the call that started up VMS to perform the work.
Me@22 400 *
Me@22 401 *This form is a bit misleading to understand if one is trying to figure out
Me@22 402 * how VMS works -- it looks like a normal function call, but inside it
Me@22 403 * sends a request to the request handler and suspends the processor, which
Me@22 404 * jumps out of the VMS__dissipate_procr function, and out of all nestings
Me@22 405 * above it, transferring the work of dissipating to the request handler,
Me@22 406 * which then does the actual work -- causing the processor that animated
Me@22 407 * the call of this function to disappear and the "hanging" state of this
Me@22 408 * function to just poof into thin air -- the virtual processor's trace
Me@22 409 * never returns from this call, but instead the virtual processor's trace
Me@22 410 * gets suspended in this call and all the virt processor's state disap-
Me@22 411 * pears -- making that suspend the last thing in the virt procr's trace.
Me@8 412 */
Me@8 413 void
Me@200 414 VMS_WL__send_dissipate_req( SlaveVP *procrToDissipate )
Me@50 415 { VMSReqst req;
Me@22 416
Me@50 417 req.reqType = dissipate;
Me@50 418 req.nextReqst = procrToDissipate->requests;
Me@50 419 procrToDissipate->requests = &req;
Me@50 420
Me@200 421 VMS_int__suspend_procr( procrToDissipate );
Me@50 422 }
Me@50 423
Me@50 424
Me@50 425 /* "ext" designates that it's for use outside the VMS system -- should only
Me@50 426 * be called from main thread or other thread -- never from code animated by
Me@50 427 * a VMS virtual processor.
Me@50 428 *
Me@50 429 *Use this version to dissipate VPs created outside the VMS system.
Me@50 430 */
Me@50 431 void
Me@200 432 VMS_ext__dissipate_procr( SlaveVP *procrToDissipate )
Me@50 433 {
Me@50 434 //NOTE: initialData was given to the processor, so should either have
Me@50 435 // been alloc'd with VMS__malloc, or freed by the level above animPr.
Me@50 436 //So, all that's left to free here is the stack and the VirtProcr struc
Me@50 437 // itself
Me@50 438 //Note, should not stack-allocate initial data -- no guarantee, in
Me@50 439 // general that creating processor will outlive ones it creates.
Me@50 440 free( procrToDissipate->startOfStack );
Me@50 441 free( procrToDissipate );
Me@50 442 }
Me@50 443
Me@22 444
Me@22 445
Me@53 446 /*This call's name indicates that request is malloc'd -- so req handler
Me@53 447 * has to free any extra requests tacked on before a send, using this.
Me@53 448 *
Me@53 449 * This inserts the semantic-layer's request data into standard VMS carrier
Me@53 450 * request data-struct that is mallocd. The sem request doesn't need to
Me@53 451 * be malloc'd if this is called inside the same call chain before the
Me@53 452 * send of the last request is called.
Me@53 453 *
Me@53 454 *The request handler has to call VMS__free_VMSReq for any of these
Me@22 455 */
Me@22 456 inline void
Me@200 457 VMS_WL__add_sem_request_in_mallocd_VMSReqst( void *semReqData,
Me@200 458 SlaveVP *callingPr )
Me@53 459 { VMSReqst *req;
Me@22 460
Me@200 461 req = VMS_int__malloc( sizeof(VMSReqst) );
Me@53 462 req->reqType = semantic;
Me@53 463 req->semReqData = semReqData;
Me@53 464 req->nextReqst = callingPr->requests;
Me@53 465 callingPr->requests = req;
Me@22 466 }
Me@22 467
Me@50 468 /*This inserts the semantic-layer's request data into standard VMS carrier
Me@50 469 * request data-struct is allocated on stack of this call & ptr to it sent
Me@50 470 * to plugin
Me@50 471 *Then it does suspend, to cause request to be sent.
Me@50 472 */
Me@50 473 inline void
Me@200 474 VMS_WL__send_sem_request( void *semReqData, SlaveVP *callingPr )
Me@50 475 { VMSReqst req;
Me@22 476
Me@50 477 req.reqType = semantic;
Me@50 478 req.semReqData = semReqData;
Me@50 479 req.nextReqst = callingPr->requests;
Me@50 480 callingPr->requests = &req;
Me@50 481
Me@200 482 VMS_int__suspend_procr( callingPr );
Me@50 483 }
Me@50 484
Me@50 485
Me@50 486 inline void
Me@200 487 VMS_WL__send_VMSSem_request( void *semReqData, SlaveVP *callingPr )
Me@50 488 { VMSReqst req;
Me@50 489
Me@50 490 req.reqType = VMSSemantic;
Me@50 491 req.semReqData = semReqData;
Me@50 492 req.nextReqst = callingPr->requests; //gab any other preceeding
Me@50 493 callingPr->requests = &req;
Me@50 494
Me@200 495 VMS_int__suspend_procr( callingPr );
Me@50 496 }
Me@50 497
Me@50 498
Me@50 499 /*
Me@38 500 */
Me@24 501 VMSReqst *
Me@200 502 VMS_PI__take_next_request_out_of( SlaveVP *procrWithReq )
Me@31 503 { VMSReqst *req;
Me@31 504
Me@31 505 req = procrWithReq->requests;
Me@38 506 if( req == NULL ) return NULL;
Me@31 507
Me@31 508 procrWithReq->requests = procrWithReq->requests->nextReqst;
Me@50 509 return req;
Me@24 510 }
Me@22 511
Me@24 512
Me@24 513 inline void *
Me@200 514 VMS_PI__take_sem_reqst_from( VMSReqst *req )
Me@24 515 {
Me@24 516 return req->semReqData;
Me@24 517 }
Me@24 518
Me@24 519
Me@24 520
Me@50 521 /* This is for OS requests and VMS infrastructure requests, such as to create
Me@50 522 * a probe -- a probe is inside the heart of VMS-core, it's not part of any
Me@50 523 * language -- but it's also a semantic thing that's triggered from and used
Me@50 524 * in the application.. so it crosses abstractions.. so, need some special
Me@50 525 * pattern here for handling such requests.
Me@52 526 * Doing this just like it were a second language sharing VMS-core.
Me@52 527 *
Me@50 528 * This is called from the language's request handler when it sees a request
Me@50 529 * of type VMSSemReq
Me@52 530 *
Me@52 531 * TODO: Later change this, to give probes their own separate plugin & have
Me@52 532 * VMS-core steer the request to appropriate plugin
Me@52 533 * Do the same for OS calls -- look later at it..
Me@50 534 */
Me@50 535 void inline
Me@200 536 VMS_PI__handle_VMSSemReq( VMSReqst *req, SlaveVP *requestingPr, void *semEnv,
Me@200 537 ResumeVPFnPtr resumePrFnPtr )
Me@50 538 { VMSSemReq *semReq;
Me@50 539 IntervalProbe *newProbe;
Me@24 540
Me@50 541 semReq = req->semReqData;
Me@24 542
Me@200 543 newProbe = VMS_int__malloc( sizeof(IntervalProbe) );
Me@200 544 newProbe->nameStr = VMS_int__strDup( semReq->nameStr );
Me@50 545 newProbe->hist = NULL;
Me@50 546 newProbe->schedChoiceWasRecorded = FALSE;
Me@53 547
Me@53 548 //This runs in masterVP, so no race-condition worries
Me@50 549 newProbe->probeID =
Me@50 550 addToDynArray( newProbe, _VMSMasterEnv->dynIntervalProbesInfo );
Me@50 551
Me@53 552 requestingPr->dataRetFromReq = newProbe;
Me@50 553
Me@50 554 (*resumePrFnPtr)( requestingPr, semEnv );
Me@22 555 }
Me@22 556
Me@22 557
Me@22 558
Me@24 559 /*This must be called by the request handler plugin -- it cannot be called
Me@24 560 * from the semantic library "dissipate processor" function -- instead, the
Me@50 561 * semantic layer has to generate a request, and the plug-in calls this
Me@24 562 * function.
Me@24 563 *The reason is that this frees the virtual processor's stack -- which is
Me@24 564 * still in use inside semantic library calls!
Me@24 565 *
Me@24 566 *This frees or recycles all the state owned by and comprising the VMS
Me@24 567 * portion of the animating virtual procr. The request handler must first
Me@24 568 * free any semantic data created for the processor that didn't use the
Me@24 569 * VMS_malloc mechanism. Then it calls this, which first asks the malloc
Me@24 570 * system to disown any state that did use VMS_malloc, and then frees the
Me@24 571 * statck and the processor-struct itself.
Me@24 572 *If the dissipated processor is the sole (remaining) owner of VMS__malloc'd
Me@24 573 * state, then that state gets freed (or sent to recycling) as a side-effect
Me@24 574 * of dis-owning it.
Me@24 575 */
Me@24 576 void
Me@200 577 VMS_int__dissipate_procr( SlaveVP *animatingPr )
Me@24 578 {
Me@24 579 //dis-own all locations owned by this processor, causing to be freed
Me@24 580 // any locations that it is (was) sole owner of
Me@29 581 //TODO: implement VMS__malloc system, including "give up ownership"
Me@24 582
Me@24 583
Me@24 584 //NOTE: initialData was given to the processor, so should either have
Me@24 585 // been alloc'd with VMS__malloc, or freed by the level above animPr.
Me@24 586 //So, all that's left to free here is the stack and the VirtProcr struc
Me@24 587 // itself
Me@50 588 //Note, should not stack-allocate initial data -- no guarantee, in
Me@50 589 // general that creating processor will outlive ones it creates.
Me@200 590 VMS_int__free( animatingPr->startOfStack );
Me@200 591 VMS_int__free( animatingPr );
Me@24 592 }
Me@24 593
Me@24 594
Me@53 595 //TODO: look at architecting cleanest separation between request handler
Me@29 596 // and master loop, for dissipate, create, shutdown, and other non-semantic
Me@29 597 // requests. Issue is chain: one removes requests from AppVP, one dispatches
Me@29 598 // on type of request, and one handles each type.. but some types require
Me@29 599 // action from both request handler and master loop -- maybe just give the
Me@29 600 // request handler calls like: VMS__handle_X_request_type
Me@24 601
Me@29 602
Me@29 603 /*This is called by the semantic layer's request handler when it decides its
Me@29 604 * time to shut down the VMS system. Calling this causes the core loop OS
Me@29 605 * threads to exit, which unblocks the entry-point function that started up
Me@29 606 * VMS, and allows it to grab the result and return to the original single-
Me@29 607 * threaded application.
Me@22 608 *
Me@29 609 *The _VMSMasterEnv is needed by this shut down function, so the create-seed-
Me@29 610 * and-wait function has to free a bunch of stuff after it detects the
Me@29 611 * threads have all died: the masterEnv, the thread-related locations,
Me@29 612 * masterVP any AppVPs that might still be allocated and sitting in the
Me@29 613 * semantic environment, or have been orphaned in the _VMSWorkQ.
Me@29 614 *
Me@53 615 *NOTE: the semantic plug-in is expected to use VMS__malloc to get all the
Me@29 616 * locations it needs, and give ownership to masterVP. Then, they will be
Me@53 617 * automatically freed.
Me@22 618 *
Me@29 619 *In here,create one core-loop shut-down processor for each core loop and put
Me@31 620 * them all directly into the readyToAnimateQ.
Me@29 621 *Note, this function can ONLY be called after the semantic environment no
Me@29 622 * longer cares if AppVPs get animated after the point this is called. In
Me@29 623 * other words, this can be used as an abort, or else it should only be
Me@29 624 * called when all AppVPs have finished dissipate requests -- only at that
Me@29 625 * point is it sure that all results have completed.
Me@22 626 */
Me@22 627 void
Me@200 628 VMS_int__shutdown()
Me@8 629 { int coreIdx;
Me@200 630 SlaveVP *shutDownPr;
Me@22 631
Me@29 632 //create the shutdown processors, one for each core loop -- put them
Me@31 633 // directly into the Q -- each core will die when gets one
Me@8 634 for( coreIdx=0; coreIdx < NUM_CORES; coreIdx++ )
Me@50 635 { //Note, this is running in the master
Me@200 636 shutDownPr = VMS_int__create_procr( &endOSThreadFn, NULL );
Me@55 637 writeVMSQ( shutDownPr, _VMSMasterEnv->readyToAnimateQs[coreIdx] );
Me@8 638 }
Me@22 639
Me@12 640 }
Me@12 641
Me@12 642
Me@29 643 /*Am trying to be cute, avoiding IF statement in coreLoop that checks for
Me@29 644 * a special shutdown procr. Ended up with extra-complex shutdown sequence.
Me@29 645 *This function has the sole purpose of setting the stack and framePtr
Me@29 646 * to the coreLoop's stack and framePtr.. it does that then jumps to the
Me@29 647 * core loop's shutdown point -- might be able to just call Pthread_exit
Me@30 648 * from here, but am going back to the pthread's stack and setting everything
Me@29 649 * up just as if it never jumped out, before calling pthread_exit.
Me@29 650 *The end-point of core loop will free the stack and so forth of the
Me@29 651 * processor that animates this function, (this fn is transfering the
Me@29 652 * animator of the AppVP that is in turn animating this function over
Me@29 653 * to core loop function -- note that this slices out a level of virtual
Me@29 654 * processors).
Me@29 655 */
/*Body function animated by each shutdown VP made in VMS_int__shutdown.
 *It never returns normally: the asmTerminateCoreLoop* call (assembly)
 * restores the core loop's own OS-thread stack/frame pointer and jumps to
 * the core loop's shutdown point, as described in the comment above --
 * slicing this VP out of the animation chain.  The core loop's end point
 * then frees this VP's stack etc. before the thread exits.
 */
void
endOSThreadFn( void *initData, SlaveVP *animatingPr )
{
#ifdef SEQUENTIAL
   asmTerminateCoreLoopSeq(animatingPr);
#else
   asmTerminateCoreLoop(animatingPr);
#endif
}
Me@29 665
Me@29 666
Me@53 667 /*This is called from the startup & shutdown
Me@24 668 */
/*Final teardown, called from the startup & shutdown path after all core-
 * loop threads have exited: prints/saves/frees the measurement histograms,
 * then releases the only two allocations that used the system allocator --
 * the VMS free-list's big chunk (everything VMS__malloc'd lives inside it
 * and disappears with it) and the MasterEnv struct itself.
 */
void
VMS_int__cleanup_at_end_of_shutdown()
 {
   //unused
   //VMSQueueStruc **readyToAnimateQs;
   //int coreIdx;
   //VirtProcr **masterVPs;
   //SchedSlot ***allSchedSlots; //ptr to array of ptrs

   //Before getting rid of everything, print out any measurements made
   forAllInDynArrayDo( _VMSMasterEnv->measHistsInfo, (DynArrayFnPtr)&printHist );
   forAllInDynArrayDo( _VMSMasterEnv->measHistsInfo, (DynArrayFnPtr)&saveHistToFile);
   forAllInDynArrayDo( _VMSMasterEnv->measHistsInfo, &freeHist );
#ifdef MEAS__TIME_PLUGIN
   printHist( _VMSMasterEnv->reqHdlrLowTimeHist );
   saveHistToFile( _VMSMasterEnv->reqHdlrLowTimeHist );
   printHist( _VMSMasterEnv->reqHdlrHighTimeHist );
   saveHistToFile( _VMSMasterEnv->reqHdlrHighTimeHist );
   freeHistExt( _VMSMasterEnv->reqHdlrLowTimeHist );
   freeHistExt( _VMSMasterEnv->reqHdlrHighTimeHist );
#endif
#ifdef MEAS__TIME_MALLOC
   printHist( _VMSMasterEnv->mallocTimeHist );
   saveHistToFile( _VMSMasterEnv->mallocTimeHist );
   printHist( _VMSMasterEnv->freeTimeHist );
   saveHistToFile( _VMSMasterEnv->freeTimeHist );
   freeHistExt( _VMSMasterEnv->mallocTimeHist );
   freeHistExt( _VMSMasterEnv->freeTimeHist );
#endif
#ifdef MEAS__TIME_MASTER_LOCK
   printHist( _VMSMasterEnv->masterLockLowTimeHist );
   printHist( _VMSMasterEnv->masterLockHighTimeHist );
#endif
#ifdef MEAS__TIME_MASTER
   //NOTE(review): this config does NOT compile -- coreIdx,
   // readyToAnimateQs, masterVPs and allSchedSlots are commented out in
   // the declarations above; restore them before enabling this flag
   printHist( _VMSMasterEnv->pluginTimeHist );
   for( coreIdx = 0; coreIdx < NUM_CORES; coreIdx++ )
    {
      freeVMSQ( readyToAnimateQs[ coreIdx ] );
      //master VPs were created external to VMS, so use external free
      VMS_int__dissipate_procr( masterVPs[ coreIdx ] );

      freeSchedSlots( allSchedSlots[ coreIdx ] );
    }
#endif
#ifdef MEAS__TIME_STAMP_SUSP
   //NOTE(review): same problem as MEAS__TIME_MASTER above -- references
   // locals that are commented out; will not compile as-is
   printHist( _VMSMasterEnv->pluginTimeHist );
   for( coreIdx = 0; coreIdx < NUM_CORES; coreIdx++ )
    {
      freeVMSQ( readyToAnimateQs[ coreIdx ] );
      //master VPs were created external to VMS, so use external free
      VMS_int__dissipate_procr( masterVPs[ coreIdx ] );

      freeSchedSlots( allSchedSlots[ coreIdx ] );
    }
#endif

   //All the environment data has been allocated with VMS__malloc, so just
   // free its internal big-chunk and all inside it disappear.
/*
   readyToAnimateQs = _VMSMasterEnv->readyToAnimateQs;
   masterVPs = _VMSMasterEnv->masterVPs;
   allSchedSlots = _VMSMasterEnv->allSchedSlots;

   for( coreIdx = 0; coreIdx < NUM_CORES; coreIdx++ )
    {
      freeVMSQ( readyToAnimateQs[ coreIdx ] );
      //master VPs were created external to VMS, so use external free
      VMS__dissipate_procr( masterVPs[ coreIdx ] );

      freeSchedSlots( allSchedSlots[ coreIdx ] );
    }

   VMS__free( _VMSMasterEnv->readyToAnimateQs );
   VMS__free( _VMSMasterEnv->masterVPs );
   VMS__free( _VMSMasterEnv->allSchedSlots );

   //============================= MEASUREMENT STUFF ========================
#ifdef STATS__TURN_ON_PROBES
   freeDynArrayDeep( _VMSMasterEnv->dynIntervalProbesInfo, &VMS__free_probe);
#endif
   //========================================================================
*/
   //These are the only two that use system free
   VMS_ext__free_free_list( _VMSMasterEnv->freeListHead );
   free( (void *)_VMSMasterEnv );
 }
Me@24 755
Me@54 756
Me@54 757 //================================
Me@54 758
Me@54 759
Me@54 760 /*Later, improve this -- for now, just exits the application after printing
Me@54 761 * the error message.
Me@54 762 */
Me@54 763 void
Me@200 764 VMS_PI__throw_exception( char *msgStr, SlaveVP *reqstPr, VMSExcp *excpData )
Me@54 765 {
msach@69 766 printf("%s",msgStr);
Me@54 767 fflush(stdin);
Me@54 768 exit(1);
Me@54 769 }
Me@54 770