annotate VMS.c @ 212:df00af7eb307

try 40 cores
author Nina Engelhardt <nengel@mailbox.tu-berlin.de>
date Fri, 09 Mar 2012 18:58:33 +0100
parents f6d81915512c
children
rev   line source
Me@0 1 /*
Me@38 2 * Copyright 2010 OpenSourceStewardshipFoundation
Me@0 3 *
Me@0 4 * Licensed under BSD
Me@0 5 */
Me@0 6
Me@0 7 #include <stdio.h>
Me@0 8 #include <stdlib.h>
Me@50 9 #include <string.h>
Me@0 10 #include <malloc.h>
msach@76 11 #include <inttypes.h>
Me@50 12 #include <sys/time.h>
Me@0 13
Me@0 14 #include "VMS.h"
msach@77 15 #include "ProcrContext.h"
nengel@197 16 #include "../../C_Libraries/Queue_impl/PrivateQueue.h"
nengel@197 17 #include "../../C_Libraries/Histogram/Histogram.h"
Me@0 18
Nina@109 19 #include <unistd.h>
Nina@109 20 #include <fcntl.h>
Nina@109 21 #include <linux/types.h>
engelhardt@108 22 #include <linux/perf_event.h>
Nina@109 23 #include <errno.h>
Nina@109 24 #include <sys/syscall.h>
Nina@109 25 #include <linux/prctl.h>
#include <sys/prctl.h> /*declares prctl() itself; linux/prctl.h has only the constants*/
Nina@109 26
Me@0 27
Me@26 28 #define thdAttrs NULL
Me@26 29
Me@22 30 //===========================================================================
Me@22 31 void
Me@22 32 shutdownFn( void *dummy, VirtProcr *dummy2 );
Me@22 33
Me@31 34 SchedSlot **
Me@31 35 create_sched_slots();
Me@22 36
Me@28 37 void
Me@28 38 create_masterEnv();
Me@28 39
Me@28 40 void
Me@28 41 create_the_coreLoop_OS_threads();
Me@28 42
Me@50 43 MallocProlog *
Me@50 44 create_free_list();
Me@50 45
Me@53 46 void
Me@53 47 endOSThreadFn( void *initData, VirtProcr *animatingPr );
Me@50 48
Me@26 49 pthread_mutex_t suspendLock = PTHREAD_MUTEX_INITIALIZER;
Me@26 50 pthread_cond_t suspend_cond = PTHREAD_COND_INITIALIZER;
Me@26 51
Me@22 52 //===========================================================================
Me@22 53
Me@0 54 /*Setup has two phases:
Me@0 55 * 1) Semantic layer first calls VMS__init, which creates masterEnv, and puts
Me@8 56 * the master virt procr into the work-queue, ready for first "call"
Me@8 57 * 2) Semantic layer then does its own init, which creates the seed virt
Me@8 58 * procr inside the semantic layer, ready to schedule it when
Me@0 59 * asked by the first run of the masterLoop.
Me@0 60 *
Me@0 61 *This part is a bit weird because VMS really wants to be "always there", and
Me@0 62 * have applications attach and detach. For now, this VMS is part of
Me@0 63 * the app, so the VMS system starts up as part of running the app.
Me@0 64 *
Me@8 65 *The semantic layer is isolated from the VMS internals by making the
Me@8 66 * semantic layer do setup to a state that it's ready with its
Me@8 67 * initial virt procrs, ready to schedule them to slots when the masterLoop
Me@0 68 * asks. Without this pattern, the semantic layer's setup would
Me@8 69 * have to modify slots directly to assign the initial virt-procrs, and put
Me@31 70 * them into the readyToAnimateQ itself, breaking the isolation completely.
Me@0 71 *
Me@0 72 *
Me@8 73 *The semantic layer creates the initial virt procr(s), and adds its
Me@8 74 * own environment to masterEnv, and fills in the pointers to
Me@0 75 * the requestHandler and slaveScheduler plug-in functions
Me@8 76 */
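/*A minimal sketch of the intended startup sequence, as seen from the
 * semantic layer's entry point (the SemLayer__* names are illustrative,
 * not part of VMS):
 *
 *   VMS__init();                    //phase 1: masterEnv + core-loop threads
 *   SemLayer__create_seed_procr();  //phase 2: seed VP, ready to schedule
 *   VMS__start_the_work_then_wait_until_done();
 *   SemLayer__cleanup();            //sem layer frees its env first
 *   VMS__cleanup_at_end_of_shutdown();
 */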
Me@8 77
Me@8 78 /*This allocates VMS data structures, populates the master VMSProc,
Me@0 79 * and master environment, and returns the master environment to the semantic
Me@0 80 * layer.
Me@0 81 */
Me@8 82 void
Me@8 83 VMS__init()
Me@28 84 {
Me@28 85 create_masterEnv();
Me@28 86 create_the_coreLoop_OS_threads();
Me@28 87 }
Me@28 88
msach@71 89 #ifdef SEQUENTIAL
msach@71 90
Me@28 91 /*To initialize the sequential version, just don't create the threads
Me@28 92 */
Me@28 93 void
Me@28 94 VMS__init_Seq()
Me@28 95 {
Me@28 96 create_masterEnv();
Me@28 97 }
Me@28 98
msach@71 99 #endif
msach@71 100
Me@28 101 void
Me@28 102 create_masterEnv()
Me@31 103 { MasterEnv *masterEnv;
Me@55 104 VMSQueueStruc **readyToAnimateQs;
Me@31 105 int coreIdx;
Me@31 106 VirtProcr **masterVPs;
Me@31 107 SchedSlot ***allSchedSlots; //ptr to array of ptrs
Me@53 108
Me@53 109
Me@31 110 //Make the master env, which holds everything else
Me@1 111 _VMSMasterEnv = malloc( sizeof(MasterEnv) );
Me@53 112
Me@53 113 //Very first thing put into the master env is the free-list, seeded
Me@53 114 // with a massive initial chunk of memory.
Me@53 115 //After this, all other mallocs are VMS__malloc.
Me@53 116 _VMSMasterEnv->freeListHead = VMS_ext__create_free_list();
Me@53 117
Me@65 118
Me@65 119 //============================= MEASUREMENT STUFF ========================
Me@65 120 #ifdef MEAS__TIME_MALLOC
msach@160 121 _VMSMasterEnv->mallocTimeHist = makeFixedBinHistExt( 100, 0, 30,
msach@79 122 "malloc_time_hist");
msach@160 123 _VMSMasterEnv->freeTimeHist = makeFixedBinHistExt( 100, 0, 30,
msach@79 124 "free_time_hist");
Me@65 125 #endif
Me@68 126 #ifdef MEAS__TIME_PLUGIN
msach@160 127 _VMSMasterEnv->reqHdlrLowTimeHist = makeFixedBinHistExt( 100, 0, 200,
msach@79 128 "plugin_low_time_hist");
msach@160 129 _VMSMasterEnv->reqHdlrHighTimeHist = makeFixedBinHistExt( 100, 0, 200,
msach@79 130 "plugin_high_time_hist");
Me@68 131 #endif
Me@65 132 //========================================================================
Me@65 133
Me@53 134 //===================== Only VMS__malloc after this ====================
msach@69 135 masterEnv = (MasterEnv*)_VMSMasterEnv;
Me@31 136
Me@31 137 //Make a readyToAnimateQ for each core loop
Me@55 138 readyToAnimateQs = VMS__malloc( NUM_CORES * sizeof(VMSQueueStruc *) );
Me@53 139 masterVPs = VMS__malloc( NUM_CORES * sizeof(VirtProcr *) );
Me@0 140
Me@31 141 //One array for each core, NUM_SCHED_SLOTS in each; the core's masterVP scheds all of them
Me@53 142 allSchedSlots = VMS__malloc( NUM_CORES * sizeof(SchedSlot **) );
Me@0 143
Me@53 144 _VMSMasterEnv->numProcrsCreated = 0; //used by create procr
Me@31 145 for( coreIdx = 0; coreIdx < NUM_CORES; coreIdx++ )
Me@53 146 {
Me@55 147 readyToAnimateQs[ coreIdx ] = makeVMSQ();
Me@31 148
Me@50 149 //Q: should give masterVP core-specific info as its init data?
msach@76 150 masterVPs[ coreIdx ] = VMS__create_procr( (VirtProcrFnPtr)&masterLoop, (void*)masterEnv );
Me@31 151 masterVPs[ coreIdx ]->coreAnimatedBy = coreIdx;
nengel@193 152 masterVPs[ coreIdx ]->isMasterVP = TRUE;
Me@31 153 allSchedSlots[ coreIdx ] = create_sched_slots(); //makes for one core
Me@53 154 _VMSMasterEnv->numMasterInARow[ coreIdx ] = 0;
Me@55 155 _VMSMasterEnv->workStealingGates[ coreIdx ] = NULL;
Me@31 156 }
Me@31 157 _VMSMasterEnv->readyToAnimateQs = readyToAnimateQs;
Me@31 158 _VMSMasterEnv->masterVPs = masterVPs;
Me@50 159 _VMSMasterEnv->masterLock = UNLOCKED;
Me@31 160 _VMSMasterEnv->allSchedSlots = allSchedSlots;
Me@55 161 _VMSMasterEnv->workStealingLock = UNLOCKED;
Me@28 162
Me@12 163
Me@31 164 //Aug 19, 2010: no longer need to place initial masterVP into queue
Me@31 165 // because coreLoop now controls -- animates its masterVP when no work
Me@31 166
Me@30 167
Me@50 168 //============================= MEASUREMENT STUFF ========================
Me@50 169 #ifdef STATS__TURN_ON_PROBES
Me@50 170 _VMSMasterEnv->dynIntervalProbesInfo =
msach@69 171 makePrivDynArrayOfSize( (void***)&(_VMSMasterEnv->intervalProbes), 200);
Me@30 172
Me@53 173 _VMSMasterEnv->probeNameHashTbl = makeHashTable( 1000, &VMS__free );
Me@53 174
Me@53 175 //put creation time directly into master env, for fast retrieval
Me@50 176 struct timeval timeStamp;
Me@50 177 gettimeofday( &(timeStamp), NULL);
Me@50 178 _VMSMasterEnv->createPtInSecs =
Me@50 179 timeStamp.tv_sec +(timeStamp.tv_usec/1000000.0);
Me@50 180 #endif
Me@65 181 #ifdef MEAS__TIME_MASTER_LOCK
Me@65 182 _VMSMasterEnv->masterLockLowTimeHist = makeFixedBinHist( 50, 0, 2,
Me@65 183 "master lock low time hist");
Me@68 184 _VMSMasterEnv->masterLockHighTimeHist = makeFixedBinHist( 50, 0, 100,
Me@65 185 "master lock high time hist");
Me@65 186 #endif
Me@68 187
msach@76 188 MakeTheMeasHists();
engelhardt@108 189
Nina@166 190
Nina@166 191 #ifdef DETECT_LOOP_GRAPH
nengel@177 192
Nina@129 193 #endif
Nina@166 194
Nina@109 195 #ifdef MEAS__PERF_COUNTERS
engelhardt@108 196 struct perf_event_attr hw_event;
Nina@109 197 memset(&hw_event,0,sizeof(hw_event));
engelhardt@108 198 hw_event.type = PERF_TYPE_HARDWARE;
Nina@109 199 hw_event.size = sizeof(hw_event);
engelhardt@108 200 hw_event.disabled = 1;
Nina@109 201 hw_event.freq = 0;
engelhardt@108 202 hw_event.inherit = 1; /* children inherit it */
engelhardt@108 203 hw_event.pinned = 1; /* must always be on PMU */
engelhardt@108 204 hw_event.exclusive = 0; /* only group on PMU */
engelhardt@108 205 hw_event.exclude_user = 0; /* do count user-space */
nengel@211 206 hw_event.exclude_kernel = 0; /* ditto kernel */
nengel@211 207 hw_event.exclude_hv = 0; /* ditto hypervisor */
engelhardt@108 208 hw_event.exclude_idle = 0; /* count when idle, too */
engelhardt@108 209 hw_event.mmap = 0; /* no mmap data */
engelhardt@108 210 hw_event.comm = 0; /* no comm data */
engelhardt@108 211
Nina@109 212
engelhardt@108 213 for( coreIdx = 0; coreIdx < NUM_CORES; coreIdx++ )
engelhardt@108 214 {
Nina@109 215 hw_event.config = PERF_COUNT_HW_CPU_CYCLES; //0: cycles
engelhardt@108 216 _VMSMasterEnv->cycles_counter_fd[coreIdx] = syscall(__NR_perf_event_open, &hw_event,
engelhardt@108 217 0,//pid_t pid,
Nina@167 218 coreIdx,//int cpu,
engelhardt@108 219 -1,//int group_fd,
engelhardt@108 220 0//unsigned long flags
engelhardt@108 221 );
Nina@109 222 if (_VMSMasterEnv->cycles_counter_fd[coreIdx]<0){
Nina@109 223 fprintf(stderr,"On core %d: ",coreIdx);
Nina@109 224 perror("Failed to open cycles counter");
Nina@109 225 }
Nina@109 226 hw_event.config = PERF_COUNT_HW_INSTRUCTIONS; //1: instrs
engelhardt@108 227 _VMSMasterEnv->instrs_counter_fd[coreIdx] = syscall(__NR_perf_event_open, &hw_event,
engelhardt@108 228 0,//pid_t pid,
Nina@167 229 coreIdx,//int cpu,
engelhardt@108 230 -1,//int group_fd,
engelhardt@108 231 0//unsigned long flags
engelhardt@108 232 );
Nina@109 233 if (_VMSMasterEnv->instrs_counter_fd[coreIdx]<0){
Nina@109 234 fprintf(stderr,"On core %d: ",coreIdx);
Nina@109 235 perror("Failed to open instrs counter");
Nina@109 236 }
engelhardt@108 237 }
nengel@211 238 //uint64 tmpc,tmpi;
nengel@211 239 //saveCyclesAndInstrs(0,tmpc,tmpi);
nengel@211 240 //printf("Start: cycles = %llu, instrs = %llu\n",tmpc,tmpi);
engelhardt@108 241 #endif
engelhardt@108 242
Me@50 243 //========================================================================
Me@38 244
Me@0 245 }
Me@0 246
Me@31 247 SchedSlot **
Me@31 248 create_sched_slots()
Me@31 249 { SchedSlot **schedSlots;
Me@0 250 int i;
Me@0 251
Me@53 252 schedSlots = VMS__malloc( NUM_SCHED_SLOTS * sizeof(SchedSlot *) );
Me@8 253
Me@1 254 for( i = 0; i < NUM_SCHED_SLOTS; i++ )
Me@0 255 {
Me@53 256 schedSlots[i] = VMS__malloc( sizeof(SchedSlot) );
Me@8 257
Me@1 258 //Set state to mean "handling requests done, slot needs filling"
Me@8 259 schedSlots[i]->workIsDone = FALSE;
Me@8 260 schedSlots[i]->needsProcrAssigned = TRUE;
Me@0 261 }
Me@31 262 return schedSlots;
Me@31 263 }
Me@31 264
Me@31 265
Me@31 266 void
Me@31 267 freeSchedSlots( SchedSlot **schedSlots )
Me@31 268 { int i;
Me@31 269 for( i = 0; i < NUM_SCHED_SLOTS; i++ )
Me@31 270 {
Me@53 271 VMS__free( schedSlots[i] );
Me@31 272 }
Me@53 273 VMS__free( schedSlots );
Me@0 274 }
Me@0 275
Me@8 276
Me@28 277 void
Me@28 278 create_the_coreLoop_OS_threads()
Me@28 279 {
Me@28 280 //========================================================================
Me@28 281 // Create the Threads
Me@28 282 int coreIdx, retCode;
Me@28 283
Me@28 284 //Need the threads to be created suspended, and wait for a signal
Me@28 285 // before proceeding -- gives time after creating to initialize other
Me@28 286 // stuff before the coreLoops set off.
Me@28 287 _VMSMasterEnv->setupComplete = 0;
Me@28 288
Me@28 289 //Make the threads that animate the core loops
Me@28 290 for( coreIdx=0; coreIdx < NUM_CORES; coreIdx++ )
nengel@211 291 { coreLoopThdParams[coreIdx] = VMS__malloc( sizeof(ThdParams) + CACHE_LINE ); //make sure there is no false sharing
Me@28 292 coreLoopThdParams[coreIdx]->coreNum = coreIdx;
nengel@211 293 coreLoopThdParams[coreIdx]->sent_ctr = 0;
nengel@211 294 coreLoopThdParams[coreIdx]->ret_tsc = 0;
Me@28 295
Me@28 296 retCode =
Me@28 297 pthread_create( &(coreLoopThdHandles[coreIdx]),
Me@28 298 thdAttrs,
Me@28 299 &coreLoop,
Me@28 300 (void *)(coreLoopThdParams[coreIdx]) );
Me@50 301 if(retCode){printf("ERROR creating thread: %d\n", retCode); exit(1);}
Me@28 302 }
nengel@211 303 prctl(PR_TASK_PERF_EVENTS_ENABLE);
Me@28 304 }
Me@28 305
Me@0 306 /*Semantic layer calls this when it wants the system to start running.
Me@0 307 *
Me@24 308 *This starts the core loops running then waits for them to exit.
Me@0 309 */
Me@12 310 void
Me@24 311 VMS__start_the_work_then_wait_until_done()
Me@12 312 { int coreIdx;
Me@24 313 //Start the core loops running
Me@25 314
Me@25 315 //tell the core loop threads that setup is complete
Me@25 316 //get lock, to lock out any threads still starting up -- they'll see
Me@25 317 // that setupComplete is true before entering while loop, and so never
Me@25 318 // wait on the condition
Me@26 319 pthread_mutex_lock( &suspendLock );
Me@25 320 _VMSMasterEnv->setupComplete = 1;
Me@26 321 pthread_mutex_unlock( &suspendLock );
Me@26 322 pthread_cond_broadcast( &suspend_cond );
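//The wait side lives in coreLoop and is the standard condition-variable
// pattern (a sketch of the intent, not the exact code there):
//   pthread_mutex_lock( &suspendLock );
//   while( !_VMSMasterEnv->setupComplete )
//       pthread_cond_wait( &suspend_cond, &suspendLock );
//   pthread_mutex_unlock( &suspendLock );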
Me@25 323
Me@25 324
Me@24 325 //wait for all to complete
Me@8 326 for( coreIdx=0; coreIdx < NUM_CORES; coreIdx++ )
Me@8 327 {
Me@25 328 pthread_join( coreLoopThdHandles[coreIdx], NULL );
Me@24 329 }
Me@25 330
Me@24 331 //NOTE: do not clean up VMS env here -- semantic layer has to have
Me@24 332 // a chance to clean up its environment first, then do a call to free
Me@24 333 // the Master env and rest of VMS locations
Me@8 334 }
Me@0 335
msach@70 336 #ifdef SEQUENTIAL
Me@28 337 /*The only difference between the version with an OS thread pinned to each
Me@28 338 * core and the sequential version of VMS is VMS__init_Seq, this, and coreLoop_Seq.
Me@28 339 */
Me@28 340 void
Me@28 341 VMS__start_the_work_then_wait_until_done_Seq()
Me@28 342 {
Me@28 343 //Instead of un-suspending threads, just call the one and only
Me@28 344 // core loop (sequential version), in the main thread.
Me@28 345 coreLoop_Seq( NULL );
msach@75 346 flushRegisters();
Me@28 347
Me@28 348 }
msach@70 349 #endif
Me@28 350
Me@50 351 inline VirtProcr *
Me@50 352 VMS__create_procr( VirtProcrFnPtr fnPtr, void *initialData )
Me@50 353 { VirtProcr *newPr;
msach@76 354 void *stackLocs;
Me@50 355
Me@50 356 newPr = VMS__malloc( sizeof(VirtProcr) );
Me@50 357 stackLocs = VMS__malloc( VIRT_PROCR_STACK_SIZE );
Me@50 358 if( stackLocs == 0 )
Me@50 359 { perror("VMS__malloc stack"); exit(1); }
Me@50 360
msach@69 361 return create_procr_helper( newPr, fnPtr, initialData, stackLocs );
Me@50 362 }
Me@50 363
Me@50 364 /* "ext" designates that it's for use outside the VMS system -- should only
Me@50 365 * be called from main thread or other thread -- never from code animated by
Me@50 366 * a VMS virtual processor.
Me@50 367 */
Me@50 368 inline VirtProcr *
Me@50 369 VMS_ext__create_procr( VirtProcrFnPtr fnPtr, void *initialData )
Me@50 370 { VirtProcr *newPr;
Me@50 371 char *stackLocs;
Me@50 372
Me@50 373 newPr = malloc( sizeof(VirtProcr) );
Me@50 374 stackLocs = malloc( VIRT_PROCR_STACK_SIZE );
Me@50 375 if( stackLocs == 0 )
Me@50 376 { perror("malloc stack"); exit(1); }
Me@50 377
msach@69 378 return create_procr_helper( newPr, fnPtr, initialData, stackLocs );
Me@50 379 }
Me@50 380
Me@8 381
Me@64 382 /*Anticipating multi-tasking
Me@64 383 */
Me@64 384 void *
Me@64 385 VMS__give_sem_env_for( VirtProcr *animPr )
Me@64 386 {
Me@64 387 return _VMSMasterEnv->semanticEnv;
Me@64 388 }
Me@64 389 //===========================================================================
Me@26 390 /*There is a label inside this function -- save the addr of this label in
Me@0 391 * the callingPr struc, as the pick-up point from which to start the next
Me@0 392 * work-unit for that procr. If it turns out registers must be saved, then
Me@0 393 * save them in the procr struc too. Then do assembly jump to the CoreLoop's
Me@0 394 * "done with work-unit" label. The procr struc is in the request in the
Me@0 395 * slave that animated the just-ended work-unit, so all the state is saved
Me@0 396 * there, and will get passed along, inside the request handler, to the
Me@0 397 * next work-unit for that procr.
Me@0 398 */
Me@8 399 void
Me@38 400 VMS__suspend_procr( VirtProcr *animatingPr )
Me@55 401 {
Me@30 402
Me@30 403 //The request to master will cause this suspended virt procr to get
Me@30 404 // scheduled again at some future point -- to resume, core loop jumps
Me@30 405 // to the resume point (below), which causes restore of saved regs and
Me@30 406 // "return" from this call.
msach@71 407 //animatingPr->nextInstrPt = &&ResumePt;
Me@30 408
Me@30 409 //return ownership of the virt procr and sched slot to Master virt pr
Me@38 410 animatingPr->schedSlot->workIsDone = TRUE;
Me@1 411
Me@41 412 //=========================== Measurement stuff ========================
Me@38 413 #ifdef MEAS__TIME_STAMP_SUSP
Me@41 414 //record time stamp: compare to time-stamp recorded below
Me@38 415 saveLowTimeStampCountInto( animatingPr->preSuspTSCLow );
Me@38 416 #endif
nengel@186 417 #ifdef MEAS__PERF_COUNTERS
nengel@186 418 //start work
nengel@186 419 uint64 cycles,instrs;
nengel@186 420 saveCyclesAndInstrs(animatingPr->coreAnimatedBy,cycles, instrs);
nengel@193 421 (*(_VMSMasterEnv->counterHandler))(HwResponderInvocation_start,animatingPr->procrID,animatingPr->numTimesScheduled,animatingPr,cycles,instrs);
nengel@186 422 #endif
Me@41 423 //=======================================================================
Me@30 424
msach@71 425 switchToCoreLoop(animatingPr);
msach@71 426 flushRegisters();
Me@55 427
Me@55 428 //=======================================================================
Me@30 429
Me@38 430 #ifdef MEAS__TIME_STAMP_SUSP
Me@41 431 //NOTE: only take low part of count -- do sanity check when take diff
Me@38 432 saveLowTimeStampCountInto( animatingPr->postSuspTSCLow );
Me@38 433 #endif
Me@30 434
Me@0 435 return;
Me@0 436 }
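/*The round-trip, as seen by a work-unit (a sketch of the flow, pieced
 * together from this file): the VP tacks a VMSReqst onto its own
 * ->requests list, calls VMS__suspend_procr, the masterVP pops the
 * request via VMS__take_next_request_out_of and hands it to the plugin,
 * and the plugin's resume call makes the VP schedulable again, so its
 * trace continues just after the suspend.
 */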
Me@0 437
Me@22 438
Me@22 439
Me@50 440 /*For this implementation of VMS, it may not make much sense to have the
Me@50 441 * system of requests for creating a new processor done this way.. but over
Me@50 442 * the scope of single-master, multi-master, multi-tasking, OS-implementing,
Me@50 443 * distributed-memory, and so on, this gives VMS implementation a chance to
Me@50 444 * do stuff before suspend, in the AppVP, and in the Master before the plugin
Me@50 445 * is called, as well as in the lang-lib before this is called, and in the
Me@50 446 * plugin. So, this gives both VMS and language implementations a chance to
Me@50 447 * intercept at various points and do order-dependent stuff.
Me@50 448 *Having a standard VMSNewPrReqData struc allows the language to create and
Me@50 449 * free the struc, while VMS knows how to get the newPr if it wants it, and
Me@50 450 * it lets the lang have lang-specific data related to creation transported
Me@50 451 * to the plugin.
Me@50 452 */
msach@160 453 void
msach@160 454 VMS__send_create_procr_req( void *semReqData, VirtProcr *reqstingPr )
Me@50 455 { VMSReqst req;
Me@50 456
Me@50 457 req.reqType = createReq;
Me@50 458 req.semReqData = semReqData;
Me@50 459 req.nextReqst = reqstingPr->requests;
Me@50 460 reqstingPr->requests = &req;
Me@50 461
Me@50 462 VMS__suspend_procr( reqstingPr );
Me@50 463 }
Me@50 464
Me@22 465
Me@38 466 /*
Me@22 467 *This adds a request to dissipate, then suspends the processor so that the
Me@22 468 * request handler will receive the request. The request handler is what
Me@22 469 * does the work of freeing memory and removing the processor from the
Me@22 470 * semantic environment's data structures.
Me@22 471 *The request handler also is what figures out when to shutdown the VMS
Me@22 472 * system -- which causes all the core loop threads to die, and returns from
Me@22 473 * the call that started up VMS to perform the work.
Me@22 474 *
Me@22 475 *This form is a bit misleading if one is trying to figure out
Me@22 476 * how VMS works -- it looks like a normal function call, but inside it
Me@22 477 * sends a request to the request handler and suspends the processor, which
Me@22 478 * jumps out of the VMS__dissipate_procr function, and out of all nestings
Me@22 479 * above it, transferring the work of dissipating to the request handler,
Me@22 480 * which then does the actual work -- causing the processor that animated
Me@22 481 * the call of this function to disappear and the "hanging" state of this
Me@22 482 * function to just poof into thin air -- the virtual processor's trace
Me@22 483 * never returns from this call; instead, the trace
Me@22 484 * gets suspended in this call and all the virt processor's state disap-
Me@22 485 * pears -- making that suspend the last thing in the virt procr's trace.
Me@8 486 */
msach@160 487 void
msach@160 488 VMS__send_dissipate_req( VirtProcr *procrToDissipate )
Me@50 489 { VMSReqst req;
Me@22 490
Me@50 491 req.reqType = dissipate;
Me@50 492 req.nextReqst = procrToDissipate->requests;
Me@50 493 procrToDissipate->requests = &req;
Me@50 494
Me@22 495 VMS__suspend_procr( procrToDissipate );
Me@50 496 }
Me@50 497
Me@50 498
Me@50 499 /* "ext" designates that it's for use outside the VMS system -- should only
Me@50 500 * be called from main thread or other thread -- never from code animated by
Me@50 501 * a VMS virtual processor.
Me@50 502 *
Me@50 503 *Use this version to dissipate VPs created outside the VMS system.
Me@50 504 */
Me@50 505 void
Me@50 506 VMS_ext__dissipate_procr( VirtProcr *procrToDissipate )
Me@50 507 {
Me@50 508 //NOTE: initialData was given to the processor, so should either have
Me@50 509 // been alloc'd with VMS__malloc, or freed by the level above animPr.
Me@50 510 //So, all that's left to free here is the stack and the VirtProcr struc
Me@50 511 // itself
Me@50 512 //Note, should not stack-allocate initial data -- no guarantee, in
Me@50 513 // general, that the creating processor will outlive the ones it creates.
Me@50 514 free( procrToDissipate->startOfStack );
Me@50 515 free( procrToDissipate );
Me@50 516 }
Me@50 517
Me@22 518
Me@22 519
Me@53 520 /*This call's name indicates that the request is malloc'd -- so the req
Me@53 521 * handler has to free any extra requests tacked on this way before a send.
Me@53 522 *
Me@53 523 *This inserts the semantic-layer's request data into a standard VMS carrier
Me@53 524 * request data-struct that is malloc'd. The sem request data itself doesn't
Me@53 525 * need to be malloc'd if this is called inside the same call chain, before
Me@53 526 * the send of the last request.
Me@53 527 *
Me@53 528 *The request handler has to call VMS__free_VMSReq for any of these
Me@22 529 */
Me@22 530 inline void
Me@53 531 VMS__add_sem_request_in_mallocd_VMSReqst( void *semReqData,
Me@53 532 VirtProcr *callingPr )
Me@53 533 { VMSReqst *req;
Me@22 534
Me@53 535 req = VMS__malloc( sizeof(VMSReqst) );
Me@53 536 req->reqType = semantic;
Me@53 537 req->semReqData = semReqData;
Me@53 538 req->nextReqst = callingPr->requests;
Me@53 539 callingPr->requests = req;
Me@22 540 }
Me@22 541
Me@50 542 /*This inserts the semantic-layer's request data into a standard VMS carrier
Me@50 543 * request data-struct that is allocated on the stack of this call; a ptr to
Me@50 544 * it is sent to the plugin.
Me@50 545 *Then it does suspend, to cause request to be sent.
Me@50 546 */
msach@160 547 inline void
msach@160 548 VMS__send_sem_request( void *semReqData, VirtProcr *callingPr )
Me@50 549 { VMSReqst req;
Me@22 550
Me@50 551 req.reqType = semantic;
Me@50 552 req.semReqData = semReqData;
Me@50 553 req.nextReqst = callingPr->requests;
Me@50 554 callingPr->requests = &req;
Me@50 555
Me@50 556 VMS__suspend_procr( callingPr );
Me@50 557 }
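/*A semantic-layer blocking primitive would be built on this, roughly like
 * so (the SemLayer names and SemReq fields are illustrative, not part of
 * VMS):
 *
 *   void SemLayer__recv_from( SemReq *req, VirtProcr *callingPr )
 *   { req->kind = RECV_FROM;
 *     VMS__send_sem_request( req, callingPr ); //suspends; returns only
 *   }                           // after the plugin resumes callingPr
 */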
Me@50 558
Me@50 559
msach@160 560 inline void
msach@160 561 VMS__send_VMSSem_request( void *semReqData, VirtProcr *callingPr )
Me@50 562 { VMSReqst req;
Me@50 563
Me@50 564 req.reqType = VMSSemantic;
Me@50 565 req.semReqData = semReqData;
Me@50 566 req.nextReqst = callingPr->requests; //grab any other preceding
Me@50 567 callingPr->requests = &req;
Me@50 568
Me@50 569 VMS__suspend_procr( callingPr );
Me@50 570 }
Me@50 571
Me@50 572
Me@50 573 /*
Me@38 574 */
Me@24 575 VMSReqst *
Me@50 576 VMS__take_next_request_out_of( VirtProcr *procrWithReq )
Me@31 577 { VMSReqst *req;
Me@31 578
Me@31 579 req = procrWithReq->requests;
Me@38 580 if( req == NULL ) return NULL;
Me@31 581
Me@31 582 procrWithReq->requests = procrWithReq->requests->nextReqst;
Me@50 583 return req;
Me@24 584 }
Me@22 585
Me@24 586
Me@24 587 inline void *
Me@24 588 VMS__take_sem_reqst_from( VMSReqst *req )
Me@24 589 {
Me@24 590 return req->semReqData;
Me@24 591 }
Me@24 592
Me@24 593
Me@24 594
Me@50 595 /* This is for OS requests and VMS infrastructure requests, such as to create
Me@50 596 * a probe -- a probe is inside the heart of VMS-core, it's not part of any
Me@50 597 * language -- but it's also a semantic thing that's triggered from and used
Me@50 598 * in the application.. so it crosses abstractions.. so, need some special
Me@50 599 * pattern here for handling such requests.
Me@52 600 * Doing this just as if it were a second language sharing VMS-core.
Me@52 601 *
Me@50 602 * This is called from the language's request handler when it sees a request
Me@50 603 * of type VMSSemReq
Me@52 604 *
Me@52 605 * TODO: Later change this, to give probes their own separate plugin & have
Me@52 606 * VMS-core steer the request to appropriate plugin
Me@52 607 * Do the same for OS calls -- look later at it..
Me@50 608 */
Me@50 609 void inline
Me@50 610 VMS__handle_VMSSemReq( VMSReqst *req, VirtProcr *requestingPr, void *semEnv,
Me@50 611 ResumePrFnPtr resumePrFnPtr )
Me@50 612 { VMSSemReq *semReq;
Me@50 613 IntervalProbe *newProbe;
Me@24 614
Me@50 615 semReq = req->semReqData;
Me@24 616
Me@50 617 newProbe = VMS__malloc( sizeof(IntervalProbe) );
Me@65 618 newProbe->nameStr = VMS__strDup( semReq->nameStr );
Me@50 619 newProbe->hist = NULL;
Me@50 620 newProbe->schedChoiceWasRecorded = FALSE;
Me@53 621
Me@53 622 //This runs in masterVP, so no race-condition worries
Me@50 623 newProbe->probeID =
Me@50 624 addToDynArray( newProbe, _VMSMasterEnv->dynIntervalProbesInfo );
Me@50 625
Me@53 626 requestingPr->dataRetFromReq = newProbe;
Me@50 627
Me@50 628 (*resumePrFnPtr)( requestingPr, semEnv );
Me@22 629 }
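/*Illustrative use from a language library (a sketch; the request fields
 * come from VMSSemReq): fill a VMSSemReq with the probe's nameStr, send it
 * with VMS__send_VMSSem_request( &req, myPr ), and after resume pick the
 * new IntervalProbe out of myPr->dataRetFromReq.
 */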
Me@22 630
Me@22 631
Me@22 632
Me@24 633 /*This must be called by the request handler plugin -- it cannot be called
Me@24 634 * from the semantic library "dissipate processor" function -- instead, the
Me@50 635 * semantic layer has to generate a request, and the plug-in calls this
Me@24 636 * function.
Me@24 637 *The reason is that this frees the virtual processor's stack -- which is
Me@24 638 * still in use inside semantic library calls!
Me@24 639 *
Me@24 640 *This frees or recycles all the state owned by and comprising the VMS
Me@24 641 * portion of the animating virtual procr. The request handler must first
Me@24 642 * free any semantic data created for the processor that didn't use the
Me@24 643 * VMS__malloc mechanism. Then it calls this, which first asks the malloc
Me@24 644 * system to disown any state that did use VMS__malloc, and then frees the
Me@24 645 * stack and the processor-struct itself.
Me@24 646 *If the dissipated processor is the sole (remaining) owner of VMS__malloc'd
Me@24 647 * state, then that state gets freed (or sent to recycling) as a side-effect
Me@24 648 * of dis-owning it.
Me@24 649 */
Me@24 650 void
Me@53 651 VMS__dissipate_procr( VirtProcr *animatingPr )
Me@24 652 {
Me@24 653 //dis-own all locations owned by this processor, causing to be freed
Me@24 654 // any locations that it is (was) sole owner of
Me@29 655 //TODO: implement VMS__malloc system, including "give up ownership"
Me@24 656
Me@24 657
Me@24 658 //NOTE: initialData was given to the processor, so should either have
Me@24 659 // been alloc'd with VMS__malloc, or freed by the level above animPr.
Me@24 660 //So, all that's left to free here is the stack and the VirtProcr struc
Me@24 661 // itself
Me@50 662 //Note, should not stack-allocate initial data -- no guarantee, in
Me@50 663 // general that creating processor will outlive ones it creates.
Me@50 664 VMS__free( animatingPr->startOfStack );
Me@50 665 VMS__free( animatingPr );
Me@24 666 }
Me@24 667
Me@24 668
Me@53 669 //TODO: look at architecting cleanest separation between request handler
Me@29 670 // and master loop, for dissipate, create, shutdown, and other non-semantic
Me@29 671 // requests. Issue is chain: one removes requests from AppVP, one dispatches
Me@29 672 // on type of request, and one handles each type.. but some types require
Me@29 673 // action from both request handler and master loop -- maybe just give the
Me@29 674 // request handler calls like: VMS__handle_X_request_type
Me@24 675
Me@29 676
Me@29 677 /*This is called by the semantic layer's request handler when it decides it's
Me@29 678 * time to shut down the VMS system. Calling this causes the core loop OS
Me@29 679 * threads to exit, which unblocks the entry-point function that started up
Me@29 680 * VMS, and allows it to grab the result and return to the original single-
Me@29 681 * threaded application.
Me@22 682 *
Me@29 683 *The _VMSMasterEnv is needed by this shut down function, so the create-seed-
Me@29 684 * and-wait function has to free a bunch of stuff after it detects the
Me@29 685 * threads have all died: the masterEnv, the thread-related locations,
Me@29 686 * masterVP, and any AppVPs that might still be allocated and sitting in the
Me@29 687 * semantic environment, or have been orphaned in the _VMSWorkQ.
Me@29 688 *
Me@53 689 *NOTE: the semantic plug-in is expected to use VMS__malloc to get all the
Me@29 690 * locations it needs, and give ownership to masterVP. Then, they will be
Me@53 691 * automatically freed.
Me@22 692 *
Me@29 693 *In here, create one core-loop shut-down processor for each core loop and put
Me@31 694 * them all directly into the readyToAnimateQ.
Me@29 695 *Note, this function can ONLY be called after the semantic environment no
Me@29 696 * longer cares if AppVPs get animated after the point this is called. In
Me@29 697 * other words, this can be used as an abort, or else it should only be
Me@29 698 * called when all AppVPs have finished dissipate requests -- only at that
Me@29 699 * point is it sure that all results have completed.
Me@22 700 */
Me@22 701 void
Me@53 702 VMS__shutdown()
Me@8 703 { int coreIdx;
Me@14 704 VirtProcr *shutDownPr;
Me@22 705
Me@29 706 //create the shutdown processors, one for each core loop -- put them
Me@31 707 // directly into the Q -- each core will die when gets one
Me@8 708 for( coreIdx=0; coreIdx < NUM_CORES; coreIdx++ )
Me@50 709 { //Note, this is running in the master
Me@29 710 shutDownPr = VMS__create_procr( &endOSThreadFn, NULL );
nengel@193 711 shutDownPr->isShutdownVP = TRUE;
Me@55 712 writeVMSQ( shutDownPr, _VMSMasterEnv->readyToAnimateQs[coreIdx] );
Me@8 713 }
Nina@109 714 #ifdef MEAS__PERF_COUNTERS
Nina@109 715 uint64 tmpc,tmpi;
Nina@109 716 saveCyclesAndInstrs(0,tmpc,tmpi);
nengel@211 717 //printf("End: cycles = %llu, instrs = %llu\n",tmpc,tmpi);
Nina@109 718 /*
Nina@109 719 for( coreIdx=0; coreIdx < NUM_CORES; coreIdx++ ){
Nina@109 720 close(_VMSMasterEnv->cycles_counter_fd[coreIdx]);
Nina@109 721 close(_VMSMasterEnv->instrs_counter_fd[coreIdx]);
Nina@109 722 }
Nina@109 723 */
Nina@109 724 #endif
Me@12 725 }
Me@12 726
Me@12 727
Me@29 728 /*Am trying to be cute, avoiding an IF statement in coreLoop that checks for
Me@29 729 * a special shutdown procr. Ended up with extra-complex shutdown sequence.
Me@29 730 *This function has the sole purpose of setting the stack and framePtr
Me@29 731 * to the coreLoop's stack and framePtr.. it does that then jumps to the
Me@29 732 * core loop's shutdown point -- might be able to just call pthread_exit
Me@30 733 * from here, but am going back to the pthread's stack and setting everything
Me@29 734 * up just as if it never jumped out, before calling pthread_exit.
Me@29 735 *The end-point of core loop will free the stack and so forth of the
Me@29 736 * processor that animates this function (this fn is transferring the
Me@29 737 * animator of the AppVP that is in turn animating this function over
Me@29 738 * to the core loop function -- note that this slices out a level of virtual
Me@29 739 * processors).
Me@29 740 */
Me@29 741 void
Me@29 742 endOSThreadFn( void *initData, VirtProcr *animatingPr )
msach@71 743 {
msach@75 744 #ifdef SEQUENTIAL
msach@75 745 asmTerminateCoreLoopSeq(animatingPr);
msach@75 746 #else
msach@71 747 asmTerminateCoreLoop(animatingPr);
msach@75 748 #endif
Me@30 749 }
Me@29 750
Me@29 751
Me@53 752 /*This is called from the startup & shutdown
Me@24 753 */
Me@24 754 void
Me@53 755 VMS__cleanup_at_end_of_shutdown()
Me@31 756 {
msach@78 757 //unused
msach@78 758 //VMSQueueStruc **readyToAnimateQs;
msach@78 759 //int coreIdx;
msach@78 760 //VirtProcr **masterVPs;
msach@78 761 //SchedSlot ***allSchedSlots; //ptr to array of ptrs
Me@31 762
Me@65 763 //Before getting rid of everything, print out any measurements made
Nina@109 764 //forAllInDynArrayDo( _VMSMasterEnv->measHistsInfo, (DynArrayFnPtr)&printHist );
Nina@109 765 //forAllInDynArrayDo( _VMSMasterEnv->measHistsInfo, (DynArrayFnPtr)&saveHistToFile);
nengel@182 766 //forAllInDynArrayDo( _VMSMasterEnv->measHistsInfo, &freeHist );
Nina@167 767
nengel@211 768 prctl(PR_TASK_PERF_EVENTS_DISABLE);
Me@65 769 #ifdef MEAS__TIME_PLUGIN
Me@68 770 printHist( _VMSMasterEnv->reqHdlrLowTimeHist );
msach@84 771 saveHistToFile( _VMSMasterEnv->reqHdlrLowTimeHist );
Me@68 772 printHist( _VMSMasterEnv->reqHdlrHighTimeHist );
msach@79 773 saveHistToFile( _VMSMasterEnv->reqHdlrHighTimeHist );
Me@68 774 freeHistExt( _VMSMasterEnv->reqHdlrLowTimeHist );
Me@68 775 freeHistExt( _VMSMasterEnv->reqHdlrHighTimeHist );
Me@65 776 #endif
Me@65 777 #ifdef MEAS__TIME_MALLOC
Me@65 778 printHist( _VMSMasterEnv->mallocTimeHist );
msach@79 779 saveHistToFile( _VMSMasterEnv->mallocTimeHist );
Me@65 780 printHist( _VMSMasterEnv->freeTimeHist );
msach@79 781 saveHistToFile( _VMSMasterEnv->freeTimeHist );
Me@65 782 freeHistExt( _VMSMasterEnv->mallocTimeHist );
Me@65 783 freeHistExt( _VMSMasterEnv->freeTimeHist );
Me@65 784 #endif
Me@65 785 #ifdef MEAS__TIME_MASTER_LOCK
Me@65 786 printHist( _VMSMasterEnv->masterLockLowTimeHist );
Me@65 787 printHist( _VMSMasterEnv->masterLockHighTimeHist );
Me@65 788 #endif
Me@65 789 #ifdef MEAS__TIME_MASTER
Me@65 790 printHist( _VMSMasterEnv->pluginTimeHist );
Me@65 791 for( coreIdx = 0; coreIdx < NUM_CORES; coreIdx++ )
Me@65 792 {
Me@65 793 freeVMSQ( readyToAnimateQs[ coreIdx ] );
Me@65 794 //master VPs were created external to VMS, so use external free
Me@65 795 VMS__dissipate_procr( masterVPs[ coreIdx ] );
Me@65 796
Me@65 797 freeSchedSlots( allSchedSlots[ coreIdx ] );
Me@65 798 }
Me@65 799 #endif
Me@65 800 #ifdef MEAS__TIME_STAMP_SUSP
Me@65 801 printHist( _VMSMasterEnv->pluginTimeHist );
Me@65 802 for( coreIdx = 0; coreIdx < NUM_CORES; coreIdx++ )
Me@65 803 {
Me@65 804 freeVMSQ( readyToAnimateQs[ coreIdx ] );
Me@65 805 //master VPs were created external to VMS, so use external free
Me@65 806 VMS__dissipate_procr( masterVPs[ coreIdx ] );
Me@65 807
Me@65 808 freeSchedSlots( allSchedSlots[ coreIdx ] );
Me@65 809 }
Me@65 810 #endif
Me@65 811
Me@53 812 //All the environment data has been allocated with VMS__malloc, so just
Me@53 813 // free its internal big-chunk and all inside it disappear.
Me@53 814 /*
Me@31 815 readyToAnimateQs = _VMSMasterEnv->readyToAnimateQs;
Me@31 816 masterVPs = _VMSMasterEnv->masterVPs;
Me@31 817 allSchedSlots = _VMSMasterEnv->allSchedSlots;
Me@31 818
Me@31 819 for( coreIdx = 0; coreIdx < NUM_CORES; coreIdx++ )
Me@24 820 {
Me@55 821 freeVMSQ( readyToAnimateQs[ coreIdx ] );
Me@50 822 //master VPs were created external to VMS, so use external free
Me@53 823 VMS__dissipate_procr( masterVPs[ coreIdx ] );
Me@31 824
Me@31 825 freeSchedSlots( allSchedSlots[ coreIdx ] );
Me@24 826 }
Me@31 827
Me@53 828 VMS__free( _VMSMasterEnv->readyToAnimateQs );
Me@53 829 VMS__free( _VMSMasterEnv->masterVPs );
Me@53 830 VMS__free( _VMSMasterEnv->allSchedSlots );
Me@50 831
Me@50 832 //============================= MEASUREMENT STUFF ========================
Me@50 833 #ifdef STATS__TURN_ON_PROBES
Me@53 834 freeDynArrayDeep( _VMSMasterEnv->dynIntervalProbesInfo, &VMS__free_probe);
Me@50 835 #endif
Me@50 836 //========================================================================
Me@53 837 */
Me@53 838 //These are the only two that use system free
Me@53 839 VMS_ext__free_free_list( _VMSMasterEnv->freeListHead );
Me@53 840 free( (void *)_VMSMasterEnv );
Me@24 841 }
Me@24 842
Me@54 843
Me@54 844 //================================
Me@54 845
Me@54 846
Me@54 847 /*Later, improve this -- for now, just exits the application after printing
Me@54 848 * the error message.
Me@54 849 */
Me@54 850 void
Me@54 851 VMS__throw_exception( char *msgStr, VirtProcr *reqstPr, VMSExcp *excpData )
Me@54 852 {
msach@69 853 printf("%s",msgStr);
Me@54 854 fflush(stdout);
Me@54 855 exit(1);
Me@54 856 }
Me@54 857
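/*Serialized read of the time-stamp counter: cpuid (with eax zeroed) acts
 * as a barrier, so earlier instructions retire before rdtsc samples.
 */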
nengel@211 858 __inline__ uint64_t rdtsc(void){
nengel@211 859 uint32_t lo, hi;
nengel@211 860 __asm__ __volatile__ ( // serialize
nengel@211 861 "xorl %%eax,%%eax \n cpuid"
nengel@211 862 ::: "%rax", "%rbx", "%rcx", "%rdx");
nengel@211 863 __asm__ __volatile__ ("rdtsc" : "=a" (lo), "=d" (hi));
nengel@211 864 /* asm volatile("RDTSC;"
nengel@211 865 "movl %%eax, %0;"
nengel@211 866 "movl %%edx, %1;"
nengel@211 867 : "=m" (lo), "=m" (hi)
nengel@211 868 :
nengel@211 869 : "%eax", "%edx"
nengel@211 870 ); */
nengel@211 871 return (uint64_t)hi << 32 | lo;
nengel@211 872 }
nengel@211 873
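/*Handshake to estimate the TSC offset between two cores: the sender bumps
 * sent_ctr and spins until the responder (tsc_offset_resp, running on the
 * other core) notices and writes its own rdtsc into ret_tsc. The midpoint
 * of local_before/local_after approximates the instant the responder read
 * its TSC, so midpoint - ret_tsc_curr estimates the offset between the two
 * counters. Repeats a few rounds to settle.
 */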
nengel@211 874 uint64 tsc_offset_send(ThdParams* thisCoresThdParams, uint64 initval){
nengel@211 875 uint64 ret_tsc_curr; //local copy of coreLoopThdParams->ret_tsc
nengel@211 876 uint64 ret_tsc_prev;
nengel@211 877 uint64 local_before;
nengel@211 878 uint64 local_after;
nengel@211 879
nengel@211 880 ret_tsc_prev = initval;
nengel@211 881 ret_tsc_curr = initval;
nengel@211 882 local_before = rdtsc();
nengel@211 883 thisCoresThdParams->sent_ctr++;
nengel@211 884 while(ret_tsc_curr == ret_tsc_prev)
nengel@211 885 ret_tsc_curr = thisCoresThdParams->ret_tsc;
nengel@211 886 local_after = rdtsc();
nengel@211 887 ret_tsc_prev = ret_tsc_curr;
nengel@211 888
nengel@211 889 int i;
nengel@211 890 for(i=0;i<3;++i){
nengel@211 891 local_before = rdtsc();
nengel@211 892 thisCoresThdParams->sent_ctr++;
nengel@211 893 while(ret_tsc_curr == ret_tsc_prev)
nengel@211 894 ret_tsc_curr = thisCoresThdParams->ret_tsc;
nengel@211 895 local_after = rdtsc();
nengel@211 896 int64 midpoint = local_before + (local_after-local_before)/2;
nengel@211 897 int64 difference;
nengel@211 898 if (midpoint > ret_tsc_curr)
nengel@211 899 difference = midpoint - (int64)ret_tsc_curr;
nengel@211 900 else
nengel@211 901 difference = (int64)ret_tsc_curr - midpoint;
nengel@211 902 //printf("TSC: %llu (Core %d) = %llu (Core %d) // difference=%llu\n",midpoint,thisCoresThdParams->coreNum,ret_tsc_curr,thisCoresThdParams->coreNum + 1,difference);
nengel@211 903 ret_tsc_prev = ret_tsc_curr;
nengel@211 904 }
nengel@211 905
nengel@211 906 return ret_tsc_curr;
nengel@211 907 }
nengel@211 908
nengel@211 909 int tsc_offset_resp(ThdParams* sendCoresThdParams, int initialctrval){
nengel@211 910
nengel@211 911 int send_ctr_curr = initialctrval;
nengel@211 912 int send_ctr_prev = initialctrval;
nengel@211 913
nengel@211 914
nengel@211 915 while(send_ctr_curr == send_ctr_prev)
nengel@211 916 send_ctr_curr = sendCoresThdParams->sent_ctr;
nengel@211 917 sendCoresThdParams->ret_tsc = rdtsc();
nengel@211 918 send_ctr_prev = send_ctr_curr;
nengel@211 919
nengel@211 920 int i;
nengel@211 921 for(i=0;i<3;++i){
nengel@211 922 while(send_ctr_curr == send_ctr_prev)
nengel@211 923 send_ctr_curr = sendCoresThdParams->sent_ctr;
nengel@211 924 sendCoresThdParams->ret_tsc = rdtsc();
nengel@211 925 send_ctr_prev = send_ctr_curr;
nengel@211 926 }
nengel@211 927 return send_ctr_curr;
nengel@211 928 }