diff VMS.c @ 200:6db9e4898978

VMS name changes -- added the "WL", "PI", and "int" prefixes and split vms.h up
author Me@portablequad
date Sun, 12 Feb 2012 01:49:33 -0800
parents ad8213a8e916
children
line diff
     1.1 --- a/VMS.c	Sat Feb 11 21:43:43 2012 -0800
     1.2 +++ b/VMS.c	Sun Feb 12 01:49:33 2012 -0800
     1.3 @@ -13,15 +13,13 @@
     1.4  
     1.5  #include "VMS.h"
     1.6  #include "ProcrContext.h"
     1.7 -#include "Queue_impl/BlockingQueue.h"
     1.8 -#include "Histogram/Histogram.h"
     1.9  
    1.10  
    1.11  #define thdAttrs NULL
    1.12  
    1.13  //===========================================================================
    1.14  void
    1.15 -shutdownFn( void *dummy, VirtProcr *dummy2 );
    1.16 +shutdownFn( void *dummy, SlaveVP *dummy2 );
    1.17  
    1.18  SchedSlot **
    1.19  create_sched_slots();
    1.20 @@ -36,7 +34,7 @@
    1.21  create_free_list();
    1.22  
    1.23  void
    1.24 -endOSThreadFn( void *initData, VirtProcr *animatingPr );
    1.25 +endOSThreadFn( void *initData, SlaveVP *animatingPr );
    1.26  
    1.27  pthread_mutex_t suspendLock = PTHREAD_MUTEX_INITIALIZER;
    1.28  pthread_cond_t  suspend_cond  = PTHREAD_COND_INITIALIZER;
    1.29 @@ -72,7 +70,7 @@
    1.30   * layer.
    1.31   */
    1.32  void
    1.33 -VMS__init()
    1.34 +VMS_int__init()
    1.35   {
    1.36     create_masterEnv();
    1.37     create_the_coreLoop_OS_threads();
    1.38 @@ -83,7 +81,7 @@
    1.39  /*To initialize the sequential version, just don't create the threads
    1.40   */
    1.41  void
    1.42 -VMS__init_Seq()
    1.43 +VMS_int__init_Seq()
    1.44   {
    1.45     create_masterEnv();
    1.46   }
    1.47 @@ -95,7 +93,7 @@
    1.48   { MasterEnv       *masterEnv;
    1.49     VMSQueueStruc **readyToAnimateQs;
    1.50     int              coreIdx;
    1.51 -   VirtProcr      **masterVPs;
    1.52 +   SlaveVP      **masterVPs;
    1.53     SchedSlot     ***allSchedSlots; //ptr to array of ptrs
    1.54  
    1.55  
    1.56 @@ -127,19 +125,19 @@
    1.57     masterEnv     = (MasterEnv*)_VMSMasterEnv;
    1.58     
    1.59        //Make a readyToAnimateQ for each core loop
    1.60 -   readyToAnimateQs = VMS__malloc( NUM_CORES * sizeof(VMSQueueStruc *) );
    1.61 -   masterVPs        = VMS__malloc( NUM_CORES * sizeof(VirtProcr *) );
    1.62 +   readyToAnimateQs = VMS_int__malloc( NUM_CORES * sizeof(VMSQueueStruc *) );
    1.63 +   masterVPs        = VMS_int__malloc( NUM_CORES * sizeof(SlaveVP *) );
    1.64  
    1.65        //One array for each core, 3 in array, core's masterVP scheds all
    1.66 -   allSchedSlots    = VMS__malloc( NUM_CORES * sizeof(SchedSlot *) );
    1.67 +   allSchedSlots    = VMS_int__malloc( NUM_CORES * sizeof(SchedSlot *) );
    1.68  
    1.69 -   _VMSMasterEnv->numProcrsCreated = 0;  //used by create procr
    1.70 +   _VMSMasterEnv->numVPsCreated = 0;  //used by create procr
    1.71     for( coreIdx = 0; coreIdx < NUM_CORES; coreIdx++ )
    1.72      {    
    1.73        readyToAnimateQs[ coreIdx ] = makeVMSQ();
    1.74        
    1.75           //Q: should give masterVP core-specific info as its init data?
    1.76 -      masterVPs[ coreIdx ] = VMS__create_procr( (VirtProcrFnPtr)&masterLoop, (void*)masterEnv );
    1.77 +      masterVPs[ coreIdx ] = VMS_int__create_procr( (VirtProcrFnPtr)&masterLoop, (void*)masterEnv );
    1.78        masterVPs[ coreIdx ]->coreAnimatedBy = coreIdx;
    1.79        allSchedSlots[ coreIdx ] = create_sched_slots(); //makes for one core
    1.80        _VMSMasterEnv->numMasterInARow[ coreIdx ] = 0;
    1.81 @@ -161,7 +159,7 @@
    1.82     _VMSMasterEnv->dynIntervalProbesInfo =
    1.83                makePrivDynArrayOfSize( (void***)&(_VMSMasterEnv->intervalProbes), 200);
    1.84  
    1.85 -   _VMSMasterEnv->probeNameHashTbl = makeHashTable( 1000, &VMS__free );
    1.86 +   _VMSMasterEnv->probeNameHashTbl = makeHashTable( 1000, &VMS_int__free );
    1.87     
    1.88        //put creation time directly into master env, for fast retrieval
    1.89     struct timeval timeStamp;
    1.90 @@ -186,11 +184,11 @@
    1.91   { SchedSlot  **schedSlots;
    1.92     int i;
    1.93  
    1.94 -   schedSlots  = VMS__malloc( NUM_SCHED_SLOTS * sizeof(SchedSlot *) );
    1.95 +   schedSlots  = VMS_int__malloc( NUM_SCHED_SLOTS * sizeof(SchedSlot *) );
    1.96  
    1.97     for( i = 0; i < NUM_SCHED_SLOTS; i++ )
    1.98      {
    1.99 -      schedSlots[i] = VMS__malloc( sizeof(SchedSlot) );
   1.100 +      schedSlots[i] = VMS_int__malloc( sizeof(SchedSlot) );
   1.101  
   1.102           //Set state to mean "handling requests done, slot needs filling"
   1.103        schedSlots[i]->workIsDone         = FALSE;
   1.104 @@ -205,9 +203,9 @@
   1.105   { int i;
   1.106     for( i = 0; i < NUM_SCHED_SLOTS; i++ )
   1.107      {
   1.108 -      VMS__free( schedSlots[i] );
   1.109 +      VMS_int__free( schedSlots[i] );
   1.110      }
   1.111 -   VMS__free( schedSlots );
   1.112 +   VMS_int__free( schedSlots );
   1.113   }
   1.114  
   1.115  
   1.116 @@ -225,7 +223,7 @@
   1.117  
   1.118        //Make the threads that animate the core loops
   1.119     for( coreIdx=0; coreIdx < NUM_CORES; coreIdx++ )
   1.120 -    { coreLoopThdParams[coreIdx]          = VMS__malloc( sizeof(ThdParams) );
   1.121 +    { coreLoopThdParams[coreIdx]          = VMS_int__malloc( sizeof(ThdParams) );
   1.122        coreLoopThdParams[coreIdx]->coreNum = coreIdx;
   1.123  
   1.124        retCode =
   1.125 @@ -242,7 +240,7 @@
   1.126   *This starts the core loops running then waits for them to exit.
   1.127   */
   1.128  void
   1.129 -VMS__start_the_work_then_wait_until_done()
   1.130 +VMS_WL__start_the_work_then_wait_until_done()
   1.131   { int coreIdx;
   1.132        //Start the core loops running
   1.133     
   1.134 @@ -272,7 +270,7 @@
   1.135   * the sequential version of VMS is VMS__init_Seq, this, and coreLoop_Seq.
   1.136   */
   1.137  void
   1.138 -VMS__start_the_work_then_wait_until_done_Seq()
   1.139 +VMS_WL__start_the_work_then_wait_until_done_Seq()
   1.140   {
   1.141           //Instead of un-suspending threads, just call the one and only
   1.142           // core loop (sequential version), in the main thread.
   1.143 @@ -282,13 +280,13 @@
   1.144   }
   1.145  #endif
   1.146  
   1.147 -inline VirtProcr *
   1.148 -VMS__create_procr( VirtProcrFnPtr fnPtr, void *initialData )
   1.149 - { VirtProcr *newPr;
   1.150 +inline SlaveVP *
   1.151 +VMS_int__create_procr( VirtProcrFnPtr fnPtr, void *initialData )
   1.152 + { SlaveVP *newPr;
   1.153     void      *stackLocs;
   1.154  
   1.155 -   newPr      = VMS__malloc( sizeof(VirtProcr) );
   1.156 -   stackLocs  = VMS__malloc( VIRT_PROCR_STACK_SIZE );
   1.157 +   newPr      = VMS_int__malloc( sizeof(SlaveVP) );
   1.158 +   stackLocs  = VMS_int__malloc( VIRT_PROCR_STACK_SIZE );
   1.159     if( stackLocs == 0 )
   1.160      { perror("VMS__malloc stack"); exit(1); }
   1.161  
   1.162 @@ -299,12 +297,12 @@
   1.163   * be called from main thread or other thread -- never from code animated by
   1.164   * a VMS virtual processor.
   1.165   */
   1.166 -inline VirtProcr *
   1.167 +inline SlaveVP *
   1.168  VMS_ext__create_procr( VirtProcrFnPtr fnPtr, void *initialData )
   1.169 - { VirtProcr *newPr;
   1.170 + { SlaveVP *newPr;
   1.171     char      *stackLocs;
   1.172  
   1.173 -   newPr      = malloc( sizeof(VirtProcr) );
   1.174 +   newPr      = malloc( sizeof(SlaveVP) );
   1.175     stackLocs  = malloc( VIRT_PROCR_STACK_SIZE );
   1.176     if( stackLocs == 0 )
   1.177      { perror("malloc stack"); exit(1); }
   1.178 @@ -316,7 +314,7 @@
   1.179  /*Anticipating multi-tasking
   1.180   */
   1.181  void *
   1.182 -VMS__give_sem_env_for( VirtProcr *animPr )
   1.183 +VMS_WL__give_sem_env_for( SlaveVP *animPr )
   1.184   {
   1.185     return _VMSMasterEnv->semanticEnv;
   1.186   }
   1.187 @@ -331,7 +329,7 @@
   1.188   * next work-unit for that procr.
   1.189   */
   1.190  void
   1.191 -VMS__suspend_procr( VirtProcr *animatingPr )
   1.192 +VMS_int__suspend_procr( SlaveVP *animatingPr )
   1.193   { 
   1.194  
   1.195        //The request to master will cause this suspended virt procr to get
   1.196 @@ -379,7 +377,7 @@
   1.197   * to the plugin.
   1.198   */
   1.199  void
   1.200 -VMS__send_create_procr_req( void *semReqData, VirtProcr *reqstingPr )
   1.201 +VMS_WL__send_create_procr_req( void *semReqData, SlaveVP *reqstingPr )
   1.202   { VMSReqst req;
   1.203  
   1.204     req.reqType          = createReq;
   1.205 @@ -387,7 +385,7 @@
   1.206     req.nextReqst        = reqstingPr->requests;
   1.207     reqstingPr->requests = &req;
   1.208  
   1.209 -   VMS__suspend_procr( reqstingPr );
   1.210 +   VMS_int__suspend_procr( reqstingPr );
   1.211   }
   1.212  
   1.213  
   1.214 @@ -413,14 +411,14 @@
   1.215   * pears -- making that suspend the last thing in the virt procr's trace.
   1.216   */
   1.217  void
   1.218 -VMS__send_dissipate_req( VirtProcr *procrToDissipate )
   1.219 +VMS_WL__send_dissipate_req( SlaveVP *procrToDissipate )
   1.220   { VMSReqst req;
   1.221  
   1.222     req.reqType                = dissipate;
   1.223     req.nextReqst              = procrToDissipate->requests;
   1.224     procrToDissipate->requests = &req;
   1.225  
   1.226 -   VMS__suspend_procr( procrToDissipate );
   1.227 +   VMS_int__suspend_procr( procrToDissipate );
   1.228   }
   1.229  
   1.230  
   1.231 @@ -431,7 +429,7 @@
   1.232   *Use this version to dissipate VPs created outside the VMS system.
   1.233   */
   1.234  void
   1.235 -VMS_ext__dissipate_procr( VirtProcr *procrToDissipate )
   1.236 +VMS_ext__dissipate_procr( SlaveVP *procrToDissipate )
   1.237   {
   1.238        //NOTE: initialData was given to the processor, so should either have
   1.239        // been alloc'd with VMS__malloc, or freed by the level above animPr.
   1.240 @@ -456,11 +454,11 @@
   1.241   *The request handler has to call VMS__free_VMSReq for any of these
   1.242   */
   1.243  inline void
   1.244 -VMS__add_sem_request_in_mallocd_VMSReqst( void *semReqData,
   1.245 -                                          VirtProcr *callingPr )
   1.246 +VMS_WL__add_sem_request_in_mallocd_VMSReqst( void *semReqData,
   1.247 +                                          SlaveVP *callingPr )
   1.248   { VMSReqst *req;
   1.249  
   1.250 -   req = VMS__malloc( sizeof(VMSReqst) );
   1.251 +   req = VMS_int__malloc( sizeof(VMSReqst) );
   1.252     req->reqType         = semantic;
   1.253     req->semReqData      = semReqData;
   1.254     req->nextReqst       = callingPr->requests;
   1.255 @@ -473,7 +471,7 @@
   1.256   *Then it does suspend, to cause request to be sent.
   1.257   */
   1.258  inline void
   1.259 -VMS__send_sem_request( void *semReqData, VirtProcr *callingPr )
   1.260 +VMS_WL__send_sem_request( void *semReqData, SlaveVP *callingPr )
   1.261   { VMSReqst req;
   1.262  
   1.263     req.reqType         = semantic;
   1.264 @@ -481,12 +479,12 @@
   1.265     req.nextReqst       = callingPr->requests;
   1.266     callingPr->requests = &req;
   1.267     
   1.268 -   VMS__suspend_procr( callingPr );
   1.269 +   VMS_int__suspend_procr( callingPr );
   1.270   }
   1.271  
   1.272  
   1.273  inline void
   1.274 -VMS__send_VMSSem_request( void *semReqData, VirtProcr *callingPr )
   1.275 +VMS_WL__send_VMSSem_request( void *semReqData, SlaveVP *callingPr )
   1.276   { VMSReqst req;
   1.277  
   1.278     req.reqType         = VMSSemantic;
   1.279 @@ -494,14 +492,14 @@
   1.280     req.nextReqst       = callingPr->requests; //gab any other preceeding 
   1.281     callingPr->requests = &req;
   1.282  
   1.283 -   VMS__suspend_procr( callingPr );
   1.284 +   VMS_int__suspend_procr( callingPr );
   1.285   }
   1.286  
   1.287  
   1.288  /*
   1.289   */
   1.290  VMSReqst *
   1.291 -VMS__take_next_request_out_of( VirtProcr *procrWithReq )
   1.292 +VMS_PI__take_next_request_out_of( SlaveVP *procrWithReq )
   1.293   { VMSReqst *req;
   1.294  
   1.295     req = procrWithReq->requests;
   1.296 @@ -513,7 +511,7 @@
   1.297  
   1.298  
   1.299  inline void *
   1.300 -VMS__take_sem_reqst_from( VMSReqst *req )
   1.301 +VMS_PI__take_sem_reqst_from( VMSReqst *req )
   1.302   {
   1.303     return req->semReqData;
   1.304   }
   1.305 @@ -535,15 +533,15 @@
   1.306   * Do the same for OS calls -- look later at it..
   1.307   */
   1.308  void inline
   1.309 -VMS__handle_VMSSemReq( VMSReqst *req, VirtProcr *requestingPr, void *semEnv,
   1.310 -                       ResumePrFnPtr resumePrFnPtr )
   1.311 +VMS_PI__handle_VMSSemReq( VMSReqst *req, SlaveVP *requestingPr, void *semEnv,
   1.312 +                       ResumeVPFnPtr resumePrFnPtr )
   1.313   { VMSSemReq     *semReq;
   1.314     IntervalProbe *newProbe;
   1.315  
   1.316     semReq = req->semReqData;
   1.317  
   1.318 -   newProbe          = VMS__malloc( sizeof(IntervalProbe) );
   1.319 -   newProbe->nameStr = VMS__strDup( semReq->nameStr );
   1.320 +   newProbe          = VMS_int__malloc( sizeof(IntervalProbe) );
   1.321 +   newProbe->nameStr = VMS_int__strDup( semReq->nameStr );
   1.322     newProbe->hist    = NULL;
   1.323     newProbe->schedChoiceWasRecorded = FALSE;
   1.324  
   1.325 @@ -576,7 +574,7 @@
   1.326   * of dis-owning it.
   1.327   */
   1.328  void
   1.329 -VMS__dissipate_procr( VirtProcr *animatingPr )
   1.330 +VMS_int__dissipate_procr( SlaveVP *animatingPr )
   1.331   {
   1.332        //dis-own all locations owned by this processor, causing to be freed
   1.333        // any locations that it is (was) sole owner of
   1.334 @@ -589,8 +587,8 @@
   1.335        // itself
   1.336        //Note, should not stack-allocate initial data -- no guarantee, in
   1.337        // general that creating processor will outlive ones it creates.
   1.338 -   VMS__free( animatingPr->startOfStack );
   1.339 -   VMS__free( animatingPr );
   1.340 +   VMS_int__free( animatingPr->startOfStack );
   1.341 +   VMS_int__free( animatingPr );
   1.342   }
   1.343  
   1.344  
   1.345 @@ -627,15 +625,15 @@
   1.346   * point is it sure that all results have completed.
   1.347   */
   1.348  void
   1.349 -VMS__shutdown()
   1.350 +VMS_int__shutdown()
   1.351   { int coreIdx;
   1.352 -   VirtProcr *shutDownPr;
   1.353 +   SlaveVP *shutDownPr;
   1.354  
   1.355        //create the shutdown processors, one for each core loop -- put them
   1.356        // directly into the Q -- each core will die when gets one
   1.357     for( coreIdx=0; coreIdx < NUM_CORES; coreIdx++ )
   1.358      {    //Note, this is running in the master
   1.359 -      shutDownPr = VMS__create_procr( &endOSThreadFn, NULL );
   1.360 +      shutDownPr = VMS_int__create_procr( &endOSThreadFn, NULL );
   1.361        writeVMSQ( shutDownPr, _VMSMasterEnv->readyToAnimateQs[coreIdx] );
   1.362      }
   1.363  
   1.364 @@ -656,7 +654,7 @@
   1.365   * processors).
   1.366   */
   1.367  void
   1.368 -endOSThreadFn( void *initData, VirtProcr *animatingPr )
   1.369 +endOSThreadFn( void *initData, SlaveVP *animatingPr )
   1.370   { 
   1.371  #ifdef SEQUENTIAL
   1.372      asmTerminateCoreLoopSeq(animatingPr);
   1.373 @@ -669,7 +667,7 @@
   1.374  /*This is called from the startup & shutdown
   1.375   */
   1.376  void
   1.377 -VMS__cleanup_at_end_of_shutdown()
   1.378 +VMS_int__cleanup_at_end_of_shutdown()
   1.379   { 
   1.380     //unused
   1.381     //VMSQueueStruc **readyToAnimateQs;
   1.382 @@ -707,7 +705,7 @@
   1.383      {
   1.384        freeVMSQ( readyToAnimateQs[ coreIdx ] );
   1.385           //master VPs were created external to VMS, so use external free
   1.386 -      VMS__dissipate_procr( masterVPs[ coreIdx ] );
   1.387 +      VMS_int__dissipate_procr( masterVPs[ coreIdx ] );
   1.388  
   1.389        freeSchedSlots( allSchedSlots[ coreIdx ] );
   1.390      }
   1.391 @@ -718,7 +716,7 @@
   1.392      {
   1.393        freeVMSQ( readyToAnimateQs[ coreIdx ] );
   1.394           //master VPs were created external to VMS, so use external free
   1.395 -      VMS__dissipate_procr( masterVPs[ coreIdx ] );
   1.396 +      VMS_int__dissipate_procr( masterVPs[ coreIdx ] );
   1.397  
   1.398        freeSchedSlots( allSchedSlots[ coreIdx ] );
   1.399      }
   1.400 @@ -763,7 +761,7 @@
   1.401   * the error message.
   1.402   */
   1.403  void
   1.404 -VMS__throw_exception( char *msgStr, VirtProcr *reqstPr, VMSExcp *excpData )
   1.405 +VMS_PI__throw_exception( char *msgStr, SlaveVP *reqstPr, VMSExcp *excpData )
   1.406   {
   1.407     printf("%s",msgStr);
   1.408     fflush(stdin);