VMS/VMS_Implementations/Vthread_impls/Vthread_MC_shared_impl

changeset 29:b94dc57e4455 tip

refactored many files -- chgd names, moved code around -- doesn't compile
author Some Random Person <seanhalle@yahoo.com>
date Wed, 09 May 2012 13:24:19 -0700
parents b3a881f25c5a
children
files Vthread.c Vthread.h Vthread_Measurement.h Vthread_PluginFns.c Vthread__Measurement.h Vthread__PluginFns.c Vthread__startup_and_shutdown.c Vthread_helper.c Vthread_helper.h Vthread_lib.c
diffstat 10 files changed, 992 insertions(+), 978 deletions(-) [+]
line diff
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/Vthread.c	Wed May 09 13:24:19 2012 -0700
     1.3 @@ -0,0 +1,464 @@
     1.4 +/*
     1.5 + * Copyright 2010  OpenSourceCodeStewardshipFoundation
     1.6 + *
     1.7 + * Licensed under BSD
     1.8 + */
     1.9 +
    1.10 +#include <stdio.h>
    1.11 +#include <stdlib.h>
    1.12 +
    1.13 +#include "VMS_impl/VMS.h"
    1.14 +#include "Vthread.h"
    1.15 +#include "Vthread_helper.h"
    1.16 +#include "C_Libraries/Queue_impl/PrivateQueue.h"
    1.17 +#include "C_Libraries/Hash_impl/PrivateHash.h"
    1.18 +
    1.19 +
    1.20 +//==========================================================================
    1.21 +
    1.22 +void
    1.23 +Vthread__init();
    1.24 +
    1.25 +void
    1.26 +Vthread__init_Seq();
    1.27 +
    1.28 +void
    1.29 +Vthread__init_Helper();
    1.30 +
    1.31 +
    1.32 +//===========================================================================
    1.33 +
    1.34 +/*These are the library functions *called in the application*
    1.35 + * 
    1.36 + */
    1.37 +
    1.38 +
    1.39 +
    1.40 +//===========================================================================
    1.41 +
    1.42 +inline int32
    1.43 +Vthread__giveMinWorkUnitCycles( float32 percentOverhead )
    1.44 + {
    1.45 +   return MIN_WORK_UNIT_CYCLES;
    1.46 + }
    1.47 +
    1.48 +inline int32
    1.49 +Vthread__giveIdealNumWorkUnits()
    1.50 + {
    1.51 +   return NUM_SCHED_SLOTS * NUM_CORES;
    1.52 + }
    1.53 +
    1.54 +inline int32
    1.55 +Vthread__give_number_of_cores_to_schedule_onto()
    1.56 + {
    1.57 +   return NUM_CORES;
    1.58 + }
    1.59 +
/*For now, use TSC -- later, make these two macros with assembly that first
 * saves jump point, and second jumps back several times to get reliable time
 */
/*Marks the start of a timed primitive: snapshots the low 32 bits of the
 * time-stamp counter into the semantic env's primitiveStartTime field.
 * NOTE(review): assumes saveLowTimeStampCountInto assigns into the given
 * lvalue -- confirm against the macro's definition.
 */
inline void
Vthread__start_primitive()
 { saveLowTimeStampCountInto( ((VthdSemEnv *)(_VMSMasterEnv->semanticEnv))->
                              primitiveStartTime );
 }
    1.68 +
/*Just quick and dirty for now -- make reliable later
 * will want this to jump back several times -- to be sure cache is warm
 * because don't want comm time included in calc-time measurement -- and
 * also to throw out any "weird" values due to OS interrupt or TSC rollover
 */
/*Ends the timing begun by Vthread__start_primitive and returns the elapsed
 * cycle count (end stamp minus the saved start stamp).
 * NOTE(review): only the low 32 TSC bits are used, so a counter rollover
 * between start and end produces a wrapped/bogus interval -- see TODO.
 */
inline int32
Vthread__end_primitive_and_give_cycles()
 { int32 endTime, startTime;
   //TODO: fix by repeating time-measurement
   saveLowTimeStampCountInto( endTime );
   startTime=((VthdSemEnv*)(_VMSMasterEnv->semanticEnv))->primitiveStartTime;
   return (endTime - startTime);
 }
    1.82 +
    1.83 +
    1.84 +
    1.85 +//===========================================================================
    1.86 +
/*Re-use this in the entry-point fn
 */
/*Creates a new slave VP that will run fnPtr on initData, attaches fresh
 * Vthread semantic data (no transactions entered yet), and binds the slave
 * to a core: the requested core when 0 <= coreToScheduleOnto < NUM_CORES,
 * otherwise the next core of a round-robin rotation kept in semEnv.
 *Returns the new slave; the caller is responsible for resuming it.
 * NOTE(review): the comment below says this runs in the master, yet the
 * calls use the VMS_WL__ entry points -- confirm the intended context.
 */
inline SlaveVP *
Vthread__create_slaveVP_helper( TopLevelFnPtr fnPtr, void *initData,
                          VthdSemEnv *semEnv,    int32 coreToScheduleOnto )
 { SlaveVP      *newSlv;
   VthdSemData   *semData;

      //This is running in master, so use internal version
   newSlv = VMS_WL__create_slaveVP( fnPtr, initData );

      //fresh per-slave semantic state: no transaction entered yet
   semData = VMS_WL__malloc( sizeof(VthdSemData) );
   semData->highestTransEntered = -1;
   semData->lastTransEntered    = NULL;

   newSlv->semanticData = semData;

   //=================== Assign new processor to a core =====================
   #ifdef DEBUG__TURN_ON_SEQUENTIAL_MODE
   newSlv->coreAnimatedBy = 0;

   #else

   if(coreToScheduleOnto < 0 || coreToScheduleOnto >= NUM_CORES )
    {    //out-of-range, so round-robin assignment
      newSlv->coreAnimatedBy = semEnv->nextCoreToGetNewSlv;

         //advance the rotation, wrapping after the last core
      if( semEnv->nextCoreToGetNewSlv >= NUM_CORES - 1 )
          semEnv->nextCoreToGetNewSlv  = 0;
      else
          semEnv->nextCoreToGetNewSlv += 1;
    }
   else //core num in-range, so use it
    { newSlv->coreAnimatedBy = coreToScheduleOnto;
    }
   #endif
   //========================================================================

   return newSlv;
 }
   1.127 +
   1.128 +
   1.129 +/*
   1.130 + */
   1.131 +inline SlaveVP *
   1.132 +Vthread__create_thread( TopLevelFnPtr fnPtr, void *initData,
   1.133 +                          SlaveVP *creatingSlv )
   1.134 + { VthdSemReq  reqData;
   1.135 +
   1.136 +      //the semantic request data is on the stack and disappears when this
   1.137 +      // call returns -- it's guaranteed to remain in the Slv's stack for as
   1.138 +      // long as the Slv is suspended.
   1.139 +   reqData.reqType            = 0; //know the type because is a VMS create req
   1.140 +   reqData.coreToScheduleOnto = -1; //means round-robin schedule
   1.141 +   reqData.fnPtr              = fnPtr;
   1.142 +   reqData.initData           = initData;
   1.143 +   reqData.requestingSlv       = creatingSlv;
   1.144 +
   1.145 +   VMS_WL__send_create_slaveVP_req( &reqData, creatingSlv );
   1.146 +
   1.147 +   return creatingSlv->dataRetFromReq;
   1.148 + }
   1.149 +
   1.150 +
   1.151 +inline SlaveVP *
   1.152 +Vthread__create_thread_with_affinity( TopLevelFnPtr fnPtr, void *initData,
   1.153 +                           SlaveVP *creatingSlv,  int32  coreToScheduleOnto )
   1.154 + { VthdSemReq  reqData;
   1.155 +
   1.156 +      //the semantic request data is on the stack and disappears when this
   1.157 +      // call returns -- it's guaranteed to remain in the Slv's stack for as
   1.158 +      // long as the Slv is suspended.
   1.159 +   reqData.reqType            = 0; //know type because in a VMS create req
   1.160 +   reqData.coreToScheduleOnto = coreToScheduleOnto;
   1.161 +   reqData.fnPtr              = fnPtr;
   1.162 +   reqData.initData           = initData;
   1.163 +   reqData.requestingSlv       = creatingSlv;
   1.164 +
   1.165 +   VMS_WL__send_create_slaveVP_req( &reqData, creatingSlv );
   1.166 + }
   1.167 +
   1.168 +inline void
   1.169 +Vthread__dissipate_thread( SlaveVP *procrToDissipate )
   1.170 + {
   1.171 +   VMS_WL__send_dissipate_req( procrToDissipate );
   1.172 + }
   1.173 +
   1.174 +
   1.175 +//===========================================================================
   1.176 +
   1.177 +void *
   1.178 +Vthread__malloc( size_t sizeToMalloc, SlaveVP *animSlv )
   1.179 + { VthdSemReq  reqData;
   1.180 +
   1.181 +   reqData.reqType      = malloc_req;
   1.182 +   reqData.sizeToMalloc = sizeToMalloc;
   1.183 +   reqData.requestingSlv = animSlv;
   1.184 +
   1.185 +   VMS_WL__send_sem_request( &reqData, animSlv );
   1.186 +
   1.187 +   return animSlv->dataRetFromReq;
   1.188 + }
   1.189 +
   1.190 +
   1.191 +/*Sends request to Master, which does the work of freeing
   1.192 + */
   1.193 +void
   1.194 +Vthread__free( void *ptrToFree, SlaveVP *animSlv )
   1.195 + { VthdSemReq  reqData;
   1.196 +
   1.197 +   reqData.reqType      = free_req;
   1.198 +   reqData.ptrToFree    = ptrToFree;
   1.199 +   reqData.requestingSlv = animSlv;
   1.200 +
   1.201 +   VMS_WL__send_sem_request( &reqData, animSlv );
   1.202 + }
   1.203 +
   1.204 +
   1.205 +//===========================================================================
   1.206 +
   1.207 +inline void
   1.208 +Vthread__set_globals_to( void *globals )
   1.209 + {
   1.210 +   ((VthdSemEnv *)
   1.211 +    (_VMSMasterEnv->semanticEnv))->applicationGlobals = globals;
   1.212 + }
   1.213 +
   1.214 +inline void *
   1.215 +Vthread__give_globals()
   1.216 + {
   1.217 +   return((VthdSemEnv *) (_VMSMasterEnv->semanticEnv))->applicationGlobals;
   1.218 + }
   1.219 +
   1.220 +
   1.221 +//===========================================================================
   1.222 +
   1.223 +inline int32
   1.224 +Vthread__make_mutex( SlaveVP *animSlv )
   1.225 + { VthdSemReq  reqData;
   1.226 +
   1.227 +   reqData.reqType      = make_mutex;
   1.228 +   reqData.requestingSlv = animSlv;
   1.229 +
   1.230 +   VMS_WL__send_sem_request( &reqData, animSlv );
   1.231 +
   1.232 +   return (int32)animSlv->dataRetFromReq; //mutexid is 32bit wide
   1.233 + }
   1.234 +
   1.235 +inline void
   1.236 +Vthread__mutex_lock( int32 mutexIdx, SlaveVP *acquiringSlv )
   1.237 + { VthdSemReq  reqData;
   1.238 +
   1.239 +   reqData.reqType      = mutex_lock;
   1.240 +   reqData.mutexIdx     = mutexIdx;
   1.241 +   reqData.requestingSlv = acquiringSlv;
   1.242 +
   1.243 +   VMS_WL__send_sem_request( &reqData, acquiringSlv );
   1.244 + }
   1.245 +
   1.246 +inline void
   1.247 +Vthread__mutex_unlock( int32 mutexIdx, SlaveVP *releasingSlv )
   1.248 + { VthdSemReq  reqData;
   1.249 +
   1.250 +   reqData.reqType      = mutex_unlock;
   1.251 +   reqData.mutexIdx     = mutexIdx;
   1.252 +   reqData.requestingSlv = releasingSlv;
   1.253 +
   1.254 +   VMS_WL__send_sem_request( &reqData, releasingSlv );
   1.255 + }
   1.256 +
   1.257 +
   1.258 +//=======================
   1.259 +inline int32
   1.260 +Vthread__make_cond( int32 ownedMutexIdx, SlaveVP *animSlv)
   1.261 + { VthdSemReq  reqData;
   1.262 +
   1.263 +   reqData.reqType      = make_cond;
   1.264 +   reqData.mutexIdx     = ownedMutexIdx;
   1.265 +   reqData.requestingSlv = animSlv;
   1.266 +
   1.267 +   VMS_WL__send_sem_request( &reqData, animSlv );
   1.268 +
   1.269 +   return (int32)animSlv->dataRetFromReq; //condIdx is 32 bit wide
   1.270 + }
   1.271 +
   1.272 +inline void
   1.273 +Vthread__cond_wait( int32 condIdx, SlaveVP *waitingSlv)
   1.274 + { VthdSemReq  reqData;
   1.275 +
   1.276 +   reqData.reqType      = cond_wait;
   1.277 +   reqData.condIdx      = condIdx;
   1.278 +   reqData.requestingSlv = waitingSlv;
   1.279 +
   1.280 +   VMS_WL__send_sem_request( &reqData, waitingSlv );
   1.281 + }
   1.282 +
   1.283 +inline void *
   1.284 +Vthread__cond_signal( int32 condIdx, SlaveVP *signallingSlv )
   1.285 + { VthdSemReq  reqData;
   1.286 +
   1.287 +   reqData.reqType      = cond_signal;
   1.288 +   reqData.condIdx      = condIdx;
   1.289 +   reqData.requestingSlv = signallingSlv;
   1.290 +
   1.291 +   VMS_WL__send_sem_request( &reqData, signallingSlv );
   1.292 + }
   1.293 +
   1.294 +
   1.295 +//===========================================================================
   1.296 +//
   1.297 +/*A function singleton is a function whose body executes exactly once, on a
    1.298 + * single core, no matter how many times the function is called and no
   1.299 + * matter how many cores or the timing of cores calling it.
   1.300 + *
   1.301 + *A data singleton is a ticket attached to data.  That ticket can be used
   1.302 + * to get the data through the function exactly once, no matter how many
   1.303 + * times the data is given to the function, and no matter the timing of
   1.304 + * trying to get the data through from different cores.
   1.305 + */
   1.306 +
   1.307 +/*Fn singleton uses ID as index into array of singleton structs held in the
   1.308 + * semantic environment.
   1.309 + */
   1.310 +void
   1.311 +Vthread__start_fn_singleton( int32 singletonID,   SlaveVP *animSlv )
   1.312 + {
   1.313 +   VthdSemReq  reqData;
   1.314 +
   1.315 +      //
   1.316 +   reqData.reqType     = singleton_fn_start;
   1.317 +   reqData.singletonID = singletonID;
   1.318 +
   1.319 +   VMS_WL__send_sem_request( &reqData, animSlv );
   1.320 +   if( animSlv->dataRetFromReq != 0 ) //addr of matching end-singleton
   1.321 +    {
   1.322 +      VthdSemEnv *semEnv = VMS_int__give_sem_env_for( animSlv ); //not protected!
   1.323 +      VMS_int__return_to_addr_in_ptd_to_loc(
   1.324 +                         &((semEnv->fnSingletons[singletonID]).savedRetAddr) );
   1.325 +    }
   1.326 + }
   1.327 +
   1.328 +/*Data singleton hands addr of loc holding a pointer to a singleton struct.
   1.329 + * The start_data_singleton makes the structure and puts its addr into the
   1.330 + * location.
   1.331 + */
   1.332 +void
   1.333 +Vthread__start_data_singleton( VthdSingleton *singleton,  SlaveVP *animSlv )
   1.334 + {
   1.335 +   VthdSemReq  reqData;
   1.336 +
   1.337 +   if( singleton->savedRetAddr && singleton->hasFinished )
   1.338 +      goto JmpToEndSingleton;
   1.339 +      
   1.340 +   reqData.reqType       = singleton_data_start;
   1.341 +   reqData.singleton = singleton;
   1.342 +
   1.343 +   VMS_WL__send_sem_request( &reqData, animSlv );
   1.344 +   if( animSlv->dataRetFromReq ) //either 0 or end singleton's return addr
   1.345 +    {    
   1.346 +       JmpToEndSingleton:
   1.347 +       VMS_int__return_to_addr_in_ptd_to_loc(&(singleton->savedRetAddr));
   1.348 +    }
   1.349 +   //now, simply return
   1.350 +   //will exit either from the start singleton call or the end-singleton call
   1.351 + }
   1.352 +
/*Uses ID as index into array of flags.  If flag already set, resumes from
 * end-label.  Else, sets flag and resumes normally.
 *
 *Note, this call cannot be inlined because the instr addr at the label
 * inside is shared by all invocations of a given singleton ID.
 */
void
Vthread__end_fn_singleton( int32 singletonID, SlaveVP *animSlv )
 {
   VthdSemReq  reqData;

   //don't need this addr until after at least one singleton has reached
   // this function
   // NOTE(review): this calls VMS_int__return_to_addr_in_ptd_to_loc, which
   // Vthread__start_fn_singleton uses to JUMP to a saved address; if it
   // jumps here too, the request below would never be sent.  Compare
   // Vthread__end_data_singleton, which uses
   // VMS_int__save_return_into_ptd_to_loc_then_do_ret instead -- confirm
   // which primitive is intended here.
   VthdSemEnv *semEnv = VMS_int__give_sem_env_for( animSlv );
   VMS_int__return_to_addr_in_ptd_to_loc(
                         &((semEnv->fnSingletons[singletonID]).savedRetAddr) );

   reqData.reqType     = singleton_fn_end;
   reqData.singletonID = singletonID;
   // NOTE(review): reqData.requestingSlv is left unset here, unlike most
   // request builders in this file -- confirm the handler does not read it.

   VMS_WL__send_sem_request( &reqData, animSlv );
 }
   1.375 +
/*Ends the data-singleton region begun by Vthread__start_data_singleton:
 * records this call site's return address into the singleton struct, then
 * notifies the master that the singleton body has finished.
 * NOTE(review): control flow after the save/ret primitive is non-obvious
 * (assembly-level return manipulation) -- confirm when the request below
 * actually executes relative to the saved return.
 */
void
Vthread__end_data_singleton( VthdSingleton *singleton, SlaveVP *animSlv )
 {
   VthdSemReq  reqData;

      //don't need this addr until after singleton struct has reached
      // this function for first time
      //do assembly that saves the return addr of this fn call into the
      // data singleton -- that data-singleton can only be given to exactly
      // one instance in the code of this function.  However, can use this
      // function in different places for different data-singletons.

   VMS_int__save_return_into_ptd_to_loc_then_do_ret(&(singleton->savedRetAddr));

   reqData.reqType    = singleton_data_end;
   reqData.singleton  = singleton;
   // NOTE(review): requestingSlv is left unset, unlike most request
   // builders in this file -- confirm the handler does not read it.

   VMS_WL__send_sem_request( &reqData, animSlv );
 }
   1.395 +
   1.396 +
   1.397 +/*This executes the function in the masterVP, so it executes in isolation
   1.398 + * from any other copies -- only one copy of the function can ever execute
   1.399 + * at a time.
   1.400 + *
   1.401 + *It suspends to the master, and the request handler takes the function
   1.402 + * pointer out of the request and calls it, then resumes the Slv.
   1.403 + *Only very short functions should be called this way -- for longer-running
   1.404 + * isolation, use transaction-start and transaction-end, which run the code
   1.405 + * between as work-code.
   1.406 + */
   1.407 +void
   1.408 +Vthread__animate_short_fn_in_isolation( PtrToAtomicFn ptrToFnToExecInMaster,
   1.409 +                                    void *data, SlaveVP *animSlv )
   1.410 + {
   1.411 +   VthdSemReq  reqData;
   1.412 +
   1.413 +      //
   1.414 +   reqData.reqType          = atomic;
   1.415 +   reqData.fnToExecInMaster = ptrToFnToExecInMaster;
   1.416 +   reqData.dataForFn        = data;
   1.417 +
   1.418 +   VMS_WL__send_sem_request( &reqData, animSlv );
   1.419 + }
   1.420 +
   1.421 +
   1.422 +/*This suspends to the master.
   1.423 + *First, it looks at the Slv's data, to see the highest transactionID that Slv
   1.424 + * already has entered.  If the current ID is not larger, it throws an
   1.425 + * exception stating a bug in the code.  Otherwise it puts the current ID
   1.426 + * there, and adds the ID to a linked list of IDs entered -- the list is
   1.427 + * used to check that exits are properly ordered.
   1.428 + *Next it is uses transactionID as index into an array of transaction
   1.429 + * structures.
   1.430 + *If the "Slv_currently_executing" field is non-null, then put requesting Slv
   1.431 + * into queue in the struct.  (At some point a holder will request
   1.432 + * end-transaction, which will take this Slv from the queue and resume it.)
   1.433 + *If NULL, then write requesting into the field and resume.
   1.434 + */
   1.435 +void
   1.436 +Vthread__start_transaction( int32 transactionID, SlaveVP *animSlv )
   1.437 + {
   1.438 +   VthdSemReq  reqData;
   1.439 +
   1.440 +      //
   1.441 +   reqData.reqType     = trans_start;
   1.442 +   reqData.transID     = transactionID;
   1.443 +
   1.444 +   VMS_WL__send_sem_request( &reqData, animSlv );
   1.445 + }
   1.446 +
   1.447 +/*This suspends to the master, then uses transactionID as index into an
   1.448 + * array of transaction structures.
   1.449 + *It looks at Slv_currently_executing to be sure it's same as requesting Slv.
   1.450 + * If different, throws an exception, stating there's a bug in the code.
   1.451 + *Next it looks at the queue in the structure.
   1.452 + *If it's empty, it sets Slv_currently_executing field to NULL and resumes.
   1.453 + *If something in, gets it, sets Slv_currently_executing to that Slv, then
   1.454 + * resumes both.
   1.455 + */
   1.456 +void
   1.457 +Vthread__end_transaction( int32 transactionID, SlaveVP *animSlv )
   1.458 + {
   1.459 +   VthdSemReq  reqData;
   1.460 +
   1.461 +      //
   1.462 +   reqData.reqType     = trans_end;
   1.463 +   reqData.transID     = transactionID;
   1.464 +
   1.465 +   VMS_WL__send_sem_request( &reqData, animSlv );
   1.466 + }
   1.467 +//===========================================================================
     2.1 --- a/Vthread.h	Sun Mar 04 14:29:42 2012 -0800
     2.2 +++ b/Vthread.h	Wed May 09 13:24:19 2012 -0700
     2.3 @@ -23,7 +23,7 @@
     2.4  //===========================================================================
     2.5     //turn on the counter measurements of language overhead -- comment to turn off
     2.6  #define MEAS__TURN_ON_LANG_MEAS
     2.7 -#include "Vthread_Overhead_Meas.h"
     2.8 +#include "Vthread__Measurement.h"
     2.9  
    2.10  #define INIT_NUM_MUTEX 10000
    2.11  #define INIT_NUM_COND  10000
    2.12 @@ -159,12 +159,27 @@
    2.13   }
    2.14  VthdSemEnv;
    2.15  
    2.16 +//==========================================================================
    2.17 +
    2.18 +void
    2.19 +Vthread__init();
    2.20 +
    2.21 +void
    2.22 +Vthread__init_Seq();
    2.23 +
    2.24 +void
    2.25 +Vthread__init_Helper();
    2.26 +
    2.27  
    2.28  //===========================================================================
    2.29  
    2.30  inline void
    2.31  Vthread__create_seed_slaveVP_and_do_work( TopLevelFnPtr fn, void *initData );
    2.32  
    2.33 +inline SlaveVP *
    2.34 +Vthread__create_slaveVP_helper( TopLevelFnPtr fnPtr, void *initData,
    2.35 +                          VthdSemEnv *semEnv,    int32 coreToScheduleOnto );
    2.36 +
    2.37  //=======================
    2.38  
    2.39  inline SlaveVP *
     3.1 --- a/Vthread_Measurement.h	Sun Mar 04 14:29:42 2012 -0800
     3.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     3.3 @@ -1,108 +0,0 @@
     3.4 -/* 
     3.5 - * 
     3.6 - *
     3.7 - * Created on June 10, 2011, 12:20 PM
     3.8 - */
     3.9 -
    3.10 -#ifndef VTHREAD_MEAS_H
    3.11 -#define VTHREAD_MEAS_H
    3.12 -
    3.13 -#ifdef MEAS__TURN_ON_LANG_MEAS
    3.14 -
    3.15 -   #ifdef MEAS__Make_Meas_Hists_for_Language
    3.16 -   #undef MEAS__Make_Meas_Hists_for_Language
    3.17 -   #endif
    3.18 -
    3.19 -//===================  Language-specific Measurement Stuff ===================
    3.20 -//
    3.21 -//
    3.22 -   #define createHistIdx      1  //note: starts at 1
    3.23 -   #define mutexLockHistIdx   2
    3.24 -   #define mutexUnlockHistIdx 3
    3.25 -   #define condWaitHistIdx    4
    3.26 -   #define condSignalHistIdx  5
    3.27 -
    3.28 -   #define MEAS__Make_Meas_Hists_for_Language() \
    3.29 -   _VMSMasterEnv->measHistsInfo = \
    3.30 -              makePrivDynArrayOfSize( (void***)&(_VMSMasterEnv->measHists), 200); \
    3.31 -   makeAMeasHist( createHistIdx,      "create",        250, 0, 100 ) \
    3.32 -   makeAMeasHist( mutexLockHistIdx,   "mutex_lock",    50, 0, 100 ) \
    3.33 -   makeAMeasHist( mutexUnlockHistIdx, "mutex_unlock",  50, 0, 100 ) \
    3.34 -   makeAMeasHist( condWaitHistIdx,    "cond_wait",     50, 0, 100 ) \
    3.35 -   makeAMeasHist( condSignalHistIdx,  "cond_signal",   50, 0, 100 )
    3.36 -
    3.37 -   
    3.38 -   #define Meas_startCreate \
    3.39 -    int32 startStamp, endStamp; \
    3.40 -    saveLowTimeStampCountInto( startStamp ); 
    3.41 -
    3.42 -   #define Meas_endCreate \
    3.43 -    saveLowTimeStampCountInto( endStamp ); \
    3.44 -    addIntervalToHist( startStamp, endStamp, \
    3.45 -                                 _VMSMasterEnv->measHists[ createHistIdx ] );
    3.46 -
    3.47 -   #define Meas_startMutexLock \
    3.48 -    int32 startStamp, endStamp; \
    3.49 -    saveLowTimeStampCountInto( startStamp ); 
    3.50 -
    3.51 -   #define Meas_endMutexLock \
    3.52 -    saveLowTimeStampCountInto( endStamp ); \
    3.53 -    addIntervalToHist( startStamp, endStamp, \
    3.54 -                              _VMSMasterEnv->measHists[ mutexLockHistIdx ] );
    3.55 -
    3.56 -   #define Meas_startMutexUnlock \
    3.57 -    int32 startStamp, endStamp; \
    3.58 -    saveLowTimeStampCountInto( startStamp ); 
    3.59 -
    3.60 -   #define Meas_endMutexUnlock \
    3.61 -    saveLowTimeStampCountInto( endStamp ); \
    3.62 -    addIntervalToHist( startStamp, endStamp, \
    3.63 -                            _VMSMasterEnv->measHists[ mutexUnlockHistIdx ] );
    3.64 -
    3.65 -   #define Meas_startCondWait \
    3.66 -    int32 startStamp, endStamp; \
    3.67 -    saveLowTimeStampCountInto( startStamp ); 
    3.68 -
    3.69 -   #define Meas_endCondWait \
    3.70 -    saveLowTimeStampCountInto( endStamp ); \
    3.71 -    addIntervalToHist( startStamp, endStamp, \
    3.72 -                               _VMSMasterEnv->measHists[ condWaitHistIdx ] );
    3.73 -
    3.74 -   #define Meas_startCondSignal \
    3.75 -    int32 startStamp, endStamp; \
    3.76 -    saveLowTimeStampCountInto( startStamp ); 
    3.77 -
    3.78 -   #define Meas_endCondSignal \
    3.79 -    saveLowTimeStampCountInto( endStamp ); \
    3.80 -    addIntervalToHist( startStamp, endStamp, \
    3.81 -                             _VMSMasterEnv->measHists[ condSignalHistIdx ] );
    3.82 -
    3.83 -#else //===================== turned off ==========================
    3.84 -
    3.85 -   #define MEAS__Make_Meas_Hists_for_Language() 
    3.86 -   
    3.87 -   #define Meas_startCreate 
    3.88 -
    3.89 -   #define Meas_endCreate 
    3.90 -
    3.91 -   #define Meas_startMutexLock
    3.92 -
    3.93 -   #define Meas_endMutexLock
    3.94 -
    3.95 -   #define Meas_startMutexUnlock
    3.96 -
    3.97 -   #define Meas_endMutexUnlock
    3.98 -
    3.99 -   #define Meas_startCondWait
   3.100 -
   3.101 -   #define Meas_endCondWait 
   3.102 -
   3.103 -   #define Meas_startCondSignal 
   3.104 -
   3.105 -   #define Meas_endCondSignal 
   3.106 -
   3.107 -#endif  /* MEAS__TURN_ON_LANG_MEAS */
   3.108 -
   3.109 -
   3.110 -#endif	/* VTHREAD_MEAS_H */
   3.111 -
     4.1 --- a/Vthread_PluginFns.c	Sun Mar 04 14:29:42 2012 -0800
     4.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     4.3 @@ -1,186 +0,0 @@
     4.4 -/*
     4.5 - * Copyright 2010  OpenSourceCodeStewardshipFoundation
     4.6 - *
     4.7 - * Licensed under BSD
     4.8 - */
     4.9 -
    4.10 -#include <stdio.h>
    4.11 -#include <stdlib.h>
    4.12 -#include <malloc.h>
    4.13 -
    4.14 -#include "C_Libraries/Queue_impl/PrivateQueue.h"
    4.15 -#include "Vthread.h"
    4.16 -#include "Vthread_Request_Handlers.h"
    4.17 -#include "Vthread_helper.h"
    4.18 -
    4.19 -//=========================== Local Fn Prototypes ===========================
    4.20 -
    4.21 -void inline
    4.22 -handleSemReq( VMSReqst *req, SlaveVP *requestingSlv, VthdSemEnv *semEnv );
    4.23 -
    4.24 -inline void
    4.25 -handleDissipate(             SlaveVP *requestingSlv, VthdSemEnv *semEnv );
    4.26 -
    4.27 -inline void
    4.28 -handleCreate( VMSReqst *req, SlaveVP *requestingSlv, VthdSemEnv *semEnv  );
    4.29 -
    4.30 -
    4.31 -//============================== Scheduler ==================================
    4.32 -//
    4.33 -/*For Vthread, scheduling a slave simply takes the next work-unit off the
    4.34 - * ready-to-go work-unit queue and assigns it to the slaveToSched.
    4.35 - *If the ready-to-go work-unit queue is empty, then nothing to schedule
    4.36 - * to the slave -- return FALSE to let Master loop know scheduling that
    4.37 - * slave failed.
    4.38 - */
    4.39 -char __Scheduler[] = "FIFO Scheduler"; //Gobal variable for name in saved histogram
    4.40 -SlaveVP *
    4.41 -Vthread__schedule_slaveVP( void *_semEnv, int coreNum )
    4.42 - { SlaveVP   *schedSlv;
    4.43 -   VthdSemEnv *semEnv;
    4.44 -
    4.45 -   semEnv = (VthdSemEnv *)_semEnv;
    4.46 -
    4.47 -   schedSlv = readPrivQ( semEnv->readySlvQs[coreNum] );
    4.48 -      //Note, using a non-blocking queue -- it returns NULL if queue empty
    4.49 -
    4.50 -   return( schedSlv );
    4.51 - }
    4.52 -
    4.53 -
    4.54 -
    4.55 -//===========================  Request Handler  =============================
    4.56 -//
    4.57 -/*Will get requests to send, to receive, and to create new processors.
    4.58 - * Upon send, check the hash to see if a receive is waiting.
    4.59 - * Upon receive, check hash to see if a send has already happened.
    4.60 - * When other is not there, put in.  When other is there, the comm.
    4.61 - *  completes, which means the receiver P gets scheduled and
    4.62 - *  picks up right after the receive request.  So make the work-unit
    4.63 - *  and put it into the queue of work-units ready to go.
    4.64 - * Other request is create a new Processor, with the function to run in the
    4.65 - *  Processor, and initial data.
    4.66 - */
    4.67 -void
    4.68 -Vthread__Request_Handler( SlaveVP *requestingSlv, void *_semEnv )
    4.69 - { VthdSemEnv *semEnv;
    4.70 -   VMSReqst    *req;
    4.71 - 
    4.72 -   semEnv = (VthdSemEnv *)_semEnv;
    4.73 -
    4.74 -   req    = VMS_PI__take_next_request_out_of( requestingSlv );
    4.75 -
    4.76 -   while( req != NULL )
    4.77 -    {
    4.78 -      switch( req->reqType )
    4.79 -       { case semantic:     handleSemReq(         req, requestingSlv, semEnv);
    4.80 -            break;
    4.81 -         case createReq:    handleCreate(         req, requestingSlv, semEnv);
    4.82 -            break;
    4.83 -         case dissipate:    handleDissipate(           requestingSlv, semEnv);
    4.84 -            break;
    4.85 -         case VMSSemantic:  VMS_PI__handle_VMSSemReq(req, requestingSlv, semEnv,
    4.86 -                                                (ResumeSlvFnPtr)&resume_slaveVP);
    4.87 -            break;
    4.88 -         default:
    4.89 -            break;
    4.90 -       }
    4.91 -
    4.92 -      req = VMS_PI__take_next_request_out_of( requestingSlv );
    4.93 -    } //while( req != NULL )
    4.94 - }
    4.95 -
    4.96 -
    4.97 -void inline
    4.98 -handleSemReq( VMSReqst *req, SlaveVP *reqSlv, VthdSemEnv *semEnv )
    4.99 - { VthdSemReq *semReq;
   4.100 -
   4.101 -   semReq = VMS_PI__take_sem_reqst_from(req);
   4.102 -   if( semReq == NULL ) return;
   4.103 -   switch( semReq->reqType )
   4.104 -    {
   4.105 -      case make_mutex:     handleMakeMutex(  semReq, semEnv);
   4.106 -         break;
   4.107 -      case mutex_lock:     handleMutexLock(  semReq, semEnv);
   4.108 -         break;
   4.109 -      case mutex_unlock:   handleMutexUnlock(semReq, semEnv);
   4.110 -         break;
   4.111 -      case make_cond:      handleMakeCond(   semReq, semEnv);
   4.112 -         break;
   4.113 -      case cond_wait:      handleCondWait(   semReq, semEnv);
   4.114 -         break;
   4.115 -      case cond_signal:    handleCondSignal( semReq, semEnv);
   4.116 -         break;
   4.117 -      case malloc_req:    handleMalloc( semReq, reqSlv, semEnv);
   4.118 -         break;
   4.119 -      case free_req:    handleFree( semReq, reqSlv, semEnv);
   4.120 -         break;
   4.121 -      case singleton_fn_start:  handleStartFnSingleton(semReq, reqSlv, semEnv);
   4.122 -         break;
   4.123 -      case singleton_fn_end:    handleEndFnSingleton(  semReq, reqSlv, semEnv);
   4.124 -         break;
   4.125 -      case singleton_data_start:handleStartDataSingleton(semReq,reqSlv,semEnv);
   4.126 -         break;
   4.127 -      case singleton_data_end:  handleEndDataSingleton(semReq, reqSlv, semEnv);
   4.128 -         break;
   4.129 -      case atomic:    handleAtomic( semReq, reqSlv, semEnv);
   4.130 -         break;
   4.131 -      case trans_start:    handleTransStart( semReq, reqSlv, semEnv);
   4.132 -         break;
   4.133 -      case trans_end:    handleTransEnd( semReq, reqSlv, semEnv);
   4.134 -         break;
   4.135 -    }
   4.136 - }
   4.137 -
   4.138 -//=========================== VMS Request Handlers ===========================
   4.139 -//
   4.140 -inline void
   4.141 -handleDissipate( SlaveVP *requestingSlv, VthdSemEnv *semEnv )
   4.142 - {
   4.143 -      //free any semantic data allocated to the virt procr
   4.144 -   VMS_PI__free( requestingSlv->semanticData );
   4.145 -
   4.146 -      //Now, call VMS to free_all AppSlv state -- stack and so on
   4.147 -   VMS_PI__dissipate_slaveVP( requestingSlv );
   4.148 - }
   4.149 -
   4.150 -inline void
   4.151 -handleCreate( VMSReqst *req, SlaveVP *requestingSlv, VthdSemEnv *semEnv  )
   4.152 - { VthdSemReq *semReq;
   4.153 -   SlaveVP    *newSlv;
   4.154 -    
   4.155 -    //========================= MEASUREMENT STUFF ======================
   4.156 -    Meas_startCreate
   4.157 -    //==================================================================
   4.158 -     
   4.159 -   semReq = VMS_PI__take_sem_reqst_from( req );
   4.160 -   
   4.161 -   newSlv = Vthread__create_slaveVP_helper( semReq->fnPtr, semReq->initData, 
   4.162 -                                          semEnv, semReq->coreToScheduleOnto);
   4.163 -
   4.164 -      //For Vthread, caller needs ptr to created processor returned to it
   4.165 -   requestingSlv->dataRetFromReq = newSlv;
   4.166 -
   4.167 -   resume_slaveVP( newSlv,        semEnv );
   4.168 -   resume_slaveVP( requestingSlv, semEnv );
   4.169 -
   4.170 -     //========================= MEASUREMENT STUFF ======================
   4.171 -         Meas_endCreate
   4.172 -     #ifdef MEAS__TIME_PLUGIN
   4.173 -     #ifdef MEAS__SUB_CREATE
   4.174 -         subIntervalFromHist( startStamp, endStamp,
   4.175 -                                        _VMSMasterEnv->reqHdlrHighTimeHist );
   4.176 -     #endif
   4.177 -     #endif
   4.178 -     //==================================================================
   4.179 - }
   4.180 -
   4.181 -
   4.182 -//=========================== Helper ==============================
   4.183 -void inline
   4.184 -resume_slaveVP( SlaveVP *procr, VthdSemEnv *semEnv )
   4.185 - {
   4.186 -   writePrivQ( procr, semEnv->readySlvQs[ procr->coreAnimatedBy] );
   4.187 - }
   4.188 -
   4.189 -//===========================================================================
   4.190 \ No newline at end of file
     5.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     5.2 +++ b/Vthread__Measurement.h	Wed May 09 13:24:19 2012 -0700
     5.3 @@ -0,0 +1,98 @@
     5.4 +/* 
     5.5 + * 
     5.6 + *
     5.7 + * Created on June 10, 2011, 12:20 PM
     5.8 + */
     5.9 +
    5.10 +#ifndef VTHREAD_MEAS_H
    5.11 +#define VTHREAD_MEAS_H
    5.12 +
    5.13 +#ifdef MEAS__TURN_ON_LANG_MEAS
    5.14 +
    5.15 +   #ifdef MEAS__Make_Meas_Hists_for_Language
    5.16 +   #undef MEAS__Make_Meas_Hists_for_Language
    5.17 +   #endif
    5.18 +
    5.19 +//===================  Language-specific Measurement Stuff ===================
    5.20 +//
    5.21 +//
    5.22 +   #define createHistIdx      1  //note: starts at 1
    5.23 +   #define mutexLockHistIdx   2
    5.24 +   #define mutexUnlockHistIdx 3
    5.25 +   #define condWaitHistIdx    4
    5.26 +   #define condSignalHistIdx  5
    5.27 +
    5.28 +   #define MEAS__Make_Meas_Hists_for_Language \
    5.29 +   _VMSMasterEnv->measHistsInfo = \
    5.30 +   makePrivDynArrayOfSize( (void***)&(_VMSMasterEnv->measHists), 200); \
    5.31 +   makeAMeasHist( createHistIdx,      "create",        250, 0, 100 ) \
    5.32 +   makeAMeasHist( mutexLockHistIdx,   "mutex_lock",    50, 0, 100 ) \
    5.33 +   makeAMeasHist( mutexUnlockHistIdx, "mutex_unlock",  50, 0, 100 ) \
    5.34 +   makeAMeasHist( condWaitHistIdx,    "cond_wait",     50, 0, 100 ) \
    5.35 +   makeAMeasHist( condSignalHistIdx,  "cond_signal",   50, 0, 100 )
    5.36 +
    5.37 +   
    5.38 +   #define Meas_startCreate \
    5.39 +    int32 startStamp, endStamp; \
    5.40 +    saveLowTimeStampCountInto( startStamp ); 
    5.41 +
    5.42 +   #define Meas_endCreate \
    5.43 +    saveLowTimeStampCountInto( endStamp ); \
    5.44 +    addIntervalToHist( startStamp, endStamp, \
    5.45 +                                 _VMSMasterEnv->measHists[ createHistIdx ] );
    5.46 +
    5.47 +   #define Meas_startMutexLock \
    5.48 +    int32 startStamp, endStamp; \
    5.49 +    saveLowTimeStampCountInto( startStamp ); 
    5.50 +
    5.51 +   #define Meas_endMutexLock \
    5.52 +    saveLowTimeStampCountInto( endStamp ); \
    5.53 +    addIntervalToHist( startStamp, endStamp, \
    5.54 +                              _VMSMasterEnv->measHists[ mutexLockHistIdx ] );
    5.55 +
    5.56 +   #define Meas_startMutexUnlock \
    5.57 +    int32 startStamp, endStamp; \
    5.58 +    saveLowTimeStampCountInto( startStamp ); 
    5.59 +
    5.60 +   #define Meas_endMutexUnlock \
    5.61 +    saveLowTimeStampCountInto( endStamp ); \
    5.62 +    addIntervalToHist( startStamp, endStamp, \
    5.63 +                            _VMSMasterEnv->measHists[ mutexUnlockHistIdx ] );
    5.64 +
    5.65 +   #define Meas_startCondWait \
    5.66 +    int32 startStamp, endStamp; \
    5.67 +    saveLowTimeStampCountInto( startStamp ); 
    5.68 +
    5.69 +   #define Meas_endCondWait \
    5.70 +    saveLowTimeStampCountInto( endStamp ); \
    5.71 +    addIntervalToHist( startStamp, endStamp, \
    5.72 +                               _VMSMasterEnv->measHists[ condWaitHistIdx ] );
    5.73 +
    5.74 +   #define Meas_startCondSignal \
    5.75 +    int32 startStamp, endStamp; \
    5.76 +    saveLowTimeStampCountInto( startStamp ); 
    5.77 +
    5.78 +   #define Meas_endCondSignal \
    5.79 +    saveLowTimeStampCountInto( endStamp ); \
    5.80 +    addIntervalToHist( startStamp, endStamp, \
    5.81 +                             _VMSMasterEnv->measHists[ condSignalHistIdx ] );
    5.82 +
    5.83 +#else //===================== turned off ==========================
    5.84 +
    5.85 +   #define MEAS__Make_Meas_Hists_for_Language 
    5.86 +   #define Meas_startCreate 
    5.87 +   #define Meas_endCreate 
    5.88 +   #define Meas_startMutexLock
    5.89 +   #define Meas_endMutexLock
    5.90 +   #define Meas_startMutexUnlock
    5.91 +   #define Meas_endMutexUnlock
    5.92 +   #define Meas_startCondWait
    5.93 +   #define Meas_endCondWait 
    5.94 +   #define Meas_startCondSignal 
    5.95 +   #define Meas_endCondSignal 
    5.96 +
    5.97 +#endif  /* MEAS__TURN_ON_LANG_MEAS */
    5.98 +
    5.99 +
   5.100 +#endif	/* VTHREAD_MEAS_H */
   5.101 +
     6.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     6.2 +++ b/Vthread__PluginFns.c	Wed May 09 13:24:19 2012 -0700
     6.3 @@ -0,0 +1,186 @@
     6.4 +/*
     6.5 + * Copyright 2010  OpenSourceCodeStewardshipFoundation
     6.6 + *
     6.7 + * Licensed under BSD
     6.8 + */
     6.9 +
    6.10 +#include <stdio.h>
    6.11 +#include <stdlib.h>
    6.12 +#include <malloc.h>
    6.13 +
    6.14 +#include "C_Libraries/Queue_impl/PrivateQueue.h"
    6.15 +#include "Vthread.h"
    6.16 +#include "Vthread_Request_Handlers.h"
    6.17 +#include "Vthread_helper.h"
    6.18 +
    6.19 +//=========================== Local Fn Prototypes ===========================
    6.20 +
    6.21 +void inline
    6.22 +handleSemReq( VMSReqst *req, SlaveVP *requestingSlv, VthdSemEnv *semEnv );
    6.23 +
    6.24 +inline void
    6.25 +handleDissipate(             SlaveVP *requestingSlv, VthdSemEnv *semEnv );
    6.26 +
    6.27 +inline void
    6.28 +handleCreate( VMSReqst *req, SlaveVP *requestingSlv, VthdSemEnv *semEnv  );
    6.29 +
    6.30 +
    6.31 +//============================== Scheduler ==================================
    6.32 +//
    6.33 +/*For Vthread, scheduling a slave simply takes the next work-unit off the
    6.34 + * ready-to-go work-unit queue and assigns it to the slaveToSched.
    6.35 + *If the ready-to-go work-unit queue is empty, then nothing to schedule
    6.36 + * to the slave -- return FALSE to let Master loop know scheduling that
    6.37 + * slave failed.
    6.38 + */
     6.39 +char __Scheduler[] = "FIFO Scheduler"; //Global variable for name in saved histogram
    6.40 +SlaveVP *
    6.41 +Vthread__schedule_slaveVP( void *_semEnv, int coreNum )
    6.42 + { SlaveVP   *schedSlv;
    6.43 +   VthdSemEnv *semEnv;
    6.44 +
    6.45 +   semEnv = (VthdSemEnv *)_semEnv;
    6.46 +
    6.47 +   schedSlv = readPrivQ( semEnv->readySlvQs[coreNum] );
    6.48 +      //Note, using a non-blocking queue -- it returns NULL if queue empty
    6.49 +
    6.50 +   return( schedSlv );
    6.51 + }
    6.52 +
    6.53 +
    6.54 +
    6.55 +//===========================  Request Handler  =============================
    6.56 +//
    6.57 +/*Will get requests to send, to receive, and to create new processors.
    6.58 + * Upon send, check the hash to see if a receive is waiting.
    6.59 + * Upon receive, check hash to see if a send has already happened.
    6.60 + * When other is not there, put in.  When other is there, the comm.
    6.61 + *  completes, which means the receiver P gets scheduled and
    6.62 + *  picks up right after the receive request.  So make the work-unit
    6.63 + *  and put it into the queue of work-units ready to go.
    6.64 + * Other request is create a new Processor, with the function to run in the
    6.65 + *  Processor, and initial data.
    6.66 + */
    6.67 +void
    6.68 +Vthread__Request_Handler( SlaveVP *requestingSlv, void *_semEnv )
    6.69 + { VthdSemEnv *semEnv;
    6.70 +   VMSReqst    *req;
    6.71 + 
    6.72 +   semEnv = (VthdSemEnv *)_semEnv;
    6.73 +
    6.74 +   req    = VMS_PI__take_next_request_out_of( requestingSlv );
    6.75 +
    6.76 +   while( req != NULL )
    6.77 +    {
    6.78 +      switch( req->reqType )
    6.79 +       { case semantic:     handleSemReq(         req, requestingSlv, semEnv);
    6.80 +            break;
    6.81 +         case createReq:    handleCreate(         req, requestingSlv, semEnv);
    6.82 +            break;
    6.83 +         case dissipate:    handleDissipate(           requestingSlv, semEnv);
    6.84 +            break;
    6.85 +         case VMSSemantic:  VMS_PI__handle_VMSSemReq(req, requestingSlv, semEnv,
    6.86 +                                                (ResumeSlvFnPtr)&resume_slaveVP);
    6.87 +            break;
    6.88 +         default:
    6.89 +            break;
    6.90 +       }
    6.91 +
    6.92 +      req = VMS_PI__take_next_request_out_of( requestingSlv );
    6.93 +    } //while( req != NULL )
    6.94 + }
    6.95 +
    6.96 +
    6.97 +void inline
    6.98 +handleSemReq( VMSReqst *req, SlaveVP *reqSlv, VthdSemEnv *semEnv )
    6.99 + { VthdSemReq *semReq;
   6.100 +
   6.101 +   semReq = VMS_PI__take_sem_reqst_from(req);
   6.102 +   if( semReq == NULL ) return;
   6.103 +   switch( semReq->reqType )
   6.104 +    {
   6.105 +      case make_mutex:     handleMakeMutex(  semReq, semEnv);
   6.106 +         break;
   6.107 +      case mutex_lock:     handleMutexLock(  semReq, semEnv);
   6.108 +         break;
   6.109 +      case mutex_unlock:   handleMutexUnlock(semReq, semEnv);
   6.110 +         break;
   6.111 +      case make_cond:      handleMakeCond(   semReq, semEnv);
   6.112 +         break;
   6.113 +      case cond_wait:      handleCondWait(   semReq, semEnv);
   6.114 +         break;
   6.115 +      case cond_signal:    handleCondSignal( semReq, semEnv);
   6.116 +         break;
   6.117 +      case malloc_req:    handleMalloc( semReq, reqSlv, semEnv);
   6.118 +         break;
   6.119 +      case free_req:    handleFree( semReq, reqSlv, semEnv);
   6.120 +         break;
   6.121 +      case singleton_fn_start:  handleStartFnSingleton(semReq, reqSlv, semEnv);
   6.122 +         break;
   6.123 +      case singleton_fn_end:    handleEndFnSingleton(  semReq, reqSlv, semEnv);
   6.124 +         break;
   6.125 +      case singleton_data_start:handleStartDataSingleton(semReq,reqSlv,semEnv);
   6.126 +         break;
   6.127 +      case singleton_data_end:  handleEndDataSingleton(semReq, reqSlv, semEnv);
   6.128 +         break;
   6.129 +      case atomic:    handleAtomic( semReq, reqSlv, semEnv);
   6.130 +         break;
   6.131 +      case trans_start:    handleTransStart( semReq, reqSlv, semEnv);
   6.132 +         break;
   6.133 +      case trans_end:    handleTransEnd( semReq, reqSlv, semEnv);
   6.134 +         break;
   6.135 +    }
   6.136 + }
   6.137 +
   6.138 +//=========================== VMS Request Handlers ===========================
   6.139 +//
   6.140 +inline void
   6.141 +handleDissipate( SlaveVP *requestingSlv, VthdSemEnv *semEnv )
   6.142 + {
   6.143 +      //free any semantic data allocated to the virt procr
   6.144 +   VMS_PI__free( requestingSlv->semanticData );
   6.145 +
    6.147 +      //Now, call VMS to free all AppSlv state -- stack and so on
   6.147 +   VMS_PI__dissipate_slaveVP( requestingSlv );
   6.148 + }
   6.149 +
   6.150 +inline void
   6.151 +handleCreate( VMSReqst *req, SlaveVP *requestingSlv, VthdSemEnv *semEnv  )
   6.152 + { VthdSemReq *semReq;
   6.153 +   SlaveVP    *newSlv;
   6.154 +    
   6.155 +    //========================= MEASUREMENT STUFF ======================
   6.156 +    Meas_startCreate
   6.157 +    //==================================================================
   6.158 +     
   6.159 +   semReq = VMS_PI__take_sem_reqst_from( req );
   6.160 +   
   6.161 +   newSlv = Vthread__create_slaveVP_helper( semReq->fnPtr, semReq->initData, 
   6.162 +                                          semEnv, semReq->coreToScheduleOnto);
   6.163 +
   6.164 +      //For Vthread, caller needs ptr to created processor returned to it
   6.165 +   requestingSlv->dataRetFromReq = newSlv;
   6.166 +
   6.167 +   resume_slaveVP( newSlv,        semEnv );
   6.168 +   resume_slaveVP( requestingSlv, semEnv );
   6.169 +
   6.170 +     //========================= MEASUREMENT STUFF ======================
   6.171 +         Meas_endCreate
   6.172 +     #ifdef MEAS__TIME_PLUGIN
   6.173 +     #ifdef MEAS__SUB_CREATE
   6.174 +         subIntervalFromHist( startStamp, endStamp,
   6.175 +                                        _VMSMasterEnv->reqHdlrHighTimeHist );
   6.176 +     #endif
   6.177 +     #endif
   6.178 +     //==================================================================
   6.179 + }
   6.180 +
   6.181 +
   6.182 +//=========================== Helper ==============================
   6.183 +void inline
   6.184 +resume_slaveVP( SlaveVP *procr, VthdSemEnv *semEnv )
   6.185 + {
   6.186 +   writePrivQ( procr, semEnv->readySlvQs[ procr->coreAnimatedBy] );
   6.187 + }
   6.188 +
   6.189 +//===========================================================================
   6.190 \ No newline at end of file
     7.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     7.2 +++ b/Vthread__startup_and_shutdown.c	Wed May 09 13:24:19 2012 -0700
     7.3 @@ -0,0 +1,228 @@
     7.4 +/*
     7.5 + * Copyright 2010  OpenSourceCodeStewardshipFoundation
     7.6 + *
     7.7 + * Licensed under BSD
     7.8 + */
     7.9 +
    7.10 +#include <stdio.h>
    7.11 +#include <stdlib.h>
    7.12 +
    7.13 +#include "VMS_impl/VMS.h"
    7.14 +#include "Vthread.h"
    7.15 +#include "C_Libraries/Queue_impl/PrivateQueue.h"
    7.16 +#include "C_Libraries/Hash_impl/PrivateHash.h"
    7.17 +
    7.18 +
    7.19 +
    7.20 +//===========================================================================
    7.21 +
    7.22 +//TODO: update these comments!
    7.23 +/*These are the library functions *called in the application*
    7.24 + * 
    7.25 + *There's a pattern for the outside sequential code to interact with the
    7.26 + * VMS_HW code.
    7.27 + *The VMS_HW system is inside a boundary..  every Vthread system is in its
    7.28 + * own directory that contains the functions for each of the processor types.
    7.29 + * One of the processor types is the "seed" processor that starts the
    7.30 + * cascade of creating all the processors that do the work.
    7.31 + *So, in the directory is a file called "EntryPoint.c" that contains the
    7.32 + * function, named appropriately to the work performed, that the outside
    7.33 + * sequential code calls.  This function follows a pattern:
    7.34 + *1) it calls Vthread__init()
    7.35 + *2) it creates the initial data for the seed processor, which is passed
    7.36 + *    in to the function
    7.37 + *3) it creates the seed Vthread processor, with the data to start it with.
    7.38 + *4) it calls startVthreadThenWaitUntilWorkDone
    7.39 + *5) it gets the returnValue from the transfer struc and returns that
    7.40 + *    from the function
    7.41 + *
    7.42 + *For now, a new Vthread system has to be created via Vthread__init every
    7.43 + * time an entry point function is called -- later, might add letting the
    7.44 + * Vthread system be created once, and let all the entry points just reuse
    7.45 + * it -- want to be as simple as possible now, and see by using what makes
    7.46 + * sense for later..
    7.47 + */
    7.48 +
    7.49 +
    7.50 +
    7.51 +//===========================================================================
    7.52 +
    7.53 +/*This is the "border crossing" function -- the thing that crosses from the
    7.54 + * outside world, into the VMS_HW world.  It initializes and starts up the
    7.55 + * VMS system, then creates one processor from the specified function and
    7.56 + * puts it into the readyQ.  From that point, that one function is resp.
    7.57 + * for creating all the other processors, that then create others, and so
    7.58 + * forth.
    7.59 + *When all the processors, including the seed, have dissipated, then this
    7.60 + * function returns.  The results will have been written by side-effect via
    7.61 + * pointers read from, or written into initData.
    7.62 + *
    7.63 + *NOTE: no Threads should exist in the outside program that might touch
    7.64 + * any of the data reachable from initData passed in to here
    7.65 + */
    7.66 +void
    7.67 +Vthread__create_seed_slaveVP_and_do_work( TopLevelFnPtr fnPtr, void *initData )
    7.68 + { VthdSemEnv *semEnv;
    7.69 +   SlaveVP *seedSlv;
    7.70 +
    7.71 +   #ifdef DEBUG__TURN_ON_SEQUENTIAL_MODE
    7.72 +   Vthread__init_Seq();  //debug sequential exe
    7.73 +   #else
    7.74 +   Vthread__init();      //normal multi-thd
    7.75 +   #endif
    7.76 +   semEnv = _VMSMasterEnv->semanticEnv;
    7.77 +
    7.78 +      //Vthread starts with one processor, which is put into initial environ,
    7.79 +      // and which then calls create() to create more, thereby expanding work
    7.80 +   seedSlv = Vthread__create_slaveVP_helper( fnPtr, initData, semEnv, -1 );
    7.81 +
    7.82 +   resume_slaveVP( seedSlv, semEnv );
    7.83 +
    7.84 +   #ifdef DEBUG__TURN_ON_SEQUENTIAL_MODE
    7.85 +   VMS_SS__start_the_work_then_wait_until_done_Seq();  //debug sequential exe
    7.86 +   #else
    7.87 +   VMS_SS__start_the_work_then_wait_until_done();      //normal multi-thd
    7.88 +   #endif
    7.89 +
    7.90 +   Vthread__cleanup_after_shutdown();
    7.91 + }
    7.92 +
    7.93 +
    7.94 +//===========================================================================
    7.95 +//
    7.96 +/*Initializes all the data-structures for a Vthread system -- but doesn't
    7.97 + * start it running yet!
    7.98 + *
    7.99 + * 
   7.100 + *This sets up the semantic layer over the VMS system
   7.101 + *
   7.102 + *First, calls VMS_Setup, then creates own environment, making it ready
   7.103 + * for creating the seed processor and then starting the work.
   7.104 + */
   7.105 +void
   7.106 +Vthread__init()
   7.107 + {
   7.108 +         MEAS__Make_Meas_Hists_for_Language;
   7.109 +
   7.110 +   VMS_SS__init();
   7.111 +   //masterEnv, a global var, now is partially set up by init_VMS
   7.112 +   
   7.113 +   Vthread__init_Helper();
   7.114 + }
   7.115 +
   7.116 +#ifdef DEBUG__TURN_ON_SEQUENTIAL_MODE
   7.117 +void
   7.118 +Vthread__init_Seq()
   7.119 + {
   7.120 +   VMS_SS__init_Seq();
   7.121 +   flushRegisters();
   7.122 +      //masterEnv, a global var, now is partially set up by init_VMS
   7.123 +
   7.124 +   Vthread__init_Helper();
   7.125 + }
   7.126 +#endif
   7.127 +
   7.128 +void
   7.129 +Vthread__init_Helper()
   7.130 + { VthdSemEnv       *semanticEnv;
   7.131 +   PrivQueueStruc **readySlvQs;
   7.132 +   int              coreIdx, i;
   7.133 + 
   7.134 +      //Hook up the semantic layer's plug-ins to the Master virt procr
   7.135 +   _VMSMasterEnv->requestHandler = &Vthread__Request_Handler;
   7.136 +   _VMSMasterEnv->slaveAssigner = &Vthread__schedule_slaveVP;
   7.137 +
   7.138 +      //create the semantic layer's environment (all its data) and add to
   7.139 +      // the master environment
   7.140 +   semanticEnv = VMS_WL__malloc( sizeof( VthdSemEnv ) );
   7.141 +   _VMSMasterEnv->semanticEnv = semanticEnv;
   7.142 +
   7.143 +      //create the ready queue
   7.144 +   readySlvQs = VMS_WL__malloc( NUM_CORES * sizeof(PrivQueueStruc *) );
   7.145 +
   7.146 +   for( coreIdx = 0; coreIdx < NUM_CORES; coreIdx++ )
   7.147 +    {
   7.148 +      readySlvQs[ coreIdx ] = makeVMSQ();
   7.149 +    }
   7.150 +   
   7.151 +   semanticEnv->readySlvQs          = readySlvQs;
   7.152 +   
   7.153 +   semanticEnv->nextCoreToGetNewSlv = 0;
   7.154 +
   7.155 +   semanticEnv->mutexDynArrayInfo  =
   7.156 +      makePrivDynArrayOfSize( (void*)&(semanticEnv->mutexDynArray), INIT_NUM_MUTEX );
   7.157 +
   7.158 +   semanticEnv->condDynArrayInfo   =
   7.159 +      makePrivDynArrayOfSize( (void*)&(semanticEnv->condDynArray),  INIT_NUM_COND );
   7.160 +   
   7.161 +   //TODO: bug -- turn these arrays into dyn arrays to eliminate limit
   7.162 +   //semanticEnv->singletonHasBeenExecutedFlags = makeDynArrayInfo( );
   7.163 +   //semanticEnv->transactionStrucs = makeDynArrayInfo( );
   7.164 +   for( i = 0; i < NUM_STRUCS_IN_SEM_ENV; i++ )
   7.165 +    {
   7.166 +      semanticEnv->fnSingletons[i].savedRetAddr      = NULL;
   7.167 +      semanticEnv->fnSingletons[i].hasBeenStarted    = FALSE;
   7.168 +      semanticEnv->fnSingletons[i].hasFinished       = FALSE;
   7.169 +      semanticEnv->fnSingletons[i].waitQ             = makeVMSQ();
   7.170 +      semanticEnv->transactionStrucs[i].waitingSlvQ   = makeVMSQ();
   7.171 +    }   
   7.172 + }
   7.173 +
   7.174 +
   7.175 +/*Frees any memory allocated by Vthread__init() then calls VMS__shutdown
   7.176 + */
   7.177 +void
   7.178 +Vthread__cleanup_after_shutdown()
   7.179 + { /*VthdSemEnv *semEnv;
   7.180 +   int32           coreIdx,     idx,   highestIdx;
   7.181 +   VthdMutex      **mutexArray, *mutex;
   7.182 +   VthdCond       **condArray, *cond; */
   7.183 + 
   7.184 + /* It's all allocated inside VMS's big chunk -- that's about to be freed, so
   7.185 + *  nothing to do here
   7.186 +  semEnv = _VMSMasterEnv->semanticEnv;
   7.187 +
   7.188 +//TODO: double check that all sem env locations freed
   7.189 +
   7.190 +   for( coreIdx = 0; coreIdx < NUM_CORES; coreIdx++ )
   7.191 +    {
   7.192 +      free( semEnv->readySlvQs[coreIdx]->startOfData );
   7.193 +      free( semEnv->readySlvQs[coreIdx] );
   7.194 +    }
   7.195 +   
   7.196 +   free( semEnv->readySlvQs );
   7.197 +
   7.198 +   
   7.199 +   //==== Free mutexes and mutex array ====
   7.200 +   mutexArray = semEnv->mutexDynArray->array;
   7.201 +   highestIdx = semEnv->mutexDynArray->highestIdxInArray;
   7.202 +   for( idx=0; idx < highestIdx; idx++ )
   7.203 +    { mutex = mutexArray[ idx ];
   7.204 +      if( mutex == NULL ) continue;
   7.205 +      free( mutex );
   7.206 +    }
   7.207 +   free( mutexArray );
   7.208 +   free( semEnv->mutexDynArray );
   7.209 +   //======================================
   7.210 +   
   7.211 +
   7.212 +   //==== Free conds and cond array ====
   7.213 +   condArray  = semEnv->condDynArray->array;
   7.214 +   highestIdx = semEnv->condDynArray->highestIdxInArray;
   7.215 +   for( idx=0; idx < highestIdx; idx++ )
   7.216 +    { cond = condArray[ idx ];
   7.217 +      if( cond == NULL ) continue;
   7.218 +      free( cond );
   7.219 +    }
   7.220 +   free( condArray );
   7.221 +   free( semEnv->condDynArray );
   7.222 +   //===================================
   7.223 +
   7.224 +   
   7.225 +   free( _VMSMasterEnv->semanticEnv );
   7.226 +  */
   7.227 +   VMS_SS__cleanup_at_end_of_shutdown();
   7.228 + }
   7.229 +
   7.230 +
   7.231 +//===========================================================================
     8.1 --- a/Vthread_helper.c	Sun Mar 04 14:29:42 2012 -0800
     8.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     8.3 @@ -1,46 +0,0 @@
     8.4 -
     8.5 -#include <stddef.h>
     8.6 -
     8.7 -#include "VMS_impl/VMS.h"
     8.8 -#include "Vthread.h"
     8.9 -
    8.10 -/*Re-use this in the entry-point fn
    8.11 - */
    8.12 -inline SlaveVP *
    8.13 -Vthread__create_slaveVP_helper( TopLevelFnPtr fnPtr, void *initData,
    8.14 -                          VthdSemEnv *semEnv,    int32 coreToScheduleOnto )
    8.15 - { SlaveVP      *newSlv;
    8.16 -   VthdSemData   *semData;
    8.17 -
    8.18 -      //This is running in master, so use internal version
    8.19 -   newSlv = VMS_WL__create_slaveVP( fnPtr, initData );
    8.20 -
    8.21 -   semData = VMS_WL__malloc( sizeof(VthdSemData) );
    8.22 -   semData->highestTransEntered = -1;
    8.23 -   semData->lastTransEntered    = NULL;
    8.24 -
    8.25 -   newSlv->semanticData = semData;
    8.26 -
    8.27 -   //=================== Assign new processor to a core =====================
    8.28 -   #ifdef SEQUENTIAL
    8.29 -   newSlv->coreAnimatedBy = 0;
    8.30 -
    8.31 -   #else
    8.32 -
    8.33 -   if(coreToScheduleOnto < 0 || coreToScheduleOnto >= NUM_CORES )
    8.34 -    {    //out-of-range, so round-robin assignment
    8.35 -      newSlv->coreAnimatedBy = semEnv->nextCoreToGetNewSlv;
    8.36 -
    8.37 -      if( semEnv->nextCoreToGetNewSlv >= NUM_CORES - 1 )
    8.38 -          semEnv->nextCoreToGetNewSlv  = 0;
    8.39 -      else
    8.40 -          semEnv->nextCoreToGetNewSlv += 1;
    8.41 -    }
    8.42 -   else //core num in-range, so use it
    8.43 -    { newSlv->coreAnimatedBy = coreToScheduleOnto;
    8.44 -    }
    8.45 -   #endif
    8.46 -   //========================================================================
    8.47 -
    8.48 -   return newSlv;
    8.49 - }
    8.50 \ No newline at end of file
     9.1 --- a/Vthread_helper.h	Sun Mar 04 14:29:42 2012 -0800
     9.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     9.3 @@ -1,19 +0,0 @@
     9.4 -/* 
     9.5 - * File:   Vthread_helper.h
     9.6 - * Author: msach
     9.7 - *
     9.8 - * Created on June 10, 2011, 12:20 PM
     9.9 - */
    9.10 -
    9.11 -#include "VMS_impl/VMS.h"
    9.12 -#include "Vthread.h"
    9.13 -
    9.14 -#ifndef VTHREAD_HELPER_H
    9.15 -#define	VTHREAD_HELPER_H
    9.16 -
    9.17 -inline SlaveVP *
    9.18 -Vthread__create_slaveVP_helper( TopLevelFnPtr fnPtr, void *initData,
    9.19 -                          VthdSemEnv *semEnv,    int32 coreToScheduleOnto );
    9.20 -
    9.21 -#endif	/* VTHREAD_HELPER_H */
    9.22 -
    10.1 --- a/Vthread_lib.c	Sun Mar 04 14:29:42 2012 -0800
    10.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    10.3 @@ -1,618 +0,0 @@
    10.4 -/*
    10.5 - * Copyright 2010  OpenSourceCodeStewardshipFoundation
    10.6 - *
    10.7 - * Licensed under BSD
    10.8 - */
    10.9 -
   10.10 -#include <stdio.h>
   10.11 -#include <stdlib.h>
   10.12 -
   10.13 -#include "VMS_impl/VMS.h"
   10.14 -#include "Vthread.h"
   10.15 -#include "Vthread_helper.h"
   10.16 -#include "C_Libraries/Queue_impl/PrivateQueue.h"
   10.17 -#include "C_Libraries/Hash_impl/PrivateHash.h"
   10.18 -
   10.19 -
   10.20 -//==========================================================================
   10.21 -
   10.22 -void
   10.23 -Vthread__init();
   10.24 -
   10.25 -void
   10.26 -Vthread__init_Seq();
   10.27 -
   10.28 -void
   10.29 -Vthread__init_Helper();
   10.30 -
   10.31 -
   10.32 -//===========================================================================
   10.33 -
   10.34 -
   10.35 -/*These are the library functions *called in the application*
   10.36 - * 
   10.37 - *There's a pattern for the outside sequential code to interact with the
   10.38 - * VMS_HW code.
   10.39 - *The VMS_HW system is inside a boundary..  every Vthread system is in its
   10.40 - * own directory that contains the functions for each of the processor types.
   10.41 - * One of the processor types is the "seed" processor that starts the
   10.42 - * cascade of creating all the processors that do the work.
   10.43 - *So, in the directory is a file called "EntryPoint.c" that contains the
   10.44 - * function, named appropriately to the work performed, that the outside
   10.45 - * sequential code calls.  This function follows a pattern:
   10.46 - *1) it calls Vthread__init()
   10.47 - *2) it creates the initial data for the seed processor, which is passed
   10.48 - *    in to the function
   10.49 - *3) it creates the seed Vthread processor, with the data to start it with.
   10.50 - *4) it calls startVthreadThenWaitUntilWorkDone
   10.51 - *5) it gets the returnValue from the transfer struc and returns that
   10.52 - *    from the function
   10.53 - *
   10.54 - *For now, a new Vthread system has to be created via Vthread__init every
   10.55 - * time an entry point function is called -- later, might add letting the
   10.56 - * Vthread system be created once, and let all the entry points just reuse
   10.57 - * it -- want to be as simple as possible now, and see by using what makes
   10.58 - * sense for later..
   10.59 - */
   10.60 -
   10.61 -
   10.62 -
   10.63 -//===========================================================================
   10.64 -
   10.65 -/*This is the "border crossing" function -- the thing that crosses from the
   10.66 - * outside world, into the VMS_HW world.  It initializes and starts up the
   10.67 - * VMS system, then creates one processor from the specified function and
   10.68 - * puts it into the readyQ.  From that point, that one function is resp.
   10.69 - * for creating all the other processors, that then create others, and so
   10.70 - * forth.
   10.71 - *When all the processors, including the seed, have dissipated, then this
   10.72 - * function returns.  The results will have been written by side-effect via
   10.73 - * pointers read from, or written into initData.
   10.74 - *
   10.75 - *NOTE: no Threads should exist in the outside program that might touch
   10.76 - * any of the data reachable from initData passed in to here
   10.77 - */
   10.78 -void
   10.79 -Vthread__create_seed_slaveVP_and_do_work( TopLevelFnPtr fnPtr, void *initData )
   10.80 - { VthdSemEnv *semEnv;
   10.81 -   SlaveVP *seedSlv;
   10.82 -
   10.83 -   #ifdef SEQUENTIAL
   10.84 -   Vthread__init_Seq();  //debug sequential exe
   10.85 -   #else
   10.86 -   Vthread__init();      //normal multi-thd
   10.87 -   #endif
   10.88 -   semEnv = _VMSMasterEnv->semanticEnv;
   10.89 -
   10.90 -      //Vthread starts with one processor, which is put into initial environ,
   10.91 -      // and which then calls create() to create more, thereby expanding work
   10.92 -   seedSlv = Vthread__create_slaveVP_helper( fnPtr, initData, semEnv, -1 );
   10.93 -
   10.94 -   resume_slaveVP( seedSlv, semEnv );
   10.95 -
   10.96 -   #ifdef SEQUENTIAL
   10.97 -   VMS_SS__start_the_work_then_wait_until_done_Seq();  //debug sequential exe
   10.98 -   #else
   10.99 -   VMS_SS__start_the_work_then_wait_until_done();      //normal multi-thd
  10.100 -   #endif
  10.101 -
  10.102 -   Vthread__cleanup_after_shutdown();
  10.103 - }
  10.104 -
  10.105 -
  10.106 -inline int32
  10.107 -Vthread__giveMinWorkUnitCycles( float32 percentOverhead )
  10.108 - {
  10.109 -   return MIN_WORK_UNIT_CYCLES;
  10.110 - }
  10.111 -
  10.112 -inline int32
  10.113 -Vthread__giveIdealNumWorkUnits()
  10.114 - {
  10.115 -   return NUM_SCHED_SLOTS * NUM_CORES;
  10.116 - }
  10.117 -
  10.118 -inline int32
  10.119 -Vthread__give_number_of_cores_to_schedule_onto()
  10.120 - {
  10.121 -   return NUM_CORES;
  10.122 - }
  10.123 -
  10.124 -/*For now, use TSC -- later, make these two macros with assembly that first
  10.125 - * saves jump point, and second jumps back several times to get reliable time
  10.126 - */
  10.127 -inline void
  10.128 -Vthread__start_primitive()
  10.129 - { saveLowTimeStampCountInto( ((VthdSemEnv *)(_VMSMasterEnv->semanticEnv))->
  10.130 -                              primitiveStartTime );
  10.131 - }
  10.132 -
  10.133 -/*Just quick and dirty for now -- make reliable later
  10.134 - * will want this to jump back several times -- to be sure cache is warm
  10.135 - * because don't want comm time included in calc-time measurement -- and
  10.136 - * also to throw out any "weird" values due to OS interrupt or TSC rollover
  10.137 - */
  10.138 -inline int32
  10.139 -Vthread__end_primitive_and_give_cycles()
  10.140 - { int32 endTime, startTime;
  10.141 -   //TODO: fix by repeating time-measurement
  10.142 -   saveLowTimeStampCountInto( endTime );
  10.143 -   startTime=((VthdSemEnv*)(_VMSMasterEnv->semanticEnv))->primitiveStartTime;
  10.144 -   return (endTime - startTime);
  10.145 - }
  10.146 -
  10.147 -//===========================================================================
  10.148 -//
  10.149 -/*Initializes all the data-structures for a Vthread system -- but doesn't
  10.150 - * start it running yet!
  10.151 - *
  10.152 - * 
  10.153 - *This sets up the semantic layer over the VMS system
  10.154 - *
  10.155 - *First, calls VMS_Setup, then creates own environment, making it ready
  10.156 - * for creating the seed processor and then starting the work.
  10.157 - */
  10.158 -void
  10.159 -Vthread__init()
  10.160 - {
  10.161 -   VMS_SS__init();
  10.162 -   //masterEnv, a global var, now is partially set up by init_VMS
  10.163 -   
  10.164 -   Vthread__init_Helper();
  10.165 - }
  10.166 -
  10.167 -#ifdef SEQUENTIAL
  10.168 -void
  10.169 -Vthread__init_Seq()
  10.170 - {
  10.171 -   VMS_SS__init_Seq();
  10.172 -   flushRegisters();
  10.173 -      //masterEnv, a global var, now is partially set up by init_VMS
  10.174 -
  10.175 -   Vthread__init_Helper();
  10.176 - }
  10.177 -#endif
  10.178 -
  10.179 -void
  10.180 -Vthread__init_Helper()
  10.181 - { VthdSemEnv       *semanticEnv;
  10.182 -   PrivQueueStruc **readySlvQs;
  10.183 -   int              coreIdx, i;
  10.184 - 
  10.185 -      //Hook up the semantic layer's plug-ins to the Master virt procr
  10.186 -   _VMSMasterEnv->requestHandler = &Vthread__Request_Handler;
  10.187 -   _VMSMasterEnv->slaveAssigner = &Vthread__schedule_slaveVP;
  10.188 -
  10.189 -      //create the semantic layer's environment (all its data) and add to
  10.190 -      // the master environment
  10.191 -   semanticEnv = VMS_WL__malloc( sizeof( VthdSemEnv ) );
  10.192 -   _VMSMasterEnv->semanticEnv = semanticEnv;
  10.193 -
  10.194 -      //create the ready queue
  10.195 -   readySlvQs = VMS_WL__malloc( NUM_CORES * sizeof(PrivQueueStruc *) );
  10.196 -
  10.197 -   for( coreIdx = 0; coreIdx < NUM_CORES; coreIdx++ )
  10.198 -    {
  10.199 -      readySlvQs[ coreIdx ] = makeVMSQ();
  10.200 -    }
  10.201 -   
  10.202 -   semanticEnv->readySlvQs          = readySlvQs;
  10.203 -   
  10.204 -   semanticEnv->nextCoreToGetNewSlv = 0;
  10.205 -
  10.206 -   semanticEnv->mutexDynArrayInfo  =
  10.207 -      makePrivDynArrayOfSize( (void*)&(semanticEnv->mutexDynArray), INIT_NUM_MUTEX );
  10.208 -
  10.209 -   semanticEnv->condDynArrayInfo   =
  10.210 -      makePrivDynArrayOfSize( (void*)&(semanticEnv->condDynArray),  INIT_NUM_COND );
  10.211 -   
  10.212 -   //TODO: bug -- turn these arrays into dyn arrays to eliminate limit
  10.213 -   //semanticEnv->singletonHasBeenExecutedFlags = makeDynArrayInfo( );
  10.214 -   //semanticEnv->transactionStrucs = makeDynArrayInfo( );
  10.215 -   for( i = 0; i < NUM_STRUCS_IN_SEM_ENV; i++ )
  10.216 -    {
  10.217 -      semanticEnv->fnSingletons[i].savedRetAddr      = NULL;
  10.218 -      semanticEnv->fnSingletons[i].hasBeenStarted    = FALSE;
  10.219 -      semanticEnv->fnSingletons[i].hasFinished       = FALSE;
  10.220 -      semanticEnv->fnSingletons[i].waitQ             = makeVMSQ();
  10.221 -      semanticEnv->transactionStrucs[i].waitingSlvQ   = makeVMSQ();
  10.222 -    }   
  10.223 - }
  10.224 -
  10.225 -
  10.226 -/*Frees any memory allocated by Vthread__init() then calls VMS__shutdown
  10.227 - */
  10.228 -void
  10.229 -Vthread__cleanup_after_shutdown()
  10.230 - { /*VthdSemEnv *semEnv;
  10.231 -   int32           coreIdx,     idx,   highestIdx;
  10.232 -   VthdMutex      **mutexArray, *mutex;
  10.233 -   VthdCond       **condArray, *cond; */
  10.234 - 
  10.235 - /* It's all allocated inside VMS's big chunk -- that's about to be freed, so
  10.236 - *  nothing to do here
  10.237 -  semEnv = _VMSMasterEnv->semanticEnv;
  10.238 -
  10.239 -//TODO: double check that all sem env locations freed
  10.240 -
  10.241 -   for( coreIdx = 0; coreIdx < NUM_CORES; coreIdx++ )
  10.242 -    {
  10.243 -      free( semEnv->readySlvQs[coreIdx]->startOfData );
  10.244 -      free( semEnv->readySlvQs[coreIdx] );
  10.245 -    }
  10.246 -   
  10.247 -   free( semEnv->readySlvQs );
  10.248 -
  10.249 -   
  10.250 -   //==== Free mutexes and mutex array ====
  10.251 -   mutexArray = semEnv->mutexDynArray->array;
  10.252 -   highestIdx = semEnv->mutexDynArray->highestIdxInArray;
  10.253 -   for( idx=0; idx < highestIdx; idx++ )
  10.254 -    { mutex = mutexArray[ idx ];
  10.255 -      if( mutex == NULL ) continue;
  10.256 -      free( mutex );
  10.257 -    }
  10.258 -   free( mutexArray );
  10.259 -   free( semEnv->mutexDynArray );
  10.260 -   //======================================
  10.261 -   
  10.262 -
  10.263 -   //==== Free conds and cond array ====
  10.264 -   condArray  = semEnv->condDynArray->array;
  10.265 -   highestIdx = semEnv->condDynArray->highestIdxInArray;
  10.266 -   for( idx=0; idx < highestIdx; idx++ )
  10.267 -    { cond = condArray[ idx ];
  10.268 -      if( cond == NULL ) continue;
  10.269 -      free( cond );
  10.270 -    }
  10.271 -   free( condArray );
  10.272 -   free( semEnv->condDynArray );
  10.273 -   //===================================
  10.274 -
  10.275 -   
  10.276 -   free( _VMSMasterEnv->semanticEnv );
  10.277 -  */
  10.278 -   VMS_SS__cleanup_at_end_of_shutdown();
  10.279 - }
  10.280 -
  10.281 -
  10.282 -//===========================================================================
  10.283 -
  10.284 -/*
  10.285 - */
  10.286 -inline SlaveVP *
  10.287 -Vthread__create_thread( TopLevelFnPtr fnPtr, void *initData,
  10.288 -                          SlaveVP *creatingSlv )
  10.289 - { VthdSemReq  reqData;
  10.290 -
  10.291 -      //the semantic request data is on the stack and disappears when this
  10.292 -      // call returns -- it's guaranteed to remain in the Slv's stack for as
  10.293 -      // long as the Slv is suspended.
  10.294 -   reqData.reqType            = 0; //know the type because is a VMS create req
  10.295 -   reqData.coreToScheduleOnto = -1; //means round-robin schedule
  10.296 -   reqData.fnPtr              = fnPtr;
  10.297 -   reqData.initData           = initData;
  10.298 -   reqData.requestingSlv       = creatingSlv;
  10.299 -
  10.300 -   VMS_WL__send_create_slaveVP_req( &reqData, creatingSlv );
  10.301 -
  10.302 -   return creatingSlv->dataRetFromReq;
  10.303 - }
  10.304 -
  10.305 -inline SlaveVP *
  10.306 -Vthread__create_thread_with_affinity( TopLevelFnPtr fnPtr, void *initData,
  10.307 -                           SlaveVP *creatingSlv,  int32  coreToScheduleOnto )
  10.308 - { VthdSemReq  reqData;
  10.309 -
  10.310 -      //the semantic request data is on the stack and disappears when this
  10.311 -      // call returns -- it's guaranteed to remain in the Slv's stack for as
  10.312 -      // long as the Slv is suspended.
  10.313 -   reqData.reqType            = 0; //know type because in a VMS create req
  10.314 -   reqData.coreToScheduleOnto = coreToScheduleOnto;
  10.315 -   reqData.fnPtr              = fnPtr;
  10.316 -   reqData.initData           = initData;
  10.317 -   reqData.requestingSlv       = creatingSlv;
  10.318 -
  10.319 -   VMS_WL__send_create_slaveVP_req( &reqData, creatingSlv );
  10.320 - }
  10.321 -
  10.322 -inline void
  10.323 -Vthread__dissipate_thread( SlaveVP *procrToDissipate )
  10.324 - {
  10.325 -   VMS_WL__send_dissipate_req( procrToDissipate );
  10.326 - }
  10.327 -
  10.328 -
  10.329 -//===========================================================================
  10.330 -
  10.331 -void *
  10.332 -Vthread__malloc( size_t sizeToMalloc, SlaveVP *animSlv )
  10.333 - { VthdSemReq  reqData;
  10.334 -
  10.335 -   reqData.reqType      = malloc_req;
  10.336 -   reqData.sizeToMalloc = sizeToMalloc;
  10.337 -   reqData.requestingSlv = animSlv;
  10.338 -
  10.339 -   VMS_WL__send_sem_request( &reqData, animSlv );
  10.340 -
  10.341 -   return animSlv->dataRetFromReq;
  10.342 - }
  10.343 -
  10.344 -
  10.345 -/*Sends request to Master, which does the work of freeing
  10.346 - */
  10.347 -void
  10.348 -Vthread__free( void *ptrToFree, SlaveVP *animSlv )
  10.349 - { VthdSemReq  reqData;
  10.350 -
  10.351 -   reqData.reqType      = free_req;
  10.352 -   reqData.ptrToFree    = ptrToFree;
  10.353 -   reqData.requestingSlv = animSlv;
  10.354 -
  10.355 -   VMS_WL__send_sem_request( &reqData, animSlv );
  10.356 - }
  10.357 -
  10.358 -
  10.359 -//===========================================================================
  10.360 -
  10.361 -inline void
  10.362 -Vthread__set_globals_to( void *globals )
  10.363 - {
  10.364 -   ((VthdSemEnv *)
  10.365 -    (_VMSMasterEnv->semanticEnv))->applicationGlobals = globals;
  10.366 - }
  10.367 -
  10.368 -inline void *
  10.369 -Vthread__give_globals()
  10.370 - {
  10.371 -   return((VthdSemEnv *) (_VMSMasterEnv->semanticEnv))->applicationGlobals;
  10.372 - }
  10.373 -
  10.374 -
  10.375 -//===========================================================================
  10.376 -
  10.377 -inline int32
  10.378 -Vthread__make_mutex( SlaveVP *animSlv )
  10.379 - { VthdSemReq  reqData;
  10.380 -
  10.381 -   reqData.reqType      = make_mutex;
  10.382 -   reqData.requestingSlv = animSlv;
  10.383 -
  10.384 -   VMS_WL__send_sem_request( &reqData, animSlv );
  10.385 -
  10.386 -   return (int32)animSlv->dataRetFromReq; //mutexid is 32bit wide
  10.387 - }
  10.388 -
  10.389 -inline void
  10.390 -Vthread__mutex_lock( int32 mutexIdx, SlaveVP *acquiringSlv )
  10.391 - { VthdSemReq  reqData;
  10.392 -
  10.393 -   reqData.reqType      = mutex_lock;
  10.394 -   reqData.mutexIdx     = mutexIdx;
  10.395 -   reqData.requestingSlv = acquiringSlv;
  10.396 -
  10.397 -   VMS_WL__send_sem_request( &reqData, acquiringSlv );
  10.398 - }
  10.399 -
  10.400 -inline void
  10.401 -Vthread__mutex_unlock( int32 mutexIdx, SlaveVP *releasingSlv )
  10.402 - { VthdSemReq  reqData;
  10.403 -
  10.404 -   reqData.reqType      = mutex_unlock;
  10.405 -   reqData.mutexIdx     = mutexIdx;
  10.406 -   reqData.requestingSlv = releasingSlv;
  10.407 -
  10.408 -   VMS_WL__send_sem_request( &reqData, releasingSlv );
  10.409 - }
  10.410 -
  10.411 -
  10.412 -//=======================
  10.413 -inline int32
  10.414 -Vthread__make_cond( int32 ownedMutexIdx, SlaveVP *animSlv)
  10.415 - { VthdSemReq  reqData;
  10.416 -
  10.417 -   reqData.reqType      = make_cond;
  10.418 -   reqData.mutexIdx     = ownedMutexIdx;
  10.419 -   reqData.requestingSlv = animSlv;
  10.420 -
  10.421 -   VMS_WL__send_sem_request( &reqData, animSlv );
  10.422 -
  10.423 -   return (int32)animSlv->dataRetFromReq; //condIdx is 32 bit wide
  10.424 - }
  10.425 -
  10.426 -inline void
  10.427 -Vthread__cond_wait( int32 condIdx, SlaveVP *waitingSlv)
  10.428 - { VthdSemReq  reqData;
  10.429 -
  10.430 -   reqData.reqType      = cond_wait;
  10.431 -   reqData.condIdx      = condIdx;
  10.432 -   reqData.requestingSlv = waitingSlv;
  10.433 -
  10.434 -   VMS_WL__send_sem_request( &reqData, waitingSlv );
  10.435 - }
  10.436 -
  10.437 -inline void *
  10.438 -Vthread__cond_signal( int32 condIdx, SlaveVP *signallingSlv )
  10.439 - { VthdSemReq  reqData;
  10.440 -
  10.441 -   reqData.reqType      = cond_signal;
  10.442 -   reqData.condIdx      = condIdx;
  10.443 -   reqData.requestingSlv = signallingSlv;
  10.444 -
  10.445 -   VMS_WL__send_sem_request( &reqData, signallingSlv );
  10.446 - }
  10.447 -
  10.448 -
  10.449 -//===========================================================================
  10.450 -//
  10.451 -/*A function singleton is a function whose body executes exactly once, on a
  10.452 - * single core, no matter how many times the fuction is called and no
  10.453 - * matter how many cores or the timing of cores calling it.
  10.454 - *
  10.455 - *A data singleton is a ticket attached to data.  That ticket can be used
  10.456 - * to get the data through the function exactly once, no matter how many
  10.457 - * times the data is given to the function, and no matter the timing of
  10.458 - * trying to get the data through from different cores.
  10.459 - */
  10.460 -
  10.461 -/*Fn singleton uses ID as index into array of singleton structs held in the
  10.462 - * semantic environment.
  10.463 - */
  10.464 -void
  10.465 -Vthread__start_fn_singleton( int32 singletonID,   SlaveVP *animSlv )
  10.466 - {
  10.467 -   VthdSemReq  reqData;
  10.468 -
  10.469 -      //
  10.470 -   reqData.reqType     = singleton_fn_start;
  10.471 -   reqData.singletonID = singletonID;
  10.472 -
  10.473 -   VMS_WL__send_sem_request( &reqData, animSlv );
  10.474 -   if( animSlv->dataRetFromReq != 0 ) //addr of matching end-singleton
  10.475 -    {
  10.476 -      VthdSemEnv *semEnv = VMS_int__give_sem_env_for( animSlv ); //not protected!
  10.477 -      VMS_int__return_to_addr_in_ptd_to_loc(
  10.478 -                         &((semEnv->fnSingletons[singletonID]).savedRetAddr) );
  10.479 -    }
  10.480 - }
  10.481 -
  10.482 -/*Data singleton hands addr of loc holding a pointer to a singleton struct.
  10.483 - * The start_data_singleton makes the structure and puts its addr into the
  10.484 - * location.
  10.485 - */
  10.486 -void
  10.487 -Vthread__start_data_singleton( VthdSingleton *singleton,  SlaveVP *animSlv )
  10.488 - {
  10.489 -   VthdSemReq  reqData;
  10.490 -
  10.491 -   if( singleton->savedRetAddr && singleton->hasFinished )
  10.492 -      goto JmpToEndSingleton;
  10.493 -      
  10.494 -   reqData.reqType       = singleton_data_start;
  10.495 -   reqData.singleton = singleton;
  10.496 -
  10.497 -   VMS_WL__send_sem_request( &reqData, animSlv );
  10.498 -   if( animSlv->dataRetFromReq ) //either 0 or end singleton's return addr
  10.499 -    {    
  10.500 -       JmpToEndSingleton:
  10.501 -       VMS_int__return_to_addr_in_ptd_to_loc(&(singleton->savedRetAddr));
  10.502 -    }
  10.503 -   //now, simply return
  10.504 -   //will exit either from the start singleton call or the end-singleton call
  10.505 - }
  10.506 -
  10.507 -/*Uses ID as index into array of flags.  If flag already set, resumes from
  10.508 - * end-label.  Else, sets flag and resumes normally.
  10.509 - *
  10.510 - *Note, this call cannot be inlined because the instr addr at the label
  10.511 - * inside is shared by all invocations of a given singleton ID.
  10.512 - */
  10.513 -void
  10.514 -Vthread__end_fn_singleton( int32 singletonID, SlaveVP *animSlv )
  10.515 - {
  10.516 -   VthdSemReq  reqData;
  10.517 -
  10.518 -   //don't need this addr until after at least one singleton has reached
  10.519 -   // this function
  10.520 -   VthdSemEnv *semEnv = VMS_int__give_sem_env_for( animSlv );
  10.521 -   VMS_int__return_to_addr_in_ptd_to_loc(
  10.522 -                         &((semEnv->fnSingletons[singletonID]).savedRetAddr) );
  10.523 -
  10.524 -   reqData.reqType     = singleton_fn_end;
  10.525 -   reqData.singletonID = singletonID;
  10.526 -
  10.527 -   VMS_WL__send_sem_request( &reqData, animSlv );
  10.528 - }
  10.529 -
/*Ends a data singleton: saves the return address of this call into the
 * singleton struct, then tells the master the singleton's body finished.
 *
 *NOTE(review): the save macro is assembly that manipulates the return
 * address; the exact control flow after it (whether the statements below
 * run on this pass or on a later jump-back) depends on the macro's
 * definition -- confirm before restructuring this function.
 */
void
Vthread__end_data_singleton( VthdSingleton *singleton, SlaveVP *animSlv )
 {
   VthdSemReq  reqData;

      //don't need this addr until after singleton struct has reached
      // this function for first time
      //do assembly that saves the return addr of this fn call into the
      // data singleton -- that data-singleton can only be given to exactly
      // one instance in the code of this function.  However, can use this
      // function in different places for different data-singletons.

   VMS_int__save_return_into_ptd_to_loc_then_do_ret(&(singleton->savedRetAddr));

   reqData.reqType    = singleton_data_end;
   reqData.singleton  = singleton;

   VMS_WL__send_sem_request( &reqData, animSlv );
 }
  10.549 -
  10.550 -
  10.551 -/*This executes the function in the masterVP, so it executes in isolation
  10.552 - * from any other copies -- only one copy of the function can ever execute
  10.553 - * at a time.
  10.554 - *
  10.555 - *It suspends to the master, and the request handler takes the function
  10.556 - * pointer out of the request and calls it, then resumes the Slv.
  10.557 - *Only very short functions should be called this way -- for longer-running
  10.558 - * isolation, use transaction-start and transaction-end, which run the code
  10.559 - * between as work-code.
  10.560 - */
  10.561 -void
  10.562 -Vthread__animate_short_fn_in_isolation( PtrToAtomicFn ptrToFnToExecInMaster,
  10.563 -                                    void *data, SlaveVP *animSlv )
  10.564 - {
  10.565 -   VthdSemReq  reqData;
  10.566 -
  10.567 -      //
  10.568 -   reqData.reqType          = atomic;
  10.569 -   reqData.fnToExecInMaster = ptrToFnToExecInMaster;
  10.570 -   reqData.dataForFn        = data;
  10.571 -
  10.572 -   VMS_WL__send_sem_request( &reqData, animSlv );
  10.573 - }
  10.574 -
  10.575 -
  10.576 -/*This suspends to the master.
  10.577 - *First, it looks at the Slv's data, to see the highest transactionID that Slv
  10.578 - * already has entered.  If the current ID is not larger, it throws an
  10.579 - * exception stating a bug in the code.  Otherwise it puts the current ID
  10.580 - * there, and adds the ID to a linked list of IDs entered -- the list is
  10.581 - * used to check that exits are properly ordered.
  10.582 - *Next it is uses transactionID as index into an array of transaction
  10.583 - * structures.
  10.584 - *If the "Slv_currently_executing" field is non-null, then put requesting Slv
  10.585 - * into queue in the struct.  (At some point a holder will request
  10.586 - * end-transaction, which will take this Slv from the queue and resume it.)
  10.587 - *If NULL, then write requesting into the field and resume.
  10.588 - */
  10.589 -void
  10.590 -Vthread__start_transaction( int32 transactionID, SlaveVP *animSlv )
  10.591 - {
  10.592 -   VthdSemReq  reqData;
  10.593 -
  10.594 -      //
  10.595 -   reqData.reqType     = trans_start;
  10.596 -   reqData.transID     = transactionID;
  10.597 -
  10.598 -   VMS_WL__send_sem_request( &reqData, animSlv );
  10.599 - }
  10.600 -
  10.601 -/*This suspends to the master, then uses transactionID as index into an
  10.602 - * array of transaction structures.
  10.603 - *It looks at Slv_currently_executing to be sure it's same as requesting Slv.
  10.604 - * If different, throws an exception, stating there's a bug in the code.
  10.605 - *Next it looks at the queue in the structure.
  10.606 - *If it's empty, it sets Slv_currently_executing field to NULL and resumes.
  10.607 - *If something in, gets it, sets Slv_currently_executing to that Slv, then
  10.608 - * resumes both.
  10.609 - */
  10.610 -void
  10.611 -Vthread__end_transaction( int32 transactionID, SlaveVP *animSlv )
  10.612 - {
  10.613 -   VthdSemReq  reqData;
  10.614 -
  10.615 -      //
  10.616 -   reqData.reqType     = trans_end;
  10.617 -   reqData.transID     = transactionID;
  10.618 -
  10.619 -   VMS_WL__send_sem_request( &reqData, animSlv );
  10.620 - }
  10.621 -//===========================================================================