# HG changeset patch # User Some Random Person # Date 1336595059 25200 # Node ID b94dc57e4455592afe8b31ada9590529724d4cfb # Parent b3a881f25c5a9afe23c64ea1244b54aafcdb5eb8 refactored many files -- chgd names, moved code around -- doesn't compile diff -r b3a881f25c5a -r b94dc57e4455 Vthread.c --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/Vthread.c Wed May 09 13:24:19 2012 -0700 @@ -0,0 +1,464 @@ +/* + * Copyright 2010 OpenSourceCodeStewardshipFoundation + * + * Licensed under BSD + */ + +#include +#include + +#include "VMS_impl/VMS.h" +#include "Vthread.h" +#include "Vthread_helper.h" +#include "C_Libraries/Queue_impl/PrivateQueue.h" +#include "C_Libraries/Hash_impl/PrivateHash.h" + + +//========================================================================== + +void +Vthread__init(); + +void +Vthread__init_Seq(); + +void +Vthread__init_Helper(); + + +//=========================================================================== + +/*These are the library functions *called in the application* + * + */ + + + +//=========================================================================== + +inline int32 +Vthread__giveMinWorkUnitCycles( float32 percentOverhead ) + { + return MIN_WORK_UNIT_CYCLES; + } + +inline int32 +Vthread__giveIdealNumWorkUnits() + { + return NUM_SCHED_SLOTS * NUM_CORES; + } + +inline int32 +Vthread__give_number_of_cores_to_schedule_onto() + { + return NUM_CORES; + } + +/*For now, use TSC -- later, make these two macros with assembly that first + * saves jump point, and second jumps back several times to get reliable time + */ +inline void +Vthread__start_primitive() + { saveLowTimeStampCountInto( ((VthdSemEnv *)(_VMSMasterEnv->semanticEnv))-> + primitiveStartTime ); + } + +/*Just quick and dirty for now -- make reliable later + * will want this to jump back several times -- to be sure cache is warm + * because don't want comm time included in calc-time measurement -- and + * also to throw out any "weird" values due to OS interrupt or TSC 
rollover + */ +inline int32 +Vthread__end_primitive_and_give_cycles() + { int32 endTime, startTime; + //TODO: fix by repeating time-measurement + saveLowTimeStampCountInto( endTime ); + startTime=((VthdSemEnv*)(_VMSMasterEnv->semanticEnv))->primitiveStartTime; + return (endTime - startTime); + } + + + +//=========================================================================== + +/*Re-use this in the entry-point fn + */ +inline SlaveVP * +Vthread__create_slaveVP_helper( TopLevelFnPtr fnPtr, void *initData, + VthdSemEnv *semEnv, int32 coreToScheduleOnto ) + { SlaveVP *newSlv; + VthdSemData *semData; + + //This is running in master, so use internal version + newSlv = VMS_WL__create_slaveVP( fnPtr, initData ); + + semData = VMS_WL__malloc( sizeof(VthdSemData) ); + semData->highestTransEntered = -1; + semData->lastTransEntered = NULL; + + newSlv->semanticData = semData; + + //=================== Assign new processor to a core ===================== + #ifdef DEBUG__TURN_ON_SEQUENTIAL_MODE + newSlv->coreAnimatedBy = 0; + + #else + + if(coreToScheduleOnto < 0 || coreToScheduleOnto >= NUM_CORES ) + { //out-of-range, so round-robin assignment + newSlv->coreAnimatedBy = semEnv->nextCoreToGetNewSlv; + + if( semEnv->nextCoreToGetNewSlv >= NUM_CORES - 1 ) + semEnv->nextCoreToGetNewSlv = 0; + else + semEnv->nextCoreToGetNewSlv += 1; + } + else //core num in-range, so use it + { newSlv->coreAnimatedBy = coreToScheduleOnto; + } + #endif + //======================================================================== + + return newSlv; + } + + +/* + */ +inline SlaveVP * +Vthread__create_thread( TopLevelFnPtr fnPtr, void *initData, + SlaveVP *creatingSlv ) + { VthdSemReq reqData; + + //the semantic request data is on the stack and disappears when this + // call returns -- it's guaranteed to remain in the Slv's stack for as + // long as the Slv is suspended. 
+ reqData.reqType = 0; //know the type because is a VMS create req + reqData.coreToScheduleOnto = -1; //means round-robin schedule + reqData.fnPtr = fnPtr; + reqData.initData = initData; + reqData.requestingSlv = creatingSlv; + + VMS_WL__send_create_slaveVP_req( &reqData, creatingSlv ); + + return creatingSlv->dataRetFromReq; + } + + +inline SlaveVP * +Vthread__create_thread_with_affinity( TopLevelFnPtr fnPtr, void *initData, + SlaveVP *creatingSlv, int32 coreToScheduleOnto ) + { VthdSemReq reqData; + + //the semantic request data is on the stack and disappears when this + // call returns -- it's guaranteed to remain in the Slv's stack for as + // long as the Slv is suspended. + reqData.reqType = 0; //know type because in a VMS create req + reqData.coreToScheduleOnto = coreToScheduleOnto; + reqData.fnPtr = fnPtr; + reqData.initData = initData; + reqData.requestingSlv = creatingSlv; + + VMS_WL__send_create_slaveVP_req( &reqData, creatingSlv ); + } + +inline void +Vthread__dissipate_thread( SlaveVP *procrToDissipate ) + { + VMS_WL__send_dissipate_req( procrToDissipate ); + } + + +//=========================================================================== + +void * +Vthread__malloc( size_t sizeToMalloc, SlaveVP *animSlv ) + { VthdSemReq reqData; + + reqData.reqType = malloc_req; + reqData.sizeToMalloc = sizeToMalloc; + reqData.requestingSlv = animSlv; + + VMS_WL__send_sem_request( &reqData, animSlv ); + + return animSlv->dataRetFromReq; + } + + +/*Sends request to Master, which does the work of freeing + */ +void +Vthread__free( void *ptrToFree, SlaveVP *animSlv ) + { VthdSemReq reqData; + + reqData.reqType = free_req; + reqData.ptrToFree = ptrToFree; + reqData.requestingSlv = animSlv; + + VMS_WL__send_sem_request( &reqData, animSlv ); + } + + +//=========================================================================== + +inline void +Vthread__set_globals_to( void *globals ) + { + ((VthdSemEnv *) + (_VMSMasterEnv->semanticEnv))->applicationGlobals = globals; + } + 
+inline void * +Vthread__give_globals() + { + return((VthdSemEnv *) (_VMSMasterEnv->semanticEnv))->applicationGlobals; + } + + +//=========================================================================== + +inline int32 +Vthread__make_mutex( SlaveVP *animSlv ) + { VthdSemReq reqData; + + reqData.reqType = make_mutex; + reqData.requestingSlv = animSlv; + + VMS_WL__send_sem_request( &reqData, animSlv ); + + return (int32)animSlv->dataRetFromReq; //mutexid is 32bit wide + } + +inline void +Vthread__mutex_lock( int32 mutexIdx, SlaveVP *acquiringSlv ) + { VthdSemReq reqData; + + reqData.reqType = mutex_lock; + reqData.mutexIdx = mutexIdx; + reqData.requestingSlv = acquiringSlv; + + VMS_WL__send_sem_request( &reqData, acquiringSlv ); + } + +inline void +Vthread__mutex_unlock( int32 mutexIdx, SlaveVP *releasingSlv ) + { VthdSemReq reqData; + + reqData.reqType = mutex_unlock; + reqData.mutexIdx = mutexIdx; + reqData.requestingSlv = releasingSlv; + + VMS_WL__send_sem_request( &reqData, releasingSlv ); + } + + +//======================= +inline int32 +Vthread__make_cond( int32 ownedMutexIdx, SlaveVP *animSlv) + { VthdSemReq reqData; + + reqData.reqType = make_cond; + reqData.mutexIdx = ownedMutexIdx; + reqData.requestingSlv = animSlv; + + VMS_WL__send_sem_request( &reqData, animSlv ); + + return (int32)animSlv->dataRetFromReq; //condIdx is 32 bit wide + } + +inline void +Vthread__cond_wait( int32 condIdx, SlaveVP *waitingSlv) + { VthdSemReq reqData; + + reqData.reqType = cond_wait; + reqData.condIdx = condIdx; + reqData.requestingSlv = waitingSlv; + + VMS_WL__send_sem_request( &reqData, waitingSlv ); + } + +inline void * +Vthread__cond_signal( int32 condIdx, SlaveVP *signallingSlv ) + { VthdSemReq reqData; + + reqData.reqType = cond_signal; + reqData.condIdx = condIdx; + reqData.requestingSlv = signallingSlv; + + VMS_WL__send_sem_request( &reqData, signallingSlv ); + } + + +//=========================================================================== +// +/*A function 
singleton is a function whose body executes exactly once, on a + * single core, no matter how many times the function is called and no + * matter how many cores or the timing of cores calling it.
+ * + *Note, this call cannot be inlined because the instr addr at the label + * inside is shared by all invocations of a given singleton ID. + */ +void +Vthread__end_fn_singleton( int32 singletonID, SlaveVP *animSlv ) + { + VthdSemReq reqData; + + //don't need this addr until after at least one singleton has reached + // this function + VthdSemEnv *semEnv = VMS_int__give_sem_env_for( animSlv ); + VMS_int__return_to_addr_in_ptd_to_loc( + &((semEnv->fnSingletons[singletonID]).savedRetAddr) ); + + reqData.reqType = singleton_fn_end; + reqData.singletonID = singletonID; + + VMS_WL__send_sem_request( &reqData, animSlv ); + } + +void +Vthread__end_data_singleton( VthdSingleton *singleton, SlaveVP *animSlv ) + { + VthdSemReq reqData; + + //don't need this addr until after singleton struct has reached + // this function for first time + //do assembly that saves the return addr of this fn call into the + // data singleton -- that data-singleton can only be given to exactly + // one instance in the code of this function. However, can use this + // function in different places for different data-singletons. + + VMS_int__save_return_into_ptd_to_loc_then_do_ret(&(singleton->savedRetAddr)); + + reqData.reqType = singleton_data_end; + reqData.singleton = singleton; + + VMS_WL__send_sem_request( &reqData, animSlv ); + } + + +/*This executes the function in the masterVP, so it executes in isolation + * from any other copies -- only one copy of the function can ever execute + * at a time. + * + *It suspends to the master, and the request handler takes the function + * pointer out of the request and calls it, then resumes the Slv. + *Only very short functions should be called this way -- for longer-running + * isolation, use transaction-start and transaction-end, which run the code + * between as work-code. 
+ */ +void +Vthread__animate_short_fn_in_isolation( PtrToAtomicFn ptrToFnToExecInMaster, + void *data, SlaveVP *animSlv ) + { + VthdSemReq reqData; + + // + reqData.reqType = atomic; + reqData.fnToExecInMaster = ptrToFnToExecInMaster; + reqData.dataForFn = data; + + VMS_WL__send_sem_request( &reqData, animSlv ); + } + + +/*This suspends to the master. + *First, it looks at the Slv's data, to see the highest transactionID that Slv + * already has entered. If the current ID is not larger, it throws an + * exception stating a bug in the code. Otherwise it puts the current ID + * there, and adds the ID to a linked list of IDs entered -- the list is + * used to check that exits are properly ordered. + *Next it is uses transactionID as index into an array of transaction + * structures. + *If the "Slv_currently_executing" field is non-null, then put requesting Slv + * into queue in the struct. (At some point a holder will request + * end-transaction, which will take this Slv from the queue and resume it.) + *If NULL, then write requesting into the field and resume. + */ +void +Vthread__start_transaction( int32 transactionID, SlaveVP *animSlv ) + { + VthdSemReq reqData; + + // + reqData.reqType = trans_start; + reqData.transID = transactionID; + + VMS_WL__send_sem_request( &reqData, animSlv ); + } + +/*This suspends to the master, then uses transactionID as index into an + * array of transaction structures. + *It looks at Slv_currently_executing to be sure it's same as requesting Slv. + * If different, throws an exception, stating there's a bug in the code. + *Next it looks at the queue in the structure. + *If it's empty, it sets Slv_currently_executing field to NULL and resumes. + *If something in, gets it, sets Slv_currently_executing to that Slv, then + * resumes both. 
+ */ +void +Vthread__end_transaction( int32 transactionID, SlaveVP *animSlv ) + { + VthdSemReq reqData; + + // + reqData.reqType = trans_end; + reqData.transID = transactionID; + + VMS_WL__send_sem_request( &reqData, animSlv ); + } +//=========================================================================== diff -r b3a881f25c5a -r b94dc57e4455 Vthread.h --- a/Vthread.h Sun Mar 04 14:29:42 2012 -0800 +++ b/Vthread.h Wed May 09 13:24:19 2012 -0700 @@ -23,7 +23,7 @@ //=========================================================================== //turn on the counter measurements of language overhead -- comment to turn off #define MEAS__TURN_ON_LANG_MEAS -#include "Vthread_Overhead_Meas.h" +#include "Vthread__Measurement.h" #define INIT_NUM_MUTEX 10000 #define INIT_NUM_COND 10000 @@ -159,12 +159,27 @@ } VthdSemEnv; +//========================================================================== + +void +Vthread__init(); + +void +Vthread__init_Seq(); + +void +Vthread__init_Helper(); + //=========================================================================== inline void Vthread__create_seed_slaveVP_and_do_work( TopLevelFnPtr fn, void *initData ); +inline SlaveVP * +Vthread__create_slaveVP_helper( TopLevelFnPtr fnPtr, void *initData, + VthdSemEnv *semEnv, int32 coreToScheduleOnto ); + //======================= inline SlaveVP * diff -r b3a881f25c5a -r b94dc57e4455 Vthread_Measurement.h --- a/Vthread_Measurement.h Sun Mar 04 14:29:42 2012 -0800 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,108 +0,0 @@ -/* - * - * - * Created on June 10, 2011, 12:20 PM - */ - -#ifndef VTHREAD_MEAS_H -#define VTHREAD_MEAS_H - -#ifdef MEAS__TURN_ON_LANG_MEAS - - #ifdef MEAS__Make_Meas_Hists_for_Language - #undef MEAS__Make_Meas_Hists_for_Language - #endif - -//=================== Language-specific Measurement Stuff =================== -// -// - #define createHistIdx 1 //note: starts at 1 - #define mutexLockHistIdx 2 - #define mutexUnlockHistIdx 3 - #define condWaitHistIdx 4 - #define 
condSignalHistIdx 5 - - #define MEAS__Make_Meas_Hists_for_Language() \ - _VMSMasterEnv->measHistsInfo = \ - makePrivDynArrayOfSize( (void***)&(_VMSMasterEnv->measHists), 200); \ - makeAMeasHist( createHistIdx, "create", 250, 0, 100 ) \ - makeAMeasHist( mutexLockHistIdx, "mutex_lock", 50, 0, 100 ) \ - makeAMeasHist( mutexUnlockHistIdx, "mutex_unlock", 50, 0, 100 ) \ - makeAMeasHist( condWaitHistIdx, "cond_wait", 50, 0, 100 ) \ - makeAMeasHist( condSignalHistIdx, "cond_signal", 50, 0, 100 ) - - - #define Meas_startCreate \ - int32 startStamp, endStamp; \ - saveLowTimeStampCountInto( startStamp ); - - #define Meas_endCreate \ - saveLowTimeStampCountInto( endStamp ); \ - addIntervalToHist( startStamp, endStamp, \ - _VMSMasterEnv->measHists[ createHistIdx ] ); - - #define Meas_startMutexLock \ - int32 startStamp, endStamp; \ - saveLowTimeStampCountInto( startStamp ); - - #define Meas_endMutexLock \ - saveLowTimeStampCountInto( endStamp ); \ - addIntervalToHist( startStamp, endStamp, \ - _VMSMasterEnv->measHists[ mutexLockHistIdx ] ); - - #define Meas_startMutexUnlock \ - int32 startStamp, endStamp; \ - saveLowTimeStampCountInto( startStamp ); - - #define Meas_endMutexUnlock \ - saveLowTimeStampCountInto( endStamp ); \ - addIntervalToHist( startStamp, endStamp, \ - _VMSMasterEnv->measHists[ mutexUnlockHistIdx ] ); - - #define Meas_startCondWait \ - int32 startStamp, endStamp; \ - saveLowTimeStampCountInto( startStamp ); - - #define Meas_endCondWait \ - saveLowTimeStampCountInto( endStamp ); \ - addIntervalToHist( startStamp, endStamp, \ - _VMSMasterEnv->measHists[ condWaitHistIdx ] ); - - #define Meas_startCondSignal \ - int32 startStamp, endStamp; \ - saveLowTimeStampCountInto( startStamp ); - - #define Meas_endCondSignal \ - saveLowTimeStampCountInto( endStamp ); \ - addIntervalToHist( startStamp, endStamp, \ - _VMSMasterEnv->measHists[ condSignalHistIdx ] ); - -#else //===================== turned off ========================== - - #define 
MEAS__Make_Meas_Hists_for_Language() - - #define Meas_startCreate - - #define Meas_endCreate - - #define Meas_startMutexLock - - #define Meas_endMutexLock - - #define Meas_startMutexUnlock - - #define Meas_endMutexUnlock - - #define Meas_startCondWait - - #define Meas_endCondWait - - #define Meas_startCondSignal - - #define Meas_endCondSignal - -#endif /* MEAS__TURN_ON_LANG_MEAS */ - - -#endif /* VTHREAD_MEAS_H */ - diff -r b3a881f25c5a -r b94dc57e4455 Vthread_PluginFns.c --- a/Vthread_PluginFns.c Sun Mar 04 14:29:42 2012 -0800 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,186 +0,0 @@ -/* - * Copyright 2010 OpenSourceCodeStewardshipFoundation - * - * Licensed under BSD - */ - -#include -#include -#include - -#include "C_Libraries/Queue_impl/PrivateQueue.h" -#include "Vthread.h" -#include "Vthread_Request_Handlers.h" -#include "Vthread_helper.h" - -//=========================== Local Fn Prototypes =========================== - -void inline -handleSemReq( VMSReqst *req, SlaveVP *requestingSlv, VthdSemEnv *semEnv ); - -inline void -handleDissipate( SlaveVP *requestingSlv, VthdSemEnv *semEnv ); - -inline void -handleCreate( VMSReqst *req, SlaveVP *requestingSlv, VthdSemEnv *semEnv ); - - -//============================== Scheduler ================================== -// -/*For Vthread, scheduling a slave simply takes the next work-unit off the - * ready-to-go work-unit queue and assigns it to the slaveToSched. - *If the ready-to-go work-unit queue is empty, then nothing to schedule - * to the slave -- return FALSE to let Master loop know scheduling that - * slave failed. 
- */ -char __Scheduler[] = "FIFO Scheduler"; //Gobal variable for name in saved histogram -SlaveVP * -Vthread__schedule_slaveVP( void *_semEnv, int coreNum ) - { SlaveVP *schedSlv; - VthdSemEnv *semEnv; - - semEnv = (VthdSemEnv *)_semEnv; - - schedSlv = readPrivQ( semEnv->readySlvQs[coreNum] ); - //Note, using a non-blocking queue -- it returns NULL if queue empty - - return( schedSlv ); - } - - - -//=========================== Request Handler ============================= -// -/*Will get requests to send, to receive, and to create new processors. - * Upon send, check the hash to see if a receive is waiting. - * Upon receive, check hash to see if a send has already happened. - * When other is not there, put in. When other is there, the comm. - * completes, which means the receiver P gets scheduled and - * picks up right after the receive request. So make the work-unit - * and put it into the queue of work-units ready to go. - * Other request is create a new Processor, with the function to run in the - * Processor, and initial data. 
- */ -void -Vthread__Request_Handler( SlaveVP *requestingSlv, void *_semEnv ) - { VthdSemEnv *semEnv; - VMSReqst *req; - - semEnv = (VthdSemEnv *)_semEnv; - - req = VMS_PI__take_next_request_out_of( requestingSlv ); - - while( req != NULL ) - { - switch( req->reqType ) - { case semantic: handleSemReq( req, requestingSlv, semEnv); - break; - case createReq: handleCreate( req, requestingSlv, semEnv); - break; - case dissipate: handleDissipate( requestingSlv, semEnv); - break; - case VMSSemantic: VMS_PI__handle_VMSSemReq(req, requestingSlv, semEnv, - (ResumeSlvFnPtr)&resume_slaveVP); - break; - default: - break; - } - - req = VMS_PI__take_next_request_out_of( requestingSlv ); - } //while( req != NULL ) - } - - -void inline -handleSemReq( VMSReqst *req, SlaveVP *reqSlv, VthdSemEnv *semEnv ) - { VthdSemReq *semReq; - - semReq = VMS_PI__take_sem_reqst_from(req); - if( semReq == NULL ) return; - switch( semReq->reqType ) - { - case make_mutex: handleMakeMutex( semReq, semEnv); - break; - case mutex_lock: handleMutexLock( semReq, semEnv); - break; - case mutex_unlock: handleMutexUnlock(semReq, semEnv); - break; - case make_cond: handleMakeCond( semReq, semEnv); - break; - case cond_wait: handleCondWait( semReq, semEnv); - break; - case cond_signal: handleCondSignal( semReq, semEnv); - break; - case malloc_req: handleMalloc( semReq, reqSlv, semEnv); - break; - case free_req: handleFree( semReq, reqSlv, semEnv); - break; - case singleton_fn_start: handleStartFnSingleton(semReq, reqSlv, semEnv); - break; - case singleton_fn_end: handleEndFnSingleton( semReq, reqSlv, semEnv); - break; - case singleton_data_start:handleStartDataSingleton(semReq,reqSlv,semEnv); - break; - case singleton_data_end: handleEndDataSingleton(semReq, reqSlv, semEnv); - break; - case atomic: handleAtomic( semReq, reqSlv, semEnv); - break; - case trans_start: handleTransStart( semReq, reqSlv, semEnv); - break; - case trans_end: handleTransEnd( semReq, reqSlv, semEnv); - break; - } - } - 
-//=========================== VMS Request Handlers =========================== -// -inline void -handleDissipate( SlaveVP *requestingSlv, VthdSemEnv *semEnv ) - { - //free any semantic data allocated to the virt procr - VMS_PI__free( requestingSlv->semanticData ); - - //Now, call VMS to free_all AppSlv state -- stack and so on - VMS_PI__dissipate_slaveVP( requestingSlv ); - } - -inline void -handleCreate( VMSReqst *req, SlaveVP *requestingSlv, VthdSemEnv *semEnv ) - { VthdSemReq *semReq; - SlaveVP *newSlv; - - //========================= MEASUREMENT STUFF ====================== - Meas_startCreate - //================================================================== - - semReq = VMS_PI__take_sem_reqst_from( req ); - - newSlv = Vthread__create_slaveVP_helper( semReq->fnPtr, semReq->initData, - semEnv, semReq->coreToScheduleOnto); - - //For Vthread, caller needs ptr to created processor returned to it - requestingSlv->dataRetFromReq = newSlv; - - resume_slaveVP( newSlv, semEnv ); - resume_slaveVP( requestingSlv, semEnv ); - - //========================= MEASUREMENT STUFF ====================== - Meas_endCreate - #ifdef MEAS__TIME_PLUGIN - #ifdef MEAS__SUB_CREATE - subIntervalFromHist( startStamp, endStamp, - _VMSMasterEnv->reqHdlrHighTimeHist ); - #endif - #endif - //================================================================== - } - - -//=========================== Helper ============================== -void inline -resume_slaveVP( SlaveVP *procr, VthdSemEnv *semEnv ) - { - writePrivQ( procr, semEnv->readySlvQs[ procr->coreAnimatedBy] ); - } - -//=========================================================================== \ No newline at end of file diff -r b3a881f25c5a -r b94dc57e4455 Vthread__Measurement.h --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/Vthread__Measurement.h Wed May 09 13:24:19 2012 -0700 @@ -0,0 +1,98 @@ +/* + * + * + * Created on June 10, 2011, 12:20 PM + */ + +#ifndef VTHREAD_MEAS_H +#define VTHREAD_MEAS_H + +#ifdef 
MEAS__TURN_ON_LANG_MEAS + + #ifdef MEAS__Make_Meas_Hists_for_Language + #undef MEAS__Make_Meas_Hists_for_Language + #endif + +//=================== Language-specific Measurement Stuff =================== +// +// + #define createHistIdx 1 //note: starts at 1 + #define mutexLockHistIdx 2 + #define mutexUnlockHistIdx 3 + #define condWaitHistIdx 4 + #define condSignalHistIdx 5 + + #define MEAS__Make_Meas_Hists_for_Language \ + _VMSMasterEnv->measHistsInfo = \ + makePrivDynArrayOfSize( (void***)&(_VMSMasterEnv->measHists), 200); \ + makeAMeasHist( createHistIdx, "create", 250, 0, 100 ) \ + makeAMeasHist( mutexLockHistIdx, "mutex_lock", 50, 0, 100 ) \ + makeAMeasHist( mutexUnlockHistIdx, "mutex_unlock", 50, 0, 100 ) \ + makeAMeasHist( condWaitHistIdx, "cond_wait", 50, 0, 100 ) \ + makeAMeasHist( condSignalHistIdx, "cond_signal", 50, 0, 100 ) + + + #define Meas_startCreate \ + int32 startStamp, endStamp; \ + saveLowTimeStampCountInto( startStamp ); + + #define Meas_endCreate \ + saveLowTimeStampCountInto( endStamp ); \ + addIntervalToHist( startStamp, endStamp, \ + _VMSMasterEnv->measHists[ createHistIdx ] ); + + #define Meas_startMutexLock \ + int32 startStamp, endStamp; \ + saveLowTimeStampCountInto( startStamp ); + + #define Meas_endMutexLock \ + saveLowTimeStampCountInto( endStamp ); \ + addIntervalToHist( startStamp, endStamp, \ + _VMSMasterEnv->measHists[ mutexLockHistIdx ] ); + + #define Meas_startMutexUnlock \ + int32 startStamp, endStamp; \ + saveLowTimeStampCountInto( startStamp ); + + #define Meas_endMutexUnlock \ + saveLowTimeStampCountInto( endStamp ); \ + addIntervalToHist( startStamp, endStamp, \ + _VMSMasterEnv->measHists[ mutexUnlockHistIdx ] ); + + #define Meas_startCondWait \ + int32 startStamp, endStamp; \ + saveLowTimeStampCountInto( startStamp ); + + #define Meas_endCondWait \ + saveLowTimeStampCountInto( endStamp ); \ + addIntervalToHist( startStamp, endStamp, \ + _VMSMasterEnv->measHists[ condWaitHistIdx ] ); + + #define Meas_startCondSignal \ + 
int32 startStamp, endStamp; \ + saveLowTimeStampCountInto( startStamp ); + + #define Meas_endCondSignal \ + saveLowTimeStampCountInto( endStamp ); \ + addIntervalToHist( startStamp, endStamp, \ + _VMSMasterEnv->measHists[ condSignalHistIdx ] ); + +#else //===================== turned off ========================== + + #define MEAS__Make_Meas_Hists_for_Language + #define Meas_startCreate + #define Meas_endCreate + #define Meas_startMutexLock + #define Meas_endMutexLock + #define Meas_startMutexUnlock + #define Meas_endMutexUnlock + #define Meas_startCondWait + #define Meas_endCondWait + #define Meas_startCondSignal + #define Meas_endCondSignal + +#endif /* MEAS__TURN_ON_LANG_MEAS */ + + +#endif /* VTHREAD_MEAS_H */ + diff -r b3a881f25c5a -r b94dc57e4455 Vthread__PluginFns.c --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/Vthread__PluginFns.c Wed May 09 13:24:19 2012 -0700 @@ -0,0 +1,186 @@ +/* + * Copyright 2010 OpenSourceCodeStewardshipFoundation + * + * Licensed under BSD + */ + +#include +#include +#include + +#include "C_Libraries/Queue_impl/PrivateQueue.h" +#include "Vthread.h" +#include "Vthread_Request_Handlers.h" +#include "Vthread_helper.h" + +//=========================== Local Fn Prototypes =========================== + +void inline +handleSemReq( VMSReqst *req, SlaveVP *requestingSlv, VthdSemEnv *semEnv ); + +inline void +handleDissipate( SlaveVP *requestingSlv, VthdSemEnv *semEnv ); + +inline void +handleCreate( VMSReqst *req, SlaveVP *requestingSlv, VthdSemEnv *semEnv ); + + +//============================== Scheduler ================================== +// +/*For Vthread, scheduling a slave simply takes the next work-unit off the + * ready-to-go work-unit queue and assigns it to the slaveToSched. + *If the ready-to-go work-unit queue is empty, then nothing to schedule + * to the slave -- return FALSE to let Master loop know scheduling that + * slave failed. 
+ */ +char __Scheduler[] = "FIFO Scheduler"; //Global variable for name in saved histogram
+ */ +void +Vthread__Request_Handler( SlaveVP *requestingSlv, void *_semEnv ) + { VthdSemEnv *semEnv; + VMSReqst *req; + + semEnv = (VthdSemEnv *)_semEnv; + + req = VMS_PI__take_next_request_out_of( requestingSlv ); + + while( req != NULL ) + { + switch( req->reqType ) + { case semantic: handleSemReq( req, requestingSlv, semEnv); + break; + case createReq: handleCreate( req, requestingSlv, semEnv); + break; + case dissipate: handleDissipate( requestingSlv, semEnv); + break; + case VMSSemantic: VMS_PI__handle_VMSSemReq(req, requestingSlv, semEnv, + (ResumeSlvFnPtr)&resume_slaveVP); + break; + default: + break; + } + + req = VMS_PI__take_next_request_out_of( requestingSlv ); + } //while( req != NULL ) + } + + +void inline +handleSemReq( VMSReqst *req, SlaveVP *reqSlv, VthdSemEnv *semEnv ) + { VthdSemReq *semReq; + + semReq = VMS_PI__take_sem_reqst_from(req); + if( semReq == NULL ) return; + switch( semReq->reqType ) + { + case make_mutex: handleMakeMutex( semReq, semEnv); + break; + case mutex_lock: handleMutexLock( semReq, semEnv); + break; + case mutex_unlock: handleMutexUnlock(semReq, semEnv); + break; + case make_cond: handleMakeCond( semReq, semEnv); + break; + case cond_wait: handleCondWait( semReq, semEnv); + break; + case cond_signal: handleCondSignal( semReq, semEnv); + break; + case malloc_req: handleMalloc( semReq, reqSlv, semEnv); + break; + case free_req: handleFree( semReq, reqSlv, semEnv); + break; + case singleton_fn_start: handleStartFnSingleton(semReq, reqSlv, semEnv); + break; + case singleton_fn_end: handleEndFnSingleton( semReq, reqSlv, semEnv); + break; + case singleton_data_start:handleStartDataSingleton(semReq,reqSlv,semEnv); + break; + case singleton_data_end: handleEndDataSingleton(semReq, reqSlv, semEnv); + break; + case atomic: handleAtomic( semReq, reqSlv, semEnv); + break; + case trans_start: handleTransStart( semReq, reqSlv, semEnv); + break; + case trans_end: handleTransEnd( semReq, reqSlv, semEnv); + break; + } + } + 
+//=========================== VMS Request Handlers =========================== +// +inline void +handleDissipate( SlaveVP *requestingSlv, VthdSemEnv *semEnv ) + { + //free any semantic data allocated to the virt procr + VMS_PI__free( requestingSlv->semanticData ); + + //Now, call VMS to free_all AppSlv state -- stack and so on + VMS_PI__dissipate_slaveVP( requestingSlv ); + } + +inline void +handleCreate( VMSReqst *req, SlaveVP *requestingSlv, VthdSemEnv *semEnv ) + { VthdSemReq *semReq; + SlaveVP *newSlv; + + //========================= MEASUREMENT STUFF ====================== + Meas_startCreate + //================================================================== + + semReq = VMS_PI__take_sem_reqst_from( req ); + + newSlv = Vthread__create_slaveVP_helper( semReq->fnPtr, semReq->initData, + semEnv, semReq->coreToScheduleOnto); + + //For Vthread, caller needs ptr to created processor returned to it + requestingSlv->dataRetFromReq = newSlv; + + resume_slaveVP( newSlv, semEnv ); + resume_slaveVP( requestingSlv, semEnv ); + + //========================= MEASUREMENT STUFF ====================== + Meas_endCreate + #ifdef MEAS__TIME_PLUGIN + #ifdef MEAS__SUB_CREATE + subIntervalFromHist( startStamp, endStamp, + _VMSMasterEnv->reqHdlrHighTimeHist ); + #endif + #endif + //================================================================== + } + + +//=========================== Helper ============================== +void inline +resume_slaveVP( SlaveVP *procr, VthdSemEnv *semEnv ) + { + writePrivQ( procr, semEnv->readySlvQs[ procr->coreAnimatedBy] ); + } + +//=========================================================================== \ No newline at end of file diff -r b3a881f25c5a -r b94dc57e4455 Vthread__startup_and_shutdown.c --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/Vthread__startup_and_shutdown.c Wed May 09 13:24:19 2012 -0700 @@ -0,0 +1,228 @@ +/* + * Copyright 2010 OpenSourceCodeStewardshipFoundation + * + * Licensed under BSD + */ + +#include +#include + 
+#include "VMS_impl/VMS.h" +#include "Vthread.h" +#include "C_Libraries/Queue_impl/PrivateQueue.h" +#include "C_Libraries/Hash_impl/PrivateHash.h" + + + +//=========================================================================== + +//TODO: update these comments! +/*These are the library functions *called in the application* + * + *There's a pattern for the outside sequential code to interact with the + * VMS_HW code. + *The VMS_HW system is inside a boundary.. every Vthread system is in its + * own directory that contains the functions for each of the processor types. + * One of the processor types is the "seed" processor that starts the + * cascade of creating all the processors that do the work. + *So, in the directory is a file called "EntryPoint.c" that contains the + * function, named appropriately to the work performed, that the outside + * sequential code calls. This function follows a pattern: + *1) it calls Vthread__init() + *2) it creates the initial data for the seed processor, which is passed + * in to the function + *3) it creates the seed Vthread processor, with the data to start it with. + *4) it calls startVthreadThenWaitUntilWorkDone + *5) it gets the returnValue from the transfer struc and returns that + * from the function + * + *For now, a new Vthread system has to be created via Vthread__init every + * time an entry point function is called -- later, might add letting the + * Vthread system be created once, and let all the entry points just reuse + * it -- want to be as simple as possible now, and see by using what makes + * sense for later.. + */ + + + +//=========================================================================== + +/*This is the "border crossing" function -- the thing that crosses from the + * outside world, into the VMS_HW world. It initializes and starts up the + * VMS system, then creates one processor from the specified function and + * puts it into the readyQ. From that point, that one function is resp. 
+ * for creating all the other processors, that then create others, and so + * forth. + *When all the processors, including the seed, have dissipated, then this + * function returns. The results will have been written by side-effect via + * pointers read from, or written into initData. + * + *NOTE: no Threads should exist in the outside program that might touch + * any of the data reachable from initData passed in to here + */ +void +Vthread__create_seed_slaveVP_and_do_work( TopLevelFnPtr fnPtr, void *initData ) + { VthdSemEnv *semEnv; + SlaveVP *seedSlv; + + #ifdef DEBUG__TURN_ON_SEQUENTIAL_MODE + Vthread__init_Seq(); //debug sequential exe + #else + Vthread__init(); //normal multi-thd + #endif + semEnv = _VMSMasterEnv->semanticEnv; + + //Vthread starts with one processor, which is put into initial environ, + // and which then calls create() to create more, thereby expanding work + seedSlv = Vthread__create_slaveVP_helper( fnPtr, initData, semEnv, -1 ); + + resume_slaveVP( seedSlv, semEnv ); + + #ifdef DEBUG__TURN_ON_SEQUENTIAL_MODE + VMS_SS__start_the_work_then_wait_until_done_Seq(); //debug sequential exe + #else + VMS_SS__start_the_work_then_wait_until_done(); //normal multi-thd + #endif + + Vthread__cleanup_after_shutdown(); + } + + +//=========================================================================== +// +/*Initializes all the data-structures for a Vthread system -- but doesn't + * start it running yet! + * + * + *This sets up the semantic layer over the VMS system + * + *First, calls VMS_Setup, then creates own environment, making it ready + * for creating the seed processor and then starting the work. 
+ */ +void +Vthread__init() + { + MEAS__Make_Meas_Hists_for_Language; + + VMS_SS__init(); + //masterEnv, a global var, now is partially set up by init_VMS + + Vthread__init_Helper(); + } + +#ifdef DEBUG__TURN_ON_SEQUENTIAL_MODE +void +Vthread__init_Seq() + { + VMS_SS__init_Seq(); + flushRegisters(); + //masterEnv, a global var, now is partially set up by init_VMS + + Vthread__init_Helper(); + } +#endif + +void +Vthread__init_Helper() + { VthdSemEnv *semanticEnv; + PrivQueueStruc **readySlvQs; + int coreIdx, i; + + //Hook up the semantic layer's plug-ins to the Master virt procr + _VMSMasterEnv->requestHandler = &Vthread__Request_Handler; + _VMSMasterEnv->slaveAssigner = &Vthread__schedule_slaveVP; + + //create the semantic layer's environment (all its data) and add to + // the master environment + semanticEnv = VMS_WL__malloc( sizeof( VthdSemEnv ) ); + _VMSMasterEnv->semanticEnv = semanticEnv; + + //create the ready queue + readySlvQs = VMS_WL__malloc( NUM_CORES * sizeof(PrivQueueStruc *) ); + + for( coreIdx = 0; coreIdx < NUM_CORES; coreIdx++ ) + { + readySlvQs[ coreIdx ] = makeVMSQ(); + } + + semanticEnv->readySlvQs = readySlvQs; + + semanticEnv->nextCoreToGetNewSlv = 0; + + semanticEnv->mutexDynArrayInfo = + makePrivDynArrayOfSize( (void*)&(semanticEnv->mutexDynArray), INIT_NUM_MUTEX ); + + semanticEnv->condDynArrayInfo = + makePrivDynArrayOfSize( (void*)&(semanticEnv->condDynArray), INIT_NUM_COND ); + + //TODO: bug -- turn these arrays into dyn arrays to eliminate limit + //semanticEnv->singletonHasBeenExecutedFlags = makeDynArrayInfo( ); + //semanticEnv->transactionStrucs = makeDynArrayInfo( ); + for( i = 0; i < NUM_STRUCS_IN_SEM_ENV; i++ ) + { + semanticEnv->fnSingletons[i].savedRetAddr = NULL; + semanticEnv->fnSingletons[i].hasBeenStarted = FALSE; + semanticEnv->fnSingletons[i].hasFinished = FALSE; + semanticEnv->fnSingletons[i].waitQ = makeVMSQ(); + semanticEnv->transactionStrucs[i].waitingSlvQ = makeVMSQ(); + } + } + + +/*Frees any memory allocated by 
Vthread__init() then calls VMS__shutdown + */ +void +Vthread__cleanup_after_shutdown() + { /*VthdSemEnv *semEnv; + int32 coreIdx, idx, highestIdx; + VthdMutex **mutexArray, *mutex; + VthdCond **condArray, *cond; */ + + /* It's all allocated inside VMS's big chunk -- that's about to be freed, so + * nothing to do here + semEnv = _VMSMasterEnv->semanticEnv; + +//TODO: double check that all sem env locations freed + + for( coreIdx = 0; coreIdx < NUM_CORES; coreIdx++ ) + { + free( semEnv->readySlvQs[coreIdx]->startOfData ); + free( semEnv->readySlvQs[coreIdx] ); + } + + free( semEnv->readySlvQs ); + + + //==== Free mutexes and mutex array ==== + mutexArray = semEnv->mutexDynArray->array; + highestIdx = semEnv->mutexDynArray->highestIdxInArray; + for( idx=0; idx < highestIdx; idx++ ) + { mutex = mutexArray[ idx ]; + if( mutex == NULL ) continue; + free( mutex ); + } + free( mutexArray ); + free( semEnv->mutexDynArray ); + //====================================== + + + //==== Free conds and cond array ==== + condArray = semEnv->condDynArray->array; + highestIdx = semEnv->condDynArray->highestIdxInArray; + for( idx=0; idx < highestIdx; idx++ ) + { cond = condArray[ idx ]; + if( cond == NULL ) continue; + free( cond ); + } + free( condArray ); + free( semEnv->condDynArray ); + //=================================== + + + free( _VMSMasterEnv->semanticEnv ); + */ + VMS_SS__cleanup_at_end_of_shutdown(); + } + + +//=========================================================================== diff -r b3a881f25c5a -r b94dc57e4455 Vthread_helper.c --- a/Vthread_helper.c Sun Mar 04 14:29:42 2012 -0800 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,46 +0,0 @@ - -#include - -#include "VMS_impl/VMS.h" -#include "Vthread.h" - -/*Re-use this in the entry-point fn - */ -inline SlaveVP * -Vthread__create_slaveVP_helper( TopLevelFnPtr fnPtr, void *initData, - VthdSemEnv *semEnv, int32 coreToScheduleOnto ) - { SlaveVP *newSlv; - VthdSemData *semData; - - //This is running in master, so 
use internal version - newSlv = VMS_WL__create_slaveVP( fnPtr, initData ); - - semData = VMS_WL__malloc( sizeof(VthdSemData) ); - semData->highestTransEntered = -1; - semData->lastTransEntered = NULL; - - newSlv->semanticData = semData; - - //=================== Assign new processor to a core ===================== - #ifdef SEQUENTIAL - newSlv->coreAnimatedBy = 0; - - #else - - if(coreToScheduleOnto < 0 || coreToScheduleOnto >= NUM_CORES ) - { //out-of-range, so round-robin assignment - newSlv->coreAnimatedBy = semEnv->nextCoreToGetNewSlv; - - if( semEnv->nextCoreToGetNewSlv >= NUM_CORES - 1 ) - semEnv->nextCoreToGetNewSlv = 0; - else - semEnv->nextCoreToGetNewSlv += 1; - } - else //core num in-range, so use it - { newSlv->coreAnimatedBy = coreToScheduleOnto; - } - #endif - //======================================================================== - - return newSlv; - } \ No newline at end of file diff -r b3a881f25c5a -r b94dc57e4455 Vthread_helper.h --- a/Vthread_helper.h Sun Mar 04 14:29:42 2012 -0800 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,19 +0,0 @@ -/* - * File: Vthread_helper.h - * Author: msach - * - * Created on June 10, 2011, 12:20 PM - */ - -#include "VMS_impl/VMS.h" -#include "Vthread.h" - -#ifndef VTHREAD_HELPER_H -#define VTHREAD_HELPER_H - -inline SlaveVP * -Vthread__create_slaveVP_helper( TopLevelFnPtr fnPtr, void *initData, - VthdSemEnv *semEnv, int32 coreToScheduleOnto ); - -#endif /* VTHREAD_HELPER_H */ - diff -r b3a881f25c5a -r b94dc57e4455 Vthread_lib.c --- a/Vthread_lib.c Sun Mar 04 14:29:42 2012 -0800 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,618 +0,0 @@ -/* - * Copyright 2010 OpenSourceCodeStewardshipFoundation - * - * Licensed under BSD - */ - -#include -#include - -#include "VMS_impl/VMS.h" -#include "Vthread.h" -#include "Vthread_helper.h" -#include "C_Libraries/Queue_impl/PrivateQueue.h" -#include "C_Libraries/Hash_impl/PrivateHash.h" - - -//========================================================================== - 
-void -Vthread__init(); - -void -Vthread__init_Seq(); - -void -Vthread__init_Helper(); - - -//=========================================================================== - - -/*These are the library functions *called in the application* - * - *There's a pattern for the outside sequential code to interact with the - * VMS_HW code. - *The VMS_HW system is inside a boundary.. every Vthread system is in its - * own directory that contains the functions for each of the processor types. - * One of the processor types is the "seed" processor that starts the - * cascade of creating all the processors that do the work. - *So, in the directory is a file called "EntryPoint.c" that contains the - * function, named appropriately to the work performed, that the outside - * sequential code calls. This function follows a pattern: - *1) it calls Vthread__init() - *2) it creates the initial data for the seed processor, which is passed - * in to the function - *3) it creates the seed Vthread processor, with the data to start it with. - *4) it calls startVthreadThenWaitUntilWorkDone - *5) it gets the returnValue from the transfer struc and returns that - * from the function - * - *For now, a new Vthread system has to be created via Vthread__init every - * time an entry point function is called -- later, might add letting the - * Vthread system be created once, and let all the entry points just reuse - * it -- want to be as simple as possible now, and see by using what makes - * sense for later.. - */ - - - -//=========================================================================== - -/*This is the "border crossing" function -- the thing that crosses from the - * outside world, into the VMS_HW world. It initializes and starts up the - * VMS system, then creates one processor from the specified function and - * puts it into the readyQ. From that point, that one function is resp. - * for creating all the other processors, that then create others, and so - * forth. 
- *When all the processors, including the seed, have dissipated, then this - * function returns. The results will have been written by side-effect via - * pointers read from, or written into initData. - * - *NOTE: no Threads should exist in the outside program that might touch - * any of the data reachable from initData passed in to here - */ -void -Vthread__create_seed_slaveVP_and_do_work( TopLevelFnPtr fnPtr, void *initData ) - { VthdSemEnv *semEnv; - SlaveVP *seedSlv; - - #ifdef SEQUENTIAL - Vthread__init_Seq(); //debug sequential exe - #else - Vthread__init(); //normal multi-thd - #endif - semEnv = _VMSMasterEnv->semanticEnv; - - //Vthread starts with one processor, which is put into initial environ, - // and which then calls create() to create more, thereby expanding work - seedSlv = Vthread__create_slaveVP_helper( fnPtr, initData, semEnv, -1 ); - - resume_slaveVP( seedSlv, semEnv ); - - #ifdef SEQUENTIAL - VMS_SS__start_the_work_then_wait_until_done_Seq(); //debug sequential exe - #else - VMS_SS__start_the_work_then_wait_until_done(); //normal multi-thd - #endif - - Vthread__cleanup_after_shutdown(); - } - - -inline int32 -Vthread__giveMinWorkUnitCycles( float32 percentOverhead ) - { - return MIN_WORK_UNIT_CYCLES; - } - -inline int32 -Vthread__giveIdealNumWorkUnits() - { - return NUM_SCHED_SLOTS * NUM_CORES; - } - -inline int32 -Vthread__give_number_of_cores_to_schedule_onto() - { - return NUM_CORES; - } - -/*For now, use TSC -- later, make these two macros with assembly that first - * saves jump point, and second jumps back several times to get reliable time - */ -inline void -Vthread__start_primitive() - { saveLowTimeStampCountInto( ((VthdSemEnv *)(_VMSMasterEnv->semanticEnv))-> - primitiveStartTime ); - } - -/*Just quick and dirty for now -- make reliable later - * will want this to jump back several times -- to be sure cache is warm - * because don't want comm time included in calc-time measurement -- and - * also to throw out any "weird" values due to OS 
interrupt or TSC rollover - */ -inline int32 -Vthread__end_primitive_and_give_cycles() - { int32 endTime, startTime; - //TODO: fix by repeating time-measurement - saveLowTimeStampCountInto( endTime ); - startTime=((VthdSemEnv*)(_VMSMasterEnv->semanticEnv))->primitiveStartTime; - return (endTime - startTime); - } - -//=========================================================================== -// -/*Initializes all the data-structures for a Vthread system -- but doesn't - * start it running yet! - * - * - *This sets up the semantic layer over the VMS system - * - *First, calls VMS_Setup, then creates own environment, making it ready - * for creating the seed processor and then starting the work. - */ -void -Vthread__init() - { - VMS_SS__init(); - //masterEnv, a global var, now is partially set up by init_VMS - - Vthread__init_Helper(); - } - -#ifdef SEQUENTIAL -void -Vthread__init_Seq() - { - VMS_SS__init_Seq(); - flushRegisters(); - //masterEnv, a global var, now is partially set up by init_VMS - - Vthread__init_Helper(); - } -#endif - -void -Vthread__init_Helper() - { VthdSemEnv *semanticEnv; - PrivQueueStruc **readySlvQs; - int coreIdx, i; - - //Hook up the semantic layer's plug-ins to the Master virt procr - _VMSMasterEnv->requestHandler = &Vthread__Request_Handler; - _VMSMasterEnv->slaveAssigner = &Vthread__schedule_slaveVP; - - //create the semantic layer's environment (all its data) and add to - // the master environment - semanticEnv = VMS_WL__malloc( sizeof( VthdSemEnv ) ); - _VMSMasterEnv->semanticEnv = semanticEnv; - - //create the ready queue - readySlvQs = VMS_WL__malloc( NUM_CORES * sizeof(PrivQueueStruc *) ); - - for( coreIdx = 0; coreIdx < NUM_CORES; coreIdx++ ) - { - readySlvQs[ coreIdx ] = makeVMSQ(); - } - - semanticEnv->readySlvQs = readySlvQs; - - semanticEnv->nextCoreToGetNewSlv = 0; - - semanticEnv->mutexDynArrayInfo = - makePrivDynArrayOfSize( (void*)&(semanticEnv->mutexDynArray), INIT_NUM_MUTEX ); - - semanticEnv->condDynArrayInfo = - 
makePrivDynArrayOfSize( (void*)&(semanticEnv->condDynArray), INIT_NUM_COND ); - - //TODO: bug -- turn these arrays into dyn arrays to eliminate limit - //semanticEnv->singletonHasBeenExecutedFlags = makeDynArrayInfo( ); - //semanticEnv->transactionStrucs = makeDynArrayInfo( ); - for( i = 0; i < NUM_STRUCS_IN_SEM_ENV; i++ ) - { - semanticEnv->fnSingletons[i].savedRetAddr = NULL; - semanticEnv->fnSingletons[i].hasBeenStarted = FALSE; - semanticEnv->fnSingletons[i].hasFinished = FALSE; - semanticEnv->fnSingletons[i].waitQ = makeVMSQ(); - semanticEnv->transactionStrucs[i].waitingSlvQ = makeVMSQ(); - } - } - - -/*Frees any memory allocated by Vthread__init() then calls VMS__shutdown - */ -void -Vthread__cleanup_after_shutdown() - { /*VthdSemEnv *semEnv; - int32 coreIdx, idx, highestIdx; - VthdMutex **mutexArray, *mutex; - VthdCond **condArray, *cond; */ - - /* It's all allocated inside VMS's big chunk -- that's about to be freed, so - * nothing to do here - semEnv = _VMSMasterEnv->semanticEnv; - -//TODO: double check that all sem env locations freed - - for( coreIdx = 0; coreIdx < NUM_CORES; coreIdx++ ) - { - free( semEnv->readySlvQs[coreIdx]->startOfData ); - free( semEnv->readySlvQs[coreIdx] ); - } - - free( semEnv->readySlvQs ); - - - //==== Free mutexes and mutex array ==== - mutexArray = semEnv->mutexDynArray->array; - highestIdx = semEnv->mutexDynArray->highestIdxInArray; - for( idx=0; idx < highestIdx; idx++ ) - { mutex = mutexArray[ idx ]; - if( mutex == NULL ) continue; - free( mutex ); - } - free( mutexArray ); - free( semEnv->mutexDynArray ); - //====================================== - - - //==== Free conds and cond array ==== - condArray = semEnv->condDynArray->array; - highestIdx = semEnv->condDynArray->highestIdxInArray; - for( idx=0; idx < highestIdx; idx++ ) - { cond = condArray[ idx ]; - if( cond == NULL ) continue; - free( cond ); - } - free( condArray ); - free( semEnv->condDynArray ); - //=================================== - - - free( 
_VMSMasterEnv->semanticEnv ); - */ - VMS_SS__cleanup_at_end_of_shutdown(); - } - - -//=========================================================================== - -/* - */ -inline SlaveVP * -Vthread__create_thread( TopLevelFnPtr fnPtr, void *initData, - SlaveVP *creatingSlv ) - { VthdSemReq reqData; - - //the semantic request data is on the stack and disappears when this - // call returns -- it's guaranteed to remain in the Slv's stack for as - // long as the Slv is suspended. - reqData.reqType = 0; //know the type because is a VMS create req - reqData.coreToScheduleOnto = -1; //means round-robin schedule - reqData.fnPtr = fnPtr; - reqData.initData = initData; - reqData.requestingSlv = creatingSlv; - - VMS_WL__send_create_slaveVP_req( &reqData, creatingSlv ); - - return creatingSlv->dataRetFromReq; - } - -inline SlaveVP * -Vthread__create_thread_with_affinity( TopLevelFnPtr fnPtr, void *initData, - SlaveVP *creatingSlv, int32 coreToScheduleOnto ) - { VthdSemReq reqData; - - //the semantic request data is on the stack and disappears when this - // call returns -- it's guaranteed to remain in the Slv's stack for as - // long as the Slv is suspended. 
- reqData.reqType = 0; //know type because in a VMS create req - reqData.coreToScheduleOnto = coreToScheduleOnto; - reqData.fnPtr = fnPtr; - reqData.initData = initData; - reqData.requestingSlv = creatingSlv; - - VMS_WL__send_create_slaveVP_req( &reqData, creatingSlv ); - } - -inline void -Vthread__dissipate_thread( SlaveVP *procrToDissipate ) - { - VMS_WL__send_dissipate_req( procrToDissipate ); - } - - -//=========================================================================== - -void * -Vthread__malloc( size_t sizeToMalloc, SlaveVP *animSlv ) - { VthdSemReq reqData; - - reqData.reqType = malloc_req; - reqData.sizeToMalloc = sizeToMalloc; - reqData.requestingSlv = animSlv; - - VMS_WL__send_sem_request( &reqData, animSlv ); - - return animSlv->dataRetFromReq; - } - - -/*Sends request to Master, which does the work of freeing - */ -void -Vthread__free( void *ptrToFree, SlaveVP *animSlv ) - { VthdSemReq reqData; - - reqData.reqType = free_req; - reqData.ptrToFree = ptrToFree; - reqData.requestingSlv = animSlv; - - VMS_WL__send_sem_request( &reqData, animSlv ); - } - - -//=========================================================================== - -inline void -Vthread__set_globals_to( void *globals ) - { - ((VthdSemEnv *) - (_VMSMasterEnv->semanticEnv))->applicationGlobals = globals; - } - -inline void * -Vthread__give_globals() - { - return((VthdSemEnv *) (_VMSMasterEnv->semanticEnv))->applicationGlobals; - } - - -//=========================================================================== - -inline int32 -Vthread__make_mutex( SlaveVP *animSlv ) - { VthdSemReq reqData; - - reqData.reqType = make_mutex; - reqData.requestingSlv = animSlv; - - VMS_WL__send_sem_request( &reqData, animSlv ); - - return (int32)animSlv->dataRetFromReq; //mutexid is 32bit wide - } - -inline void -Vthread__mutex_lock( int32 mutexIdx, SlaveVP *acquiringSlv ) - { VthdSemReq reqData; - - reqData.reqType = mutex_lock; - reqData.mutexIdx = mutexIdx; - reqData.requestingSlv = acquiringSlv; - 
- VMS_WL__send_sem_request( &reqData, acquiringSlv ); - } - -inline void -Vthread__mutex_unlock( int32 mutexIdx, SlaveVP *releasingSlv ) - { VthdSemReq reqData; - - reqData.reqType = mutex_unlock; - reqData.mutexIdx = mutexIdx; - reqData.requestingSlv = releasingSlv; - - VMS_WL__send_sem_request( &reqData, releasingSlv ); - } - - -//======================= -inline int32 -Vthread__make_cond( int32 ownedMutexIdx, SlaveVP *animSlv) - { VthdSemReq reqData; - - reqData.reqType = make_cond; - reqData.mutexIdx = ownedMutexIdx; - reqData.requestingSlv = animSlv; - - VMS_WL__send_sem_request( &reqData, animSlv ); - - return (int32)animSlv->dataRetFromReq; //condIdx is 32 bit wide - } - -inline void -Vthread__cond_wait( int32 condIdx, SlaveVP *waitingSlv) - { VthdSemReq reqData; - - reqData.reqType = cond_wait; - reqData.condIdx = condIdx; - reqData.requestingSlv = waitingSlv; - - VMS_WL__send_sem_request( &reqData, waitingSlv ); - } - -inline void * -Vthread__cond_signal( int32 condIdx, SlaveVP *signallingSlv ) - { VthdSemReq reqData; - - reqData.reqType = cond_signal; - reqData.condIdx = condIdx; - reqData.requestingSlv = signallingSlv; - - VMS_WL__send_sem_request( &reqData, signallingSlv ); - } - - -//=========================================================================== -// -/*A function singleton is a function whose body executes exactly once, on a - * single core, no matter how many times the fuction is called and no - * matter how many cores or the timing of cores calling it. - * - *A data singleton is a ticket attached to data. That ticket can be used - * to get the data through the function exactly once, no matter how many - * times the data is given to the function, and no matter the timing of - * trying to get the data through from different cores. - */ - -/*Fn singleton uses ID as index into array of singleton structs held in the - * semantic environment. 
- */ -void -Vthread__start_fn_singleton( int32 singletonID, SlaveVP *animSlv ) - { - VthdSemReq reqData; - - // - reqData.reqType = singleton_fn_start; - reqData.singletonID = singletonID; - - VMS_WL__send_sem_request( &reqData, animSlv ); - if( animSlv->dataRetFromReq != 0 ) //addr of matching end-singleton - { - VthdSemEnv *semEnv = VMS_int__give_sem_env_for( animSlv ); //not protected! - VMS_int__return_to_addr_in_ptd_to_loc( - &((semEnv->fnSingletons[singletonID]).savedRetAddr) ); - } - } - -/*Data singleton hands addr of loc holding a pointer to a singleton struct. - * The start_data_singleton makes the structure and puts its addr into the - * location. - */ -void -Vthread__start_data_singleton( VthdSingleton *singleton, SlaveVP *animSlv ) - { - VthdSemReq reqData; - - if( singleton->savedRetAddr && singleton->hasFinished ) - goto JmpToEndSingleton; - - reqData.reqType = singleton_data_start; - reqData.singleton = singleton; - - VMS_WL__send_sem_request( &reqData, animSlv ); - if( animSlv->dataRetFromReq ) //either 0 or end singleton's return addr - { - JmpToEndSingleton: - VMS_int__return_to_addr_in_ptd_to_loc(&(singleton->savedRetAddr)); - } - //now, simply return - //will exit either from the start singleton call or the end-singleton call - } - -/*Uses ID as index into array of flags. If flag already set, resumes from - * end-label. Else, sets flag and resumes normally. - * - *Note, this call cannot be inlined because the instr addr at the label - * inside is shared by all invocations of a given singleton ID. 
- */ -void -Vthread__end_fn_singleton( int32 singletonID, SlaveVP *animSlv ) - { - VthdSemReq reqData; - - //don't need this addr until after at least one singleton has reached - // this function - VthdSemEnv *semEnv = VMS_int__give_sem_env_for( animSlv ); - VMS_int__return_to_addr_in_ptd_to_loc( - &((semEnv->fnSingletons[singletonID]).savedRetAddr) ); - - reqData.reqType = singleton_fn_end; - reqData.singletonID = singletonID; - - VMS_WL__send_sem_request( &reqData, animSlv ); - } - -void -Vthread__end_data_singleton( VthdSingleton *singleton, SlaveVP *animSlv ) - { - VthdSemReq reqData; - - //don't need this addr until after singleton struct has reached - // this function for first time - //do assembly that saves the return addr of this fn call into the - // data singleton -- that data-singleton can only be given to exactly - // one instance in the code of this function. However, can use this - // function in different places for different data-singletons. - - VMS_int__save_return_into_ptd_to_loc_then_do_ret(&(singleton->savedRetAddr)); - - reqData.reqType = singleton_data_end; - reqData.singleton = singleton; - - VMS_WL__send_sem_request( &reqData, animSlv ); - } - - -/*This executes the function in the masterVP, so it executes in isolation - * from any other copies -- only one copy of the function can ever execute - * at a time. - * - *It suspends to the master, and the request handler takes the function - * pointer out of the request and calls it, then resumes the Slv. - *Only very short functions should be called this way -- for longer-running - * isolation, use transaction-start and transaction-end, which run the code - * between as work-code. 
- */ -void -Vthread__animate_short_fn_in_isolation( PtrToAtomicFn ptrToFnToExecInMaster, - void *data, SlaveVP *animSlv ) - { - VthdSemReq reqData; - - // - reqData.reqType = atomic; - reqData.fnToExecInMaster = ptrToFnToExecInMaster; - reqData.dataForFn = data; - - VMS_WL__send_sem_request( &reqData, animSlv ); - } - - -/*This suspends to the master. - *First, it looks at the Slv's data, to see the highest transactionID that Slv - * already has entered. If the current ID is not larger, it throws an - * exception stating a bug in the code. Otherwise it puts the current ID - * there, and adds the ID to a linked list of IDs entered -- the list is - * used to check that exits are properly ordered. - *Next it is uses transactionID as index into an array of transaction - * structures. - *If the "Slv_currently_executing" field is non-null, then put requesting Slv - * into queue in the struct. (At some point a holder will request - * end-transaction, which will take this Slv from the queue and resume it.) - *If NULL, then write requesting into the field and resume. - */ -void -Vthread__start_transaction( int32 transactionID, SlaveVP *animSlv ) - { - VthdSemReq reqData; - - // - reqData.reqType = trans_start; - reqData.transID = transactionID; - - VMS_WL__send_sem_request( &reqData, animSlv ); - } - -/*This suspends to the master, then uses transactionID as index into an - * array of transaction structures. - *It looks at Slv_currently_executing to be sure it's same as requesting Slv. - * If different, throws an exception, stating there's a bug in the code. - *Next it looks at the queue in the structure. - *If it's empty, it sets Slv_currently_executing field to NULL and resumes. - *If something in, gets it, sets Slv_currently_executing to that Slv, then - * resumes both. 
- */ -void -Vthread__end_transaction( int32 transactionID, SlaveVP *animSlv ) - { - VthdSemReq reqData; - - // - reqData.reqType = trans_end; - reqData.transID = transactionID; - - VMS_WL__send_sem_request( &reqData, animSlv ); - } -//===========================================================================