# HG changeset patch # User Some Random Person # Date 1330900182 28800 # Node ID b3a881f25c5a9afe23c64ea1244b54aafcdb5eb8 # Parent e5d4d5871ac97bd7e7ca58a9fbf1cf226f5c21e9 Nearly compiles with in-process common_ancestor vers of VMS diff -r e5d4d5871ac9 -r b3a881f25c5a DESIGN_NOTES__Vthread_lib.txt --- a/DESIGN_NOTES__Vthread_lib.txt Thu Mar 01 13:20:51 2012 -0800 +++ b/DESIGN_NOTES__Vthread_lib.txt Sun Mar 04 14:29:42 2012 -0800 @@ -1,44 +1,44 @@ -Implement VPThread this way: +Implement Vthread this way: We implemented a subset of PThreads functionality, called VMSPThd, that includes: mutex_lock, mutex_unlock, cond_wait, and cond_notify, which we name as VMSPThd__mutix_lock and so forth. \ All VMSPThd functions take a reference -to the AppVP that is animating the function call, in addition to any other +to the AppSlv that is animating the function call, in addition to any other parameters. A mutex variable is an integer, returned by VMSPThd__mutex_create(), which is used inside the request handler as a key to lookup an entry in a hash table, that lives in the SemanticEnv. \ Such an entry has a field holding a -reference to the AppVP that currently owns the lock, and a queue of AppVPs +reference to the AppSlv that currently owns the lock, and a queue of AppSlvs waiting to acquire the lock. \ Acquiring a lock is done with VMSPThd__mutex_lock(), which generates a -request. \ Recall that all request sends cause the suspention of the AppVP +request. \ Recall that all request sends cause the suspension of the AppSlv that is animating the library call that generates the request, in this case -the AppVP animating VMSPThd__mutex_lock() is suspended. \ The request -includes a reference to that animating AppVP, and the mutex integer value. +the AppSlv animating VMSPThd__mutex_lock() is suspended. \ The request +includes a reference to that animating AppSlv, and the mutex integer value. 
\ When the request reaches the request handler, the mutex integer is used as key to look up the hash entry, then if the owner field is null (or the same -as the AppVP in the request), the AppVP in the request is placed into the -owner field, and that AppVP is queued to be scheduled for re-animation. -\ However, if a different AppVP is listed in the owner field, then the AppVP +as the AppSlv in the request), the AppSlv in the request is placed into the +owner field, and that AppSlv is queued to be scheduled for re-animation. +\ However, if a different AppSlv is listed in the owner field, then the AppSlv in the request is added to the queue of those trying to acquire. \ Notice that this is a purely sequential algorithm that systematic reasoning can be used on. VMSPThd__mutex_unlock(), meanwhile, generates a request that causes the -request handler to queue for re-animation the AppVP that animated the call. -\ It also pops the queue of AppVPs waiting to acquire the lock, and writes -the AppVP that comes out as the current owner of the lock and queues that -AppVP for re-animation (unless the popped value is null, in which case the +request handler to queue for re-animation the AppSlv that animated the call. +\ It also pops the queue of AppSlvs waiting to acquire the lock, and writes +the AppSlv that comes out as the current owner of the lock and queues that +AppSlv for re-animation (unless the popped value is null, in which case the current owner is just set to null). Implementing condition variables takes a similar approach, in that VMSPThd__init_cond() returns an integer that is then used to look up an entry -in a hash table, where the entry contains a queue of AppVPs waiting on the +in a hash table, where the entry contains a queue of AppSlvs waiting on the condition variable. 
\ VMSPThd__cond_wait() generates a request that pushes -the AppVP into the queue, while VMSPThd__cond_signal() takes a wait request +the AppSlv into the queue, while VMSPThd__cond_signal() takes a wait request from the queue. Notice that this is again a purely sequential algorithm, and sidesteps issues @@ -59,7 +59,7 @@ debug, and is in a form that should be amenable to proof of freedom from race conditions, given a correct implementation of VMS. \ The hash-table based approach also makes it reasonably high performance, with (essentially) no -slowdown when the number of locks or number of AppVPs grows large. +slowdown when the number of locks or number of AppSlvs grows large. =========================== Behavior: diff -r e5d4d5871ac9 -r b3a881f25c5a Vthread.h --- a/Vthread.h Thu Mar 01 13:20:51 2012 -0800 +++ b/Vthread.h Sun Mar 04 14:29:42 2012 -0800 @@ -6,21 +6,24 @@ * */ -#ifndef _VPThread_H -#define _VPThread_H +#ifndef _Vthread_H +#define _Vthread_H + +#define _LANG_NAME_ "Vthread" #include "VMS_impl/VMS.h" #include "C_Libraries/Queue_impl/PrivateQueue.h" #include "C_Libraries/DynArray/DynArray.h" -/*This header defines everything specific to the VPThread semantic plug-in +/*This header defines everything specific to the Vthread semantic plug-in */ //=========================================================================== //turn on the counter measurements of language overhead -- comment to turn off #define MEAS__TURN_ON_LANG_MEAS +#include "Vthread_Overhead_Meas.h" #define INIT_NUM_MUTEX 10000 #define INIT_NUM_COND 10000 @@ -29,7 +32,7 @@ //=========================================================================== //=========================================================================== -typedef struct _VPThreadSemReq VPThdSemReq; +typedef struct _VthreadSemReq VthdSemReq; typedef void (*PtrToAtomicFn ) ( void * ); //executed atomically in master //=========================================================================== @@ -38,17 +41,17 @@ */ 
typedef struct { - void *endInstrAddr; + void *savedRetAddr; int32 hasBeenStarted; int32 hasFinished; PrivQueueStruc *waitQ; } -VPThdSingleton; +VthdSingleton; /*Semantic layer-specific data sent inside a request from lib called in app * to request handler called in MasterLoop */ -enum VPThreadReqType +enum VthreadReqType { make_mutex = 1, mutex_lock, @@ -56,7 +59,7 @@ make_cond, cond_wait, cond_signal, - make_procr, + make_slaveVP, malloc_req, free_req, singleton_fn_start, @@ -68,9 +71,9 @@ trans_end }; -struct _VPThreadSemReq - { enum VPThreadReqType reqType; - SlaveVP *requestingVP; +struct _VthreadSemReq + { enum VthreadReqType reqType; + SlaveVP *requestingSlv; int32 mutexIdx; int32 condIdx; @@ -82,22 +85,22 @@ void *ptrToFree; int32 singletonID; - VPThdSingleton **singletonPtrAddr; + VthdSingleton *singleton; PtrToAtomicFn fnToExecInMaster; void *dataForFn; int32 transID; } -/* VPThreadSemReq */; +/* VthreadSemReq */; typedef struct { - SlaveVP *VPCurrentlyExecuting; - PrivQueueStruc *waitingVPQ; + SlaveVP *SlvCurrentlyExecuting; + PrivQueueStruc *waitingSlvQ; } -VPThdTrans; +VthdTrans; typedef struct @@ -106,16 +109,16 @@ SlaveVP *holderOfLock; PrivQueueStruc *waitingQueue; } -VPThdMutex; +VthdMutex; typedef struct { int32 condIdx; PrivQueueStruc *waitingQueue; - VPThdMutex *partnerMutex; + VthdMutex *partnerMutex; } -VPThdCond; +VthdCond; typedef struct _TransListElem TransListElem; struct _TransListElem @@ -130,130 +133,129 @@ int32 highestTransEntered; TransListElem *lastTransEntered; } -VPThdSemData; +VthdSemData; typedef struct { //Standard stuff will be in most every semantic env - PrivQueueStruc **readyVPQs; - int32 numVirtVP; - int32 nextCoreToGetNewVP; + PrivQueueStruc **readySlvQs; + int32 nextCoreToGetNewSlv; int32 primitiveStartTime; //Specific to this semantic layer - VPThdMutex **mutexDynArray; + VthdMutex **mutexDynArray; PrivDynArrayInfo *mutexDynArrayInfo; - VPThdCond **condDynArray; + VthdCond **condDynArray; PrivDynArrayInfo 
*condDynArrayInfo; void *applicationGlobals; //fix limit on num with dynArray - VPThdSingleton fnSingletons[NUM_STRUCS_IN_SEM_ENV]; + VthdSingleton fnSingletons[NUM_STRUCS_IN_SEM_ENV]; - VPThdTrans transactionStrucs[NUM_STRUCS_IN_SEM_ENV]; + VthdTrans transactionStrucs[NUM_STRUCS_IN_SEM_ENV]; } -VPThdSemEnv; +VthdSemEnv; //=========================================================================== inline void -VPThread__create_seed_procr_and_do_work( TopLevelFnPtr fn, void *initData ); +Vthread__create_seed_slaveVP_and_do_work( TopLevelFnPtr fn, void *initData ); //======================= inline SlaveVP * -VPThread__create_thread( TopLevelFnPtr fnPtr, void *initData, - SlaveVP *creatingVP ); +Vthread__create_thread( TopLevelFnPtr fnPtr, void *initData, + SlaveVP *creatingSlv ); inline SlaveVP * -VPThread__create_thread_with_affinity( TopLevelFnPtr fnPtr, void *initData, - SlaveVP *creatingVP, int32 coreToScheduleOnto ); +Vthread__create_thread_with_affinity( TopLevelFnPtr fnPtr, void *initData, + SlaveVP *creatingSlv, int32 coreToScheduleOnto ); inline void -VPThread__dissipate_thread( SlaveVP *procrToDissipate ); +Vthread__dissipate_thread( SlaveVP *procrToDissipate ); //======================= inline void -VPThread__set_globals_to( void *globals ); +Vthread__set_globals_to( void *globals ); inline void * -VPThread__give_globals(); +Vthread__give_globals(); //======================= inline int32 -VPThread__make_mutex( SlaveVP *animVP ); +Vthread__make_mutex( SlaveVP *animSlv ); inline void -VPThread__mutex_lock( int32 mutexIdx, SlaveVP *acquiringVP ); +Vthread__mutex_lock( int32 mutexIdx, SlaveVP *acquiringSlv ); inline void -VPThread__mutex_unlock( int32 mutexIdx, SlaveVP *releasingVP ); +Vthread__mutex_unlock( int32 mutexIdx, SlaveVP *releasingSlv ); //======================= inline int32 -VPThread__make_cond( int32 ownedMutexIdx, SlaveVP *animPr); +Vthread__make_cond( int32 ownedMutexIdx, SlaveVP *animSlv); inline void -VPThread__cond_wait( int32 condIdx, 
SlaveVP *waitingPr); +Vthread__cond_wait( int32 condIdx, SlaveVP *waitingSlv); inline void * -VPThread__cond_signal( int32 condIdx, SlaveVP *signallingVP ); +Vthread__cond_signal( int32 condIdx, SlaveVP *signallingSlv ); //======================= void -VPThread__start_fn_singleton( int32 singletonID, SlaveVP *animVP ); +Vthread__start_fn_singleton( int32 singletonID, SlaveVP *animSlv ); void -VPThread__end_fn_singleton( int32 singletonID, SlaveVP *animVP ); +Vthread__end_fn_singleton( int32 singletonID, SlaveVP *animSlv ); void -VPThread__start_data_singleton( VPThdSingleton **singeltonAddr, SlaveVP *animVP ); +Vthread__start_data_singleton( VthdSingleton *singelton, SlaveVP *animSlv ); void -VPThread__end_data_singleton( VPThdSingleton **singletonAddr, SlaveVP *animVP ); +Vthread__end_data_singleton( VthdSingleton *singleton, SlaveVP *animSlv ); void -VPThread__animate_short_fn_in_isolation( PtrToAtomicFn ptrToFnToExecInMaster, - void *data, SlaveVP *animVP ); +Vthread__animate_short_fn_in_isolation( PtrToAtomicFn ptrToFnToExecInMaster, + void *data, SlaveVP *animSlv ); void -VPThread__start_transaction( int32 transactionID, SlaveVP *animVP ); +Vthread__start_transaction( int32 transactionID, SlaveVP *animSlv ); void -VPThread__end_transaction( int32 transactionID, SlaveVP *animVP ); +Vthread__end_transaction( int32 transactionID, SlaveVP *animSlv ); //========================= Internal use only ============================= inline void -VPThread__Request_Handler( SlaveVP *requestingVP, void *_semEnv ); +Vthread__Request_Handler( SlaveVP *requestingSlv, void *_semEnv ); inline SlaveVP * -VPThread__schedule_virt_procr( void *_semEnv, int coreNum ); +Vthread__schedule_slaveVP( void *_semEnv, int coreNum ); //======================= inline void -VPThread__free_semantic_request( VPThdSemReq *semReq ); +Vthread__free_semantic_request( VthdSemReq *semReq ); //======================= void * -VPThread__malloc( size_t sizeToMalloc, SlaveVP *animVP ); +Vthread__malloc( 
size_t sizeToMalloc, SlaveVP *animSlv ); void -VPThread__init(); +Vthread__init(); void -VPThread__cleanup_after_shutdown(); +Vthread__cleanup_after_shutdown(); void inline -resume_procr( SlaveVP *procr, VPThdSemEnv *semEnv ); +resume_slaveVP( SlaveVP *procr, VthdSemEnv *semEnv ); -#endif /* _VPThread_H */ +#endif /* _Vthread_H */ diff -r e5d4d5871ac9 -r b3a881f25c5a Vthread.s --- a/Vthread.s Thu Mar 01 13:20:51 2012 -0800 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,21 +0,0 @@ - -//Assembly code takes the return addr off the stack and saves -// into the singleton. The first field in the singleton is the -// "endInstrAddr" field, and the return addr is at 0x4(%ebp) -.globl asm_save_ret_to_singleton -asm_save_ret_to_singleton: - movq 0x8(%rbp), %rax #get ret address, ebp is the same as in the calling function - movq %rax, (%rdi) #write ret addr to endInstrAddr field - ret - - -//Assembly code changes the return addr on the stack to the one -// saved into the singleton by the end-singleton-fn -//The stack's return addr is at 0x4(%%ebp) -.globl asm_write_ret_from_singleton -asm_write_ret_from_singleton: - movq (%rdi), %rax #get endInstrAddr field - movq %rax, 0x8(%rbp) #write return addr to the stack of the caller - ret - - diff -r e5d4d5871ac9 -r b3a881f25c5a Vthread_Meas.h --- a/Vthread_Meas.h Thu Mar 01 13:20:51 2012 -0800 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,109 +0,0 @@ -/* - * File: VPThread_helper.h - * Author: msach - * - * Created on June 10, 2011, 12:20 PM - */ - -#ifndef VTHREAD_MEAS_H -#define VTHREAD_MEAS_H - -#ifdef MEAS__TURN_ON_LANG_MEAS - - #ifdef MEAS__Make_Meas_Hists_for_Language - #undef MEAS__Make_Meas_Hists_for_Language - #endif - -//=================== Language-specific Measurement Stuff =================== -// -// - #define createHistIdx 1 //note: starts at 1 - #define mutexLockHistIdx 2 - #define mutexUnlockHistIdx 3 - #define condWaitHistIdx 4 - #define condSignalHistIdx 5 - - #define MEAS__Make_Meas_Hists_for_Language() \ 
- _VMSMasterEnv->measHistsInfo = \ - makePrivDynArrayOfSize( (void***)&(_VMSMasterEnv->measHists), 200); \ - makeAMeasHist( createHistIdx, "create", 250, 0, 100 ) \ - makeAMeasHist( mutexLockHistIdx, "mutex_lock", 50, 0, 100 ) \ - makeAMeasHist( mutexUnlockHistIdx, "mutex_unlock", 50, 0, 100 ) \ - makeAMeasHist( condWaitHistIdx, "cond_wait", 50, 0, 100 ) \ - makeAMeasHist( condSignalHistIdx, "cond_signal", 50, 0, 100 ) - - - #define Meas_startCreate \ - int32 startStamp, endStamp; \ - saveLowTimeStampCountInto( startStamp ); - - #define Meas_endCreate \ - saveLowTimeStampCountInto( endStamp ); \ - addIntervalToHist( startStamp, endStamp, \ - _VMSMasterEnv->measHists[ createHistIdx ] ); - - #define Meas_startMutexLock \ - int32 startStamp, endStamp; \ - saveLowTimeStampCountInto( startStamp ); - - #define Meas_endMutexLock \ - saveLowTimeStampCountInto( endStamp ); \ - addIntervalToHist( startStamp, endStamp, \ - _VMSMasterEnv->measHists[ mutexLockHistIdx ] ); - - #define Meas_startMutexUnlock \ - int32 startStamp, endStamp; \ - saveLowTimeStampCountInto( startStamp ); - - #define Meas_endMutexUnlock \ - saveLowTimeStampCountInto( endStamp ); \ - addIntervalToHist( startStamp, endStamp, \ - _VMSMasterEnv->measHists[ mutexUnlockHistIdx ] ); - - #define Meas_startCondWait \ - int32 startStamp, endStamp; \ - saveLowTimeStampCountInto( startStamp ); - - #define Meas_endCondWait \ - saveLowTimeStampCountInto( endStamp ); \ - addIntervalToHist( startStamp, endStamp, \ - _VMSMasterEnv->measHists[ condWaitHistIdx ] ); - - #define Meas_startCondSignal \ - int32 startStamp, endStamp; \ - saveLowTimeStampCountInto( startStamp ); - - #define Meas_endCondSignal \ - saveLowTimeStampCountInto( endStamp ); \ - addIntervalToHist( startStamp, endStamp, \ - _VMSMasterEnv->measHists[ condSignalHistIdx ] ); - -#else //===================== turned off ========================== - - #define MEAS__Make_Meas_Hists_for_Language() - - #define Meas_startCreate - - #define Meas_endCreate - - 
#define Meas_startMutexLock - - #define Meas_endMutexLock - - #define Meas_startMutexUnlock - - #define Meas_endMutexUnlock - - #define Meas_startCondWait - - #define Meas_endCondWait - - #define Meas_startCondSignal - - #define Meas_endCondSignal - -#endif - - -#endif /* VTHREAD_MEAS_H */ - diff -r e5d4d5871ac9 -r b3a881f25c5a Vthread_Measurement.h --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/Vthread_Measurement.h Sun Mar 04 14:29:42 2012 -0800 @@ -0,0 +1,108 @@ +/* + * + * + * Created on June 10, 2011, 12:20 PM + */ + +#ifndef VTHREAD_MEAS_H +#define VTHREAD_MEAS_H + +#ifdef MEAS__TURN_ON_LANG_MEAS + + #ifdef MEAS__Make_Meas_Hists_for_Language + #undef MEAS__Make_Meas_Hists_for_Language + #endif + +//=================== Language-specific Measurement Stuff =================== +// +// + #define createHistIdx 1 //note: starts at 1 + #define mutexLockHistIdx 2 + #define mutexUnlockHistIdx 3 + #define condWaitHistIdx 4 + #define condSignalHistIdx 5 + + #define MEAS__Make_Meas_Hists_for_Language() \ + _VMSMasterEnv->measHistsInfo = \ + makePrivDynArrayOfSize( (void***)&(_VMSMasterEnv->measHists), 200); \ + makeAMeasHist( createHistIdx, "create", 250, 0, 100 ) \ + makeAMeasHist( mutexLockHistIdx, "mutex_lock", 50, 0, 100 ) \ + makeAMeasHist( mutexUnlockHistIdx, "mutex_unlock", 50, 0, 100 ) \ + makeAMeasHist( condWaitHistIdx, "cond_wait", 50, 0, 100 ) \ + makeAMeasHist( condSignalHistIdx, "cond_signal", 50, 0, 100 ) + + + #define Meas_startCreate \ + int32 startStamp, endStamp; \ + saveLowTimeStampCountInto( startStamp ); + + #define Meas_endCreate \ + saveLowTimeStampCountInto( endStamp ); \ + addIntervalToHist( startStamp, endStamp, \ + _VMSMasterEnv->measHists[ createHistIdx ] ); + + #define Meas_startMutexLock \ + int32 startStamp, endStamp; \ + saveLowTimeStampCountInto( startStamp ); + + #define Meas_endMutexLock \ + saveLowTimeStampCountInto( endStamp ); \ + addIntervalToHist( startStamp, endStamp, \ + _VMSMasterEnv->measHists[ mutexLockHistIdx ] ); + + 
#define Meas_startMutexUnlock \ + int32 startStamp, endStamp; \ + saveLowTimeStampCountInto( startStamp ); + + #define Meas_endMutexUnlock \ + saveLowTimeStampCountInto( endStamp ); \ + addIntervalToHist( startStamp, endStamp, \ + _VMSMasterEnv->measHists[ mutexUnlockHistIdx ] ); + + #define Meas_startCondWait \ + int32 startStamp, endStamp; \ + saveLowTimeStampCountInto( startStamp ); + + #define Meas_endCondWait \ + saveLowTimeStampCountInto( endStamp ); \ + addIntervalToHist( startStamp, endStamp, \ + _VMSMasterEnv->measHists[ condWaitHistIdx ] ); + + #define Meas_startCondSignal \ + int32 startStamp, endStamp; \ + saveLowTimeStampCountInto( startStamp ); + + #define Meas_endCondSignal \ + saveLowTimeStampCountInto( endStamp ); \ + addIntervalToHist( startStamp, endStamp, \ + _VMSMasterEnv->measHists[ condSignalHistIdx ] ); + +#else //===================== turned off ========================== + + #define MEAS__Make_Meas_Hists_for_Language() + + #define Meas_startCreate + + #define Meas_endCreate + + #define Meas_startMutexLock + + #define Meas_endMutexLock + + #define Meas_startMutexUnlock + + #define Meas_endMutexUnlock + + #define Meas_startCondWait + + #define Meas_endCondWait + + #define Meas_startCondSignal + + #define Meas_endCondSignal + +#endif /* MEAS__TURN_ON_LANG_MEAS */ + + +#endif /* VTHREAD_MEAS_H */ + diff -r e5d4d5871ac9 -r b3a881f25c5a Vthread_PluginFns.c --- a/Vthread_PluginFns.c Thu Mar 01 13:20:51 2012 -0800 +++ b/Vthread_PluginFns.c Sun Mar 04 14:29:42 2012 -0800 @@ -8,26 +8,26 @@ #include #include -#include "VMS/Queue_impl/PrivateQueue.h" -#include "VPThread.h" -#include "VPThread_Request_Handlers.h" -#include "VPThread_helper.h" +#include "C_Libraries/Queue_impl/PrivateQueue.h" +#include "Vthread.h" +#include "Vthread_Request_Handlers.h" +#include "Vthread_helper.h" //=========================== Local Fn Prototypes =========================== void inline -handleSemReq( VMSReqst *req, SlaveVP *requestingVP, VPThdSemEnv *semEnv ); 
+handleSemReq( VMSReqst *req, SlaveVP *requestingSlv, VthdSemEnv *semEnv ); inline void -handleDissipate( SlaveVP *requestingVP, VPThdSemEnv *semEnv ); +handleDissipate( SlaveVP *requestingSlv, VthdSemEnv *semEnv ); inline void -handleCreate( VMSReqst *req, SlaveVP *requestingVP, VPThdSemEnv *semEnv ); +handleCreate( VMSReqst *req, SlaveVP *requestingSlv, VthdSemEnv *semEnv ); //============================== Scheduler ================================== // -/*For VPThread, scheduling a slave simply takes the next work-unit off the +/*For Vthread, scheduling a slave simply takes the next work-unit off the * ready-to-go work-unit queue and assigns it to the slaveToSched. *If the ready-to-go work-unit queue is empty, then nothing to schedule * to the slave -- return FALSE to let Master loop know scheduling that @@ -35,16 +35,16 @@ */ char __Scheduler[] = "FIFO Scheduler"; //Gobal variable for name in saved histogram SlaveVP * -VPThread__schedule_virt_procr( void *_semEnv, int coreNum ) - { SlaveVP *schedVP; - VPThdSemEnv *semEnv; +Vthread__schedule_slaveVP( void *_semEnv, int coreNum ) + { SlaveVP *schedSlv; + VthdSemEnv *semEnv; - semEnv = (VPThdSemEnv *)_semEnv; + semEnv = (VthdSemEnv *)_semEnv; - schedVP = readPrivQ( semEnv->readyVPQs[coreNum] ); + schedSlv = readPrivQ( semEnv->readySlvQs[coreNum] ); //Note, using a non-blocking queue -- it returns NULL if queue empty - return( schedVP ); + return( schedSlv ); } @@ -62,40 +62,40 @@ * Processor, and initial data. 
*/ void -VPThread__Request_Handler( SlaveVP *requestingVP, void *_semEnv ) - { VPThdSemEnv *semEnv; +Vthread__Request_Handler( SlaveVP *requestingSlv, void *_semEnv ) + { VthdSemEnv *semEnv; VMSReqst *req; - semEnv = (VPThdSemEnv *)_semEnv; + semEnv = (VthdSemEnv *)_semEnv; - req = VMS__take_next_request_out_of( requestingVP ); + req = VMS_PI__take_next_request_out_of( requestingSlv ); while( req != NULL ) { switch( req->reqType ) - { case semantic: handleSemReq( req, requestingVP, semEnv); + { case semantic: handleSemReq( req, requestingSlv, semEnv); break; - case createReq: handleCreate( req, requestingVP, semEnv); + case createReq: handleCreate( req, requestingSlv, semEnv); break; - case dissipate: handleDissipate( requestingVP, semEnv); + case dissipate: handleDissipate( requestingSlv, semEnv); break; - case VMSSemantic: VMS__handle_VMSSemReq(req, requestingVP, semEnv, - (ResumeVPFnPtr)&resume_procr); + case VMSSemantic: VMS_PI__handle_VMSSemReq(req, requestingSlv, semEnv, + (ResumeSlvFnPtr)&resume_slaveVP); break; default: break; } - req = VMS__take_next_request_out_of( requestingVP ); + req = VMS_PI__take_next_request_out_of( requestingSlv ); } //while( req != NULL ) } void inline -handleSemReq( VMSReqst *req, SlaveVP *reqVP, VPThdSemEnv *semEnv ) - { VPThdSemReq *semReq; +handleSemReq( VMSReqst *req, SlaveVP *reqSlv, VthdSemEnv *semEnv ) + { VthdSemReq *semReq; - semReq = VMS__take_sem_reqst_from(req); + semReq = VMS_PI__take_sem_reqst_from(req); if( semReq == NULL ) return; switch( semReq->reqType ) { @@ -111,23 +111,23 @@ break; case cond_signal: handleCondSignal( semReq, semEnv); break; - case malloc_req: handleMalloc( semReq, reqVP, semEnv); + case malloc_req: handleMalloc( semReq, reqSlv, semEnv); break; - case free_req: handleFree( semReq, reqVP, semEnv); + case free_req: handleFree( semReq, reqSlv, semEnv); break; - case singleton_fn_start: handleStartFnSingleton(semReq, reqVP, semEnv); + case singleton_fn_start: handleStartFnSingleton(semReq, reqSlv, 
semEnv); break; - case singleton_fn_end: handleEndFnSingleton( semReq, reqVP, semEnv); + case singleton_fn_end: handleEndFnSingleton( semReq, reqSlv, semEnv); break; - case singleton_data_start:handleStartDataSingleton(semReq,reqVP,semEnv); + case singleton_data_start:handleStartDataSingleton(semReq,reqSlv,semEnv); break; - case singleton_data_end: handleEndDataSingleton(semReq, reqVP, semEnv); + case singleton_data_end: handleEndDataSingleton(semReq, reqSlv, semEnv); break; - case atomic: handleAtomic( semReq, reqVP, semEnv); + case atomic: handleAtomic( semReq, reqSlv, semEnv); break; - case trans_start: handleTransStart( semReq, reqVP, semEnv); + case trans_start: handleTransStart( semReq, reqSlv, semEnv); break; - case trans_end: handleTransEnd( semReq, reqVP, semEnv); + case trans_end: handleTransEnd( semReq, reqSlv, semEnv); break; } } @@ -135,40 +135,34 @@ //=========================== VMS Request Handlers =========================== // inline void -handleDissipate( SlaveVP *requestingVP, VPThdSemEnv *semEnv ) +handleDissipate( SlaveVP *requestingSlv, VthdSemEnv *semEnv ) { //free any semantic data allocated to the virt procr - VMS__free( requestingVP->semanticData ); + VMS_PI__free( requestingSlv->semanticData ); - //Now, call VMS to free_all AppVP state -- stack and so on - VMS__dissipate_procr( requestingVP ); - - semEnv->numVP -= 1; - if( semEnv->numVP == 0 ) - { //no more work, so shutdown - VMS__shutdown(); - } + //Now, call VMS to free_all AppSlv state -- stack and so on + VMS_PI__dissipate_slaveVP( requestingSlv ); } inline void -handleCreate( VMSReqst *req, SlaveVP *requestingVP, VPThdSemEnv *semEnv ) - { VPThdSemReq *semReq; - SlaveVP *newVP; +handleCreate( VMSReqst *req, SlaveVP *requestingSlv, VthdSemEnv *semEnv ) + { VthdSemReq *semReq; + SlaveVP *newSlv; //========================= MEASUREMENT STUFF ====================== Meas_startCreate //================================================================== - semReq = VMS__take_sem_reqst_from( 
req ); + semReq = VMS_PI__take_sem_reqst_from( req ); - newVP = VPThread__create_procr_helper( semReq->fnPtr, semReq->initData, + newSlv = Vthread__create_slaveVP_helper( semReq->fnPtr, semReq->initData, semEnv, semReq->coreToScheduleOnto); - //For VPThread, caller needs ptr to created processor returned to it - requestingVP->dataRetFromReq = newVP; + //For Vthread, caller needs ptr to created processor returned to it + requestingSlv->dataRetFromReq = newSlv; - resume_procr( newVP, semEnv ); - resume_procr( requestingVP, semEnv ); + resume_slaveVP( newSlv, semEnv ); + resume_slaveVP( requestingSlv, semEnv ); //========================= MEASUREMENT STUFF ====================== Meas_endCreate @@ -184,9 +178,9 @@ //=========================== Helper ============================== void inline -resume_procr( SlaveVP *procr, VPThdSemEnv *semEnv ) +resume_slaveVP( SlaveVP *procr, VthdSemEnv *semEnv ) { - writePrivQ( procr, semEnv->readyVPQs[ procr->coreAnimatedBy] ); + writePrivQ( procr, semEnv->readySlvQs[ procr->coreAnimatedBy] ); } //=========================================================================== \ No newline at end of file diff -r e5d4d5871ac9 -r b3a881f25c5a Vthread_Request_Handlers.c --- a/Vthread_Request_Handlers.c Thu Mar 01 13:20:51 2012 -0800 +++ b/Vthread_Request_Handlers.c Sun Mar 04 14:29:42 2012 -0800 @@ -8,7 +8,7 @@ #include #include -#include "VMS_Implementations/VMS_impl/VMS.h" +#include "VMS_impl/VMS.h" #include "C_Libraries/Queue_impl/PrivateQueue.h" #include "C_Libraries/Hash_impl/PrivateHash.h" #include "Vthread.h" @@ -19,13 +19,13 @@ /*The semantic request has a mutexIdx value, which acts as index into array. 
*/ inline void -handleMakeMutex( VPThdSemReq *semReq, VPThdSemEnv *semEnv) - { VPThdMutex *newMutex; - SlaveVP *requestingVP; +handleMakeMutex( VthdSemReq *semReq, VthdSemEnv *semEnv) + { VthdMutex *newMutex; + SlaveVP *requestingSlv; - requestingVP = semReq->requestingVP; - newMutex = VMS__malloc( sizeof(VPThdMutex) ); - newMutex->waitingQueue = makeVMSPrivQ( requestingVP ); + requestingSlv = semReq->requestingSlv; + newMutex = VMS_PI__malloc( sizeof(VthdMutex) ); + newMutex->waitingQueue = makeVMSQ( requestingSlv ); newMutex->holderOfLock = NULL; //The mutex struc contains an int that identifies it -- use that as @@ -33,16 +33,16 @@ newMutex->mutexIdx = addToDynArray( newMutex, semEnv->mutexDynArrayInfo ); //Now communicate the mutex's identifying int back to requesting procr - semReq->requestingVP->dataRetFromReq = (void*)newMutex->mutexIdx; //mutexIdx is 32 bit + semReq->requestingSlv->dataRetFromReq = (void*)newMutex->mutexIdx; //mutexIdx is 32 bit //re-animate the requester - resume_procr( requestingVP, semEnv ); + resume_slaveVP( requestingSlv, semEnv ); } inline void -handleMutexLock( VPThdSemReq *semReq, VPThdSemEnv *semEnv) - { VPThdMutex *mutex; +handleMutexLock( VthdSemReq *semReq, VthdSemEnv *semEnv) + { VthdMutex *mutex; //=================== Deterministic Replay ====================== #ifdef RECORD_DETERMINISTIC_REPLAY @@ -55,14 +55,14 @@ //see if mutex is free or not if( mutex->holderOfLock == NULL ) //none holding, give lock to requester { - mutex->holderOfLock = semReq->requestingVP; + mutex->holderOfLock = semReq->requestingSlv; //re-animate requester, now that it has the lock - resume_procr( semReq->requestingVP, semEnv ); + resume_slaveVP( semReq->requestingSlv, semEnv ); } else //queue up requester to wait for release of lock { - writePrivQ( semReq->requestingVP, mutex->waitingQueue ); + writeVMSQ( semReq->requestingSlv, mutex->waitingQueue ); } Meas_endMutexLock } @@ -70,24 +70,24 @@ /* */ inline void -handleMutexUnlock( VPThdSemReq *semReq, 
VPThdSemEnv *semEnv) - { VPThdMutex *mutex; +handleMutexUnlock( VthdSemReq *semReq, VthdSemEnv *semEnv) + { VthdMutex *mutex; Meas_startMutexUnlock //lookup mutex struc, using mutexIdx as index mutex = semEnv->mutexDynArray[ semReq->mutexIdx ]; //set new holder of mutex-lock to be next in queue (NULL if empty) - mutex->holderOfLock = readPrivQ( mutex->waitingQueue ); + mutex->holderOfLock = readVMSQ( mutex->waitingQueue ); //if have new non-NULL holder, re-animate it if( mutex->holderOfLock != NULL ) { - resume_procr( mutex->holderOfLock, semEnv ); + resume_slaveVP( mutex->holderOfLock, semEnv ); } //re-animate the releaser of the lock - resume_procr( semReq->requestingVP, semEnv ); + resume_slaveVP( semReq->requestingSlv, semEnv ); Meas_endMutexUnlock } @@ -104,25 +104,25 @@ * interacting with that cond var. So, make this pairing explicit. */ inline void -handleMakeCond( VPThdSemReq *semReq, VPThdSemEnv *semEnv) - { VPThdCond *newCond; - SlaveVP *requestingVP; +handleMakeCond( VthdSemReq *semReq, VthdSemEnv *semEnv) + { VthdCond *newCond; + SlaveVP *requestingSlv; - requestingVP = semReq->requestingVP; - newCond = VMS__malloc( sizeof(VPThdCond) ); + requestingSlv = semReq->requestingSlv; + newCond = VMS_PI__malloc( sizeof(VthdCond) ); newCond->partnerMutex = semEnv->mutexDynArray[ semReq->mutexIdx ]; - newCond->waitingQueue = makeVMSPrivQ(); + newCond->waitingQueue = makeVMSQ(); //The cond struc contains an int that identifies it -- use that as // its index within the array of conds. Add the new cond to array. 
newCond->condIdx = addToDynArray( newCond, semEnv->condDynArrayInfo ); //Now communicate the cond's identifying int back to requesting procr - semReq->requestingVP->dataRetFromReq = (void*)newCond->condIdx; //condIdx is 32 bit + semReq->requestingSlv->dataRetFromReq = (void*)newCond->condIdx; //condIdx is 32 bit //re-animate the requester - resume_procr( requestingVP, semEnv ); + resume_slaveVP( requestingSlv, semEnv ); } @@ -131,24 +131,24 @@ * the designers of Posix standard ; ) */ inline void -handleCondWait( VPThdSemReq *semReq, VPThdSemEnv *semEnv) - { VPThdCond *cond; - VPThdMutex *mutex; +handleCondWait( VthdSemReq *semReq, VthdSemEnv *semEnv) + { VthdCond *cond; + VthdMutex *mutex; Meas_startCondWait //get cond struc out of array of them that's in the sem env cond = semEnv->condDynArray[ semReq->condIdx ]; //add requester to queue of wait-ers - writePrivQ( semReq->requestingVP, cond->waitingQueue ); + writeVMSQ( semReq->requestingSlv, cond->waitingQueue ); //unlock mutex -- can't reuse above handler 'cause not queuing releaser mutex = cond->partnerMutex; - mutex->holderOfLock = readPrivQ( mutex->waitingQueue ); + mutex->holderOfLock = readVMSQ( mutex->waitingQueue ); if( mutex->holderOfLock != NULL ) { - resume_procr( mutex->holderOfLock, semEnv ); + resume_slaveVP( mutex->holderOfLock, semEnv ); } Meas_endCondWait } @@ -158,25 +158,25 @@ * that gets the lock */ inline void -handleCondSignal( VPThdSemReq *semReq, VPThdSemEnv *semEnv) - { VPThdCond *cond; - VPThdMutex *mutex; - SlaveVP *waitingVP; +handleCondSignal( VthdSemReq *semReq, VthdSemEnv *semEnv) + { VthdCond *cond; + VthdMutex *mutex; + SlaveVP *waitingSlv; Meas_startCondSignal; //get cond struc out of array of them that's in the sem env cond = semEnv->condDynArray[ semReq->condIdx ]; //take next waiting procr out of queue - waitingVP = readPrivQ( cond->waitingQueue ); + waitingSlv = readVMSQ( cond->waitingQueue ); //transfer waiting procr to wait queue of mutex // mutex is guaranteed to be held by 
signalling procr, so no check mutex = cond->partnerMutex; - pushPrivQ( waitingVP, mutex->waitingQueue ); //is first out when read + writeVMSQ( waitingSlv, mutex->waitingQueue ); //is first out when read //re-animate the signalling procr - resume_procr( semReq->requestingVP, semEnv ); + resume_slaveVP( semReq->requestingSlv, semEnv ); Meas_endCondSignal; } @@ -187,7 +187,7 @@ /* */ void inline -handleMalloc(VPThdSemReq *semReq, SlaveVP *requestingVP,VPThdSemEnv *semEnv) +handleMalloc(VthdSemReq *semReq, SlaveVP *requestingSlv,VthdSemEnv *semEnv) { void *ptr; //========================= MEASUREMENT STUFF ====================== @@ -196,9 +196,9 @@ saveLowTimeStampCountInto( startStamp ); #endif //================================================================== - ptr = VMS__malloc( semReq->sizeToMalloc ); - requestingVP->dataRetFromReq = ptr; - resume_procr( requestingVP, semEnv ); + ptr = VMS_PI__malloc( semReq->sizeToMalloc ); + requestingSlv->dataRetFromReq = ptr; + resume_slaveVP( requestingSlv, semEnv ); //========================= MEASUREMENT STUFF ====================== #ifdef MEAS__TIME_PLUGIN saveLowTimeStampCountInto( endStamp ); @@ -211,7 +211,7 @@ /* */ void inline -handleFree( VPThdSemReq *semReq, SlaveVP *requestingVP, VPThdSemEnv *semEnv) +handleFree( VthdSemReq *semReq, SlaveVP *requestingSlv, VthdSemEnv *semEnv) { //========================= MEASUREMENT STUFF ====================== #ifdef MEAS__TIME_PLUGIN @@ -219,8 +219,8 @@ saveLowTimeStampCountInto( startStamp ); #endif //================================================================== - VMS__free( semReq->ptrToFree ); - resume_procr( requestingVP, semEnv ); + VMS_PI__free( semReq->ptrToFree ); + resume_slaveVP( requestingSlv, semEnv ); //========================= MEASUREMENT STUFF ====================== #ifdef MEAS__TIME_PLUGIN saveLowTimeStampCountInto( endStamp ); @@ -237,113 +237,113 @@ * end-label. Else, sets flag and resumes normally. 
*/ void inline -handleStartSingleton_helper( VPThdSingleton *singleton, SlaveVP *reqstingVP, - VPThdSemEnv *semEnv ) +handleStartSingleton_helper( VthdSingleton *singleton, SlaveVP *reqstingSlv, + VthdSemEnv *semEnv ) { if( singleton->hasFinished ) { //the code that sets the flag to true first sets the end instr addr - reqstingVP->dataRetFromReq = singleton->endInstrAddr; - resume_procr( reqstingVP, semEnv ); + reqstingSlv->dataRetFromReq = singleton->savedRetAddr; + resume_slaveVP( reqstingSlv, semEnv ); return; } else if( singleton->hasBeenStarted ) { //singleton is in-progress in a diff slave, so wait for it to finish - writePrivQ(reqstingVP, singleton->waitQ ); + writeVMSQ(reqstingSlv, singleton->waitQ ); return; } else { //hasn't been started, so this is the first attempt at the singleton singleton->hasBeenStarted = TRUE; - reqstingVP->dataRetFromReq = 0x0; - resume_procr( reqstingVP, semEnv ); + reqstingSlv->dataRetFromReq = 0x0; + resume_slaveVP( reqstingSlv, semEnv ); return; } } void inline -handleStartFnSingleton( VPThdSemReq *semReq, SlaveVP *requestingVP, - VPThdSemEnv *semEnv ) - { VPThdSingleton *singleton; +handleStartFnSingleton( VthdSemReq *semReq, SlaveVP *requestingSlv, + VthdSemEnv *semEnv ) + { VthdSingleton *singleton; singleton = &(semEnv->fnSingletons[ semReq->singletonID ]); - handleStartSingleton_helper( singleton, requestingVP, semEnv ); + handleStartSingleton_helper( singleton, requestingSlv, semEnv ); } void inline -handleStartDataSingleton( VPThdSemReq *semReq, SlaveVP *requestingVP, - VPThdSemEnv *semEnv ) - { VPThdSingleton *singleton; +handleStartDataSingleton( VthdSemReq *semReq, SlaveVP *requestingSlv, + VthdSemEnv *semEnv ) + { VthdSingleton *singleton; - if( *(semReq->singletonPtrAddr) == NULL ) - { singleton = VMS__malloc( sizeof(VPThdSingleton) ); - singleton->waitQ = makeVMSPrivQ(); - singleton->endInstrAddr = 0x0; + if( semReq->singleton == NULL ) + { singleton = VMS_PI__malloc( sizeof(VthdSingleton) ); + singleton->waitQ = 
makeVMSQ(); + singleton->savedRetAddr = 0x0; singleton->hasBeenStarted = FALSE; singleton->hasFinished = FALSE; - *(semReq->singletonPtrAddr) = singleton; + semReq->singleton = singleton; } else - singleton = *(semReq->singletonPtrAddr); - handleStartSingleton_helper( singleton, requestingVP, semEnv ); + singleton = semReq->singleton; + handleStartSingleton_helper( singleton, requestingSlv, semEnv ); } void inline -handleEndSingleton_helper( VPThdSingleton *singleton, SlaveVP *requestingVP, - VPThdSemEnv *semEnv ) - { PrivQueueStruc *waitQ; +handleEndSingleton_helper( VthdSingleton *singleton, SlaveVP *requestingSlv, + VthdSemEnv *semEnv ) + { VMSQueueStruc *waitQ; int32 numWaiting, i; - SlaveVP *resumingVP; + SlaveVP *resumingSlv; if( singleton->hasFinished ) { //by definition, only one slave should ever be able to run end singleton // so if this is true, is an error - //VMS__throw_exception( "singleton code ran twice", requestingVP, NULL); + //VMS_PI__throw_exception( "singleton code ran twice", requestingSlv, NULL); } singleton->hasFinished = TRUE; waitQ = singleton->waitQ; - numWaiting = numInPrivQ( waitQ ); + numWaiting = numInVMSQ( waitQ ); for( i = 0; i < numWaiting; i++ ) { //they will resume inside start singleton, then jmp to end singleton - resumingVP = readPrivQ( waitQ ); - resumingVP->dataRetFromReq = singleton->endInstrAddr; - resume_procr( resumingVP, semEnv ); + resumingSlv = readVMSQ( waitQ ); + resumingSlv->dataRetFromReq = singleton->savedRetAddr; + resume_slaveVP( resumingSlv, semEnv ); } - resume_procr( requestingVP, semEnv ); + resume_slaveVP( requestingSlv, semEnv ); } void inline -handleEndFnSingleton( VPThdSemReq *semReq, SlaveVP *requestingVP, - VPThdSemEnv *semEnv ) +handleEndFnSingleton( VthdSemReq *semReq, SlaveVP *requestingSlv, + VthdSemEnv *semEnv ) { - VPThdSingleton *singleton; + VthdSingleton *singleton; singleton = &(semEnv->fnSingletons[ semReq->singletonID ]); - handleEndSingleton_helper( singleton, requestingVP, semEnv ); + 
handleEndSingleton_helper( singleton, requestingSlv, semEnv ); } void inline -handleEndDataSingleton( VPThdSemReq *semReq, SlaveVP *requestingVP, - VPThdSemEnv *semEnv ) +handleEndDataSingleton( VthdSemReq *semReq, SlaveVP *requestingSlv, + VthdSemEnv *semEnv ) { - VPThdSingleton *singleton; + VthdSingleton *singleton; - singleton = *(semReq->singletonPtrAddr); - handleEndSingleton_helper( singleton, requestingVP, semEnv ); + singleton = semReq->singleton; + handleEndSingleton_helper( singleton, requestingSlv, semEnv ); } /*This executes the function in the masterVP, take the function - * pointer out of the request and call it, then resume the VP. + * pointer out of the request and call it, then resume the Slv. */ void inline -handleAtomic(VPThdSemReq *semReq, SlaveVP *requestingVP,VPThdSemEnv *semEnv) +handleAtomic(VthdSemReq *semReq, SlaveVP *requestingSlv,VthdSemEnv *semEnv) { semReq->fnToExecInMaster( semReq->dataForFn ); - resume_procr( requestingVP, semEnv ); + resume_slaveVP( requestingSlv, semEnv ); } -/*First, it looks at the VP's semantic data, to see the highest transactionID - * that VP +/*First, it looks at the Slv's semantic data, to see the highest transactionID + * that Slv * already has entered. If the current ID is not larger, it throws an * exception stating a bug in the code. *Otherwise it puts the current ID @@ -351,22 +351,22 @@ * used to check that exits are properly ordered. *Next it is uses transactionID as index into an array of transaction * structures. - *If the "VP_currently_executing" field is non-null, then put requesting VP + *If the "Slv_currently_executing" field is non-null, then put requesting Slv * into queue in the struct. (At some point a holder will request - * end-transaction, which will take this VP from the queue and resume it.) + * end-transaction, which will take this Slv from the queue and resume it.) *If NULL, then write requesting into the field and resume. 
*/ void inline -handleTransStart( VPThdSemReq *semReq, SlaveVP *requestingVP, - VPThdSemEnv *semEnv ) - { VPThdSemData *semData; +handleTransStart( VthdSemReq *semReq, SlaveVP *requestingSlv, + VthdSemEnv *semEnv ) + { VthdSemData *semData; TransListElem *nextTransElem; //check ordering of entering transactions is correct - semData = requestingVP->semanticData; + semData = requestingSlv->semanticData; if( semData->highestTransEntered > semReq->transID ) { //throw VMS exception, which shuts down VMS. - VMS__throw_exception( "transID smaller than prev", requestingVP, NULL); + VMS_PI__throw_exception( "transID smaller than prev", requestingSlv, NULL); } //add this trans ID to the list of transactions entered -- check when // end a transaction @@ -377,68 +377,68 @@ semData->lastTransEntered = nextTransElem; //get the structure for this transaction ID - VPThdTrans * + VthdTrans * transStruc = &(semEnv->transactionStrucs[ semReq->transID ]); - if( transStruc->VPCurrentlyExecuting == NULL ) + if( transStruc->SlvCurrentlyExecuting == NULL ) { - transStruc->VPCurrentlyExecuting = requestingVP; - resume_procr( requestingVP, semEnv ); + transStruc->SlvCurrentlyExecuting = requestingSlv; + resume_slaveVP( requestingSlv, semEnv ); } else - { //note, might make future things cleaner if save request with VP and + { //note, might make future things cleaner if save request with Slv and // add this trans ID to the linked list when gets out of queue. // but don't need for now, and lazy.. - writePrivQ( requestingVP, transStruc->waitingVPQ ); + writeVMSQ( requestingSlv, transStruc->waitingSlvQ ); } } /*Use the trans ID to get the transaction structure from the array. - *Look at VP_currently_executing to be sure it's same as requesting VP. + *Look at Slv_currently_executing to be sure it's same as requesting Slv. * If different, throw an exception, stating there's a bug in the code. *Next, take the first element off the list of entered transactions. 
* Check to be sure the ending transaction is the same ID as the next on * the list. If not, incorrectly nested so throw an exception. * *Next, get from the queue in the structure. - *If it's empty, set VP_currently_executing field to NULL and resume - * requesting VP. - *If get somethine, set VP_currently_executing to the VP from the queue, then + *If it's empty, set Slv_currently_executing field to NULL and resume + * requesting Slv. + *If get somethine, set Slv_currently_executing to the Slv from the queue, then * resume both. */ void inline -handleTransEnd( VPThdSemReq *semReq, SlaveVP *requestingVP, - VPThdSemEnv *semEnv) - { VPThdSemData *semData; - SlaveVP *waitingVP; - VPThdTrans *transStruc; +handleTransEnd( VthdSemReq *semReq, SlaveVP *requestingSlv, + VthdSemEnv *semEnv) + { VthdSemData *semData; + SlaveVP *waitingSlv; + VthdTrans *transStruc; TransListElem *lastTrans; transStruc = &(semEnv->transactionStrucs[ semReq->transID ]); - //make sure transaction ended in same VP as started it. - if( transStruc->VPCurrentlyExecuting != requestingVP ) + //make sure transaction ended in same Slv as started it. 
+ if( transStruc->SlvCurrentlyExecuting != requestingSlv ) { - VMS__throw_exception( "trans ended in diff VP", requestingVP, NULL ); + VMS_PI__throw_exception( "trans ended in diff Slv", requestingSlv, NULL ); } //make sure nesting is correct -- last ID entered should == this ID - semData = requestingVP->semanticData; + semData = requestingSlv->semanticData; lastTrans = semData->lastTransEntered; if( lastTrans->transID != semReq->transID ) { - VMS__throw_exception( "trans incorrectly nested", requestingVP, NULL ); + VMS_PI__throw_exception( "trans incorrectly nested", requestingSlv, NULL ); } semData->lastTransEntered = semData->lastTransEntered->nextTrans; - waitingVP = readPrivQ( transStruc->waitingVPQ ); - transStruc->VPCurrentlyExecuting = waitingVP; + waitingSlv = readVMSQ( transStruc->waitingSlvQ ); + transStruc->SlvCurrentlyExecuting = waitingSlv; - if( waitingVP != NULL ) - resume_procr( waitingVP, semEnv ); + if( waitingSlv != NULL ) + resume_slaveVP( waitingSlv, semEnv ); - resume_procr( requestingVP, semEnv ); + resume_slaveVP( requestingSlv, semEnv ); } diff -r e5d4d5871ac9 -r b3a881f25c5a Vthread_Request_Handlers.h --- a/Vthread_Request_Handlers.h Thu Mar 01 13:20:51 2012 -0800 +++ b/Vthread_Request_Handlers.h Sun Mar 04 14:29:42 2012 -0800 @@ -6,52 +6,52 @@ * */ -#ifndef _VPThread_REQ_H -#define _VPThread_REQ_H +#ifndef _Vthread_REQ_H +#define _Vthread_REQ_H -#include "VPThread.h" +#include "Vthread.h" -/*This header defines everything specific to the VPThread semantic plug-in +/*This header defines everything specific to the Vthread semantic plug-in */ inline void -handleMakeMutex( VPThdSemReq *semReq, VPThdSemEnv *semEnv); +handleMakeMutex( VthdSemReq *semReq, VthdSemEnv *semEnv); inline void -handleMutexLock( VPThdSemReq *semReq, VPThdSemEnv *semEnv); +handleMutexLock( VthdSemReq *semReq, VthdSemEnv *semEnv); inline void -handleMutexUnlock(VPThdSemReq *semReq, VPThdSemEnv *semEnv); +handleMutexUnlock(VthdSemReq *semReq, VthdSemEnv *semEnv); inline 
void -handleMakeCond( VPThdSemReq *semReq, VPThdSemEnv *semEnv); +handleMakeCond( VthdSemReq *semReq, VthdSemEnv *semEnv); inline void -handleCondWait( VPThdSemReq *semReq, VPThdSemEnv *semEnv); +handleCondWait( VthdSemReq *semReq, VthdSemEnv *semEnv); inline void -handleCondSignal( VPThdSemReq *semReq, VPThdSemEnv *semEnv); +handleCondSignal( VthdSemReq *semReq, VthdSemEnv *semEnv); void inline -handleMalloc(VPThdSemReq *semReq, SlaveVP *requestingVP,VPThdSemEnv *semEnv); +handleMalloc(VthdSemReq *semReq, SlaveVP *requestingSlv,VthdSemEnv *semEnv); void inline -handleFree( VPThdSemReq *semReq, SlaveVP *requestingVP, VPThdSemEnv *semEnv); +handleFree( VthdSemReq *semReq, SlaveVP *requestingSlv, VthdSemEnv *semEnv); inline void -handleStartFnSingleton( VPThdSemReq *semReq, SlaveVP *reqstingVP, - VPThdSemEnv *semEnv ); +handleStartFnSingleton( VthdSemReq *semReq, SlaveVP *reqstingSlv, + VthdSemEnv *semEnv ); inline void -handleEndFnSingleton( VPThdSemReq *semReq, SlaveVP *requestingVP, - VPThdSemEnv *semEnv ); +handleEndFnSingleton( VthdSemReq *semReq, SlaveVP *requestingSlv, + VthdSemEnv *semEnv ); inline void -handleStartDataSingleton( VPThdSemReq *semReq, SlaveVP *reqstingVP, - VPThdSemEnv *semEnv ); +handleStartDataSingleton( VthdSemReq *semReq, SlaveVP *reqstingSlv, + VthdSemEnv *semEnv ); inline void -handleEndDataSingleton( VPThdSemReq *semReq, SlaveVP *requestingVP, - VPThdSemEnv *semEnv ); +handleEndDataSingleton( VthdSemReq *semReq, SlaveVP *requestingSlv, + VthdSemEnv *semEnv ); void inline -handleAtomic( VPThdSemReq *semReq, SlaveVP *requestingVP, - VPThdSemEnv *semEnv); +handleAtomic( VthdSemReq *semReq, SlaveVP *requestingSlv, + VthdSemEnv *semEnv); void inline -handleTransStart( VPThdSemReq *semReq, SlaveVP *requestingVP, - VPThdSemEnv *semEnv ); +handleTransStart( VthdSemReq *semReq, SlaveVP *requestingSlv, + VthdSemEnv *semEnv ); void inline -handleTransEnd( VPThdSemReq *semReq, SlaveVP *requestingVP, - VPThdSemEnv *semEnv); +handleTransEnd( 
VthdSemReq *semReq, SlaveVP *requestingSlv, + VthdSemEnv *semEnv); -#endif /* _VPThread_REQ_H */ +#endif /* _Vthread_REQ_H */ diff -r e5d4d5871ac9 -r b3a881f25c5a Vthread_helper.c --- a/Vthread_helper.c Thu Mar 01 13:20:51 2012 -0800 +++ b/Vthread_helper.c Sun Mar 04 14:29:42 2012 -0800 @@ -1,48 +1,46 @@ #include -#include "VMS/VMS.h" -#include "VPThread.h" +#include "VMS_impl/VMS.h" +#include "Vthread.h" /*Re-use this in the entry-point fn */ inline SlaveVP * -VPThread__create_procr_helper( TopLevelFnPtr fnPtr, void *initData, - VPThdSemEnv *semEnv, int32 coreToScheduleOnto ) - { SlaveVP *newVP; - VPThdSemData *semData; +Vthread__create_slaveVP_helper( TopLevelFnPtr fnPtr, void *initData, + VthdSemEnv *semEnv, int32 coreToScheduleOnto ) + { SlaveVP *newSlv; + VthdSemData *semData; //This is running in master, so use internal version - newVP = VMS__create_procr( fnPtr, initData ); + newSlv = VMS_WL__create_slaveVP( fnPtr, initData ); - semEnv->numVP += 1; - - semData = VMS__malloc( sizeof(VPThdSemData) ); + semData = VMS_WL__malloc( sizeof(VthdSemData) ); semData->highestTransEntered = -1; semData->lastTransEntered = NULL; - newVP->semanticData = semData; + newSlv->semanticData = semData; //=================== Assign new processor to a core ===================== #ifdef SEQUENTIAL - newVP->coreAnimatedBy = 0; + newSlv->coreAnimatedBy = 0; #else if(coreToScheduleOnto < 0 || coreToScheduleOnto >= NUM_CORES ) { //out-of-range, so round-robin assignment - newVP->coreAnimatedBy = semEnv->nextCoreToGetNewVP; + newSlv->coreAnimatedBy = semEnv->nextCoreToGetNewSlv; - if( semEnv->nextCoreToGetNewVP >= NUM_CORES - 1 ) - semEnv->nextCoreToGetNewVP = 0; + if( semEnv->nextCoreToGetNewSlv >= NUM_CORES - 1 ) + semEnv->nextCoreToGetNewSlv = 0; else - semEnv->nextCoreToGetNewVP += 1; + semEnv->nextCoreToGetNewSlv += 1; } else //core num in-range, so use it - { newVP->coreAnimatedBy = coreToScheduleOnto; + { newSlv->coreAnimatedBy = coreToScheduleOnto; } #endif 
//======================================================================== - return newVP; + return newSlv; } \ No newline at end of file diff -r e5d4d5871ac9 -r b3a881f25c5a Vthread_helper.h --- a/Vthread_helper.h Thu Mar 01 13:20:51 2012 -0800 +++ b/Vthread_helper.h Sun Mar 04 14:29:42 2012 -0800 @@ -1,19 +1,19 @@ /* - * File: VPThread_helper.h + * File: Vthread_helper.h * Author: msach * * Created on June 10, 2011, 12:20 PM */ -#include "VMS/VMS.h" -#include "VPThread.h" +#include "VMS_impl/VMS.h" +#include "Vthread.h" -#ifndef VPTHREAD_HELPER_H -#define VPTHREAD_HELPER_H +#ifndef VTHREAD_HELPER_H +#define VTHREAD_HELPER_H inline SlaveVP * -VPThread__create_procr_helper( TopLevelFnPtr fnPtr, void *initData, - VPThdSemEnv *semEnv, int32 coreToScheduleOnto ); +Vthread__create_slaveVP_helper( TopLevelFnPtr fnPtr, void *initData, + VthdSemEnv *semEnv, int32 coreToScheduleOnto ); -#endif /* VPTHREAD_HELPER_H */ +#endif /* VTHREAD_HELPER_H */ diff -r e5d4d5871ac9 -r b3a881f25c5a Vthread_lib.c --- a/Vthread_lib.c Thu Mar 01 13:20:51 2012 -0800 +++ b/Vthread_lib.c Sun Mar 04 14:29:42 2012 -0800 @@ -6,25 +6,24 @@ #include #include -#include -#include "VMS/VMS.h" -#include "VPThread.h" -#include "VPThread_helper.h" -#include "VMS/Queue_impl/PrivateQueue.h" -#include "VMS/Hash_impl/PrivateHash.h" +#include "VMS_impl/VMS.h" +#include "Vthread.h" +#include "Vthread_helper.h" +#include "C_Libraries/Queue_impl/PrivateQueue.h" +#include "C_Libraries/Hash_impl/PrivateHash.h" //========================================================================== void -VPThread__init(); +Vthread__init(); void -VPThread__init_Seq(); +Vthread__init_Seq(); void -VPThread__init_Helper(); +Vthread__init_Helper(); //=========================================================================== @@ -34,24 +33,24 @@ * *There's a pattern for the outside sequential code to interact with the * VMS_HW code. - *The VMS_HW system is inside a boundary.. 
every VPThread system is in its + *The VMS_HW system is inside a boundary.. every Vthread system is in its * own directory that contains the functions for each of the processor types. * One of the processor types is the "seed" processor that starts the * cascade of creating all the processors that do the work. *So, in the directory is a file called "EntryPoint.c" that contains the * function, named appropriately to the work performed, that the outside * sequential code calls. This function follows a pattern: - *1) it calls VPThread__init() + *1) it calls Vthread__init() *2) it creates the initial data for the seed processor, which is passed * in to the function - *3) it creates the seed VPThread processor, with the data to start it with. - *4) it calls startVPThreadThenWaitUntilWorkDone + *3) it creates the seed Vthread processor, with the data to start it with. + *4) it calls startVthreadThenWaitUntilWorkDone *5) it gets the returnValue from the transfer struc and returns that * from the function * - *For now, a new VPThread system has to be created via VPThread__init every + *For now, a new Vthread system has to be created via Vthread__init every * time an entry point function is called -- later, might add letting the - * VPThread system be created once, and let all the entry points just reuse + * Vthread system be created once, and let all the entry points just reuse * it -- want to be as simple as possible now, and see by using what makes * sense for later.. 
*/ @@ -74,47 +73,47 @@ * any of the data reachable from initData passed in to here */ void -VPThread__create_seed_procr_and_do_work( TopLevelFnPtr fnPtr, void *initData ) - { VPThdSemEnv *semEnv; - SlaveVP *seedVP; +Vthread__create_seed_slaveVP_and_do_work( TopLevelFnPtr fnPtr, void *initData ) + { VthdSemEnv *semEnv; + SlaveVP *seedSlv; #ifdef SEQUENTIAL - VPThread__init_Seq(); //debug sequential exe + Vthread__init_Seq(); //debug sequential exe #else - VPThread__init(); //normal multi-thd + Vthread__init(); //normal multi-thd #endif semEnv = _VMSMasterEnv->semanticEnv; - //VPThread starts with one processor, which is put into initial environ, + //Vthread starts with one processor, which is put into initial environ, // and which then calls create() to create more, thereby expanding work - seedVP = VPThread__create_procr_helper( fnPtr, initData, semEnv, -1 ); + seedSlv = Vthread__create_slaveVP_helper( fnPtr, initData, semEnv, -1 ); - resume_procr( seedVP, semEnv ); + resume_slaveVP( seedSlv, semEnv ); #ifdef SEQUENTIAL - VMS__start_the_work_then_wait_until_done_Seq(); //debug sequential exe + VMS_SS__start_the_work_then_wait_until_done_Seq(); //debug sequential exe #else - VMS__start_the_work_then_wait_until_done(); //normal multi-thd + VMS_SS__start_the_work_then_wait_until_done(); //normal multi-thd #endif - VPThread__cleanup_after_shutdown(); + Vthread__cleanup_after_shutdown(); } inline int32 -VPThread__giveMinWorkUnitCycles( float32 percentOverhead ) +Vthread__giveMinWorkUnitCycles( float32 percentOverhead ) { return MIN_WORK_UNIT_CYCLES; } inline int32 -VPThread__giveIdealNumWorkUnits() +Vthread__giveIdealNumWorkUnits() { return NUM_SCHED_SLOTS * NUM_CORES; } inline int32 -VPThread__give_number_of_cores_to_schedule_onto() +Vthread__give_number_of_cores_to_schedule_onto() { return NUM_CORES; } @@ -123,8 +122,8 @@ * saves jump point, and second jumps back several times to get reliable time */ inline void -VPThread__start_primitive() - { 
saveLowTimeStampCountInto( ((VPThdSemEnv *)(_VMSMasterEnv->semanticEnv))-> +Vthread__start_primitive() + { saveLowTimeStampCountInto( ((VthdSemEnv *)(_VMSMasterEnv->semanticEnv))-> primitiveStartTime ); } @@ -134,17 +133,17 @@ * also to throw out any "weird" values due to OS interrupt or TSC rollover */ inline int32 -VPThread__end_primitive_and_give_cycles() +Vthread__end_primitive_and_give_cycles() { int32 endTime, startTime; //TODO: fix by repeating time-measurement saveLowTimeStampCountInto( endTime ); - startTime=((VPThdSemEnv*)(_VMSMasterEnv->semanticEnv))->primitiveStartTime; + startTime=((VthdSemEnv*)(_VMSMasterEnv->semanticEnv))->primitiveStartTime; return (endTime - startTime); } //=========================================================================== // -/*Initializes all the data-structures for a VPThread system -- but doesn't +/*Initializes all the data-structures for a Vthread system -- but doesn't * start it running yet! * * @@ -154,56 +153,52 @@ * for creating the seed processor and then starting the work. 
*/ void -VPThread__init() +Vthread__init() { - VMS__init(); + VMS_SS__init(); //masterEnv, a global var, now is partially set up by init_VMS - //Moved here from VMS.c because this is not parallel construct independent - MakeTheMeasHists(); - - VPThread__init_Helper(); + Vthread__init_Helper(); } #ifdef SEQUENTIAL void -VPThread__init_Seq() +Vthread__init_Seq() { - VMS__init_Seq(); + VMS_SS__init_Seq(); flushRegisters(); //masterEnv, a global var, now is partially set up by init_VMS - VPThread__init_Helper(); + Vthread__init_Helper(); } #endif void -VPThread__init_Helper() - { VPThdSemEnv *semanticEnv; - PrivQueueStruc **readyVPQs; +Vthread__init_Helper() + { VthdSemEnv *semanticEnv; + PrivQueueStruc **readySlvQs; int coreIdx, i; //Hook up the semantic layer's plug-ins to the Master virt procr - _VMSMasterEnv->requestHandler = &VPThread__Request_Handler; - _VMSMasterEnv->slaveScheduler = &VPThread__schedule_virt_procr; + _VMSMasterEnv->requestHandler = &Vthread__Request_Handler; + _VMSMasterEnv->slaveAssigner = &Vthread__schedule_slaveVP; //create the semantic layer's environment (all its data) and add to // the master environment - semanticEnv = VMS__malloc( sizeof( VPThdSemEnv ) ); + semanticEnv = VMS_WL__malloc( sizeof( VthdSemEnv ) ); _VMSMasterEnv->semanticEnv = semanticEnv; //create the ready queue - readyVPQs = VMS__malloc( NUM_CORES * sizeof(PrivQueueStruc *) ); + readySlvQs = VMS_WL__malloc( NUM_CORES * sizeof(PrivQueueStruc *) ); for( coreIdx = 0; coreIdx < NUM_CORES; coreIdx++ ) { - readyVPQs[ coreIdx ] = makeVMSPrivQ(); + readySlvQs[ coreIdx ] = makeVMSQ(); } - semanticEnv->readyVPQs = readyVPQs; + semanticEnv->readySlvQs = readySlvQs; - semanticEnv->numVP = 0; - semanticEnv->nextCoreToGetNewVP = 0; + semanticEnv->nextCoreToGetNewSlv = 0; semanticEnv->mutexDynArrayInfo = makePrivDynArrayOfSize( (void*)&(semanticEnv->mutexDynArray), INIT_NUM_MUTEX ); @@ -216,23 +211,23 @@ //semanticEnv->transactionStrucs = makeDynArrayInfo( ); for( i = 0; i < 
NUM_STRUCS_IN_SEM_ENV; i++ ) { - semanticEnv->fnSingletons[i].endInstrAddr = NULL; + semanticEnv->fnSingletons[i].savedRetAddr = NULL; semanticEnv->fnSingletons[i].hasBeenStarted = FALSE; semanticEnv->fnSingletons[i].hasFinished = FALSE; - semanticEnv->fnSingletons[i].waitQ = makeVMSPrivQ(); - semanticEnv->transactionStrucs[i].waitingVPQ = makeVMSPrivQ(); + semanticEnv->fnSingletons[i].waitQ = makeVMSQ(); + semanticEnv->transactionStrucs[i].waitingSlvQ = makeVMSQ(); } } -/*Frees any memory allocated by VPThread__init() then calls VMS__shutdown +/*Frees any memory allocated by Vthread__init() then calls VMS__shutdown */ void -VPThread__cleanup_after_shutdown() - { /*VPThdSemEnv *semEnv; +Vthread__cleanup_after_shutdown() + { /*VthdSemEnv *semEnv; int32 coreIdx, idx, highestIdx; - VPThdMutex **mutexArray, *mutex; - VPThdCond **condArray, *cond; */ + VthdMutex **mutexArray, *mutex; + VthdCond **condArray, *cond; */ /* It's all allocated inside VMS's big chunk -- that's about to be freed, so * nothing to do here @@ -242,11 +237,11 @@ for( coreIdx = 0; coreIdx < NUM_CORES; coreIdx++ ) { - free( semEnv->readyVPQs[coreIdx]->startOfData ); - free( semEnv->readyVPQs[coreIdx] ); + free( semEnv->readySlvQs[coreIdx]->startOfData ); + free( semEnv->readySlvQs[coreIdx] ); } - free( semEnv->readyVPQs ); + free( semEnv->readySlvQs ); //==== Free mutexes and mutex array ==== @@ -277,7 +272,7 @@ free( _VMSMasterEnv->semanticEnv ); */ - VMS__cleanup_at_end_of_shutdown(); + VMS_SS__cleanup_at_end_of_shutdown(); } @@ -286,165 +281,165 @@ /* */ inline SlaveVP * -VPThread__create_thread( TopLevelFnPtr fnPtr, void *initData, - SlaveVP *creatingVP ) - { VPThdSemReq reqData; +Vthread__create_thread( TopLevelFnPtr fnPtr, void *initData, + SlaveVP *creatingSlv ) + { VthdSemReq reqData; //the semantic request data is on the stack and disappears when this - // call returns -- it's guaranteed to remain in the VP's stack for as - // long as the VP is suspended. 
+ // call returns -- it's guaranteed to remain in the Slv's stack for as + // long as the Slv is suspended. reqData.reqType = 0; //know the type because is a VMS create req reqData.coreToScheduleOnto = -1; //means round-robin schedule reqData.fnPtr = fnPtr; reqData.initData = initData; - reqData.requestingVP = creatingVP; + reqData.requestingSlv = creatingSlv; - VMS__send_create_procr_req( &reqData, creatingVP ); + VMS_WL__send_create_slaveVP_req( &reqData, creatingSlv ); - return creatingVP->dataRetFromReq; + return creatingSlv->dataRetFromReq; } inline SlaveVP * -VPThread__create_thread_with_affinity( TopLevelFnPtr fnPtr, void *initData, - SlaveVP *creatingVP, int32 coreToScheduleOnto ) - { VPThdSemReq reqData; +Vthread__create_thread_with_affinity( TopLevelFnPtr fnPtr, void *initData, + SlaveVP *creatingSlv, int32 coreToScheduleOnto ) + { VthdSemReq reqData; //the semantic request data is on the stack and disappears when this - // call returns -- it's guaranteed to remain in the VP's stack for as - // long as the VP is suspended. + // call returns -- it's guaranteed to remain in the Slv's stack for as + // long as the Slv is suspended. 
reqData.reqType = 0; //know type because in a VMS create req reqData.coreToScheduleOnto = coreToScheduleOnto; reqData.fnPtr = fnPtr; reqData.initData = initData; - reqData.requestingVP = creatingVP; + reqData.requestingSlv = creatingSlv; - VMS__send_create_procr_req( &reqData, creatingVP ); + VMS_WL__send_create_slaveVP_req( &reqData, creatingSlv ); } inline void -VPThread__dissipate_thread( SlaveVP *procrToDissipate ) +Vthread__dissipate_thread( SlaveVP *procrToDissipate ) { - VMS__send_dissipate_req( procrToDissipate ); + VMS_WL__send_dissipate_req( procrToDissipate ); } //=========================================================================== void * -VPThread__malloc( size_t sizeToMalloc, SlaveVP *animVP ) - { VPThdSemReq reqData; +Vthread__malloc( size_t sizeToMalloc, SlaveVP *animSlv ) + { VthdSemReq reqData; reqData.reqType = malloc_req; reqData.sizeToMalloc = sizeToMalloc; - reqData.requestingVP = animVP; + reqData.requestingSlv = animSlv; - VMS__send_sem_request( &reqData, animVP ); + VMS_WL__send_sem_request( &reqData, animSlv ); - return animVP->dataRetFromReq; + return animSlv->dataRetFromReq; } /*Sends request to Master, which does the work of freeing */ void -VPThread__free( void *ptrToFree, SlaveVP *animVP ) - { VPThdSemReq reqData; +Vthread__free( void *ptrToFree, SlaveVP *animSlv ) + { VthdSemReq reqData; reqData.reqType = free_req; reqData.ptrToFree = ptrToFree; - reqData.requestingVP = animVP; + reqData.requestingSlv = animSlv; - VMS__send_sem_request( &reqData, animVP ); + VMS_WL__send_sem_request( &reqData, animSlv ); } //=========================================================================== inline void -VPThread__set_globals_to( void *globals ) +Vthread__set_globals_to( void *globals ) { - ((VPThdSemEnv *) + ((VthdSemEnv *) (_VMSMasterEnv->semanticEnv))->applicationGlobals = globals; } inline void * -VPThread__give_globals() +Vthread__give_globals() { - return((VPThdSemEnv *) (_VMSMasterEnv->semanticEnv))->applicationGlobals; + 
return((VthdSemEnv *) (_VMSMasterEnv->semanticEnv))->applicationGlobals; } //=========================================================================== inline int32 -VPThread__make_mutex( SlaveVP *animVP ) - { VPThdSemReq reqData; +Vthread__make_mutex( SlaveVP *animSlv ) + { VthdSemReq reqData; reqData.reqType = make_mutex; - reqData.requestingVP = animVP; + reqData.requestingSlv = animSlv; - VMS__send_sem_request( &reqData, animVP ); + VMS_WL__send_sem_request( &reqData, animSlv ); - return (int32)animVP->dataRetFromReq; //mutexid is 32bit wide + return (int32)animSlv->dataRetFromReq; //mutexid is 32bit wide } inline void -VPThread__mutex_lock( int32 mutexIdx, SlaveVP *acquiringVP ) - { VPThdSemReq reqData; +Vthread__mutex_lock( int32 mutexIdx, SlaveVP *acquiringSlv ) + { VthdSemReq reqData; reqData.reqType = mutex_lock; reqData.mutexIdx = mutexIdx; - reqData.requestingVP = acquiringVP; + reqData.requestingSlv = acquiringSlv; - VMS__send_sem_request( &reqData, acquiringVP ); + VMS_WL__send_sem_request( &reqData, acquiringSlv ); } inline void -VPThread__mutex_unlock( int32 mutexIdx, SlaveVP *releasingVP ) - { VPThdSemReq reqData; +Vthread__mutex_unlock( int32 mutexIdx, SlaveVP *releasingSlv ) + { VthdSemReq reqData; reqData.reqType = mutex_unlock; reqData.mutexIdx = mutexIdx; - reqData.requestingVP = releasingVP; + reqData.requestingSlv = releasingSlv; - VMS__send_sem_request( &reqData, releasingVP ); + VMS_WL__send_sem_request( &reqData, releasingSlv ); } //======================= inline int32 -VPThread__make_cond( int32 ownedMutexIdx, SlaveVP *animPr) - { VPThdSemReq reqData; +Vthread__make_cond( int32 ownedMutexIdx, SlaveVP *animSlv) + { VthdSemReq reqData; reqData.reqType = make_cond; reqData.mutexIdx = ownedMutexIdx; - reqData.requestingVP = animVP; + reqData.requestingSlv = animSlv; - VMS__send_sem_request( &reqData, animVP ); + VMS_WL__send_sem_request( &reqData, animSlv ); - return (int32)animVP->dataRetFromReq; //condIdx is 32 bit wide + return 
(int32)animSlv->dataRetFromReq; //condIdx is 32 bit wide } inline void -VPThread__cond_wait( int32 condIdx, SlaveVP *waitingPr) - { VPThdSemReq reqData; +Vthread__cond_wait( int32 condIdx, SlaveVP *waitingSlv) + { VthdSemReq reqData; reqData.reqType = cond_wait; reqData.condIdx = condIdx; - reqData.requestingVP = waitingVP; + reqData.requestingSlv = waitingSlv; - VMS__send_sem_request( &reqData, waitingVP ); + VMS_WL__send_sem_request( &reqData, waitingSlv ); } inline void * -VPThread__cond_signal( int32 condIdx, SlaveVP *signallingVP ) - { VPThdSemReq reqData; +Vthread__cond_signal( int32 condIdx, SlaveVP *signallingSlv ) + { VthdSemReq reqData; reqData.reqType = cond_signal; reqData.condIdx = condIdx; - reqData.requestingVP = signallingVP; + reqData.requestingSlv = signallingSlv; - VMS__send_sem_request( &reqData, signallingVP ); + VMS_WL__send_sem_request( &reqData, signallingSlv ); } @@ -460,27 +455,24 @@ * trying to get the data through from different cores. */ -/*asm function declarations*/ -void asm_save_ret_to_singleton(VPThdSingleton *singletonPtrAddr); -void asm_write_ret_from_singleton(VPThdSingleton *singletonPtrAddr); - /*Fn singleton uses ID as index into array of singleton structs held in the * semantic environment. */ void -VPThread__start_fn_singleton( int32 singletonID, SlaveVP *animVP ) +Vthread__start_fn_singleton( int32 singletonID, SlaveVP *animSlv ) { - VPThdSemReq reqData; + VthdSemReq reqData; // reqData.reqType = singleton_fn_start; reqData.singletonID = singletonID; - VMS__send_sem_request( &reqData, animVP ); - if( animVP->dataRetFromReq ) //will be 0 or addr of label in end singleton + VMS_WL__send_sem_request( &reqData, animSlv ); + if( animSlv->dataRetFromReq != 0 ) //addr of matching end-singleton { - VPThdSemEnv *semEnv = VMS__give_sem_env_for( animVP ); - asm_write_ret_from_singleton(&(semEnv->fnSingletons[ singletonID])); + VthdSemEnv *semEnv = VMS_int__give_sem_env_for( animSlv ); //not protected! 
+ VMS_int__return_to_addr_in_ptd_to_loc( + &((semEnv->fnSingletons[singletonID]).savedRetAddr) ); } } @@ -489,22 +481,21 @@ * location. */ void -VPThread__start_data_singleton( VPThdSingleton **singletonAddr, SlaveVP *animVP ) +Vthread__start_data_singleton( VthdSingleton *singleton, SlaveVP *animSlv ) { - VPThdSemReq reqData; + VthdSemReq reqData; - if( *singletonAddr && (*singletonAddr)->hasFinished ) + if( singleton->savedRetAddr && singleton->hasFinished ) goto JmpToEndSingleton; reqData.reqType = singleton_data_start; - reqData.singletonPtrAddr = singletonAddr; + reqData.singleton = singleton; - VMS__send_sem_request( &reqData, animVP ); - if( animVP->dataRetFromReq ) //either 0 or end singleton's return addr + VMS_WL__send_sem_request( &reqData, animSlv ); + if( animSlv->dataRetFromReq ) //either 0 or end singleton's return addr { JmpToEndSingleton: - asm_write_ret_from_singleton(*singletonAddr); - + VMS_int__return_to_addr_in_ptd_to_loc(&(singleton->savedRetAddr)); } //now, simply return //will exit either from the start singleton call or the end-singleton call @@ -517,25 +508,26 @@ * inside is shared by all invocations of a given singleton ID. 
*/ void -VPThread__end_fn_singleton( int32 singletonID, SlaveVP *animVP ) +Vthread__end_fn_singleton( int32 singletonID, SlaveVP *animSlv ) { - VPThdSemReq reqData; + VthdSemReq reqData; //don't need this addr until after at least one singleton has reached // this function - VPThdSemEnv *semEnv = VMS__give_sem_env_for( animVP ); - asm_write_ret_from_singleton(&(semEnv->fnSingletons[ singletonID])); + VthdSemEnv *semEnv = VMS_int__give_sem_env_for( animSlv ); + VMS_int__return_to_addr_in_ptd_to_loc( + &((semEnv->fnSingletons[singletonID]).savedRetAddr) ); reqData.reqType = singleton_fn_end; reqData.singletonID = singletonID; - VMS__send_sem_request( &reqData, animVP ); + VMS_WL__send_sem_request( &reqData, animSlv ); } void -VPThread__end_data_singleton( VPThdSingleton **singletonPtrAddr, SlaveVP *animVP ) +Vthread__end_data_singleton( VthdSingleton *singleton, SlaveVP *animSlv ) { - VPThdSemReq reqData; + VthdSemReq reqData; //don't need this addr until after singleton struct has reached // this function for first time @@ -544,12 +536,12 @@ // one instance in the code of this function. However, can use this // function in different places for different data-singletons. - asm_save_ret_to_singleton(*singletonPtrAddr); + VMS_int__save_return_into_ptd_to_loc_then_do_ret(&(singleton->savedRetAddr)); - reqData.reqType = singleton_data_end; - reqData.singletonPtrAddr = singletonPtrAddr; + reqData.reqType = singleton_data_end; + reqData.singleton = singleton; - VMS__send_sem_request( &reqData, animVP ); + VMS_WL__send_sem_request( &reqData, animSlv ); } @@ -558,69 +550,69 @@ * at a time. * *It suspends to the master, and the request handler takes the function - * pointer out of the request and calls it, then resumes the VP. + * pointer out of the request and calls it, then resumes the Slv. *Only very short functions should be called this way -- for longer-running * isolation, use transaction-start and transaction-end, which run the code * between as work-code. 
*/ void -VPThread__animate_short_fn_in_isolation( PtrToAtomicFn ptrToFnToExecInMaster, - void *data, SlaveVP *animVP ) +Vthread__animate_short_fn_in_isolation( PtrToAtomicFn ptrToFnToExecInMaster, + void *data, SlaveVP *animSlv ) { - VPThdSemReq reqData; + VthdSemReq reqData; // reqData.reqType = atomic; reqData.fnToExecInMaster = ptrToFnToExecInMaster; reqData.dataForFn = data; - VMS__send_sem_request( &reqData, animVP ); + VMS_WL__send_sem_request( &reqData, animSlv ); } /*This suspends to the master. - *First, it looks at the VP's data, to see the highest transactionID that VP + *First, it looks at the Slv's data, to see the highest transactionID that Slv * already has entered. If the current ID is not larger, it throws an * exception stating a bug in the code. Otherwise it puts the current ID * there, and adds the ID to a linked list of IDs entered -- the list is * used to check that exits are properly ordered. *Next it is uses transactionID as index into an array of transaction * structures. - *If the "VP_currently_executing" field is non-null, then put requesting VP + *If the "Slv_currently_executing" field is non-null, then put requesting Slv * into queue in the struct. (At some point a holder will request - * end-transaction, which will take this VP from the queue and resume it.) + * end-transaction, which will take this Slv from the queue and resume it.) *If NULL, then write requesting into the field and resume. */ void -VPThread__start_transaction( int32 transactionID, SlaveVP *animVP ) +Vthread__start_transaction( int32 transactionID, SlaveVP *animSlv ) { - VPThdSemReq reqData; + VthdSemReq reqData; // reqData.reqType = trans_start; reqData.transID = transactionID; - VMS__send_sem_request( &reqData, animVP ); + VMS_WL__send_sem_request( &reqData, animSlv ); } /*This suspends to the master, then uses transactionID as index into an * array of transaction structures. - *It looks at VP_currently_executing to be sure it's same as requesting VP. 
+ *It looks at Slv_currently_executing to be sure it's same as requesting Slv. * If different, throws an exception, stating there's a bug in the code. *Next it looks at the queue in the structure. - *If it's empty, it sets VP_currently_executing field to NULL and resumes. - *If something in, gets it, sets VP_currently_executing to that VP, then + *If it's empty, it sets Slv_currently_executing field to NULL and resumes. + *If something in, gets it, sets Slv_currently_executing to that Slv, then * resumes both. */ void -VPThread__end_transaction( int32 transactionID, SlaveVP *animVP ) +Vthread__end_transaction( int32 transactionID, SlaveVP *animSlv ) { - VPThdSemReq reqData; + VthdSemReq reqData; // reqData.reqType = trans_end; reqData.transID = transactionID; - VMS__send_sem_request( &reqData, animVP ); + VMS_WL__send_sem_request( &reqData, animSlv ); } //===========================================================================