# HG changeset patch
# User Nina Engelhardt
# Date 1373370777 -7200
# Node ID 5131f941f42c378352f174fce03664e087e4618a
# Parent  e649c2387a602a835df6dd9d32e4ca9d7b08d8dd
update to newer VMS var names

diff -r e649c2387a60 -r 5131f941f42c VCilk.h
--- a/VCilk.h	Thu Jun 02 13:54:34 2011 +0200
+++ b/VCilk.h	Tue Jul 09 13:52:57 2013 +0200
@@ -9,9 +9,9 @@
 #ifndef _VCilk_H
 #define _VCilk_H

-#include "VMS/Queue_impl/PrivateQueue.h"
-#include "VMS/Hash_impl/PrivateHash.h"
-#include "VMS/VMS.h"
+#include "Queue_impl/PrivateQueue.h"
+#include "Hash_impl/PrivateHash.h"
+#include "VMS_impl/VMS.h"

@@ -21,6 +21,7 @@
 //===========================================================================
 #define NUM_STRUCS_IN_SEM_ENV 1000
+#define MIN_WORK_UNIT_CYCLES  20000

 //===========================================================================
 typedef struct _VCilkSemReq  VCilkSemReq;
 typedef void (*PtrToAtomicFn ) ( void * ); //executed atomically in master

@@ -58,12 +59,12 @@
 struct _VCilkSemReq
 { enum VCilkReqType  reqType;
-  VirtProcr         *requestingPr;
+  SlaveVP           *requestingPr;

   int32              sizeToMalloc;
   void              *ptrToFree;
-  VirtProcrFnPtr     fnPtr;
+  TopLevelFnPtr      fnPtr;
   void              *initData;
   int32              coreToSpawnOnto;

@@ -79,7 +80,7 @@
 typedef struct
  {
-  VirtProcr       *VPCurrentlyExecuting;
+  SlaveVP         *VPCurrentlyExecuting;
   PrivQueueStruc  *waitingVPQ;
  } VCilkTrans;

@@ -110,7 +111,7 @@
 { int32           syncPending;
   int32           numLiveChildren;
-  VirtProcr      *parentPr;
+  SlaveVP        *parentPr;

   int32           highestTransEntered;
   TransListElem  *lastTransEntered;

@@ -120,7 +121,7 @@
 //===========================================================================
 void
-VCilk__create_seed_procr_and_do_work( VirtProcrFnPtr fn, void *initData );
+VCilk__create_seed_procr_and_do_work( TopLevelFnPtr fn, void *initData );

 int32
 VCilk__giveMinWorkUnitCycles( float32 percentOverhead );

@@ -145,56 +146,57 @@
 //=======================
 void inline
-VCilk__spawn( int32 coreToSpawnOnto, VirtProcrFnPtr fnPtr,
-              void *initData, VirtProcr *creatingPr );
+VCilk__spawn( int32 coreToSpawnOnto, TopLevelFnPtr fnPtr,
+              void *initData, SlaveVP *creatingPr );

 int32
 VCilk__give_number_of_cores_to_spawn_onto();

 void
-VCilk__sync( VirtProcr *animatingPr );
+VCilk__sync( SlaveVP *animatingPr );

 void *
-VCilk__malloc( int32 sizeToMalloc, VirtProcr *animPr );
+VCilk__malloc( int32 sizeToMalloc, SlaveVP *animPr );

 void
-VCilk__free( void *ptrToFree, VirtProcr *animPr );
+VCilk__free( void *ptrToFree, SlaveVP *animPr );

 void
-VCilk__dissipate_procr( VirtProcr *procrToDissipate );
+VCilk__dissipate_procr( SlaveVP *procrToDissipate );


 //======================= Concurrency Stuff ======================
 void
-VCilk__start_fn_singleton( int32 singletonID, VirtProcr *animPr );
+VCilk__start_fn_singleton( int32 singletonID, SlaveVP *animPr );

 void
-VCilk__end_fn_singleton( int32 singletonID, VirtProcr *animPr );
+VCilk__end_fn_singleton( int32 singletonID, SlaveVP *animPr );

 void
-VCilk__start_data_singleton( VCilkSingleton **singeltonAddr, VirtProcr *animPr );
+VCilk__start_data_singleton( VCilkSingleton **singeltonAddr, SlaveVP *animPr );

 void
-VCilk__end_data_singleton( VCilkSingleton **singletonAddr, VirtProcr *animPr );
+VCilk__end_data_singleton( VCilkSingleton **singletonAddr, SlaveVP *animPr );

 void
 VCilk__animate_short_fn_in_isolation( PtrToAtomicFn ptrToFnToExecInMaster,
-                                      void *data, VirtProcr *animPr );
+                                      void *data, SlaveVP *animPr );

 void
-VCilk__start_transaction( int32 transactionID, VirtProcr *animPr );
+VCilk__start_transaction( int32 transactionID, SlaveVP *animPr );

 void
-VCilk__end_transaction( int32 transactionID, VirtProcr *animPr );
+VCilk__end_transaction( int32 transactionID, SlaveVP *animPr );


 //========================= Internal use only =============================
 void
-VCilk__Request_Handler( VirtProcr *requestingPr, void *_semEnv );
+VCilk__Request_Handler( SlaveVP *requestingPr, void *_semEnv );

-VirtProcr *
-VCilk__schedule_virt_procr( void *_semEnv, int coreNum );
+SlaveVP *
+VCilk__schedule_virt_procr( void *_semEnv, AnimSlot *slot );

-
+void inline
+resume_procr( SlaveVP *procr, VCilkSemEnv *semEnv );

 #endif /* _VCilk_H */
diff -r e649c2387a60 -r 5131f941f42c VCilk_PluginFns.c
--- a/VCilk_PluginFns.c	Thu Jun 02 13:54:34 2011 +0200
+++ b/VCilk_PluginFns.c	Tue Jul 09 13:52:57 2013 +0200
@@ -7,54 +7,54 @@
 #include
 #include

-#include "VMS/Queue_impl/PrivateQueue.h"
+#include "Queue_impl/PrivateQueue.h"
 #include "VCilk.h"

 //===========================================================================
 void inline
-handleSync( VirtProcr *requestingPr, VCilkSemEnv *semEnv );
+handleSync( SlaveVP *requestingPr, VCilkSemEnv *semEnv );

 void inline
-handleMalloc( VCilkSemReq *semReq, VirtProcr *requestingPr,
+handleMalloc( VCilkSemReq *semReq, SlaveVP *requestingPr,
               VCilkSemEnv *semEnv );

 void inline
-handleFree( VCilkSemReq *semReq, VirtProcr *requestingPr,
+handleFree( VCilkSemReq *semReq, SlaveVP *requestingPr,
             VCilkSemEnv *semEnv );

 void inline
-handleDissipate( VirtProcr *requestingPr, VCilkSemEnv *semEnv );
+handleDissipate( SlaveVP *requestingPr, VCilkSemEnv *semEnv );

 void inline
-handleSpawn( VMSReqst *req, VirtProcr *requestingPr, VCilkSemEnv *semEnv );
+handleSpawn( VMSReqst *req, SlaveVP *requestingPr, VCilkSemEnv *semEnv );

 void inline
-dispatchSemReq( VMSReqst *req, VirtProcr *requestingPr, VCilkSemEnv *semEnv);
+dispatchSemReq( VMSReqst *req, SlaveVP *requestingPr, VCilkSemEnv *semEnv);

 void inline
-handleTransEnd( VCilkSemReq *semReq, VirtProcr *requestingPr,
+handleTransEnd( VCilkSemReq *semReq, SlaveVP *requestingPr,
                 VCilkSemEnv*semEnv);

 void inline
-handleTransStart( VCilkSemReq *semReq, VirtProcr *requestingPr,
+handleTransStart( VCilkSemReq *semReq, SlaveVP *requestingPr,
                   VCilkSemEnv *semEnv );

 void inline
-handleAtomic( VCilkSemReq *semReq, VirtProcr *requestingPr,
+handleAtomic( VCilkSemReq *semReq, SlaveVP *requestingPr,
               VCilkSemEnv *semEnv);

 inline void
-handleStartFnSingleton( VCilkSemReq *semReq, VirtProcr *reqstingPr,
+handleStartFnSingleton( VCilkSemReq *semReq, SlaveVP *reqstingPr,
                         VCilkSemEnv *semEnv );

 inline void
-handleEndFnSingleton( VCilkSemReq *semReq, VirtProcr *requestingPr,
+handleEndFnSingleton( VCilkSemReq *semReq, SlaveVP *requestingPr,
                       VCilkSemEnv *semEnv );

 inline void
-handleStartDataSingleton( VCilkSemReq *semReq, VirtProcr *reqstingPr,
+handleStartDataSingleton( VCilkSemReq *semReq, SlaveVP *reqstingPr,
                           VCilkSemEnv *semEnv );

 inline void
-handleEndDataSingleton( VCilkSemReq *semReq, VirtProcr *requestingPr,
+handleEndDataSingleton( VCilkSemReq *semReq, SlaveVP *requestingPr,
                         VCilkSemEnv *semEnv );

 void inline
-resume_procr( VirtProcr *procr, VCilkSemEnv *semEnv );
+resume_procr( SlaveVP *procr, VCilkSemEnv *semEnv );

 //===========================================================================

@@ -67,11 +67,11 @@
 * to the slave -- return FALSE to let Master loop know scheduling that
 * slave failed.
 */
-VirtProcr *
-VCilk__schedule_virt_procr( void *_semEnv, int coreNum )
- { VirtProcr   *schedPr;
+SlaveVP *
+VCilk__schedule_virt_procr( void *_semEnv, AnimSlot *slot )
+ { SlaveVP     *schedPr;
   VCilkSemEnv *semEnv;
-
+  int coreNum = slot->coreSlotIsOn;
   semEnv  = (VCilkSemEnv *)_semEnv;

   schedPr = readPrivQ( semEnv->readyVPQs[coreNum] );

@@ -94,15 +94,14 @@
 * Processor, and initial data.
 */
 void
-VCilk__Request_Handler( VirtProcr *requestingPr, void *_semEnv )
+VCilk__Request_Handler( SlaveVP *requestingPr, void *_semEnv )
 { VCilkSemEnv *semEnv;
   VMSReqst    *req;
-  VCilkSemReq *semReq;

   semEnv = (VCilkSemEnv *)_semEnv;

-  req = VMS__take_next_request_out_of( requestingPr );
+  req = VMS_PI__take_next_request_out_of( requestingPr );

   while( req != NULL )
    {

@@ -117,23 +116,24 @@
          handleSpawn( req, requestingPr, semEnv);
          break;
       case dissipate:   handleDissipate( requestingPr, semEnv);
-         break;
-      case VMSSemantic: VMS__handle_VMSSemReq(req, requestingPr, semEnv,
+         return;
+      case VMSSemantic: VMS_PI__handle_VMSSemReq(req, requestingPr, semEnv,
                                                             &resume_procr);
          break;
       default:
          break;
      }

-     req = VMS__take_next_request_out_of( requestingPr );
+     //FIXME: if req was dissipate, this is accessing free'd memory...
+     req = VMS_PI__take_next_request_out_of( requestingPr );
    } //while( req != NULL )
 }

 void inline
-dispatchSemReq( VMSReqst *req, VirtProcr *reqPr, VCilkSemEnv *semEnv )
+dispatchSemReq( VMSReqst *req, SlaveVP *reqPr, VCilkSemEnv *semEnv )
 { VCilkSemReq *semReq;

-  semReq = VMS__take_sem_reqst_from(req);
+  semReq = VMS_PI__take_sem_reqst_from(req);
   if( semReq == NULL ) return;

   switch( semReq->reqType )

@@ -166,7 +166,7 @@
 //=========================== Request Handlers ==============================

 void inline
-resume_procr( VirtProcr *procr, VCilkSemEnv *semEnv )
+resume_procr( SlaveVP *procr, VCilkSemEnv *semEnv )
 { writePrivQ( procr, semEnv->readyVPQs[ procr->coreAnimatedBy] );
 }

@@ -179,9 +179,9 @@
 * If no, then set sync-pending flag.
 */
 inline void
-handleSync( VirtProcr *requestingPr, VCilkSemEnv *semEnv )
+handleSync( SlaveVP *requestingPr, VCilkSemEnv *semEnv )
 {
-   Meas_startSync
+   //Meas_startSync;
   if(((VCilkSemData *)(requestingPr->semanticData))->numLiveChildren == 0 )
    { //no live children to wait for
      resume_procr( requestingPr, semEnv );

@@ -190,17 +190,17 @@
    { ((VCilkSemData *)(requestingPr->semanticData))->syncPending = TRUE;
    }
-   Meas_endSync
+   //Meas_endSync;
 }

 /*
 */
 inline void
-handleMalloc( VCilkSemReq *semReq, VirtProcr *requestingPr,
+handleMalloc( VCilkSemReq *semReq, SlaveVP *requestingPr,
               VCilkSemEnv *semEnv )
 { void *ptr;

-  ptr = VMS__malloc( semReq->sizeToMalloc );
+  ptr = VMS_PI__malloc( semReq->sizeToMalloc );
   requestingPr->dataRetFromReq = ptr;
   resume_procr( requestingPr, semEnv );
 }

@@ -208,10 +208,10 @@
 /*
 */
 void inline
-handleFree( VCilkSemReq *semReq, VirtProcr *requestingPr,
+handleFree( VCilkSemReq *semReq, SlaveVP *requestingPr,
             VCilkSemEnv *semEnv )
 {
-  VMS__free( semReq->ptrToFree );
+  VMS_PI__free( semReq->ptrToFree );
   resume_procr( requestingPr, semEnv );
 }

@@ -219,16 +219,16 @@
 //============================== VMS requests ===============================

 /*Re-use this in the entry-point fn
 */
-inline VirtProcr *
-VCilk__create_procr_helper( VirtProcrFnPtr fnPtr, void *initData,
-    VirtProcr *requestingPr, VCilkSemEnv *semEnv, int32 coreToScheduleOnto )
- { VirtProcr    *newPr;
+inline SlaveVP *
+VCilk__create_procr_helper( TopLevelFnPtr fnPtr, void *initData,
+    SlaveVP *requestingPr, VCilkSemEnv *semEnv, int32 coreToScheduleOnto )
+ { SlaveVP      *newPr;
   VCilkSemData *semData;

   //This is running in master, so use internal version
-  newPr = VMS__create_procr( fnPtr, initData );
+  newPr = VMS_PI__create_slaveVP( fnPtr, initData );

-  semData = VMS__malloc( sizeof(VCilkSemData) );
+  semData = VMS_PI__malloc( sizeof(VCilkSemData) );
   semData->numLiveChildren = 0;
   semData->parentPr        = requestingPr;

@@ -272,12 +272,12 @@
 void inline
-handleSpawn( VMSReqst *req, VirtProcr *requestingPr, VCilkSemEnv *semEnv )
+handleSpawn( VMSReqst *req, SlaveVP *requestingPr, VCilkSemEnv *semEnv )
 { VCilkSemReq *semReq;
-  VirtProcr   *newPr;
+  SlaveVP     *newPr;

-  Meas_startSpawn
-  semReq = VMS__take_sem_reqst_from( req );
+  //Meas_startSpawn;
+  semReq = VMS_PI__take_sem_reqst_from( req );

   newPr = VCilk__create_procr_helper( semReq->fnPtr, semReq->initData,
                         requestingPr, semEnv, semReq->coreToSpawnOnto );

@@ -287,7 +287,7 @@
   resume_procr( newPr,        semEnv );
   resume_procr( requestingPr, semEnv );

-  Meas_endSpawn
+  //Meas_endSpawn;
 }

@@ -297,9 +297,9 @@
 *-- if set, then resume the parentVP.
 */
 void inline
-handleDissipate( VirtProcr *requestingPr, VCilkSemEnv *semEnv )
+handleDissipate( SlaveVP *requestingPr, VCilkSemEnv *semEnv )
 {
-  VirtProcr *
+  SlaveVP *
    parentPr = ((VCilkSemData *) (requestingPr->semanticData))->parentPr;

   if( parentPr == NULL ) //means this is seed processor being dissipated

@@ -324,17 +324,17 @@
       }
    }

-  VMS__free( requestingPr->semanticData );
+  VMS_PI__free( requestingPr->semanticData );

   //Now do normal dissipate
   //call VMS to free_all AppVP state -- stack and so on
-  VMS__dissipate_procr( requestingPr );
+  VMS_PI__dissipate_slaveVP( requestingPr );

   semEnv->numVirtPr -= 1;
   if( semEnv->numVirtPr == 0 )
    { //no more work, so shutdown
-     VMS__shutdown();
+     VMS_SS__shutdown();
    }
 }

@@ -345,7 +345,7 @@
 * end-label. Else, sets flag and resumes normally.
 */
 void inline
-handleStartSingleton_helper( VCilkSingleton *singleton, VirtProcr *reqstingPr,
+handleStartSingleton_helper( VCilkSingleton *singleton, SlaveVP *reqstingPr,
                              VCilkSemEnv *semEnv )
 {
   if( singleton->hasFinished )

@@ -368,7 +368,7 @@
    }
 }
 void inline
-handleStartFnSingleton( VCilkSemReq *semReq, VirtProcr *requestingPr,
+handleStartFnSingleton( VCilkSemReq *semReq, SlaveVP *requestingPr,
                         VCilkSemEnv *semEnv )
 { VCilkSingleton *singleton;

@@ -376,13 +376,13 @@
   handleStartSingleton_helper( singleton, requestingPr, semEnv );
 }
 void inline
-handleStartDataSingleton( VCilkSemReq *semReq, VirtProcr *requestingPr,
+handleStartDataSingleton( VCilkSemReq *semReq, SlaveVP *requestingPr,
                           VCilkSemEnv *semEnv )
 { VCilkSingleton *singleton;

   if( *(semReq->singletonPtrAddr) == NULL )
-   { singleton = VMS__malloc( sizeof(VCilkSingleton) );
-     singleton->waitQ          = makeVMSPrivQ();
+   { singleton = VMS_PI__malloc( sizeof(VCilkSingleton) );
+     singleton->waitQ          = makePrivQ();
     singleton->endInstrAddr   = 0x0;
     singleton->hasBeenStarted = FALSE;
     singleton->hasFinished    = FALSE;

@@ -395,16 +395,16 @@

 void inline
-handleEndSingleton_helper( VCilkSingleton *singleton, VirtProcr *requestingPr,
+handleEndSingleton_helper( VCilkSingleton *singleton, SlaveVP *requestingPr,
                            VCilkSemEnv *semEnv )
 { PrivQueueStruc *waitQ;
   int32           numWaiting, i;
-  VirtProcr      *resumingPr;
+  SlaveVP        *resumingPr;

   if( singleton->hasFinished )
    { //by definition, only one slave should ever be able to run end singleton
      // so if this is true, is an error
-     //VMS__throw_exception( "singleton code ran twice", requestingPr, NULL);
+     //VMS_PI__throw_exception( "singleton code ran twice", requestingPr, NULL);
    }

   singleton->hasFinished = TRUE;

@@ -421,7 +421,7 @@
 }

 void inline
-handleEndFnSingleton( VCilkSemReq *semReq, VirtProcr *requestingPr,
+handleEndFnSingleton( VCilkSemReq *semReq, SlaveVP *requestingPr,
                       VCilkSemEnv *semEnv )
 { VCilkSingleton *singleton;

@@ -430,7 +430,7 @@
   handleEndSingleton_helper( singleton, requestingPr, semEnv );
 }
 void inline
-handleEndDataSingleton( VCilkSemReq *semReq, VirtProcr *requestingPr,
+handleEndDataSingleton( VCilkSemReq *semReq, SlaveVP *requestingPr,
                         VCilkSemEnv *semEnv )
 { VCilkSingleton *singleton;

@@ -444,7 +444,7 @@
 * pointer out of the request and call it, then resume the VP.
 */
 void inline
-handleAtomic( VCilkSemReq *semReq, VirtProcr *requestingPr,
+handleAtomic( VCilkSemReq *semReq, SlaveVP *requestingPr,
               VCilkSemEnv *semEnv )
 {
   semReq->fnToExecInMaster( semReq->dataForFn );

@@ -466,7 +466,7 @@
 *If NULL, then write requesting into the field and resume.
 */
 void inline
-handleTransStart( VCilkSemReq *semReq, VirtProcr *requestingPr,
+handleTransStart( VCilkSemReq *semReq, SlaveVP *requestingPr,
                   VCilkSemEnv *semEnv )
 { VCilkSemData  *semData;
   TransListElem *nextTransElem;

@@ -475,12 +475,12 @@
   semData = requestingPr->semanticData;
   if( semData->highestTransEntered > semReq->transID )
    { //throw VMS exception, which shuts down VMS.
-     VMS__throw_exception( "transID smaller than prev", requestingPr, NULL);
+     VMS_PI__throw_exception( "transID smaller than prev", requestingPr, NULL);
    }
   //add this trans ID to the list of transactions entered -- check when
   // end a transaction
   semData->highestTransEntered = semReq->transID;
-  nextTransElem = VMS__malloc( sizeof(TransListElem) );
+  nextTransElem = VMS_PI__malloc( sizeof(TransListElem) );
   nextTransElem->transID   = semReq->transID;
   nextTransElem->nextTrans = semData->lastTransEntered;
   semData->lastTransEntered = nextTransElem;

@@ -517,10 +517,10 @@
 * resume both.
 */
 void inline
-handleTransEnd( VCilkSemReq *semReq, VirtProcr *requestingPr,
+handleTransEnd( VCilkSemReq *semReq, SlaveVP *requestingPr,
                 VCilkSemEnv *semEnv )
 { VCilkSemData  *semData;
-  VirtProcr     *waitingPr;
+  SlaveVP       *waitingPr;
   VCilkTrans    *transStruc;
   TransListElem *lastTrans;

@@ -529,7 +529,7 @@
   //make sure transaction ended in same VP as started it.
   if( transStruc->VPCurrentlyExecuting != requestingPr )
    {
-     VMS__throw_exception( "trans ended in diff VP", requestingPr, NULL );
+     VMS_PI__throw_exception( "trans ended in diff VP", requestingPr, NULL );
    }

   //make sure nesting is correct -- last ID entered should == this ID
@@ -537,7 +537,7 @@
   lastTrans = semData->lastTransEntered;
   if( lastTrans->transID != semReq->transID )
    {
-     VMS__throw_exception( "trans incorrectly nested", requestingPr, NULL );
+     VMS_PI__throw_exception( "trans incorrectly nested", requestingPr, NULL );
    }

   semData->lastTransEntered = semData->lastTransEntered->nextTrans;
diff -r e649c2387a60 -r 5131f941f42c VCilk_lib.c
--- a/VCilk_lib.c	Thu Jun 02 13:54:34 2011 +0200
+++ b/VCilk_lib.c	Tue Jul 09 13:52:57 2013 +0200
@@ -7,10 +7,10 @@
 #include
 #include

-#include "VMS/VMS.h"
+#include "VMS_impl/VMS.h"
 #include "VCilk.h"
-#include "VMS/Queue_impl/PrivateQueue.h"
-#include "VMS/Hash_impl/PrivateHash.h"
+#include "Queue_impl/PrivateQueue.h"
+#include "Hash_impl/PrivateHash.h"

 //==========================================================================

@@ -72,7 +72,9 @@
 * sense for later..
 */

-
+inline SlaveVP *
+VCilk__create_procr_helper( TopLevelFnPtr fnPtr, void *initData,
+    SlaveVP *requestingPr, VCilkSemEnv *semEnv, int32 coreToScheduleOnto );

 //===========================================================================

@@ -90,9 +92,9 @@
 * any of the data reachable from initData passed in to here
 */
 void
-VCilk__create_seed_procr_and_do_work( VirtProcrFnPtr fnPtr, void *initData )
+VCilk__create_seed_procr_and_do_work( TopLevelFnPtr fnPtr, void *initData )
 { VCilkSemEnv *semEnv;
-  VirtProcr   *seedPr;
+  SlaveVP     *seedPr;

   #ifdef SEQUENTIAL
   VCilk__init_Seq();  //debug sequential exe

@@ -103,13 +105,13 @@
   //VCilk starts with one processor, which is put into initial environ,
   // and which then calls create() to create more, thereby expanding work
-  seedPr = (VirtProcr*)VCilk__create_procr_helper( fnPtr, initData, NULL, semEnv, -1 );
+  seedPr = VCilk__create_procr_helper( fnPtr, initData, NULL, semEnv, -1 );
   resume_procr( seedPr, semEnv );

   #ifdef SEQUENTIAL
   VMS__start_the_work_then_wait_until_done_Seq(); //debug sequential exe
   #else
-  VMS__start_the_work_then_wait_until_done();     //normal multi-thd
+  VMS_SS__start_the_work_then_wait_until_done();  //normal multi-thd
   #endif

   VCilk__cleanup_at_end_of_shutdown();

@@ -125,7 +127,7 @@
 int32
 VCilk__giveIdealNumWorkUnits()
 {
-   return NUM_SCHED_SLOTS * NUM_CORES;
+   return NUM_ANIM_SLOTS * NUM_CORES;
 }

 /*To measure how long a primitive operation takes, when calculating number of
@@ -171,7 +173,7 @@
 void
 VCilk__init()
 {
-   VMS__init();
+   VMS_SS__init();
   //masterEnv, a global var, now is partially set up by init_VMS

   VCilk__init_Helper();
@@ -181,7 +183,7 @@
 void
 VCilk__init_Seq()
 {
-   VMS__init_Seq();
+   VMS_SS__init_Seq();
   //masterEnv, a global var, now is partially set up by init_VMS

   VCilk__init_Helper();

@@ -198,27 +200,28 @@
   //Hook up the semantic layer's plug-ins to the Master virt procr
   _VMSMasterEnv->requestHandler = &VCilk__Request_Handler;
-  _VMSMasterEnv->slaveScheduler = &VCilk__schedule_virt_procr;
+  _VMSMasterEnv->slaveAssigner  = &VCilk__schedule_virt_procr;

   //create the semantic layer's environment (all its data) and add to
   // the master environment
-  semanticEnv = VMS__malloc( sizeof( VCilkSemEnv ) );
+  semanticEnv = VMS_PI__malloc( sizeof( VCilkSemEnv ) );
   _VMSMasterEnv->semanticEnv = semanticEnv;

   //create the ready queue, hash tables used for pairing send to receive
   // and so forth
   //TODO: add hash tables for pairing sends with receives, and
   // initialize the data ownership system
-  readyVPQs = VMS__malloc( NUM_CORES * sizeof(PrivQueueStruc *) );
+  readyVPQs = VMS_PI__malloc( NUM_CORES * sizeof(PrivQueueStruc *) );

   for( coreIdx = 0; coreIdx < NUM_CORES; coreIdx++ )
    {
-     readyVPQs[ coreIdx ] = makeVMSPrivQ();
+     readyVPQs[ coreIdx ] = makePrivQ();
    }

   semanticEnv->readyVPQs          = readyVPQs;
   semanticEnv->nextCoreToGetNewPr = 0;
+  semanticEnv->numVirtPr          = 0;

   //TODO: bug -- turn these arrays into dyn arrays to eliminate limit
   //semanticEnv->singletonHasBeenExecutedFlags = makeDynArrayInfo( );

@@ -230,8 +233,8 @@
      semanticEnv->fnSingletons[i].endInstrAddr   = NULL;
      semanticEnv->fnSingletons[i].hasBeenStarted = FALSE;
      semanticEnv->fnSingletons[i].hasFinished    = FALSE;
-     semanticEnv->fnSingletons[i].waitQ          = makeVMSPrivQ();
-     semanticEnv->transactionStrucs[i].waitingVPQ = makeVMSPrivQ();
+     semanticEnv->fnSingletons[i].waitQ          = makePrivQ();
+     semanticEnv->transactionStrucs[i].waitingVPQ = makePrivQ();
    }
 }

@@ -246,18 +249,18 @@

   semanticEnv = _VMSMasterEnv->semanticEnv;

-  /*
+  int32 coreIdx;
   for( coreIdx = 0; coreIdx < NUM_CORES; coreIdx++ )
    {
-     VMS__free( semanticEnv->readyVPQs[coreIdx]->startOfData );
-     VMS__free( semanticEnv->readyVPQs[coreIdx] );
+     VMS_PI__free( semanticEnv->readyVPQs[coreIdx]->startOfData );
+     VMS_PI__free( semanticEnv->readyVPQs[coreIdx] );
    }
-  VMS__free( semanticEnv->readyVPQs );
+  VMS_PI__free( semanticEnv->readyVPQs );

-  VMS__free( _VMSMasterEnv->semanticEnv );
-  */
-  VMS__cleanup_at_end_of_shutdown();
+  VMS_PI__free( _VMSMasterEnv->semanticEnv );
+
+  VMS_SS__cleanup_at_end_of_shutdown();
 }

@@ -268,8 +271,8 @@
 * allocates, so has to be done inside master
 */
 void inline
-VCilk__spawn( int32 coreToSpawnOnto, VirtProcrFnPtr fnPtr,
-              void *initData, VirtProcr *requestingPr )
+VCilk__spawn( int32 coreToSpawnOnto, TopLevelFnPtr fnPtr,
+              void *initData, SlaveVP *requestingPr )
 { VCilkSemReq reqData;

   //the semantic request data is on the stack and disappears when this

@@ -281,7 +284,7 @@
   reqData.initData     = initData;
   reqData.requestingPr = requestingPr;

-  VMS__send_create_procr_req( &reqData, requestingPr );
+  VMS_WL__send_create_slaveVP_req( &reqData, requestingPr );
 }

@@ -296,35 +299,35 @@
 /*This runs inside slave VP, so can't do any freeing -- have to do in plugin
 */
 void inline
-VCilk__dissipate_procr( VirtProcr *procrToDissipate )
+VCilk__dissipate_procr( SlaveVP *procrToDissipate )
 {
-  VMS__send_dissipate_req( procrToDissipate );
+  VMS_WL__send_dissipate_req( procrToDissipate );
 }


 //===========================================================================
 void
-VCilk__sync( VirtProcr *animPr )
+VCilk__sync( SlaveVP *animPr )
 { VCilkSemReq reqData;

   reqData.reqType      = syncReq;
   reqData.requestingPr = animPr;

-  VMS__send_sem_request( &reqData, animPr );
+  VMS_WL__send_sem_request( &reqData, animPr );
 }

 void *
-VCilk__malloc( int32 sizeToMalloc, VirtProcr *animPr )
+VCilk__malloc( int32 sizeToMalloc, SlaveVP *animPr )
 { VCilkSemReq reqData;

   reqData.reqType      = mallocReq;
   reqData.requestingPr = animPr;
   reqData.sizeToMalloc = sizeToMalloc;

-  VMS__send_sem_request( &reqData, animPr );
+  VMS_WL__send_sem_request( &reqData, animPr );

   return animPr->dataRetFromReq;
 }

@@ -333,14 +336,14 @@
 /*Sends request to Master, which does the work of freeing
 */
 void
-VCilk__free( void *ptrToFree, VirtProcr *animPr )
+VCilk__free( void *ptrToFree, SlaveVP *animPr )
 { VCilkSemReq reqData;

   reqData.reqType      = freeReq;
   reqData.requestingPr = animPr;
   reqData.ptrToFree    = ptrToFree;

-  VMS__send_sem_request( &reqData, animPr );
+  VMS_WL__send_sem_request( &reqData, animPr );
 }

 //===========================================================================

@@ -355,11 +358,16 @@
 * trying to get the data through from different cores.
 */

+
+/*asm function declarations*/
+void asm_save_ret_to_singleton(VCilkSingleton *singletonPtrAddr);
+void asm_write_ret_from_singleton(VCilkSingleton *singletonPtrAddr);
+
 /*Fn singleton uses ID as index into array of singleton structs held in the
 * semantic environment.
 */
 void
-VCilk__start_fn_singleton( int32 singletonID, VirtProcr *animPr )
+VCilk__start_fn_singleton( int32 singletonID, SlaveVP *animPr )
 { VCilkSemReq reqData;

@@ -367,15 +375,11 @@
   reqData.reqType     = singleton_fn_start;
   reqData.singletonID = singletonID;

-  VMS__send_sem_request( &reqData, animPr );
+  VMS_WL__send_sem_request( &reqData, animPr );
   if( animPr->dataRetFromReq ) //will be 0 or addr of label in end singleton
    {
-     asm volatile("movl %0, %%eax; \
-                   jmp *%%eax" \
-       /* outputs */ : \
-       /* inputs  */ : "g"(animPr->dataRetFromReq) \
-       /* clobber */ : "memory", "%eax", "%ebx", "%ecx", "%edx","%edi","%esi"\
-                  );
+     VCilkSemEnv *semEnv = VMS_PI__give_sem_env_for( animPr );
+     asm_write_ret_from_singleton(&(semEnv->fnSingletons[ singletonID]));
    }
 }

@@ -384,7 +388,7 @@
 * location.
 */
 void
-VCilk__start_data_singleton( VCilkSingleton **singletonAddr, VirtProcr *animPr )
+VCilk__start_data_singleton( VCilkSingleton **singletonAddr, SlaveVP *animPr )
 { VCilkSemReq reqData;

@@ -394,20 +398,13 @@
   reqData.reqType          = singleton_data_start;
   reqData.singletonPtrAddr = singletonAddr;

-  VMS__send_sem_request( &reqData, animPr );
+  VMS_WL__send_sem_request( &reqData, animPr );
   if( animPr->dataRetFromReq ) //either 0 or end singleton's return addr
    { //Assembly code changes the return addr on the stack to the one
      // saved into the singleton by the end-singleton-fn
      //The return addr is at 0x4(%%ebp)
-     JmpToEndSingleton:
-     asm volatile("movl %0, %%eax; \
-                   movl (%%eax), %%ebx; \
-                   movl (%%ebx), %%eax; \
-                   movl %%eax, 0x4(%%ebp);" \
-       /* outputs */ : \
-       /* inputs  */ : "m"(singletonAddr) \
-       /* clobber */ : "memory", "%eax", "%ebx", "%ecx", "%edx","%edi","%esi"\
-                  );
+     JmpToEndSingleton:
+     asm_write_ret_from_singleton(*singletonAddr);
    }
   //now, simply return
   //will exit either from the start singleton call or the end-singleton call

@@ -420,26 +417,26 @@
 * inside is shared by all invocations of a given singleton ID.
 */
 void
-VCilk__end_fn_singleton( int32 singletonID, VirtProcr *animPr )
+VCilk__end_fn_singleton( int32 singletonID, SlaveVP *animPr )
 { VCilkSemReq reqData;

   //don't need this addr until after at least one singleton has reached
   // this function
-  VCilkSemEnv *semEnv = VMS__give_sem_env_for( animPr );
-  semEnv->fnSingletons[ singletonID].endInstrAddr = &&EndSingletonInstrAddr;
+  VCilkSemEnv *semEnv = VMS_PI__give_sem_env_for( animPr );
+  asm_write_ret_from_singleton(&(semEnv->fnSingletons[ singletonID]));

   reqData.reqType     = singleton_fn_end;
   reqData.singletonID = singletonID;

-  VMS__send_sem_request( &reqData, animPr );
+  VMS_WL__send_sem_request( &reqData, animPr );

 EndSingletonInstrAddr:
   return;
 }

 void
-VCilk__end_data_singleton( VCilkSingleton **singletonPtrAddr, VirtProcr *animPr )
+VCilk__end_data_singleton( VCilkSingleton **singletonPtrAddr, SlaveVP *animPr )
 { VCilkSemReq reqData;

@@ -454,19 +451,12 @@
   //Assembly code takes the return addr off the stack and saves
   // into the singleton.  The first field in the singleton is the
   // "endInstrAddr" field, and the return addr is at 0x4(%%ebp)
-  asm volatile("movl 0x4(%%ebp), %%eax; \
-                movl %0, %%ebx; \
-                movl (%%ebx), %%ecx; \
-                movl %%eax, (%%ecx);" \
-       /* outputs */ : \
-       /* inputs  */ : "m"(singletonPtrAddr) \
-       /* clobber */ : "memory", "%eax", "%ebx", "%ecx", "%edx","%edi","%esi"\
-                  );
+  asm_save_ret_to_singleton(*singletonPtrAddr);

   reqData.reqType          = singleton_data_end;
   reqData.singletonPtrAddr = singletonPtrAddr;

-  VMS__send_sem_request( &reqData, animPr );
+  VMS_WL__send_sem_request( &reqData, animPr );
 }

 /*This executes the function in the masterVP, so it executes in isolation
@@ -481,7 +471,7 @@
 */
 void
 VCilk__animate_short_fn_in_isolation( PtrToAtomicFn ptrToFnToExecInMaster,
-                                      void *data, VirtProcr *animPr )
+                                      void *data, SlaveVP *animPr )
 { VCilkSemReq reqData;

@@ -490,7 +480,7 @@
   reqData.fnToExecInMaster = ptrToFnToExecInMaster;
   reqData.dataForFn        = data;

-  VMS__send_sem_request( &reqData, animPr );
+  VMS_WL__send_sem_request( &reqData, animPr );
 }

@@ -508,7 +498,7 @@
 *If NULL, then write requesting into the field and resume.
 */
 void
-VCilk__start_transaction( int32 transactionID, VirtProcr *animPr )
+VCilk__start_transaction( int32 transactionID, SlaveVP *animPr )
 { VCilkSemReq reqData;

@@ -516,7 +506,7 @@
   reqData.reqType = trans_start;
   reqData.transID = transactionID;

-  VMS__send_sem_request( &reqData, animPr );
+  VMS_WL__send_sem_request( &reqData, animPr );
 }

 /*This suspends to the master, then uses transactionID as index into an
@@ -529,7 +519,7 @@
 * resumes both.
 */
 void
-VCilk__end_transaction( int32 transactionID, VirtProcr *animPr )
+VCilk__end_transaction( int32 transactionID, SlaveVP *animPr )
 { VCilkSemReq reqData;

@@ -537,5 +527,5 @@
   reqData.reqType = trans_end;
   reqData.transID = transactionID;

-  VMS__send_sem_request( &reqData, animPr );
+  VMS_WL__send_sem_request( &reqData, animPr );
 }
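
For orientation, below is a minimal workload sketch written against the renamed interface declared in VCilk.h above (SlaveVP, TopLevelFnPtr, VCilk__create_seed_procr_and_do_work, VCilk__spawn, VCilk__sync, VCilk__dissipate_procr, VCilk__give_number_of_cores_to_spawn_onto). It is a sketch only: this patch does not show the TopLevelFnPtr typedef, so the ( void *initData, SlaveVP *animPr ) signature used here is an assumption, as is the convention that each spawned VP ends by dissipating itself; int32 is assumed to be visible through the headers VCilk.h pulls in.

    #include "VCilk.h"

    //one grain of work, animated by its own slave VP (signature assumed)
    void childWork( void *initData, SlaveVP *animPr )
     {
       //... do the actual work on initData here ...
       VCilk__dissipate_procr( animPr );   //retire this VP when its work is done
     }

    //seed function handed to VCilk__create_seed_procr_and_do_work (signature assumed)
    void seedWork( void *initData, SlaveVP *animPr )
     { int32 core, numCores;

       numCores = VCilk__give_number_of_cores_to_spawn_onto();
       for( core = 0; core < numCores; core++ )
        { VCilk__spawn( core, &childWork, NULL, animPr );  //one child VP per core
        }
       VCilk__sync( animPr );              //suspend until numLiveChildren reaches 0
       VCilk__dissipate_procr( animPr );   //VMS_SS__shutdown() runs once no live VPs remain
     }

    int main()
     {
       VCilk__create_seed_procr_and_do_work( &seedWork, NULL );  //returns after shutdown
       return 0;
     }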