repository: VMS/VMS_Implementations/VCilk_impls/VCilk__MC_shared_impl
changeset 9:5131f941f42c tip
update to newer VMS var names
author      Nina Engelhardt <nengel@mailbox.tu-berlin.de>
date        Tue, 09 Jul 2013 13:52:57 +0200
parents     e649c2387a60
children    (none)
files       VCilk.h  VCilk_PluginFns.c  VCilk_lib.c
diffstat    3 files changed, 160 insertions(+), 168 deletions(-)
line diff
--- a/VCilk.h	Thu Jun 02 13:54:34 2011 +0200
+++ b/VCilk.h	Tue Jul 09 13:52:57 2013 +0200
@@ -9,9 +9,9 @@
 #ifndef _VCilk_H
 #define _VCilk_H
 
-#include "VMS/Queue_impl/PrivateQueue.h"
-#include "VMS/Hash_impl/PrivateHash.h"
-#include "VMS/VMS.h"
+#include "Queue_impl/PrivateQueue.h"
+#include "Hash_impl/PrivateHash.h"
+#include "VMS_impl/VMS.h"
 
 
 
@@ -21,6 +21,7 @@
 //===========================================================================
 #define NUM_STRUCS_IN_SEM_ENV 1000
 
+#define MIN_WORK_UNIT_CYCLES 20000
 //===========================================================================
 typedef struct _VCilkSemReq VCilkSemReq;
 typedef void (*PtrToAtomicFn ) ( void * ); //executed atomically in master
@@ -58,12 +59,12 @@
 
 struct _VCilkSemReq
  { enum VCilkReqType reqType;
-   VirtProcr *requestingPr;
+   SlaveVP *requestingPr;
 
   int32 sizeToMalloc;
   void *ptrToFree;
 
-  VirtProcrFnPtr fnPtr;
+  TopLevelFnPtr fnPtr;
   void *initData;
   int32 coreToSpawnOnto;
 
@@ -79,7 +80,7 @@
 
 typedef struct
  {
-   VirtProcr *VPCurrentlyExecuting;
+   SlaveVP *VPCurrentlyExecuting;
   PrivQueueStruc *waitingVPQ;
  }
 VCilkTrans;
@@ -110,7 +111,7 @@
 {
  int32 syncPending;
  int32 numLiveChildren;
- VirtProcr *parentPr;
+ SlaveVP *parentPr;
 
  int32 highestTransEntered;
  TransListElem *lastTransEntered;
@@ -120,7 +121,7 @@
 //===========================================================================
 
 void
-VCilk__create_seed_procr_and_do_work( VirtProcrFnPtr fn, void *initData );
+VCilk__create_seed_procr_and_do_work( TopLevelFnPtr fn, void *initData );
 
 int32
 VCilk__giveMinWorkUnitCycles( float32 percentOverhead );
@@ -145,56 +146,57 @@
 //=======================
 
 void inline
-VCilk__spawn( int32 coreToSpawnOnto, VirtProcrFnPtr fnPtr,
-              void *initData, VirtProcr *creatingPr );
+VCilk__spawn( int32 coreToSpawnOnto, TopLevelFnPtr fnPtr,
+              void *initData, SlaveVP *creatingPr );
 
 int32
 VCilk__give_number_of_cores_to_spawn_onto();
 
 void
-VCilk__sync( VirtProcr *animatingPr );
+VCilk__sync( SlaveVP *animatingPr );
 
 void *
-VCilk__malloc( int32 sizeToMalloc, VirtProcr *animPr );
+VCilk__malloc( int32 sizeToMalloc, SlaveVP *animPr );
 
 void
-VCilk__free( void *ptrToFree, VirtProcr *animPr );
+VCilk__free( void *ptrToFree, SlaveVP *animPr );
 
 void
-VCilk__dissipate_procr( VirtProcr *procrToDissipate );
+VCilk__dissipate_procr( SlaveVP *procrToDissipate );
 
 
 //======================= Concurrency Stuff ======================
 void
-VCilk__start_fn_singleton( int32 singletonID, VirtProcr *animPr );
+VCilk__start_fn_singleton( int32 singletonID, SlaveVP *animPr );
 
 void
-VCilk__end_fn_singleton( int32 singletonID, VirtProcr *animPr );
+VCilk__end_fn_singleton( int32 singletonID, SlaveVP *animPr );
 
 void
-VCilk__start_data_singleton( VCilkSingleton **singeltonAddr, VirtProcr *animPr );
+VCilk__start_data_singleton( VCilkSingleton **singeltonAddr, SlaveVP *animPr );
 
 void
-VCilk__end_data_singleton( VCilkSingleton **singletonAddr, VirtProcr *animPr );
+VCilk__end_data_singleton( VCilkSingleton **singletonAddr, SlaveVP *animPr );
 
 void
 VCilk__animate_short_fn_in_isolation( PtrToAtomicFn ptrToFnToExecInMaster,
-                                      void *data, VirtProcr *animPr );
+                                      void *data, SlaveVP *animPr );
 
 void
-VCilk__start_transaction( int32 transactionID, VirtProcr *animPr );
+VCilk__start_transaction( int32 transactionID, SlaveVP *animPr );
 
 void
-VCilk__end_transaction( int32 transactionID, VirtProcr *animPr );
+VCilk__end_transaction( int32 transactionID, SlaveVP *animPr );
 
 
 //========================= Internal use only =============================
 void
-VCilk__Request_Handler( VirtProcr *requestingPr, void *_semEnv );
+VCilk__Request_Handler( SlaveVP *requestingPr, void *_semEnv );
 
-VirtProcr *
-VCilk__schedule_virt_procr( void *_semEnv, int coreNum );
+SlaveVP *
+VCilk__schedule_virt_procr( void *_semEnv, AnimSlot *slot );
 
-
+void inline
+resume_procr( SlaveVP *procr, VCilkSemEnv *semEnv );
 #endif /* _VCilk_H */
 
--- a/VCilk_PluginFns.c	Thu Jun 02 13:54:34 2011 +0200
+++ b/VCilk_PluginFns.c	Tue Jul 09 13:52:57 2013 +0200
@@ -7,54 +7,54 @@
 #include <stdio.h>
 #include <stdlib.h>
 
-#include "VMS/Queue_impl/PrivateQueue.h"
+#include "Queue_impl/PrivateQueue.h"
 #include "VCilk.h"
 
 
 
 //===========================================================================
 void inline
-handleSync( VirtProcr *requestingPr, VCilkSemEnv *semEnv );
+handleSync( SlaveVP *requestingPr, VCilkSemEnv *semEnv );
 
 void inline
-handleMalloc( VCilkSemReq *semReq, VirtProcr *requestingPr,
+handleMalloc( VCilkSemReq *semReq, SlaveVP *requestingPr,
               VCilkSemEnv *semEnv );
 void inline
-handleFree( VCilkSemReq *semReq, VirtProcr *requestingPr,
+handleFree( VCilkSemReq *semReq, SlaveVP *requestingPr,
             VCilkSemEnv *semEnv );
 void inline
-handleDissipate( VirtProcr *requestingPr, VCilkSemEnv *semEnv );
+handleDissipate( SlaveVP *requestingPr, VCilkSemEnv *semEnv );
 
 void inline
-handleSpawn( VMSReqst *req, VirtProcr *requestingPr, VCilkSemEnv *semEnv );
+handleSpawn( VMSReqst *req, SlaveVP *requestingPr, VCilkSemEnv *semEnv );
 
 void inline
-dispatchSemReq( VMSReqst *req, VirtProcr *requestingPr, VCilkSemEnv *semEnv);
+dispatchSemReq( VMSReqst *req, SlaveVP *requestingPr, VCilkSemEnv *semEnv);
 
 void inline
-handleTransEnd( VCilkSemReq *semReq, VirtProcr *requestingPr,
+handleTransEnd( VCilkSemReq *semReq, SlaveVP *requestingPr,
                 VCilkSemEnv*semEnv);
 void inline
-handleTransStart( VCilkSemReq *semReq, VirtProcr *requestingPr,
+handleTransStart( VCilkSemReq *semReq, SlaveVP *requestingPr,
                   VCilkSemEnv *semEnv );
 void inline
-handleAtomic( VCilkSemReq *semReq, VirtProcr *requestingPr,
+handleAtomic( VCilkSemReq *semReq, SlaveVP *requestingPr,
               VCilkSemEnv *semEnv);
 inline void
-handleStartFnSingleton( VCilkSemReq *semReq, VirtProcr *reqstingPr,
+handleStartFnSingleton( VCilkSemReq *semReq, SlaveVP *reqstingPr,
                         VCilkSemEnv *semEnv );
 inline void
-handleEndFnSingleton( VCilkSemReq *semReq, VirtProcr *requestingPr,
+handleEndFnSingleton( VCilkSemReq *semReq, SlaveVP *requestingPr,
                       VCilkSemEnv *semEnv );
 inline void
-handleStartDataSingleton( VCilkSemReq *semReq, VirtProcr *reqstingPr,
+handleStartDataSingleton( VCilkSemReq *semReq, SlaveVP *reqstingPr,
                           VCilkSemEnv *semEnv );
 inline void
-handleEndDataSingleton( VCilkSemReq *semReq, VirtProcr *requestingPr,
+handleEndDataSingleton( VCilkSemReq *semReq, SlaveVP *requestingPr,
                         VCilkSemEnv *semEnv );
 
 void inline
-resume_procr( VirtProcr *procr, VCilkSemEnv *semEnv );
+resume_procr( SlaveVP *procr, VCilkSemEnv *semEnv );
 
 //===========================================================================
 
@@ -67,11 +67,11 @@
 * to the slave -- return FALSE to let Master loop know scheduling that
 *  slave failed.
 */
-VirtProcr *
-VCilk__schedule_virt_procr( void *_semEnv, int coreNum )
- { VirtProcr *schedPr;
+SlaveVP *
+VCilk__schedule_virt_procr( void *_semEnv, AnimSlot *slot )
+ { SlaveVP *schedPr;
   VCilkSemEnv *semEnv;
-
+   int coreNum = slot->coreSlotIsOn;
   semEnv = (VCilkSemEnv *)_semEnv;
 
   schedPr = readPrivQ( semEnv->readyVPQs[coreNum] );
@@ -94,15 +94,14 @@
 * Processor, and initial data.
 */
 void
-VCilk__Request_Handler( VirtProcr *requestingPr, void *_semEnv )
+VCilk__Request_Handler( SlaveVP *requestingPr, void *_semEnv )
 { VCilkSemEnv *semEnv;
   VMSReqst *req;
-  VCilkSemReq *semReq;
 
 
   semEnv = (VCilkSemEnv *)_semEnv;
 
-  req = VMS__take_next_request_out_of( requestingPr );
+  req = VMS_PI__take_next_request_out_of( requestingPr );
 
   while( req != NULL )
    {
@@ -117,23 +116,24 @@
                handleSpawn( req, requestingPr, semEnv);
              break;
       case dissipate: handleDissipate( requestingPr, semEnv);
-             break;
-      case VMSSemantic: VMS__handle_VMSSemReq(req, requestingPr, semEnv,
+             return;
+      case VMSSemantic: VMS_PI__handle_VMSSemReq(req, requestingPr, semEnv,
                                               &resume_procr);
             break;
      default:
            break;
     }
 
-    req = VMS__take_next_request_out_of( requestingPr );
+    //FIXME: if req was dissipate, this is accessing free'd memory...
+    req = VMS_PI__take_next_request_out_of( requestingPr );
   } //while( req != NULL )
 }
 
 void inline
-dispatchSemReq( VMSReqst *req, VirtProcr *reqPr, VCilkSemEnv *semEnv )
+dispatchSemReq( VMSReqst *req, SlaveVP *reqPr, VCilkSemEnv *semEnv )
 { VCilkSemReq *semReq;
 
-  semReq = VMS__take_sem_reqst_from(req);
+  semReq = VMS_PI__take_sem_reqst_from(req);
 
   if( semReq == NULL ) return;
   switch( semReq->reqType )
@@ -166,7 +166,7 @@
 
 //=========================== Request Handlers ==============================
 void inline
-resume_procr( VirtProcr *procr, VCilkSemEnv *semEnv )
+resume_procr( SlaveVP *procr, VCilkSemEnv *semEnv )
 {
   writePrivQ( procr, semEnv->readyVPQs[ procr->coreAnimatedBy] );
 }
@@ -179,9 +179,9 @@
 * If no, then set sync-pending flag.
 */
 inline void
-handleSync( VirtProcr *requestingPr, VCilkSemEnv *semEnv )
+handleSync( SlaveVP *requestingPr, VCilkSemEnv *semEnv )
 {
-  Meas_startSync
+  //Meas_startSync;
   if(((VCilkSemData *)(requestingPr->semanticData))->numLiveChildren == 0 )
    { //no live children to wait for
      resume_procr( requestingPr, semEnv );
@@ -190,17 +190,17 @@
    {
     ((VCilkSemData *)(requestingPr->semanticData))->syncPending = TRUE;
    }
-  Meas_endSync
+  //Meas_endSync;
 }
 
 /*
 */
 inline void
-handleMalloc( VCilkSemReq *semReq, VirtProcr *requestingPr,
+handleMalloc( VCilkSemReq *semReq, SlaveVP *requestingPr,
               VCilkSemEnv *semEnv )
 { void *ptr;
 
-  ptr = VMS__malloc( semReq->sizeToMalloc );
+  ptr = VMS_PI__malloc( semReq->sizeToMalloc );
   requestingPr->dataRetFromReq = ptr;
   resume_procr( requestingPr, semEnv );
 }
@@ -208,10 +208,10 @@
 /*
 */
 void inline
-handleFree( VCilkSemReq *semReq, VirtProcr *requestingPr,
+handleFree( VCilkSemReq *semReq, SlaveVP *requestingPr,
             VCilkSemEnv *semEnv )
 {
-  VMS__free( semReq->ptrToFree );
+  VMS_PI__free( semReq->ptrToFree );
   resume_procr( requestingPr, semEnv );
 }
 
@@ -219,16 +219,16 @@
 //============================== VMS requests ===============================
 /*Re-use this in the entry-point fn
 */
-inline VirtProcr *
-VCilk__create_procr_helper( VirtProcrFnPtr fnPtr, void *initData,
-    VirtProcr *requestingPr, VCilkSemEnv *semEnv, int32 coreToScheduleOnto )
- { VirtProcr *newPr;
+inline SlaveVP *
+VCilk__create_procr_helper( TopLevelFnPtr fnPtr, void *initData,
+    SlaveVP *requestingPr, VCilkSemEnv *semEnv, int32 coreToScheduleOnto )
+ { SlaveVP *newPr;
   VCilkSemData *semData;
 
   //This is running in master, so use internal version
-  newPr = VMS__create_procr( fnPtr, initData );
+  newPr = VMS_PI__create_slaveVP( fnPtr, initData );
 
-  semData = VMS__malloc( sizeof(VCilkSemData) );
+  semData = VMS_PI__malloc( sizeof(VCilkSemData) );
 
   semData->numLiveChildren = 0;
   semData->parentPr = requestingPr;
@@ -272,12 +272,12 @@
 
 
 void inline
-handleSpawn( VMSReqst *req, VirtProcr *requestingPr, VCilkSemEnv *semEnv )
+handleSpawn( VMSReqst *req, SlaveVP *requestingPr, VCilkSemEnv *semEnv )
 { VCilkSemReq *semReq;
-  VirtProcr *newPr;
+  SlaveVP *newPr;
 
-  Meas_startSpawn
-  semReq = VMS__take_sem_reqst_from( req );
+  //Meas_startSpawn;
+  semReq = VMS_PI__take_sem_reqst_from( req );
 
   newPr = VCilk__create_procr_helper( semReq->fnPtr, semReq->initData,
                          requestingPr, semEnv, semReq->coreToSpawnOnto );
@@ -287,7 +287,7 @@
 
   resume_procr( newPr, semEnv );
   resume_procr( requestingPr, semEnv );
-  Meas_endSpawn
+  //Meas_endSpawn;
 }
 
 
@@ -297,9 +297,9 @@
 *-- if set, then resume the parentVP.
 */
 void inline
-handleDissipate( VirtProcr *requestingPr, VCilkSemEnv *semEnv )
+handleDissipate( SlaveVP *requestingPr, VCilkSemEnv *semEnv )
 {
-  VirtProcr *
+  SlaveVP *
     parentPr = ((VCilkSemData *)
                                (requestingPr->semanticData))->parentPr;
   if( parentPr == NULL ) //means this is seed processor being dissipated
@@ -324,17 +324,17 @@
    }
  }
 
-  VMS__free( requestingPr->semanticData );
+  VMS_PI__free( requestingPr->semanticData );
 
  //Now do normal dissipate
 
 //call VMS to free_all AppVP state -- stack and so on
-  VMS__dissipate_procr( requestingPr );
+  VMS_PI__dissipate_slaveVP( requestingPr );
 
   semEnv->numVirtPr -= 1;
   if( semEnv->numVirtPr == 0 )
   { //no more work, so shutdown
-    VMS__shutdown();
+    VMS_SS__shutdown();
  }
 }
 
@@ -345,7 +345,7 @@
 * end-label. Else, sets flag and resumes normally.
 */
 void inline
-handleStartSingleton_helper( VCilkSingleton *singleton, VirtProcr *reqstingPr,
+handleStartSingleton_helper( VCilkSingleton *singleton, SlaveVP *reqstingPr,
                              VCilkSemEnv *semEnv )
 {
   if( singleton->hasFinished )
@@ -368,7 +368,7 @@
  }
 }
 void inline
-handleStartFnSingleton( VCilkSemReq *semReq, VirtProcr *requestingPr,
+handleStartFnSingleton( VCilkSemReq *semReq, SlaveVP *requestingPr,
                         VCilkSemEnv *semEnv )
 { VCilkSingleton *singleton;
 
@@ -376,13 +376,13 @@
   handleStartSingleton_helper( singleton, requestingPr, semEnv );
 }
 void inline
-handleStartDataSingleton( VCilkSemReq *semReq, VirtProcr *requestingPr,
+handleStartDataSingleton( VCilkSemReq *semReq, SlaveVP *requestingPr,
                           VCilkSemEnv *semEnv )
 { VCilkSingleton *singleton;
 
   if( *(semReq->singletonPtrAddr) == NULL )
-   { singleton = VMS__malloc( sizeof(VCilkSingleton) );
-     singleton->waitQ = makeVMSPrivQ();
+   { singleton = VMS_PI__malloc( sizeof(VCilkSingleton) );
+     singleton->waitQ = makePrivQ();
     singleton->endInstrAddr = 0x0;
    singleton->hasBeenStarted = FALSE;
    singleton->hasFinished = FALSE;
@@ -395,16 +395,16 @@
 
 
 void inline
-handleEndSingleton_helper( VCilkSingleton *singleton, VirtProcr *requestingPr,
+handleEndSingleton_helper( VCilkSingleton *singleton, SlaveVP *requestingPr,
                            VCilkSemEnv *semEnv )
 { PrivQueueStruc *waitQ;
   int32 numWaiting, i;
-  VirtProcr *resumingPr;
+  SlaveVP *resumingPr;
 
   if( singleton->hasFinished )
   { //by definition, only one slave should ever be able to run end singleton
   // so if this is true, is an error
-    //VMS__throw_exception( "singleton code ran twice", requestingPr, NULL);
+    //VMS_PI__throw_exception( "singleton code ran twice", requestingPr, NULL);
 
  }
   singleton->hasFinished = TRUE;
@@ -421,7 +421,7 @@
 
 }
 void inline
-handleEndFnSingleton( VCilkSemReq *semReq, VirtProcr *requestingPr,
+handleEndFnSingleton( VCilkSemReq *semReq, SlaveVP *requestingPr,
                       VCilkSemEnv *semEnv )
 {
   VCilkSingleton *singleton;
@@ -430,7 +430,7 @@
   handleEndSingleton_helper( singleton, requestingPr, semEnv );
 }
 void inline
-handleEndDataSingleton( VCilkSemReq *semReq, VirtProcr *requestingPr,
+handleEndDataSingleton( VCilkSemReq *semReq, SlaveVP *requestingPr,
                         VCilkSemEnv *semEnv )
 {
   VCilkSingleton *singleton;
@@ -444,7 +444,7 @@
 * pointer out of the request and call it, then resume the VP.
 */
 void inline
-handleAtomic( VCilkSemReq *semReq, VirtProcr *requestingPr,
+handleAtomic( VCilkSemReq *semReq, SlaveVP *requestingPr,
               VCilkSemEnv *semEnv )
 {
   semReq->fnToExecInMaster( semReq->dataForFn );
@@ -466,7 +466,7 @@
 *If NULL, then write requesting into the field and resume.
 */
 void inline
-handleTransStart( VCilkSemReq *semReq, VirtProcr *requestingPr,
+handleTransStart( VCilkSemReq *semReq, SlaveVP *requestingPr,
                   VCilkSemEnv *semEnv )
 { VCilkSemData *semData;
   TransListElem *nextTransElem;
@@ -475,12 +475,12 @@
   semData = requestingPr->semanticData;
   if( semData->highestTransEntered > semReq->transID )
   { //throw VMS exception, which shuts down VMS.
-    VMS__throw_exception( "transID smaller than prev", requestingPr, NULL);
+    VMS_PI__throw_exception( "transID smaller than prev", requestingPr, NULL);
  }
   //add this trans ID to the list of transactions entered -- check when
  // end a transaction
   semData->highestTransEntered = semReq->transID;
-  nextTransElem = VMS__malloc( sizeof(TransListElem) );
+  nextTransElem = VMS_PI__malloc( sizeof(TransListElem) );
   nextTransElem->transID = semReq->transID;
   nextTransElem->nextTrans = semData->lastTransEntered;
   semData->lastTransEntered = nextTransElem;
@@ -517,10 +517,10 @@
 * resume both.
 */
 void inline
-handleTransEnd( VCilkSemReq *semReq, VirtProcr *requestingPr,
+handleTransEnd( VCilkSemReq *semReq, SlaveVP *requestingPr,
                 VCilkSemEnv *semEnv )
 { VCilkSemData *semData;
-  VirtProcr *waitingPr;
+  SlaveVP *waitingPr;
   VCilkTrans *transStruc;
   TransListElem *lastTrans;
 
@@ -529,7 +529,7 @@
   //make sure transaction ended in same VP as started it.
   if( transStruc->VPCurrentlyExecuting != requestingPr )
  {
-    VMS__throw_exception( "trans ended in diff VP", requestingPr, NULL );
+    VMS_PI__throw_exception( "trans ended in diff VP", requestingPr, NULL );
  }
 
   //make sure nesting is correct -- last ID entered should == this ID
@@ -537,7 +537,7 @@
   lastTrans = semData->lastTransEntered;
   if( lastTrans->transID != semReq->transID )
  {
-    VMS__throw_exception( "trans incorrectly nested", requestingPr, NULL );
+    VMS_PI__throw_exception( "trans incorrectly nested", requestingPr, NULL );
 }
 
   semData->lastTransEntered = semData->lastTransEntered->nextTrans;
--- a/VCilk_lib.c	Thu Jun 02 13:54:34 2011 +0200
+++ b/VCilk_lib.c	Tue Jul 09 13:52:57 2013 +0200
@@ -7,10 +7,10 @@
 #include <stdio.h>
 #include <stdlib.h>
 
-#include "VMS/VMS.h"
+#include "VMS_impl/VMS.h"
 #include "VCilk.h"
-#include "VMS/Queue_impl/PrivateQueue.h"
-#include "VMS/Hash_impl/PrivateHash.h"
+#include "Queue_impl/PrivateQueue.h"
+#include "Hash_impl/PrivateHash.h"
 
 
 //==========================================================================
@@ -72,7 +72,9 @@
 * sense for later..
 */
 
-
+inline SlaveVP *
+VCilk__create_procr_helper( TopLevelFnPtr fnPtr, void *initData,
+    SlaveVP *requestingPr, VCilkSemEnv *semEnv, int32 coreToScheduleOnto );
 
 //===========================================================================
 
@@ -90,9 +92,9 @@
 * any of the data reachable from initData passed in to here
 */
 void
-VCilk__create_seed_procr_and_do_work( VirtProcrFnPtr fnPtr, void *initData )
+VCilk__create_seed_procr_and_do_work( TopLevelFnPtr fnPtr, void *initData )
 { VCilkSemEnv *semEnv;
-  VirtProcr *seedPr;
+  SlaveVP *seedPr;
 
 #ifdef SEQUENTIAL
   VCilk__init_Seq(); //debug sequential exe
@@ -103,13 +105,13 @@
 
   //VCilk starts with one processor, which is put into initial environ,
  // and which then calls create() to create more, thereby expanding work
-  seedPr = (VirtProcr*)VCilk__create_procr_helper( fnPtr, initData, NULL, semEnv, -1 );
+  seedPr = VCilk__create_procr_helper( fnPtr, initData, NULL, semEnv, -1 );
   resume_procr( seedPr, semEnv );
 
 #ifdef SEQUENTIAL
   VMS__start_the_work_then_wait_until_done_Seq(); //debug sequential exe
 #else
-  VMS__start_the_work_then_wait_until_done(); //normal multi-thd
+  VMS_SS__start_the_work_then_wait_until_done(); //normal multi-thd
 #endif
 
   VCilk__cleanup_at_end_of_shutdown();
@@ -125,7 +127,7 @@
 int32
 VCilk__giveIdealNumWorkUnits()
 {
-  return NUM_SCHED_SLOTS * NUM_CORES;
+  return NUM_ANIM_SLOTS * NUM_CORES;
 }
 
 /*To measure how long a primitive operation takes, when calculating number of
@@ -171,7 +173,7 @@
 void
 VCilk__init()
 {
-  VMS__init();
+  VMS_SS__init();
   //masterEnv, a global var, now is partially set up by init_VMS
 
   VCilk__init_Helper();
@@ -181,7 +183,7 @@
 void
 VCilk__init_Seq()
 {
-  VMS__init_Seq();
+  VMS_SS__init_Seq();
   //masterEnv, a global var, now is partially set up by init_VMS
 
   VCilk__init_Helper();
@@ -198,27 +200,28 @@
 
   //Hook up the semantic layer's plug-ins to the Master virt procr
   _VMSMasterEnv->requestHandler = &VCilk__Request_Handler;
-  _VMSMasterEnv->slaveScheduler = &VCilk__schedule_virt_procr;
+  _VMSMasterEnv->slaveAssigner = &VCilk__schedule_virt_procr;
 
   //create the semantic layer's environment (all its data) and add to
  // the master environment
-  semanticEnv = VMS__malloc( sizeof( VCilkSemEnv ) );
+  semanticEnv = VMS_PI__malloc( sizeof( VCilkSemEnv ) );
   _VMSMasterEnv->semanticEnv = semanticEnv;
 
   //create the ready queue, hash tables used for pairing send to receive
  // and so forth
 //TODO: add hash tables for pairing sends with receives, and
 // initialize the data ownership system
-  readyVPQs = VMS__malloc( NUM_CORES * sizeof(PrivQueueStruc *) );
+  readyVPQs = VMS_PI__malloc( NUM_CORES * sizeof(PrivQueueStruc *) );
 
   for( coreIdx = 0; coreIdx < NUM_CORES; coreIdx++ )
  {
-    readyVPQs[ coreIdx ] = makeVMSPrivQ();
+    readyVPQs[ coreIdx ] = makePrivQ();
  }
 
   semanticEnv->readyVPQs = readyVPQs;
 
   semanticEnv->nextCoreToGetNewPr = 0;
+  semanticEnv->numVirtPr = 0;
 
   //TODO: bug -- turn these arrays into dyn arrays to eliminate limit
 //semanticEnv->singletonHasBeenExecutedFlags = makeDynArrayInfo( );
@@ -230,8 +233,8 @@
    semanticEnv->fnSingletons[i].endInstrAddr = NULL;
    semanticEnv->fnSingletons[i].hasBeenStarted = FALSE;
    semanticEnv->fnSingletons[i].hasFinished = FALSE;
-   semanticEnv->fnSingletons[i].waitQ = makeVMSPrivQ();
-   semanticEnv->transactionStrucs[i].waitingVPQ = makeVMSPrivQ();
+   semanticEnv->fnSingletons[i].waitQ = makePrivQ();
+   semanticEnv->transactionStrucs[i].waitingVPQ = makePrivQ();
  }
 
 }
@@ -246,18 +249,18 @@
 
   semanticEnv = _VMSMasterEnv->semanticEnv;
 
-  /*
+
   int32 coreIdx;
   for( coreIdx = 0; coreIdx < NUM_CORES; coreIdx++ )
  {
-    VMS__free( semanticEnv->readyVPQs[coreIdx]->startOfData );
-    VMS__free( semanticEnv->readyVPQs[coreIdx] );
+    VMS_PI__free( semanticEnv->readyVPQs[coreIdx]->startOfData );
+    VMS_PI__free( semanticEnv->readyVPQs[coreIdx] );
  }
-  VMS__free( semanticEnv->readyVPQs );
+  VMS_PI__free( semanticEnv->readyVPQs );
 
-  VMS__free( _VMSMasterEnv->semanticEnv );
-  */
-  VMS__cleanup_at_end_of_shutdown();
+  VMS_PI__free( _VMSMasterEnv->semanticEnv );
+
+  VMS_SS__cleanup_at_end_of_shutdown();
 }
 
 
@@ -268,8 +271,8 @@
 * allocates, so has to be done inside master
 */
 void inline
-VCilk__spawn( int32 coreToSpawnOnto, VirtProcrFnPtr fnPtr,
-              void *initData, VirtProcr *requestingPr )
+VCilk__spawn( int32 coreToSpawnOnto, TopLevelFnPtr fnPtr,
+              void *initData, SlaveVP *requestingPr )
 { VCilkSemReq reqData;
 
   //the semantic request data is on the stack and disappears when this
@@ -281,7 +284,7 @@
   reqData.initData = initData;
   reqData.requestingPr = requestingPr;
 
-  VMS__send_create_procr_req( &reqData, requestingPr );
+  VMS_WL__send_create_slaveVP_req( &reqData, requestingPr );
 }
 
 
@@ -296,35 +299,35 @@
 /*This runs inside slave VP, so can't do any freeing -- have to do in plugin
 */
 void inline
-VCilk__dissipate_procr( VirtProcr *procrToDissipate )
+VCilk__dissipate_procr( SlaveVP *procrToDissipate )
 {
 
-  VMS__send_dissipate_req( procrToDissipate );
+  VMS_WL__send_dissipate_req( procrToDissipate );
 }
 
 //===========================================================================
 
 void
-VCilk__sync( VirtProcr *animPr )
+VCilk__sync( SlaveVP *animPr )
 { VCilkSemReq reqData;
 
   reqData.reqType = syncReq;
   reqData.requestingPr = animPr;
 
-  VMS__send_sem_request( &reqData, animPr );
+  VMS_WL__send_sem_request( &reqData, animPr );
 }
 
 
 
 void *
-VCilk__malloc( int32 sizeToMalloc, VirtProcr *animPr )
+VCilk__malloc( int32 sizeToMalloc, SlaveVP *animPr )
 { VCilkSemReq reqData;
 
   reqData.reqType = mallocReq;
   reqData.requestingPr = animPr;
   reqData.sizeToMalloc = sizeToMalloc;
 
-  VMS__send_sem_request( &reqData, animPr );
+  VMS_WL__send_sem_request( &reqData, animPr );
 
   return animPr->dataRetFromReq;
 }
@@ -333,14 +336,14 @@
 /*Sends request to Master, which does the work of freeing
 */
 void
-VCilk__free( void *ptrToFree, VirtProcr *animPr )
+VCilk__free( void *ptrToFree, SlaveVP *animPr )
 { VCilkSemReq reqData;
 
   reqData.reqType = freeReq;
   reqData.requestingPr = animPr;
   reqData.ptrToFree = ptrToFree;
 
-  VMS__send_sem_request( &reqData, animPr );
+  VMS_WL__send_sem_request( &reqData, animPr );
 }
 
 //===========================================================================
@@ -355,11 +358,16 @@
 * trying to get the data through from different cores.
 */
 
+
+/*asm function declarations*/
+void asm_save_ret_to_singleton(VCilkSingleton *singletonPtrAddr);
+void asm_write_ret_from_singleton(VCilkSingleton *singletonPtrAddr);
+
 /*Fn singleton uses ID as index into array of singleton structs held in the
 * semantic environment.
 */
 void
-VCilk__start_fn_singleton( int32 singletonID, VirtProcr *animPr )
+VCilk__start_fn_singleton( int32 singletonID, SlaveVP *animPr )
 {
   VCilkSemReq reqData;
 
@@ -367,15 +375,11 @@
   reqData.reqType = singleton_fn_start;
   reqData.singletonID = singletonID;
 
-  VMS__send_sem_request( &reqData, animPr );
+  VMS_WL__send_sem_request( &reqData, animPr );
   if( animPr->dataRetFromReq ) //will be 0 or addr of label in end singleton
  {
-    asm volatile("movl %0, %%eax; \
-                  jmp *%%eax" \
-       /* outputs */ : \
-       /* inputs */ : "g"(animPr->dataRetFromReq) \
-       /* clobber */ : "memory", "%eax", "%ebx", "%ecx", "%edx","%edi","%esi"\
-       );
+    VCilkSemEnv *semEnv = VMS_PI__give_sem_env_for( animPr );
+    asm_write_ret_from_singleton(&(semEnv->fnSingletons[ singletonID]));
  }
 }
 
@@ -384,7 +388,7 @@
 * location.
 */
 void
-VCilk__start_data_singleton( VCilkSingleton **singletonAddr, VirtProcr *animPr )
+VCilk__start_data_singleton( VCilkSingleton **singletonAddr, SlaveVP *animPr )
 {
   VCilkSemReq reqData;
 
@@ -394,20 +398,13 @@
   reqData.reqType = singleton_data_start;
   reqData.singletonPtrAddr = singletonAddr;
 
-  VMS__send_sem_request( &reqData, animPr );
+  VMS_WL__send_sem_request( &reqData, animPr );
   if( animPr->dataRetFromReq ) //either 0 or end singleton's return addr
  { //Assembly code changes the return addr on the stack to the one
   // saved into the singleton by the end-singleton-fn
  //The return addr is at 0x4(%%ebp)
-   JmpToEndSingleton:
-    asm volatile("movl %0, %%eax; \
-                  movl (%%eax), %%ebx; \
-                  movl (%%ebx), %%eax; \
-                  movl %%eax, 0x4(%%ebp);" \
-       /* outputs */ : \
-       /* inputs */ : "m"(singletonAddr) \
-       /* clobber */ : "memory", "%eax", "%ebx", "%ecx", "%edx","%edi","%esi"\
-       );
+   JmpToEndSingleton:
+    asm_write_ret_from_singleton(*singletonAddr);
  }
  //now, simply return
 //will exit either from the start singleton call or the end-singleton call
@@ -420,26 +417,26 @@
 * inside is shared by all invocations of a given singleton ID.
 */
 void
-VCilk__end_fn_singleton( int32 singletonID, VirtProcr *animPr )
+VCilk__end_fn_singleton( int32 singletonID, SlaveVP *animPr )
 {
   VCilkSemReq reqData;
 
   //don't need this addr until after at least one singleton has reached
 // this function
-  VCilkSemEnv *semEnv = VMS__give_sem_env_for( animPr );
-  semEnv->fnSingletons[ singletonID].endInstrAddr = &&EndSingletonInstrAddr;
+  VCilkSemEnv *semEnv = VMS_PI__give_sem_env_for( animPr );
+  asm_write_ret_from_singleton(&(semEnv->fnSingletons[ singletonID]));
 
   reqData.reqType = singleton_fn_end;
   reqData.singletonID = singletonID;
 
-  VMS__send_sem_request( &reqData, animPr );
+  VMS_WL__send_sem_request( &reqData, animPr );
 
 EndSingletonInstrAddr:
   return;
 }
 
 void
-VCilk__end_data_singleton( VCilkSingleton **singletonPtrAddr, VirtProcr *animPr )
+VCilk__end_data_singleton( VCilkSingleton **singletonPtrAddr, SlaveVP *animPr )
 {
   VCilkSemReq reqData;
 
@@ -454,19 +451,12 @@
   //Assembly code takes the return addr off the stack and saves
 // into the singleton. The first field in the singleton is the
 // "endInstrAddr" field, and the return addr is at 0x4(%%ebp)
-  asm volatile("movl 0x4(%%ebp), %%eax; \
-                movl %0, %%ebx; \
-                movl (%%ebx), %%ecx; \
-                movl %%eax, (%%ecx);" \
-       /* outputs */ : \
-       /* inputs */ : "m"(singletonPtrAddr) \
-       /* clobber */ : "memory", "%eax", "%ebx", "%ecx", "%edx","%edi","%esi"\
-       );
+  asm_save_ret_to_singleton(*singletonPtrAddr);
 
   reqData.reqType = singleton_data_end;
   reqData.singletonPtrAddr = singletonPtrAddr;
 
-  VMS__send_sem_request( &reqData, animPr );
+  VMS_WL__send_sem_request( &reqData, animPr );
 }
 
 /*This executes the function in the masterVP, so it executes in isolation
@@ -481,7 +471,7 @@
 */
 void
 VCilk__animate_short_fn_in_isolation( PtrToAtomicFn ptrToFnToExecInMaster,
-                                      void *data, VirtProcr *animPr )
+                                      void *data, SlaveVP *animPr )
 {
   VCilkSemReq reqData;
 
@@ -490,7 +480,7 @@
   reqData.fnToExecInMaster = ptrToFnToExecInMaster;
   reqData.dataForFn = data;
 
-  VMS__send_sem_request( &reqData, animPr );
+  VMS_WL__send_sem_request( &reqData, animPr );
 }
 
 
@@ -508,7 +498,7 @@
 *If NULL, then write requesting into the field and resume.
 */
 void
-VCilk__start_transaction( int32 transactionID, VirtProcr *animPr )
+VCilk__start_transaction( int32 transactionID, SlaveVP *animPr )
 {
   VCilkSemReq reqData;
 
@@ -516,7 +506,7 @@
   reqData.reqType = trans_start;
   reqData.transID = transactionID;
 
-  VMS__send_sem_request( &reqData, animPr );
+  VMS_WL__send_sem_request( &reqData, animPr );
 }
 
 /*This suspends to the master, then uses transactionID as index into an
@@ -529,7 +519,7 @@
 * resumes both.
 */
 void
-VCilk__end_transaction( int32 transactionID, VirtProcr *animPr )
+VCilk__end_transaction( int32 transactionID, SlaveVP *animPr )
 {
   VCilkSemReq reqData;
 
@@ -537,5 +527,5 @@
   reqData.reqType = trans_end;
   reqData.transID = transactionID;
 
-  VMS__send_sem_request( &reqData, animPr );
+  VMS_WL__send_sem_request( &reqData, animPr );
 }
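
For orientation, the sketch below shows how the renamed worker-level API touched by this changeset (SlaveVP, TopLevelFnPtr, and the VCilk__ wrappers that now send requests through the VMS_WL__ calls) would be used from application code. It is an illustrative sketch only, not part of the changeset: the parameter list assumed for a top-level function, (void *initData, SlaveVP *animatingPr), and the use of -1 to mean "let the master pick a core" for coreToSpawnOnto are assumptions that are not shown in this diff.

// Hypothetical usage sketch -- not part of this changeset.
// Assumes a TopLevelFnPtr function looks like
//    void fn( void *initData, SlaveVP *animatingPr )
// and that coreToSpawnOnto == -1 lets the master choose a core;
// neither detail is visible in the diff above.
#include "VCilk.h"

typedef struct { int32 value; } ChildArg;   //hypothetical payload type

void child_fn( void *initData, SlaveVP *animatingPr )
 { ChildArg *arg = (ChildArg *)initData;
   arg->value *= 2;                          //some unit of work
   VCilk__dissipate_procr( animatingPr );    //child VP ends itself
 }

void parent_fn( void *initData, SlaveVP *animatingPr )
 { ChildArg *arg;

   //malloc and free suspend to the master via VMS_WL__send_sem_request
   arg = VCilk__malloc( sizeof(ChildArg), animatingPr );
   arg->value = 21;

   //spawn becomes a VMS_WL__send_create_slaveVP_req handled by handleSpawn
   VCilk__spawn( -1, &child_fn, arg, animatingPr );

   VCilk__sync( animatingPr );               //wait for live children
   VCilk__free( arg, animatingPr );
   VCilk__dissipate_procr( animatingPr );    //seed VP ends, triggering shutdown
 }

int main()
 { //blocks until all work is done, then cleans up and returns
   VCilk__create_seed_procr_and_do_work( &parent_fn, NULL );
   return 0;
 }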