Mercurial > cgi-bin > hgwebdir.cgi > VMS > VMS_Implementations > VMS_impls > VMS__MC_shared_impl
diff VMS.c @ 24:2b161e1a50ee
1st working version -- as far as can tell due to SEH bugs
| author | Me |
|---|---|
| date | Wed, 07 Jul 2010 13:15:54 -0700 |
| parents | 1dbc7f6e3e67 |
| children | c556193f7211 |
line diff
1.1 --- a/VMS.c Wed Jun 30 13:11:06 2010 -0700 1.2 +++ b/VMS.c Wed Jul 07 13:15:54 2010 -0700 1.3 @@ -71,12 +71,34 @@ 1.4 masterEnv->schedSlots[0]->workIsDone = FALSE; //says don't touch 1.5 masterEnv->schedSlots[0]->procrAssignedToSlot = masterEnv->masterVirtPr; 1.6 masterEnv->masterVirtPr->schedSlot = masterEnv->schedSlots[0]; 1.7 + masterEnv->stillRunning = FALSE; 1.8 1.9 //First core loop to start up gets this, which will schedule seed Pr 1.10 //TODO: debug: check address of masterVirtPr 1.11 writeCASQ( masterEnv->masterVirtPr, workQ ); 1.12 1.13 numProcrsCreated = 1; 1.14 + 1.15 + //======================================================================== 1.16 + // Create the Threads 1.17 + int coreIdx; 1.18 + 1.19 + //Make params given to the win threads that animate the core loops 1.20 + for( coreIdx=0; coreIdx < NUM_CORES; coreIdx++ ) 1.21 + { coreLoopThdParams[coreIdx] = malloc( sizeof(ThdParams) ); 1.22 + coreLoopThdParams[coreIdx]->coreNum = coreIdx; 1.23 + 1.24 + //make the core loop threads, born in suspended state 1.25 + coreLoopThdHandles[ coreIdx ] = 1.26 + CreateThread ( NULL, // Security attributes 1.27 + 0, // Stack size 1.28 + coreLoop, 1.29 + coreLoopThdParams[coreIdx], 1.30 + CREATE_SUSPENDED, 1.31 + &(coreLoopThdIds[coreIdx]) 1.32 + ); 1.33 + } 1.34 + 1.35 } 1.36 1.37 1.38 @@ -103,30 +125,44 @@ 1.39 1.40 /*Semantic layer calls this when it want the system to start running.. 1.41 * 1.42 - *This creates the core loops, pins them to physical cores, gives them the 1.43 - * pointer to the workQ, and starts them running. 1.44 + *This starts the core loops running then waits for them to exit. 
1.45 */ 1.46 void 1.47 -VMS__start() 1.48 +VMS__start_the_work_then_wait_until_done() 1.49 { int coreIdx; 1.50 + //Start the core loops running 1.51 +//=========================================================================== 1.52 + LARGE_INTEGER stPerfCount, endPerfCount, countFreq; 1.53 + unsigned long long count = 0, freq = 0; 1.54 + double runTime; 1.55 1.56 - //TODO: Save "orig" stack pointer and frame ptr -- restore in VMS__end() 1.57 - //Create the win threads that animate the core loops 1.58 + QueryPerformanceCounter( &stPerfCount ); 1.59 + 1.60 + //start them running 1.61 + for( coreIdx=0; coreIdx < NUM_CORES; coreIdx++ ) 1.62 + { //Create the threads 1.63 + ResumeThread( coreLoopThdHandles[coreIdx] ); //starts thread 1.64 + } 1.65 + 1.66 + //wait for all to complete 1.67 for( coreIdx=0; coreIdx < NUM_CORES; coreIdx++ ) 1.68 { 1.69 - coreLoopThdParams[coreIdx] = (ThdParams *)malloc( sizeof(ThdParams) ); 1.70 - coreLoopThdParams[coreIdx]->coreNum = coreIdx; 1.71 + WaitForSingleObject(coreLoopThdHandles[coreIdx], INFINITE); 1.72 + } 1.73 1.74 - coreLoopThdHandles[coreIdx] = 1.75 - CreateThread ( NULL, // Security attributes 1.76 - 0, // Stack size 1.77 - coreLoop, 1.78 - coreLoopThdParams[coreIdx], 1.79 - CREATE_SUSPENDED, 1.80 - &(coreLoopThdIds[coreIdx]) 1.81 - ); 1.82 - ResumeThread( coreLoopThdHandles[coreIdx] ); //starts thread 1.83 - } 1.84 + //NOTE: do not clean up VMS env here -- semantic layer has to have 1.85 + // a chance to clean up its environment first, then do a call to free 1.86 + // the Master env and rest of VMS locations 1.87 + 1.88 + QueryPerformanceCounter( &endPerfCount ); 1.89 + count = endPerfCount.QuadPart - stPerfCount.QuadPart; 1.90 + 1.91 + QueryPerformanceFrequency( &countFreq ); 1.92 + freq = countFreq.QuadPart; 1.93 + runTime = (double)count / (double)freq; 1.94 + 1.95 + printf("\n Time startup to shutdown: %f\n", runTime); 1.96 + fflush( stdin ); 1.97 } 1.98 1.99 1.100 @@ -283,7 +319,7 @@ 1.101 /*This inserts the 
semantic-layer's request data into standard VMS carrier 1.102 */ 1.103 inline void 1.104 -VMS__send_sem_request( void *semReqData, VirtProcr *callingPr ) 1.105 +VMS__add_sem_request( void *semReqData, VirtProcr *callingPr ) 1.106 { VMSReqst *req; 1.107 1.108 req = malloc( sizeof(VMSReqst) ); 1.109 @@ -295,20 +331,6 @@ 1.110 } 1.111 1.112 1.113 -/*This creates a request of type "dissipate" -- which will cause the virt 1.114 - * processor's state and owned locations to be freed 1.115 - */ 1.116 -inline void 1.117 -VMS__send_dissipate_request( VirtProcr *procrToDissipate ) 1.118 - { VMSReqst *req; 1.119 - 1.120 - req = malloc( sizeof(VMSReqst) ); 1.121 -// req->virtProcrFrom = callingPr; 1.122 - req->reqType = dissipate; 1.123 - req->nextReqst = procrToDissipate->requests; 1.124 - procrToDissipate->requests = req; 1.125 - } 1.126 - 1.127 1.128 //TODO: add a semantic-layer supplied "freer" for the semantic-data portion 1.129 // of a request -- IE call with both a virt procr and a fn-ptr to request 1.130 @@ -322,36 +344,63 @@ 1.131 free( req ); 1.132 } 1.133 1.134 -/*This must be called by the request handler plugin -- it cannot be called 1.135 - * from the semantic library "dissipate processor" function -- instead, the 1.136 - * semantic layer has to generate a request for the plug-in to call this 1.137 - * function. 1.138 - *The reason is that this frees the virtual processor's stack -- which is 1.139 - * still in use inside semantic library calls! 1.140 - * 1.141 - *This frees or recycles all the state owned by and comprising the animating 1.142 - * virtual procr. It frees any state that was malloc'd by the VMS system 1.143 - * itself, and asks the VMS system to dis-own any VMS__malloc'd locations. 1.144 - *If the dissipated processor is the sole (remaining) owner of VMS__malloc'd 1.145 - * state, then that state gets freed (or sent to recycling) as a side-effect 1.146 - * of dis-owning it. 
1.147 - */ 1.148 + 1.149 +//TODO: add a semantic-layer supplied "freer" for the semantic-data portion 1.150 +// of a request -- IE call with both a virt procr and a fn-ptr to request 1.151 +// freer (also maybe put sem request freer as a field in virt procr?) 1.152 void 1.153 -VMS__free_procr_locs( VirtProcr *animatingPr ) 1.154 +VMS__free_request( VMSReqst *req ) 1.155 + { 1.156 + free( req ); 1.157 + } 1.158 + 1.159 +VMSReqst * 1.160 +VMS__take_top_request_from( VirtProcr *procrWithReq ) 1.161 + { VMSReqst *req; 1.162 + 1.163 + req = procrWithReq->requests; 1.164 + if( req == NULL ) return req; 1.165 + 1.166 + procrWithReq->requests = procrWithReq->requests->nextReqst; 1.167 + return req; 1.168 + } 1.169 + 1.170 +inline int 1.171 +VMS__isSemanticReqst( VMSReqst *req ) 1.172 { 1.173 - //dis-own all locations owned by this processor, causing to be freed 1.174 - // any locations that it is (was) sole owner of 1.175 - //TODO: implement VMS__malloc system, including "give up ownership" 1.176 + return ( req->reqType == semantic ); 1.177 + } 1.178 1.179 - VMS__remove_and_free_top_request( animatingPr ); 1.180 - free( animatingPr->startOfStack ); 1.181 - 1.182 - //NOTE: animatingPr->semanticData should either have been allocated 1.183 - // with VMS__malloc, or else freed in the request handler plug-in. 1.184 - //NOTE: initialData was given to the processor, so should either have 1.185 - // been alloc'd with VMS__malloc, or freed by the level above animPr. 
1.186 - //So, all that's left to free here is the VirtProcr struc itself 1.187 - free( animatingPr ); 1.188 + 1.189 +inline void * 1.190 +VMS__take_sem_reqst_from( VMSReqst *req ) 1.191 + { 1.192 + return req->semReqData; 1.193 + } 1.194 + 1.195 +inline int 1.196 +VMS__isDissipateReqst( VMSReqst *req ) 1.197 + { 1.198 + return ( req->reqType == dissipate ); 1.199 + } 1.200 + 1.201 +inline int 1.202 +VMS__isCreateReqst( VMSReqst *req ) 1.203 + { 1.204 + return ( req->reqType == regCreated ); 1.205 + } 1.206 + 1.207 +void 1.208 +VMS__send_register_new_procr_request(VirtProcr *newPr, VirtProcr *reqstingPr) 1.209 + { VMSReqst *req; 1.210 + 1.211 + req = malloc( sizeof(VMSReqst) ); 1.212 + req->reqType = regCreated; 1.213 + req->semReqData = newPr; 1.214 + req->nextReqst = reqstingPr->requests; 1.215 + reqstingPr->requests = req; 1.216 + 1.217 + VMS__suspend_procr( reqstingPr ); 1.218 } 1.219 1.220 1.221 @@ -384,6 +433,44 @@ 1.222 } 1.223 1.224 1.225 +/*This must be called by the request handler plugin -- it cannot be called 1.226 + * from the semantic library "dissipate processor" function -- instead, the 1.227 + * semantic layer has to generate a request for the plug-in to call this 1.228 + * function. 1.229 + *The reason is that this frees the virtual processor's stack -- which is 1.230 + * still in use inside semantic library calls! 1.231 + * 1.232 + *This frees or recycles all the state owned by and comprising the VMS 1.233 + * portion of the animating virtual procr. The request handler must first 1.234 + * free any semantic data created for the processor that didn't use the 1.235 + * VMS_malloc mechanism. Then it calls this, which first asks the malloc 1.236 + * system to disown any state that did use VMS_malloc, and then frees the 1.237 + * stack and the processor-struct itself. 
1.238 + *If the dissipated processor is the sole (remaining) owner of VMS__malloc'd 1.239 + * state, then that state gets freed (or sent to recycling) as a side-effect 1.240 + * of dis-owning it. 1.241 + */ 1.242 +void 1.243 +VMS__free_procr_locs( VirtProcr *animatingPr ) 1.244 + { 1.245 + //dis-own all locations owned by this processor, causing to be freed 1.246 + // any locations that it is (was) sole owner of 1.247 + //TODO: implement VMS__malloc system, including "give up ownership" 1.248 + 1.249 + //The dissipate request might still be attached, so remove and free it 1.250 + VMS__remove_and_free_top_request( animatingPr ); 1.251 + free( animatingPr->startOfStack ); 1.252 + 1.253 + //NOTE: initialData was given to the processor, so should either have 1.254 + // been alloc'd with VMS__malloc, or freed by the level above animPr. 1.255 + //So, all that's left to free here is the stack and the VirtProcr struc 1.256 + // itself 1.257 + free( animatingPr->startOfStack ); 1.258 + free( animatingPr ); 1.259 + } 1.260 + 1.261 + 1.262 + 1.263 /*This is the function run by the special "shut-down" processor 1.264 * 1.265 *The _VMSMasterEnv is needed by this shut down function, so the "wait" 1.266 @@ -422,11 +509,32 @@ 1.267 // get its request handled before all the cores have shutdown. 1.268 //TODO: after all the threads stop, clean out the MasterEnv, the 1.269 // SemanticEnv, and the workQ before returning. 1.270 - VMS__send_dissipate_request( animatingPr ); 1.271 - VMS__suspend_procr( animatingPr ); //will never come back from this 1.272 + VMS__dissipate_procr( animatingPr ); //will never come back from this 1.273 } 1.274 1.275 1.276 +/*This has to free anything allocated during VMS_init, and any other alloc'd 1.277 + * locations that might be left over. 
1.278 + */ 1.279 +void 1.280 +VMS__shutdown() 1.281 + { int i; 1.282 + 1.283 + free( _VMSWorkQ ); 1.284 + free( _VMSMasterEnv->filledSlots ); 1.285 + for( i = 0; i < NUM_SCHED_SLOTS; i++ ) 1.286 + { 1.287 + free( _VMSMasterEnv->schedSlots[i] ); 1.288 + } 1.289 + 1.290 + free( _VMSMasterEnv->schedSlots); 1.291 + VMS__free_procr_locs( _VMSMasterEnv->masterVirtPr ); 1.292 + 1.293 + free( _VMSMasterEnv ); 1.294 + } 1.295 + 1.296 + 1.297 +//=========================================================================== 1.298 1.299 inline TSCount getTSCount() 1.300 { unsigned int low, high;
