Mercurial > cgi-bin > hgwebdir.cgi > VMS > VMS_Implementations > VMS_impls > VMS__MC_shared_impl
diff VMS.c @ 53:42dd44df1bb0
Init changed to use only VMS__malloc, and now uses the VMS__malloc versions of the utilities
| author | Me |
|---|---|
| date | Mon, 01 Nov 2010 21:21:32 -0700 |
| parents | f59cfa31a579 |
| children | f8508572f3de |
line diff
1.1 --- a/VMS.c Sat Oct 30 21:53:55 2010 -0700 1.2 +++ b/VMS.c Mon Nov 01 21:21:32 2010 -0700 1.3 @@ -33,6 +33,8 @@ 1.4 MallocProlog * 1.5 create_free_list(); 1.6 1.7 +void 1.8 +endOSThreadFn( void *initData, VirtProcr *animatingPr ); 1.9 1.10 pthread_mutex_t suspendLock = PTHREAD_MUTEX_INITIALIZER; 1.11 pthread_cond_t suspend_cond = PTHREAD_COND_INITIALIZER; 1.12 @@ -89,55 +91,55 @@ 1.13 int coreIdx; 1.14 VirtProcr **masterVPs; 1.15 SchedSlot ***allSchedSlots; //ptr to array of ptrs 1.16 - 1.17 + 1.18 + 1.19 //Make the master env, which holds everything else 1.20 _VMSMasterEnv = malloc( sizeof(MasterEnv) ); 1.21 + 1.22 + //Very first thing put into the master env is the free-list, seeded 1.23 + // with a massive initial chunk of memory. 1.24 + //After this, all other mallocs are VMS__malloc. 1.25 + _VMSMasterEnv->freeListHead = VMS_ext__create_free_list(); 1.26 + 1.27 + //===================== Only VMS__malloc after this ==================== 1.28 masterEnv = _VMSMasterEnv; 1.29 - //Need to set start pt here 'cause used by seed procr, which is created 1.30 - // before the first core loop starts up. -- not sure how yet.. 
1.31 -// masterEnv->coreLoopStartPt = ; 1.32 -// masterEnv->coreLoopEndPt = ; 1.33 1.34 //Make a readyToAnimateQ for each core loop 1.35 - readyToAnimateQs = malloc( NUM_CORES * sizeof(SRSWQueueStruc *) ); 1.36 - masterVPs = malloc( NUM_CORES * sizeof(VirtProcr *) ); 1.37 + readyToAnimateQs = VMS__malloc( NUM_CORES * sizeof(SRSWQueueStruc *) ); 1.38 + masterVPs = VMS__malloc( NUM_CORES * sizeof(VirtProcr *) ); 1.39 1.40 //One array for each core, 3 in array, core's masterVP scheds all 1.41 - allSchedSlots = malloc( NUM_CORES * sizeof(SchedSlot *) ); 1.42 + allSchedSlots = VMS__malloc( NUM_CORES * sizeof(SchedSlot *) ); 1.43 1.44 + _VMSMasterEnv->numProcrsCreated = 0; //used by create procr 1.45 for( coreIdx = 0; coreIdx < NUM_CORES; coreIdx++ ) 1.46 - { //running in main thread -- normal malloc inside makeSRSWQ 1.47 + { 1.48 readyToAnimateQs[ coreIdx ] = makeSRSWQ(); 1.49 1.50 //Q: should give masterVP core-specific info as its init data? 1.51 - masterVPs[ coreIdx ] = VMS_ext__create_procr( &masterLoop, masterEnv ); 1.52 + masterVPs[ coreIdx ] = VMS__create_procr( &masterLoop, masterEnv ); 1.53 masterVPs[ coreIdx ]->coreAnimatedBy = coreIdx; 1.54 allSchedSlots[ coreIdx ] = create_sched_slots(); //makes for one core 1.55 - _VMSMasterEnv->numMasterInARow[ coreIdx ] = FALSE; 1.56 + _VMSMasterEnv->numMasterInARow[ coreIdx ] = 0; 1.57 } 1.58 _VMSMasterEnv->readyToAnimateQs = readyToAnimateQs; 1.59 _VMSMasterEnv->masterVPs = masterVPs; 1.60 _VMSMasterEnv->masterLock = UNLOCKED; 1.61 _VMSMasterEnv->allSchedSlots = allSchedSlots; 1.62 - _VMSMasterEnv->numProcrsCreated = 0; 1.63 1.64 1.65 //Aug 19, 2010: no longer need to place initial masterVP into queue 1.66 // because coreLoop now controls -- animates its masterVP when no work 1.67 1.68 - _VMSMasterEnv->freeListHead = VMS__create_free_list(); 1.69 - _VMSMasterEnv->amtOfOutstandingMem = 0; //none allocated yet 1.70 1.71 //============================= MEASUREMENT STUFF ======================== 1.72 #ifdef 
STATS__TURN_ON_PROBES 1.73 - //creates intervalProbes array and sets pointer to it in masterEnv too 1.74 _VMSMasterEnv->dynIntervalProbesInfo = 1.75 - makeDynArrayOfSize( &(_VMSMasterEnv->intervalProbes), 20 ); 1.76 + makePrivDynArrayOfSize( &(_VMSMasterEnv->intervalProbes), 200); 1.77 1.78 - _VMSMasterEnv->probeNameHashTbl = makeHashTable( 1000, NULL ); 1.79 - _VMSMasterEnv->masterCreateProbeID = 1.80 - VMS_ext__record_time_point_into_new_probe( "masterCreateProbe" ); 1.81 - //Also put creation time directly into master env, for fast retrieval 1.82 + _VMSMasterEnv->probeNameHashTbl = makeHashTable( 1000, &VMS__free ); 1.83 + 1.84 + //put creation time directly into master env, for fast retrieval 1.85 struct timeval timeStamp; 1.86 gettimeofday( &(timeStamp), NULL); 1.87 _VMSMasterEnv->createPtInSecs = 1.88 @@ -152,11 +154,11 @@ 1.89 { SchedSlot **schedSlots; 1.90 int i; 1.91 1.92 - schedSlots = malloc( NUM_SCHED_SLOTS * sizeof(SchedSlot *) ); 1.93 + schedSlots = VMS__malloc( NUM_SCHED_SLOTS * sizeof(SchedSlot *) ); 1.94 1.95 for( i = 0; i < NUM_SCHED_SLOTS; i++ ) 1.96 { 1.97 - schedSlots[i] = malloc( sizeof(SchedSlot) ); 1.98 + schedSlots[i] = VMS__malloc( sizeof(SchedSlot) ); 1.99 1.100 //Set state to mean "handling requests done, slot needs filling" 1.101 schedSlots[i]->workIsDone = FALSE; 1.102 @@ -171,9 +173,9 @@ 1.103 { int i; 1.104 for( i = 0; i < NUM_SCHED_SLOTS; i++ ) 1.105 { 1.106 - free( schedSlots[i] ); 1.107 + VMS__free( schedSlots[i] ); 1.108 } 1.109 - free( schedSlots ); 1.110 + VMS__free( schedSlots ); 1.111 } 1.112 1.113 1.114 @@ -191,7 +193,7 @@ 1.115 1.116 //Make the threads that animate the core loops 1.117 for( coreIdx=0; coreIdx < NUM_CORES; coreIdx++ ) 1.118 - { coreLoopThdParams[coreIdx] = malloc( sizeof(ThdParams) ); 1.119 + { coreLoopThdParams[coreIdx] = VMS__malloc( sizeof(ThdParams) ); 1.120 coreLoopThdParams[coreIdx]->coreNum = coreIdx; 1.121 1.122 retCode = 1.123 @@ -263,11 +265,12 @@ 1.124 { 1.125 char *stackPtr; 1.126 1.127 - 
newPr->procrID = _VMSMasterEnv->numProcrsCreated++; 1.128 - newPr->nextInstrPt = fnPtr; 1.129 - newPr->initialData = initialData; 1.130 - newPr->requests = NULL; 1.131 - newPr->schedSlot = NULL; 1.132 + newPr->startOfStack = stackLocs; 1.133 + newPr->procrID = _VMSMasterEnv->numProcrsCreated++; 1.134 + newPr->nextInstrPt = fnPtr; 1.135 + newPr->initialData = initialData; 1.136 + newPr->requests = NULL; 1.137 + newPr->schedSlot = NULL; 1.138 1.139 //fnPtr takes two params -- void *initData & void *animProcr 1.140 //alloc stack locations, make stackPtr be the highest addr minus room 1.141 @@ -301,7 +304,6 @@ 1.142 stackLocs = VMS__malloc( VIRT_PROCR_STACK_SIZE ); 1.143 if( stackLocs == 0 ) 1.144 { perror("VMS__malloc stack"); exit(1); } 1.145 - newPr->startOfStack = stackLocs; 1.146 1.147 return create_procr_helper( newPr, fnPtr, initialData, stackLocs ); 1.148 } 1.149 @@ -319,7 +321,6 @@ 1.150 stackLocs = malloc( VIRT_PROCR_STACK_SIZE ); 1.151 if( stackLocs == 0 ) 1.152 { perror("malloc stack"); exit(1); } 1.153 - newPr->startOfStack = stackLocs; 1.154 1.155 return create_procr_helper( newPr, fnPtr, initialData, stackLocs ); 1.156 } 1.157 @@ -445,7 +446,7 @@ 1.158 * pears -- making that suspend the last thing in the virt procr's trace. 1.159 */ 1.160 void 1.161 -VMS__dissipate_procr( VirtProcr *procrToDissipate ) 1.162 +VMS__send_dissipate_req( VirtProcr *procrToDissipate ) 1.163 { VMSReqst req; 1.164 1.165 req.reqType = dissipate; 1.166 @@ -477,18 +478,26 @@ 1.167 1.168 1.169 1.170 -/*This inserts the semantic-layer's request data into standard VMS carrier 1.171 - * request data-struct is allocated on stack of this call & ptr to it sent 1.172 - * to plugin 1.173 +/*This call's name indicates that request is malloc'd -- so req handler 1.174 + * has to free any extra requests tacked on before a send, using this. 1.175 + * 1.176 + * This inserts the semantic-layer's request data into standard VMS carrier 1.177 + * request data-struct that is mallocd. 
The sem request doesn't need to 1.178 + * be malloc'd if this is called inside the same call chain before the 1.179 + * send of the last request is called. 1.180 + * 1.181 + *The request handler has to call VMS__free_VMSReq for any of these 1.182 */ 1.183 inline void 1.184 -VMS__add_sem_request( void *semReqData, VirtProcr *callingPr ) 1.185 - { VMSReqst req; 1.186 +VMS__add_sem_request_in_mallocd_VMSReqst( void *semReqData, 1.187 + VirtProcr *callingPr ) 1.188 + { VMSReqst *req; 1.189 1.190 - req.reqType = semantic; 1.191 - req.semReqData = semReqData; 1.192 - req.nextReqst = callingPr->requests; 1.193 - callingPr->requests = &req; 1.194 + req = VMS__malloc( sizeof(VMSReqst) ); 1.195 + req->reqType = semantic; 1.196 + req->semReqData = semReqData; 1.197 + req->nextReqst = callingPr->requests; 1.198 + callingPr->requests = req; 1.199 } 1.200 1.201 /*This inserts the semantic-layer's request data into standard VMS carrier 1.202 @@ -573,10 +582,12 @@ 1.203 memcpy( newProbe->nameStr, semReq->nameStr, nameLen ); 1.204 newProbe->hist = NULL; 1.205 newProbe->schedChoiceWasRecorded = FALSE; 1.206 + 1.207 + //This runs in masterVP, so no race-condition worries 1.208 newProbe->probeID = 1.209 addToDynArray( newProbe, _VMSMasterEnv->dynIntervalProbesInfo ); 1.210 1.211 - requestingPr->dataReturnedFromReq = newProbe; 1.212 + requestingPr->dataRetFromReq = newProbe; 1.213 1.214 (*resumePrFnPtr)( requestingPr, semEnv ); 1.215 } 1.216 @@ -601,7 +612,7 @@ 1.217 * of dis-owning it. 
1.218 */ 1.219 void 1.220 -VMS__handle_dissipate_reqst( VirtProcr *animatingPr ) 1.221 +VMS__dissipate_procr( VirtProcr *animatingPr ) 1.222 { 1.223 //dis-own all locations owned by this processor, causing to be freed 1.224 // any locations that it is (was) sole owner of 1.225 @@ -619,15 +630,13 @@ 1.226 } 1.227 1.228 1.229 -//TODO: re-architect so that have clean separation between request handler 1.230 +//TODO: look at architecting cleanest separation between request handler 1.231 // and master loop, for dissipate, create, shutdown, and other non-semantic 1.232 // requests. Issue is chain: one removes requests from AppVP, one dispatches 1.233 // on type of request, and one handles each type.. but some types require 1.234 // action from both request handler and master loop -- maybe just give the 1.235 // request handler calls like: VMS__handle_X_request_type 1.236 1.237 -void 1.238 -endOSThreadFn( void *initData, VirtProcr *animatingPr ); 1.239 1.240 /*This is called by the semantic layer's request handler when it decides its 1.241 * time to shut down the VMS system. Calling this causes the core loop OS 1.242 @@ -641,10 +650,9 @@ 1.243 * masterVP any AppVPs that might still be allocated and sitting in the 1.244 * semantic environment, or have been orphaned in the _VMSWorkQ. 1.245 * 1.246 - *NOTE: the semantic plug-in is expected to use VMS__malloc_to to get all the 1.247 + *NOTE: the semantic plug-in is expected to use VMS__malloc to get all the 1.248 * locations it needs, and give ownership to masterVP. Then, they will be 1.249 - * automatically freed when the masterVP is dissipated. (This happens after 1.250 - * the core loop threads have all exited) 1.251 + * automatically freed. 1.252 * 1.253 *In here,create one core-loop shut-down processor for each core loop and put 1.254 * them all directly into the readyToAnimateQ. 1.255 @@ -655,7 +663,7 @@ 1.256 * point is it sure that all results have completed. 
1.257 */ 1.258 void 1.259 -VMS__handle_shutdown_reqst( void *dummy, VirtProcr *animatingPr ) 1.260 +VMS__shutdown() 1.261 { int coreIdx; 1.262 VirtProcr *shutDownPr; 1.263 1.264 @@ -703,19 +711,19 @@ 1.265 } 1.266 1.267 1.268 -/*This is called after the threads have shut down and control has returned 1.269 - * to the semantic layer, in the entry point function in the main thread. 1.270 - * It has to free anything allocated during VMS_init, and any other alloc'd 1.271 - * locations that might be left over. 1.272 +/*This is called from the startup & shutdown 1.273 */ 1.274 void 1.275 -VMS__cleanup_after_shutdown() 1.276 +VMS__cleanup_at_end_of_shutdown() 1.277 { 1.278 SRSWQueueStruc **readyToAnimateQs; 1.279 int coreIdx; 1.280 VirtProcr **masterVPs; 1.281 SchedSlot ***allSchedSlots; //ptr to array of ptrs 1.282 1.283 + //All the environment data has been allocated with VMS__malloc, so just 1.284 + // free its internal big-chunk and all inside it disappear. 1.285 +/* 1.286 readyToAnimateQs = _VMSMasterEnv->readyToAnimateQs; 1.287 masterVPs = _VMSMasterEnv->masterVPs; 1.288 allSchedSlots = _VMSMasterEnv->allSchedSlots; 1.289 @@ -724,23 +732,23 @@ 1.290 { 1.291 freeSRSWQ( readyToAnimateQs[ coreIdx ] ); 1.292 //master VPs were created external to VMS, so use external free 1.293 - VMS_ext__dissipate_procr( masterVPs[ coreIdx ] ); 1.294 + VMS__dissipate_procr( masterVPs[ coreIdx ] ); 1.295 1.296 freeSchedSlots( allSchedSlots[ coreIdx ] ); 1.297 } 1.298 1.299 - free( _VMSMasterEnv->readyToAnimateQs ); 1.300 - free( _VMSMasterEnv->masterVPs ); 1.301 - free( _VMSMasterEnv->allSchedSlots ); 1.302 + VMS__free( _VMSMasterEnv->readyToAnimateQs ); 1.303 + VMS__free( _VMSMasterEnv->masterVPs ); 1.304 + VMS__free( _VMSMasterEnv->allSchedSlots ); 1.305 1.306 - VMS_ext__free_free_list( _VMSMasterEnv->freeListHead ); 1.307 - 1.308 //============================= MEASUREMENT STUFF ======================== 1.309 #ifdef STATS__TURN_ON_PROBES 1.310 - freeDynArrayDeep( 
_VMSMasterEnv->dynIntervalProbesInfo, &free ); 1.311 + freeDynArrayDeep( _VMSMasterEnv->dynIntervalProbesInfo, &VMS__free_probe); 1.312 #endif 1.313 //======================================================================== 1.314 - 1.315 - free( _VMSMasterEnv ); 1.316 +*/ 1.317 + //These are the only two that use system free 1.318 + VMS_ext__free_free_list( _VMSMasterEnv->freeListHead ); 1.319 + free( (void *)_VMSMasterEnv ); 1.320 } 1.321
