Mercurial > cgi-bin > hgwebdir.cgi > VMS > VMS_Implementations > VMS_impls > VMS__MC_shared_impl
diff VMS.c @ 139:99798e4438a6
Merge of Malloc2 and inter master requests
| field | value |
|---|---|
| author | Merten Sach <msach@mailbox.tu-berlin.de> |
| date | Mon, 19 Sep 2011 16:12:01 +0200 |
| parents | def70e32cf2c 99343ffe1918 |
| children | 2c8f3cf6c058 |
line diff
1.1 --- a/VMS.c Wed Sep 07 19:36:46 2011 +0200
1.2 +++ b/VMS.c Mon Sep 19 16:12:01 2011 +0200
1.3 @@ -13,6 +13,7 @@
1.4 
1.5 #include "VMS.h"
1.6 #include "ProcrContext.h"
1.7 +#include "scheduling.h"
1.8 #include "Queue_impl/BlockingQueue.h"
1.9 #include "Histogram/Histogram.h"
1.10 
1.11 @@ -105,8 +106,14 @@
1.12 //Very first thing put into the master env is the free-list, seeded
1.13 // with a massive initial chunk of memory.
1.14 //After this, all other mallocs are VMS__malloc.
1.15 - _VMSMasterEnv->freeLists = VMS_ext__create_free_list();
1.16 -
1.17 + int i;
1.18 + for(i=0; i<NUM_CORES; i++)
1.19 + {
1.20 + _VMSMasterEnv->freeLists[i] = VMS_ext__create_free_list();
1.21 + _VMSMasterEnv->interMasterRequestsFor[i] = NULL;
1.22 + _VMSMasterEnv->interMasterRequestsSentBy[i] = NULL;
1.23 + }
1.24 + _VMSMasterEnv->currentMasterProcrID = 0;
1.25 
1.26 //============================= MEASUREMENT STUFF ========================
1.27 #ifdef MEAS__TIME_MALLOC
1.28 @@ -497,6 +504,19 @@
1.29 VMS__suspend_procr( callingPr );
1.30 }
1.31 
1.32 +void inline
1.33 +VMS__send_inter_plugin_req( void *reqData, int32 targetMaster,
1.34 + VirtProcr *requestingMaster )
1.35 + { _VMSMasterEnv->interMasterRequestsFor[targetMaster] =
1.36 + (InterMasterReqst *) reqData;
1.37 + }
1.38 +
1.39 +void inline
1.40 +VMS__send_inter_VMSCore_req( InterVMSCoreReqst *reqData,
1.41 + int32 targetMaster, VirtProcr *requestingMaster )
1.42 + { _VMSMasterEnv->interMasterRequestsFor[targetMaster] =
1.43 + (InterMasterReqst *) reqData;
1.44 + }
1.45 
1.46 /*
1.47 */
1.48 @@ -542,18 +562,27 @@
1.49 
1.50 semReq = req->semReqData;
1.51 
1.52 - newProbe = VMS__malloc( sizeof(IntervalProbe) );
1.53 - newProbe->nameStr = VMS__strDup( semReq->nameStr );
1.54 - newProbe->hist = NULL;
1.55 - newProbe->schedChoiceWasRecorded = FALSE;
1.56 + switch(semReq->reqType){
1.57 + case createProbe:
1.58 + newProbe = VMS__malloc( sizeof(IntervalProbe) );
1.59 + newProbe->nameStr = VMS__strDup( (char*)semReq->data );
1.60 + newProbe->hist = NULL;
1.61 + newProbe->schedChoiceWasRecorded = FALSE;
1.62 
1.63 - //This runs in masterVP, so no race-condition worries
1.64 - newProbe->probeID =
1.65 - addToDynArray( newProbe, _VMSMasterEnv->dynIntervalProbesInfo );
1.66 -
1.67 - requestingPr->dataRetFromReq = newProbe;
1.68 -
1.69 - (*resumePrFnPtr)( requestingPr, semEnv );
1.70 + //This runs in masterVP, so no race-condition worries
1.71 + newProbe->probeID =
1.72 + addToDynArray( newProbe, _VMSMasterEnv->dynIntervalProbesInfo );
1.73 + requestingPr->dataRetFromReq = newProbe;
1.74 + break;
1.75 + case interMasterReqst:
1.76 + sendInterMasterReqst(semReq->receiverID,
1.77 + (InterMasterReqst*)semReq->data);
1.78 + break;
1.79 + default:
1.80 + break;
1.81 + }
1.82 +
1.83 + resumePrFnPtr( requestingPr, semEnv );
1.84 }
1.85 
1.86 
1.87 @@ -589,8 +618,9 @@
1.88 // itself
1.89 //Note, should not stack-allocate initial data -- no guarantee, in
1.90 // general that creating processor will outlive ones it creates.
1.91 - VMS__free( animatingPr->startOfStack );
1.92 - VMS__free( animatingPr );
1.93 +
1.94 + VMS__free( animatingPr->startOfStack);
1.95 + VMS__free( animatingPr);
1.96 }
1.97 
1.98 
1.99 @@ -629,14 +659,12 @@
1.100 void
1.101 VMS__shutdown()
1.102 { int coreIdx;
1.103 - VirtProcr *shutDownPr;
1.104 -
1.105 - //create the shutdown processors, one for each core loop -- put them
1.106 - // directly into the Q -- each core will die when gets one
1.107 + //Send a shutdown Request to all MasterLoops.
1.108 for( coreIdx=0; coreIdx < NUM_CORES; coreIdx++ )
1.109 { //Note, this is running in the master
1.110 - shutDownPr = VMS__create_procr( &endOSThreadFn, NULL );
1.111 - writeVMSQ( shutDownPr, _VMSMasterEnv->readyToAnimateQs[coreIdx] );
1.112 + InterVMSCoreReqst *shutdownReqst = VMS__malloc(sizeof(InterVMSCoreReqst));
1.113 + shutdownReqst->secondReqType = shutdownVP;
1.114 + sendInterMasterReqst(coreIdx, (InterMasterReqst*)shutdownReqst);
1.115 }
1.116 
1.117 }
1.118 @@ -655,6 +683,7 @@
1.119 * to core loop function -- note that this slices out a level of virtual
1.120 * processors).
1.121 */
1.122 +/*
1.123 void
1.124 endOSThreadFn( void *initData, VirtProcr *animatingPr )
1.125 {
1.126 
1.127 asmTerminateCoreLoop(animatingPr);
1.128 #endif
1.129 }
1.130 + */
1.131 
1.132 
1.133 /*This is called from the startup & shutdown
1.134 @@ -671,6 +701,9 @@
1.135 void
1.136 VMS__cleanup_at_end_of_shutdown()
1.137 {
1.138 + // Set to zero so that all data structures are freed correctly
1.139 + _VMSMasterEnv->currentMasterProcrID = 0;
1.140 +
1.141 //unused
1.142 //VMSQueueStruc **readyToAnimateQs;
1.143 //int coreIdx;
1.144 @@ -751,7 +784,9 @@
1.145 //========================================================================
1.146 */
1.147 //These are the only two that use system free
1.148 - VMS_ext__free_free_list( _VMSMasterEnv->freeLists );
1.149 + int i;
1.150 + for(i=0; i<NUM_CORES; i++)
1.151 + VMS_ext__free_free_list( _VMSMasterEnv->freeLists[i]);
1.152 free( (void *)_VMSMasterEnv );
1.153
