VMS/VMS_Implementations/Vthread_impls/Vthread_MC_shared_impl

Vthread_Request_Handlers.c @ 29:b94dc57e4455

refactored many files -- chgd names, moved code around -- doesn't compile
author Some Random Person <seanhalle@yahoo.com>
date Wed, 09 May 2012 13:24:19 -0700
parents e5d4d5871ac9
/*
 * Copyright 2010 OpenSourceCodeStewardshipFoundation
 *
 * Licensed under BSD
 */

#include <stdio.h>
#include <stdlib.h>
#include <malloc.h>

#include "VMS_impl/VMS.h"
#include "C_Libraries/Queue_impl/PrivateQueue.h"
#include "C_Libraries/Hash_impl/PrivateHash.h"
#include "Vthread.h"
//=============================== Mutexes =================================
/*The semantic request has a mutexIdx value, which acts as index into array.
 */
inline void
handleMakeMutex( VthdSemReq *semReq, VthdSemEnv *semEnv )
 { VthdMutex *newMutex;
   SlaveVP   *requestingSlv;

   requestingSlv = semReq->requestingSlv;
   newMutex = VMS_PI__malloc( sizeof(VthdMutex) );
   newMutex->waitingQueue = makeVMSQ();
   newMutex->holderOfLock = NULL;

   //The mutex struc contains an int that identifies it -- use that as
   // its index within the array of mutexes.  Add the new mutex to array.
   newMutex->mutexIdx = addToDynArray( newMutex, semEnv->mutexDynArrayInfo );

   //Now communicate the mutex's identifying int back to requesting procr
   requestingSlv->dataRetFromReq = (void *)(size_t)newMutex->mutexIdx; //mutexIdx is 32 bit

   //re-animate the requester
   resume_slaveVP( requestingSlv, semEnv );
 }
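
/*Example (hedged sketch, not part of this file): roughly how a library-side
 * wrapper would drive the handler above.  The wrapper name Vthread__make_mutex,
 * the reqType tag, and VMS_WL__send_sem_request are assumptions about the
 * wrapper layer, used only for illustration.
 *
 *  int32
 *  Vthread__make_mutex( SlaveVP *animatingSlv )
 *   { VthdSemReq reqData;
 *
 *     reqData.reqType       = make_mutex;    //hypothetical request-type tag
 *     reqData.requestingSlv = animatingSlv;
 *
 *     //suspend this slave; masterVP runs handleMakeMutex, then resumes it
 *     VMS_WL__send_sem_request( &reqData, animatingSlv );
 *
 *     //the handler stashed the new mutex's index in dataRetFromReq
 *     return (int32)(size_t)animatingSlv->dataRetFromReq;
 *   }
 */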
inline void
handleMutexLock( VthdSemReq *semReq, VthdSemEnv *semEnv )
 { VthdMutex *mutex;

   //=================== Deterministic Replay ======================
   #ifdef RECORD_DETERMINISTIC_REPLAY
   #endif
   //=================================================================
   Meas_startMutexLock

   //lookup mutex struc, using mutexIdx as index
   mutex = semEnv->mutexDynArray[ semReq->mutexIdx ];

   //see if mutex is free or not
   if( mutex->holderOfLock == NULL ) //none holding, give lock to requester
    {
      mutex->holderOfLock = semReq->requestingSlv;

      //re-animate requester, now that it has the lock
      resume_slaveVP( semReq->requestingSlv, semEnv );
    }
   else //queue up requester to wait for release of lock
    {
      writeVMSQ( semReq->requestingSlv, mutex->waitingQueue );
    }
   Meas_endMutexLock
 }
inline void
handleMutexUnlock( VthdSemReq *semReq, VthdSemEnv *semEnv )
 { VthdMutex *mutex;

   Meas_startMutexUnlock

   //lookup mutex struc, using mutexIdx as index
   mutex = semEnv->mutexDynArray[ semReq->mutexIdx ];

   //set new holder of mutex-lock to be next in queue (NULL if empty)
   mutex->holderOfLock = readVMSQ( mutex->waitingQueue );

   //if have new non-NULL holder, re-animate it
   if( mutex->holderOfLock != NULL )
    {
      resume_slaveVP( mutex->holderOfLock, semEnv );
    }

   //re-animate the releaser of the lock
   resume_slaveVP( semReq->requestingSlv, semEnv );
   Meas_endMutexUnlock
 }
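
/*Example usage (hedged sketch): how application-level code would exercise the
 * lock and unlock handlers above.  Vthread__mutex_lock/unlock are assumed
 * wrapper names that package the mutexIdx into a VthdSemReq and suspend the
 * slave until the handler resumes it.
 *
 *  int32 idx = Vthread__make_mutex( slv );
 *
 *  Vthread__mutex_lock( idx, slv );    //parks slv in waitingQueue if held
 *  sharedCount += 1;                   //critical section
 *  Vthread__mutex_unlock( idx, slv );  //hands lock to next queued slave
 */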
//=========================== Condition Vars ==============================
/*The semantic request has the cond-var value and mutex value, which are the
 * indexes into the array.  Not worrying about having too many mutexes or
 * cond vars created, so using an array instead of a hash table, for speed.
 */

/*Make-cond has to be called with the mutex that the cond is paired to.
 * It doesn't have to be implemented this way, but cond vars were confusing
 * to learn until deducing that each cond var owns a mutex that is used only
 * for interacting with that cond var.  So, make this pairing explicit.
 */
inline void
handleMakeCond( VthdSemReq *semReq, VthdSemEnv *semEnv )
 { VthdCond *newCond;
   SlaveVP  *requestingSlv;

   requestingSlv = semReq->requestingSlv;
   newCond = VMS_PI__malloc( sizeof(VthdCond) );
   newCond->partnerMutex = semEnv->mutexDynArray[ semReq->mutexIdx ];
   newCond->waitingQueue = makeVMSQ();

   //The cond struc contains an int that identifies it -- use that as
   // its index within the array of conds.  Add the new cond to array.
   newCond->condIdx = addToDynArray( newCond, semEnv->condDynArrayInfo );

   //Now communicate the cond's identifying int back to requesting procr
   requestingSlv->dataRetFromReq = (void *)(size_t)newCond->condIdx; //condIdx is 32 bit

   //re-animate the requester
   resume_slaveVP( requestingSlv, semEnv );
 }
/*Mutex has already been paired to the cond var, so don't need to send the
 * mutex, just the cond var.  Don't have to do this, but want to bitch-slap
 * the designers of the Posix standard ; )
 */
inline void
handleCondWait( VthdSemReq *semReq, VthdSemEnv *semEnv )
 { VthdCond  *cond;
   VthdMutex *mutex;

   Meas_startCondWait

   //get cond struc out of array of them that's in the sem env
   cond = semEnv->condDynArray[ semReq->condIdx ];

   //add requester to queue of wait-ers
   writeVMSQ( semReq->requestingSlv, cond->waitingQueue );

   //unlock mutex -- can't reuse above handler 'cause not queuing releaser
   mutex = cond->partnerMutex;
   mutex->holderOfLock = readVMSQ( mutex->waitingQueue );

   if( mutex->holderOfLock != NULL )
    {
      resume_slaveVP( mutex->holderOfLock, semEnv );
    }
   Meas_endCondWait
 }
/*Note: have to implement this such that the waiter is guaranteed to be the
 * one that gets the lock next.
 */
inline void
handleCondSignal( VthdSemReq *semReq, VthdSemEnv *semEnv )
 { VthdCond  *cond;
   VthdMutex *mutex;
   SlaveVP   *waitingSlv;

   Meas_startCondSignal;

   //get cond struc out of array of them that's in the sem env
   cond = semEnv->condDynArray[ semReq->condIdx ];

   //take next waiting procr out of queue (NULL if none waiting)
   waitingSlv = readVMSQ( cond->waitingQueue );

   //transfer waiting procr to wait queue of mutex -- mutex is guaranteed
   // to be held by signalling procr, so no check.  Guard against signal
   // with no waiters, which would otherwise enqueue NULL.
   mutex = cond->partnerMutex;
   if( waitingSlv != NULL )
    {
      writeVMSQ( waitingSlv, mutex->waitingQueue ); //is first out when read
    }

   //re-animate the signalling procr
   resume_slaveVP( semReq->requestingSlv, semEnv );
   Meas_endCondSignal;
 }
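
/*Example usage (hedged sketch): the classic wait-loop pattern, written with
 * this implementation's paired-mutex design.  Vthread__cond_wait/signal and
 * the dataReady flag are assumptions for illustration.
 *
 *  //waiter -- must hold the cond's partner mutex before waiting
 *  Vthread__mutex_lock( mutexIdx, slv );
 *  while( !dataReady )                     //re-check predicate on each wake
 *     Vthread__cond_wait( condIdx, slv );  //releases mutex; holds it again on wake
 *  consumeData();
 *  Vthread__mutex_unlock( mutexIdx, slv );
 *
 *  //signaller -- holds the same mutex, per the pairing above
 *  Vthread__mutex_lock( mutexIdx, otherSlv );
 *  dataReady = TRUE;
 *  Vthread__cond_signal( condIdx, otherSlv ); //waiter is next to get the mutex
 *  Vthread__mutex_unlock( mutexIdx, otherSlv );
 */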
//============================================================================

inline void
handleMalloc( VthdSemReq *semReq, SlaveVP *requestingSlv, VthdSemEnv *semEnv )
 { void *ptr;

   //========================= MEASUREMENT STUFF ======================
   #ifdef MEAS__TIME_PLUGIN
   int32 startStamp, endStamp;
   saveLowTimeStampCountInto( startStamp );
   #endif
   //==================================================================

   ptr = VMS_PI__malloc( semReq->sizeToMalloc );
   requestingSlv->dataRetFromReq = ptr;
   resume_slaveVP( requestingSlv, semEnv );

   //========================= MEASUREMENT STUFF ======================
   #ifdef MEAS__TIME_PLUGIN
   saveLowTimeStampCountInto( endStamp );
   subIntervalFromHist( startStamp, endStamp,
                        _VMSMasterEnv->reqHdlrHighTimeHist );
   #endif
   //==================================================================
 }
inline void
handleFree( VthdSemReq *semReq, SlaveVP *requestingSlv, VthdSemEnv *semEnv )
 {
   //========================= MEASUREMENT STUFF ======================
   #ifdef MEAS__TIME_PLUGIN
   int32 startStamp, endStamp;
   saveLowTimeStampCountInto( startStamp );
   #endif
   //==================================================================

   VMS_PI__free( semReq->ptrToFree );
   resume_slaveVP( requestingSlv, semEnv );

   //========================= MEASUREMENT STUFF ======================
   #ifdef MEAS__TIME_PLUGIN
   saveLowTimeStampCountInto( endStamp );
   subIntervalFromHist( startStamp, endStamp,
                        _VMSMasterEnv->reqHdlrHighTimeHist );
   #endif
   //==================================================================
 }
//===========================================================================

/*Uses the ID as an index into the array of singletons.  If the has-finished
 * flag is already set, resumes from the end-label.  Else, sets the
 * has-started flag and resumes normally.
 */
inline void
handleStartSingleton_helper( VthdSingleton *singleton, SlaveVP *reqstingSlv,
                             VthdSemEnv *semEnv )
 {
   if( singleton->hasFinished )
    { //the code that sets the flag to true first sets the end instr addr
      reqstingSlv->dataRetFromReq = singleton->savedRetAddr;
      resume_slaveVP( reqstingSlv, semEnv );
      return;
    }
   else if( singleton->hasBeenStarted )
    { //singleton is in-progress in a diff slave, so wait for it to finish
      writeVMSQ( reqstingSlv, singleton->waitQ );
      return;
    }
   else
    { //hasn't been started, so this is the first attempt at the singleton
      singleton->hasBeenStarted = TRUE;
      reqstingSlv->dataRetFromReq = 0x0;
      resume_slaveVP( reqstingSlv, semEnv );
      return;
    }
 }
inline void
handleStartFnSingleton( VthdSemReq *semReq, SlaveVP *requestingSlv,
                        VthdSemEnv *semEnv )
 { VthdSingleton *singleton;

   singleton = &(semEnv->fnSingletons[ semReq->singletonID ]);
   handleStartSingleton_helper( singleton, requestingSlv, semEnv );
 }

inline void
handleStartDataSingleton( VthdSemReq *semReq, SlaveVP *requestingSlv,
                          VthdSemEnv *semEnv )
 { VthdSingleton *singleton;

   if( semReq->singleton == NULL )
    { singleton = VMS_PI__malloc( sizeof(VthdSingleton) );
      singleton->waitQ          = makeVMSQ();
      singleton->savedRetAddr   = 0x0;
      singleton->hasBeenStarted = FALSE;
      singleton->hasFinished    = FALSE;
      semReq->singleton = singleton;
    }
   else
      singleton = semReq->singleton;
   handleStartSingleton_helper( singleton, requestingSlv, semEnv );
 }
inline void
handleEndSingleton_helper( VthdSingleton *singleton, SlaveVP *requestingSlv,
                           VthdSemEnv *semEnv )
 { VMSQueueStruc *waitQ;
   int32          numWaiting, i;
   SlaveVP       *resumingSlv;

   if( singleton->hasFinished )
    { //by definition, only one slave should ever be able to run end-singleton,
      // so if this is true, it's an error
      //VMS_PI__throw_exception( "singleton code ran twice", requestingSlv, NULL );
    }

   singleton->hasFinished = TRUE;
   waitQ = singleton->waitQ;
   numWaiting = numInVMSQ( waitQ );
   for( i = 0; i < numWaiting; i++ )
    { //they will resume inside start-singleton, then jmp to end-singleton
      resumingSlv = readVMSQ( waitQ );
      resumingSlv->dataRetFromReq = singleton->savedRetAddr;
      resume_slaveVP( resumingSlv, semEnv );
    }

   resume_slaveVP( requestingSlv, semEnv );
 }

inline void
handleEndFnSingleton( VthdSemReq *semReq, SlaveVP *requestingSlv,
                      VthdSemEnv *semEnv )
 { VthdSingleton *singleton;

   singleton = &(semEnv->fnSingletons[ semReq->singletonID ]);
   handleEndSingleton_helper( singleton, requestingSlv, semEnv );
 }

inline void
handleEndDataSingleton( VthdSemReq *semReq, SlaveVP *requestingSlv,
                        VthdSemEnv *semEnv )
 { VthdSingleton *singleton;

   singleton = semReq->singleton;
   handleEndSingleton_helper( singleton, requestingSlv, semEnv );
 }
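
/*Example usage (hedged sketch): how the start/end pair above is meant to be
 * driven.  The wrapper names, and reading dataRetFromReq as a "jump to the
 * end-label" address, are assumptions based on the comments above.
 *
 *  if( Vthread__start_fn_singleton( singletonID, slv ) == 0 )
 *   { //first arrival: dataRetFromReq was 0, so run the once-only code
 *     initSharedTable();
 *     Vthread__end_fn_singleton( singletonID, slv ); //wakes queued waiters
 *   }
 *  //every slave continues here exactly once the singleton has finished
 */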
/*This executes the function in the masterVP: take the function
 * pointer out of the request and call it, then resume the Slv.
 */
inline void
handleAtomic( VthdSemReq *semReq, SlaveVP *requestingSlv, VthdSemEnv *semEnv )
 {
   semReq->fnToExecInMaster( semReq->dataForFn );
   resume_slaveVP( requestingSlv, semEnv );
 }
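
/*Example usage (hedged sketch): because the function runs in the masterVP,
 * no two atomic requests can interleave.  Vthread__atomic is an assumed
 * wrapper name.
 *
 *  void addToTotal( void *data ) { globalTotal += (size_t)data; }
 *
 *  Vthread__atomic( &addToTotal, (void *)5, slv ); //runs in masterVP, then resumes slv
 */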
/*First, it looks at the Slv's semantic data, to see the highest transactionID
 * that the Slv has already entered.  If the current ID is smaller, it throws
 * an exception stating a bug in the code.
 *Otherwise it puts the current ID there, and adds the ID to a linked list of
 * IDs entered -- the list is used to check that exits are properly ordered.
 *Next it uses the transactionID as an index into an array of transaction
 * structures.
 *If the "Slv_currently_executing" field is non-null, then put the requesting
 * Slv into the queue in the struct.  (At some point a holder will request
 * end-transaction, which will take this Slv from the queue and resume it.)
 *If NULL, then write the requesting Slv into the field and resume.
 */
inline void
handleTransStart( VthdSemReq *semReq, SlaveVP *requestingSlv,
                  VthdSemEnv *semEnv )
 { VthdSemData   *semData;
   TransListElem *nextTransElem;
   VthdTrans     *transStruc;

   //check ordering of entering transactions is correct
   semData = requestingSlv->semanticData;
   if( semData->highestTransEntered > semReq->transID )
    { //throw VMS exception, which shuts down VMS.
      VMS_PI__throw_exception( "transID smaller than prev", requestingSlv, NULL );
    }

   //add this trans ID to the list of transactions entered -- checked when
   // a transaction ends
   semData->highestTransEntered = semReq->transID;
   nextTransElem = VMS_PI__malloc( sizeof(TransListElem) );
   nextTransElem->transID   = semReq->transID;
   nextTransElem->nextTrans = semData->lastTransEntered;
   semData->lastTransEntered = nextTransElem;

   //get the structure for this transaction ID
   transStruc = &(semEnv->transactionStrucs[ semReq->transID ]);

   if( transStruc->SlvCurrentlyExecuting == NULL )
    {
      transStruc->SlvCurrentlyExecuting = requestingSlv;
      resume_slaveVP( requestingSlv, semEnv );
    }
   else
    { //note: might make future things cleaner to save the request with the
      // Slv and add this trans ID to the linked list when it gets out of the
      // queue, but don't need that for now, and lazy..
      writeVMSQ( requestingSlv, transStruc->waitingSlvQ );
    }
 }
/*Use the trans ID to get the transaction structure from the array.
 *Look at Slv_currently_executing to be sure it's the same as the requesting
 * Slv.  If different, throw an exception, stating there's a bug in the code.
 *Next, take the first element off the list of entered transactions.
 * Check to be sure the ending transaction has the same ID as the next on
 * the list.  If not, it's incorrectly nested, so throw an exception.
 *
 *Next, read from the queue in the structure.
 *If it's empty, set the Slv_currently_executing field to NULL and resume
 * the requesting Slv.
 *If get something, set Slv_currently_executing to the Slv from the queue,
 * then resume both.
 */
inline void
handleTransEnd( VthdSemReq *semReq, SlaveVP *requestingSlv,
                VthdSemEnv *semEnv )
 { VthdSemData   *semData;
   SlaveVP       *waitingSlv;
   VthdTrans     *transStruc;
   TransListElem *lastTrans;

   transStruc = &(semEnv->transactionStrucs[ semReq->transID ]);

   //make sure transaction ended in the same Slv as started it.
   if( transStruc->SlvCurrentlyExecuting != requestingSlv )
    {
      VMS_PI__throw_exception( "trans ended in diff Slv", requestingSlv, NULL );
    }

   //make sure nesting is correct -- last ID entered should == this ID
   semData = requestingSlv->semanticData;
   lastTrans = semData->lastTransEntered;
   if( lastTrans->transID != semReq->transID )
    {
      VMS_PI__throw_exception( "trans incorrectly nested", requestingSlv, NULL );
    }

   semData->lastTransEntered = semData->lastTransEntered->nextTrans;

   //hand the transaction to the next waiting Slv (NULL if none waiting)
   waitingSlv = readVMSQ( transStruc->waitingSlvQ );
   transStruc->SlvCurrentlyExecuting = waitingSlv;

   if( waitingSlv != NULL )
      resume_slaveVP( waitingSlv, semEnv );

   resume_slaveVP( requestingSlv, semEnv );
 }
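
/*Example usage (hedged sketch): correctly nested transactions, per the
 * ordering rules the two handlers above enforce.  Wrapper names are
 * assumptions.
 *
 *  Vthread__trans_start( 1, slv );   //IDs must not decrease going inward
 *  Vthread__trans_start( 2, slv );
 *  updateSharedStructs();
 *  Vthread__trans_end( 2, slv );     //must end the innermost trans first
 *  Vthread__trans_end( 1, slv );     //ending 1 before 2 would throw
 */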