VMS/VMS_Implementations/VOMP_impls/VOMP__MC_shared_impl

view VOMP_Request_Handlers.c @ 1:21cf36019f0d

Partially converted SSR to VOMP -- start of changes
author Some Random Person <seanhalle@yahoo.com>
date Thu, 24 May 2012 08:57:24 -0700
parents b311282ec174
children
line source
1 /*
2 * Copyright 2010 OpenSourceCodeStewardshipFoundation
3 *
4 * Licensed under BSD
5 */
7 #include <stdio.h>
8 #include <stdlib.h>
10 #include "VMS_impl/VMS.h"
11 #include "Queue_impl/PrivateQueue.h"
12 #include "Hash_impl/PrivateHash.h"
13 #include "VOMP.h"
17 //=========================== Local Fn Prototypes ===========================
18 void
19 resume_slaveVP( SlaveVP *procr, VOMPSemEnv *semEnv );
23 //===========================================================================
24 // Helpers
26 /*Only clone the elements of req used in these reqst handlers
27 */
28 VOMPSemReq *
29 cloneReq( VOMPSemReq *semReq )
30 { VOMPSemReq *clonedReq;
32 clonedReq = VMS_PI__malloc( sizeof(VOMPSemReq) );
33 clonedReq->reqType = semReq->reqType;
34 clonedReq->sendPr = semReq->sendPr;
35 clonedReq->msg = semReq->msg;
36 clonedReq->nextReqInHashEntry = NULL;
38 return clonedReq;
39 }
41 HashEntry *
42 giveEntryElseInsertReqst( char *key, VOMPSemReq *semReq,
43 HashTable *commHashTbl )
44 { HashEntry *entry;
45 VOMPSemReq *waitingReq;
47 entry = getEntryFromTable( (char *)key, commHashTbl );
48 if( entry == NULL )
49 { //no waiting sends or receives, so add this request and exit
50 // note: have to clone the request because it's on stack of sender
51 addValueIntoTable( key, cloneReq( semReq ), commHashTbl );
52 return NULL;
53 }
54 waitingReq = (VOMPSemReq *)entry->content;
55 if( waitingReq == NULL ) //might happen when last waiting gets paired
56 { //no waiting sends or receives, so add this request and exit
57 entry->content = semReq;
58 return NULL;
59 }
60 return entry;
61 }
66 //===========================================================================
67 /*The semantic request has the receiving processor and the message type
68 *
69 *Note one value in this approach: without the extra VMS layer,
70 * the send and receive would happen in real time instead of virtual time,
71 * which would waste real time while one of them waited for other
72 *
73 *When successfully pair-up, transfer ownership of the sent data
74 * to the receiving processor
75 *
76 *Messages of a given Type have to be kept separate.. so need a separate
77 * entry in the hash table for each pair: receivePr, msgType
78 *
79 *Also, if same sender sends multiple before any get received, then need to
80 * stack the sends up -- even if a send waits until it's paired, several
81 * separate processors can send to the same receiver, and hashing on the
82 * receive processor, so they will stack up.
83 */
void
handleSendType( VOMPSemReq *semReq, VOMPSemEnv *semEnv )
 { SlaveVP *sendPr, *receivePr;
   int key[] = {0,0,0};
   VOMPSemReq *waitingReq;
   HashEntry *entry;
   HashTable *commHashTbl = semEnv->commHashTbl;

   DEBUG__printf1(dbgRqstHdlr,"SendType request from processor %d",semReq->sendPr->slaveID)

   receivePr = semReq->receivePr; //For "send", know both send & recv procrs
   sendPr = semReq->sendPr;

   //TODO: handle transfer of msg-locs ownership
   //TODO: hash table implemented such that using "addEntry" or
   // "addValue" to table causes the *value* in old entry to be
   // *freed* -- this is bad.  Want to stack up values in a linked
   // list when multiple have the same key.
   //TODO: use a faster hash function -- see notes in intelligence gather

   //key is the pair (receiver's slave ID, message type), reinterpreted
   // by the hash table as a char string
   key[0] = (int)receivePr->slaveID;
   key[1] = (int)(semReq->msgType);
   //key[2] acts as the 0 that terminates the string

   entry = giveEntryElseInsertReqst( (char *)key, semReq, commHashTbl);
   if( entry == NULL ) return; //was just inserted

   waitingReq = (VOMPSemReq *)entry->content;

   //At this point, know have waiting request(s) -- either sends or recv
   //Note, can only have max of one receive waiting, and cannot have both
   // sends and receives waiting (they would have paired off)
   // but can have multiple sends from diff sending VPs, all same msg-type
   if( waitingReq->reqType == send_type )
    { //waiting request is another send, so stack this up on list
      // but first clone the sending request so it persists.
      VOMPSemReq *clonedReq = cloneReq( semReq );
      clonedReq-> nextReqInHashEntry = waitingReq->nextReqInHashEntry;
      waitingReq->nextReqInHashEntry = clonedReq;
      DEBUG__printf2( dbgRqstHdlr, "linked requests: %p, %p ", clonedReq,\
                      waitingReq )
      return;
    }
   else
    {
    #ifdef HOLISTIC__TURN_ON_OBSERVE_UCC
      //record the send->receive dependency and group membership for the
      // holistic observation tooling (only compiled in when enabled)
      Dependency newd;
      newd.from_vp = sendPr->slaveID;
      newd.from_task = sendPr->assignCount;
      newd.to_vp = receivePr->slaveID;
      newd.to_task = receivePr->assignCount +1;
      //(newd,semEnv->commDependenciesList);
      addToListOfArrays(Dependency,newd,semEnv->dynDependenciesList);
      int32 groupId = semReq->msgType;
      if(semEnv->ntonGroupsInfo->numInArray <= groupId){
          makeHighestDynArrayIndexBeAtLeast(semEnv->ntonGroupsInfo, groupId);
      }
      if(semEnv->ntonGroups[groupId] == NULL){
          semEnv->ntonGroups[groupId] = new_NtoN(groupId);
      }
      Unit u;
      u.vp = sendPr->slaveID;
      u.task = sendPr->assignCount;
      addToListOfArrays(Unit,u,semEnv->ntonGroups[groupId]->senders);
      u.vp = receivePr->slaveID;
      u.task = receivePr->assignCount +1;
      addToListOfArrays(Unit,u,semEnv->ntonGroups[groupId]->receivers);
    #endif

      //waiting request is a receive, so it pairs to this send
      //First, remove the waiting receive request from the entry
      entry->content = waitingReq->nextReqInHashEntry;
      VMS_PI__free( waitingReq ); //Don't use contents -- so free it

      if( entry->content == NULL )
       { //TODO: mod hash table to double-link, so can delete entry from
         // table without hashing the key and looking it up again
         deleteEntryFromTable( entry->key, commHashTbl ); //frees hashEntry
       }

      //attach msg that's in this send request to receiving procr
      // when comes back from suspend will have msg in dataRetFromReq
      receivePr->dataRetFromReq = semReq->msg;

      //bring both processors back from suspend
      resume_slaveVP( sendPr, semEnv );
      resume_slaveVP( receivePr, semEnv );

      return;
    }
 }
/*Pairs a send-from-to request with the matching waiting receive, or
 * queues it if no receive is waiting.  Hash key is the pair
 * (receiver slave ID, sender slave ID), so at most one request can ever
 * be waiting under a given key.
 *Looks like can make single handler for both sends..
 */
//TODO: combine both send handlers into single handler
void
handleSendFromTo( VOMPSemReq *semReq, VOMPSemEnv *semEnv)
 { SlaveVP *sendPr, *receivePr;
   int key[] = {0,0,0};
   VOMPSemReq *waitingReq;
   HashEntry *entry;
   HashTable *commHashTbl = semEnv->commHashTbl;

   DEBUG__printf2(dbgRqstHdlr,"SendFromTo request from processor %d to %d",semReq->sendPr->slaveID,semReq->receivePr->slaveID)

   receivePr = semReq->receivePr; //For "send", know both send & recv procrs
   sendPr = semReq->sendPr;

   //key is (receiver's slave ID, sender's slave ID)
   key[0] = (int)receivePr->slaveID;
   key[1] = (int)sendPr->slaveID;
   //key[2] acts at the 0 that terminates the string

   entry = giveEntryElseInsertReqst( (char *)key, semReq, commHashTbl);
   if( entry == NULL ) return; //was just inserted

   waitingReq = (VOMPSemReq *)entry->content;

   //At this point, know have waiting request(s) -- either sends or recv
   if( waitingReq->reqType == send_from_to )
    { printf("\n ERROR: shouldn't be two send-from-tos waiting \n");
      //NOTE(review): on this error path the new send is dropped and its
      // VP is never resumed -- presumably unreachable by construction,
      // but confirm.
    }
   else
    { //waiting request is a receive, so it completes pair with this send
    #ifdef HOLISTIC__TURN_ON_OBSERVE_UCC
      //record the send->receive dependency for the observation tooling
      Dependency newd;
      newd.from_vp = sendPr->slaveID;
      newd.from_task = sendPr->assignCount;
      newd.to_vp = receivePr->slaveID;
      newd.to_task = receivePr->assignCount +1;
      //addToListOfArraysDependency(newd,semEnv->commDependenciesList);
      addToListOfArrays(Dependency,newd,semEnv->commDependenciesList);
    #endif
      //First, remove the waiting receive request from the entry
      entry->content = waitingReq->nextReqInHashEntry;
      VMS_PI__free( waitingReq ); //Don't use contents -- so free it

      //can only be one waiting req for "from-to" semantics
      if( entry->content != NULL )
       {
         printf("\nERROR in handleSendFromTo\n");
       }
      deleteEntryFromTable( entry->key, commHashTbl ); //frees HashEntry

      //attach msg that's in this send request to receiving procr
      // when comes back from suspend, will have msg in dataRetFromReq
      receivePr->dataRetFromReq = semReq->msg;

      //bring both processors back from suspend
      resume_slaveVP( sendPr, semEnv );
      resume_slaveVP( receivePr, semEnv );

      return;
    }
 }
243 //============================== Receives ===========================
244 //
245 /*Removed this one for now, because forces either a search or going to a
246 * two-level hash table, where one level the key is the receivePr, in the
247 * other level, the key is the type.
248 *So, each dest procr that either does a receive_type or that a send_type
249 * targets it, would have a hash table created just for it and placed
250 * into the first-level hash table entry for that receive procr.
251 *Then, doing a receive_type first looks up entry for receive procr in first
252 * table, gets the type-table out of that entry, and does a second lookup
253 * in the type-table.
254 *Doing a receive from-to looks up in the first table, gets the second table
255 * hashed on "from" procr.
256 *Doing a receive_any looks up in the first table, then looks to see if
257 * either of the hash tables have any entries -- would then have to do a
258 * linear search through the hash-table's array for the first non-empty
259 * spot
260 *Yuck.
261 *
262 *Alternatively, could keep two hash tables updated all the time -- one that
263 * does the receive_type and receive_from_to and a second that does
264 * receive_any -- would only hash the second table by the receive procr.
265 * When remove from one table, keep back-links to both tables, so can also
266 * quickly remove from other table.
267 *Cost is doing two hash-table lookups for every insert.
268 * If ever add receive_any, looking like this second option easier and even
269 * less costly.
270 */
//Not implemented -- see the discussion above: supporting receive_any
// requires either a linear search over the hash array or a second hash
// table keyed only on the receiving processor.
void
handleReceiveAny( VOMPSemReq *semReq, VOMPSemEnv *semEnv)
{

}
/*Pairs a receive-of-type request with the oldest waiting send of that
 * type, or queues a clone of it if no send is waiting.  Key is the pair
 * (receiver slave ID, message type).
 */
void
handleReceiveType( VOMPSemReq *semReq, VOMPSemEnv *semEnv)
 { SlaveVP *sendPr, *receivePr;
   int key[] = {0,0,0};
   VOMPSemReq *waitingReq;
   HashEntry *entry;
   HashTable *commHashTbl = semEnv->commHashTbl;

   receivePr = semReq->receivePr;

   DEBUG__printf1(dbgRqstHdlr,"ReceiveType request from processor %d",receivePr->slaveID)

   //same key construction as handleSendType, so sends and receives meet
   key[0] = (int)receivePr->slaveID;
   key[1] = (int)(semReq->msgType);
   //key[2] acts as the 0 that terminates the string

   entry = giveEntryElseInsertReqst((char*)key, semReq, commHashTbl);//clones
   if( entry == NULL ) return; //was just inserted

   waitingReq = (VOMPSemReq *)entry->content; //previously cloned by insert

   //At this point, know have waiting request(s) -- should be send(s)
   if( waitingReq->reqType == send_type )
    { //waiting request is a send, so pair it with this receive
      //first, remove the waiting send request from the list in entry
      entry->content = waitingReq->nextReqInHashEntry;
      if( entry->content == NULL )
       { deleteEntryFromTable( entry->key, commHashTbl ); //frees HashEntry
       }

      //attach msg that's in the send request to receiving procr
      // when comes back from suspend, will have msg in dataRetFromReq
      receivePr->dataRetFromReq = waitingReq->msg;

      //bring both processors back from suspend
      sendPr = waitingReq->sendPr;
      VMS_PI__free( waitingReq ); //cloned copy -- no longer needed

    #ifdef HOLISTIC__TURN_ON_OBSERVE_UCC
      //record the send->receive dependency and group membership for the
      // holistic observation tooling (only compiled in when enabled)
      Dependency newd;
      newd.from_vp = sendPr->slaveID;
      newd.from_task = sendPr->assignCount;
      newd.to_vp = receivePr->slaveID;
      newd.to_task = receivePr->assignCount +1;
      //addToListOfArraysDependency(newd,semEnv->commDependenciesList);
      addToListOfArrays(Dependency,newd,semEnv->dynDependenciesList);
      int32 groupId = semReq->msgType;
      if(semEnv->ntonGroupsInfo->numInArray <= groupId){
          makeHighestDynArrayIndexBeAtLeast(semEnv->ntonGroupsInfo, groupId);
      }
      if(semEnv->ntonGroups[groupId] == NULL){
          semEnv->ntonGroups[groupId] = new_NtoN(groupId);
      }
      Unit u;
      u.vp = sendPr->slaveID;
      u.task = sendPr->assignCount;
      addToListOfArrays(Unit,u,semEnv->ntonGroups[groupId]->senders);
      u.vp = receivePr->slaveID;
      u.task = receivePr->assignCount +1;
      addToListOfArrays(Unit,u,semEnv->ntonGroups[groupId]->receivers);
    #endif

      resume_slaveVP( sendPr, semEnv );
      resume_slaveVP( receivePr, semEnv );

      return;
    }
   printf("\nLang Impl Error: Should never be two waiting receives!\n");
 }
/*Pairs a receive-from-to request with the single waiting send under the
 * (receiver, sender) key, or queues a clone of it if none is waiting.
 */
void
handleReceiveFromTo( VOMPSemReq *semReq, VOMPSemEnv *semEnv)
 { SlaveVP *sendPr, *receivePr;
   int key[] = {0,0,0};
   VOMPSemReq *waitingReq;
   HashEntry *entry;
   HashTable *commHashTbl = semEnv->commHashTbl;

   DEBUG__printf2(dbgRqstHdlr,"ReceiveFromTo %d : %d",semReq->sendPr->slaveID,semReq->receivePr->slaveID)

   receivePr = semReq->receivePr;
   sendPr = semReq->sendPr; //for receive from-to, know send procr

   //same key construction as handleSendFromTo
   key[0] = (int)receivePr->slaveID;
   key[1] = (int)sendPr->slaveID;
   //key[2] acts at the 0 that terminates the string

   entry = giveEntryElseInsertReqst( (char *)key, semReq, commHashTbl);
   if( entry == NULL ) return; //was just inserted

   waitingReq = (VOMPSemReq *)entry->content;

   //At this point, know have waiting request(s) -- should be send(s)
   if( waitingReq->reqType == send_from_to )
    { //waiting request is a send, so pair it with this receive
    #ifdef HOLISTIC__TURN_ON_OBSERVE_UCC
      //record the send->receive dependency for the observation tooling
      Dependency newd;
      newd.from_vp = sendPr->slaveID;
      newd.from_task = sendPr->assignCount;
      newd.to_vp = receivePr->slaveID;
      newd.to_task = receivePr->assignCount +1;
      //addToListOfArraysDependency(newd,semEnv->commDependenciesList);
      addToListOfArrays(Dependency,newd,semEnv->commDependenciesList);
    #endif
      //For from-to, should only ever be a single reqst waiting tobe paird
      entry->content = waitingReq->nextReqInHashEntry;
      if( entry->content != NULL ) printf("\nERROR in handleRecvFromTo\n");
      deleteEntryFromTable( entry->key, commHashTbl ); //frees entry too

      //attach msg that's in the send request to receiving procr
      // when comes back from suspend, will have msg in dataRetFromReq
      receivePr->dataRetFromReq = waitingReq->msg;

      //bring both processors back from suspend
      sendPr = waitingReq->sendPr;
      VMS_PI__free( waitingReq ); //cloned copy -- no longer needed

      resume_slaveVP( sendPr, semEnv );
      resume_slaveVP( receivePr, semEnv );

      return;
    }
   printf("\nLang Impl Error: Should never be two waiting receives!\n");
 }
410 //===============================================
//Not implemented yet -- placeholder for ownership-transfer semantics
// (see the msg-locs ownership TODO in handleSendType).
void
handleTransferTo( VOMPSemReq *semReq, VOMPSemEnv *semEnv)
{

}
//Not implemented yet -- placeholder for ownership-transfer semantics
// (see the msg-locs ownership TODO in handleSendType).
void
handleTransferOut( VOMPSemReq *semReq, VOMPSemEnv *semEnv)
{

}
424 /*
425 */
426 void
427 handleMalloc( VOMPSemReq *semReq, SlaveVP *requestingPr, VOMPSemEnv *semEnv )
428 { void *ptr;
430 DEBUG__printf1(dbgRqstHdlr,"Malloc request from processor %d",requestingPr->slaveID)
432 ptr = VMS_PI__malloc( semReq->sizeToMalloc );
433 requestingPr->dataRetFromReq = ptr;
434 resume_slaveVP( requestingPr, semEnv );
435 }
437 /*
438 */
439 void
440 handleFree( VOMPSemReq *semReq, SlaveVP *requestingPr, VOMPSemEnv *semEnv )
441 {
442 DEBUG__printf1(dbgRqstHdlr,"Free request from processor %d",requestingPr->slaveID)
443 VMS_PI__free( semReq->ptrToFree );
444 resume_slaveVP( requestingPr, semEnv );
445 }
448 //===========================================================================
449 //
450 /*Uses ID as index into array of flags. If flag already set, resumes from
451 * end-label. Else, sets flag and resumes normally.
452 */
453 void inline
454 handleStartSingleton_helper( VOMPSingleton *singleton, SlaveVP *reqstingPr,
455 VOMPSemEnv *semEnv )
456 {
457 if( singleton->hasFinished )
458 { //the code that sets the flag to true first sets the end instr addr
459 reqstingPr->dataRetFromReq = singleton->endInstrAddr;
460 resume_slaveVP( reqstingPr, semEnv );
461 return;
462 }
463 else if( singleton->hasBeenStarted )
464 { //singleton is in-progress in a diff slave, so wait for it to finish
465 writePrivQ(reqstingPr, singleton->waitQ );
466 return;
467 }
468 else
469 { //hasn't been started, so this is the first attempt at the singleton
470 singleton->hasBeenStarted = TRUE;
471 reqstingPr->dataRetFromReq = 0x0;
472 resume_slaveVP( reqstingPr, semEnv );
473 return;
474 }
475 }
476 void inline
477 handleStartFnSingleton( VOMPSemReq *semReq, SlaveVP *requestingPr,
478 VOMPSemEnv *semEnv )
479 { VOMPSingleton *singleton;
480 DEBUG__printf1(dbgRqstHdlr,"StartFnSingleton request from processor %d",requestingPr->slaveID)
482 singleton = &(semEnv->fnSingletons[ semReq->singletonID ]);
483 handleStartSingleton_helper( singleton, requestingPr, semEnv );
484 }
485 void inline
486 handleStartDataSingleton( VOMPSemReq *semReq, SlaveVP *requestingPr,
487 VOMPSemEnv *semEnv )
488 { VOMPSingleton *singleton;
490 DEBUG__printf1(dbgRqstHdlr,"StartDataSingleton request from processor %d",requestingPr->slaveID)
491 if( *(semReq->singletonPtrAddr) == NULL )
492 { singleton = VMS_PI__malloc( sizeof(VOMPSingleton) );
493 singleton->waitQ = makeVMSQ();
494 singleton->endInstrAddr = 0x0;
495 singleton->hasBeenStarted = FALSE;
496 singleton->hasFinished = FALSE;
497 *(semReq->singletonPtrAddr) = singleton;
498 }
499 else
500 singleton = *(semReq->singletonPtrAddr);
501 handleStartSingleton_helper( singleton, requestingPr, semEnv );
502 }
505 void inline
506 handleEndSingleton_helper( VOMPSingleton *singleton, SlaveVP *requestingPr,
507 VOMPSemEnv *semEnv )
508 { PrivQueueStruc *waitQ;
509 int32 numWaiting, i;
510 SlaveVP *resumingPr;
512 if( singleton->hasFinished )
513 { //by definition, only one slave should ever be able to run end singleton
514 // so if this is true, is an error
515 ERROR1( "singleton code ran twice", requestingPr );
516 }
518 singleton->hasFinished = TRUE;
519 waitQ = singleton->waitQ;
520 numWaiting = numInPrivQ( waitQ );
521 for( i = 0; i < numWaiting; i++ )
522 { //they will resume inside start singleton, then jmp to end singleton
523 resumingPr = readPrivQ( waitQ );
524 resumingPr->dataRetFromReq = singleton->endInstrAddr;
525 resume_slaveVP( resumingPr, semEnv );
526 }
528 resume_slaveVP( requestingPr, semEnv );
530 }
531 void inline
532 handleEndFnSingleton( VOMPSemReq *semReq, SlaveVP *requestingPr,
533 VOMPSemEnv *semEnv )
534 {
535 VOMPSingleton *singleton;
537 DEBUG__printf1(dbgRqstHdlr,"EndFnSingleton request from processor %d",requestingPr->slaveID)
539 singleton = &(semEnv->fnSingletons[ semReq->singletonID ]);
540 handleEndSingleton_helper( singleton, requestingPr, semEnv );
541 }
542 void inline
543 handleEndDataSingleton( VOMPSemReq *semReq, SlaveVP *requestingPr,
544 VOMPSemEnv *semEnv )
545 {
546 VOMPSingleton *singleton;
548 DEBUG__printf1(dbgRqstHdlr,"EndDataSingleton request from processor %d",requestingPr->slaveID)
550 singleton = *(semReq->singletonPtrAddr);
551 handleEndSingleton_helper( singleton, requestingPr, semEnv );
552 }
555 /*This executes the function in the masterVP, take the function
556 * pointer out of the request and call it, then resume the VP.
557 */
558 void
559 handleAtomic( VOMPSemReq *semReq, SlaveVP *requestingPr, VOMPSemEnv *semEnv )
560 {
561 DEBUG__printf1(dbgRqstHdlr,"Atomic request from processor %d",requestingPr->slaveID)
562 semReq->fnToExecInMaster( semReq->dataForFn );
563 resume_slaveVP( requestingPr, semEnv );
564 }
566 /*First, it looks at the VP's semantic data, to see the highest transactionID
567 * that VP
568 * already has entered. If the current ID is not larger, it throws an
569 * exception stating a bug in the code.
570 *Otherwise it puts the current ID
571 * there, and adds the ID to a linked list of IDs entered -- the list is
572 * used to check that exits are properly ordered.
573 *Next it is uses transactionID as index into an array of transaction
574 * structures.
575 *If the "VP_currently_executing" field is non-null, then put requesting VP
576 * into queue in the struct. (At some point a holder will request
577 * end-transaction, which will take this VP from the queue and resume it.)
578 *If NULL, then write requesting into the field and resume.
579 */
void
handleTransStart( VOMPSemReq *semReq, SlaveVP *requestingPr,
                  VOMPSemEnv *semEnv )
 { VOMPSemData *semData;
   TransListElem *nextTransElem;

   DEBUG__printf1(dbgRqstHdlr,"TransStart request from processor %d",requestingPr->slaveID)

   //check ordering of entering transactions is correct
   //NOTE(review): only strictly smaller IDs are rejected, so re-entering
   // the same transID passes this check even though the comment above the
   // function says the current ID must be larger -- confirm intent
   semData = requestingPr->semanticData;
   if( semData->highestTransEntered > semReq->transID )
    { //throw VMS exception, which shuts down VMS.
      VMS_PI__throw_exception( "transID smaller than prev", requestingPr, NULL);
    }
   //add this trans ID to the list of transactions entered -- check when
   // end a transaction (list elem is freed when the transaction ends)
   semData->highestTransEntered = semReq->transID;
   nextTransElem = VMS_PI__malloc( sizeof(TransListElem) );
   nextTransElem->transID = semReq->transID;
   nextTransElem->nextTrans = semData->lastTransEntered;
   semData->lastTransEntered = nextTransElem;

   //get the structure for this transaction ID
   VOMPTrans *
   transStruc = &(semEnv->transactionStrucs[ semReq->transID ]);

   if( transStruc->VPCurrentlyExecuting == NULL )
    { //transaction is free -- this VP takes ownership and keeps running
      transStruc->VPCurrentlyExecuting = requestingPr;
      resume_slaveVP( requestingPr, semEnv );
    }
   else
    { //note, might make future things cleaner if save request with VP and
      // add this trans ID to the linked list when gets out of queue.
      // but don't need for now, and lazy..
      writePrivQ( requestingPr, transStruc->waitingVPQ );
    }
 }
620 /*Use the trans ID to get the transaction structure from the array.
621 *Look at VP_currently_executing to be sure it's same as requesting VP.
622 * If different, throw an exception, stating there's a bug in the code.
623 *Next, take the first element off the list of entered transactions.
624 * Check to be sure the ending transaction is the same ID as the next on
625 * the list. If not, incorrectly nested so throw an exception.
626 *
627 *Next, get from the queue in the structure.
628 *If it's empty, set VP_currently_executing field to NULL and resume
629 * requesting VP.
630 *If get somethine, set VP_currently_executing to the VP from the queue, then
631 * resume both.
632 */
633 void
634 handleTransEnd(VOMPSemReq *semReq, SlaveVP *requestingPr, VOMPSemEnv *semEnv)
635 { VOMPSemData *semData;
636 SlaveVP *waitingPr;
637 VOMPTrans *transStruc;
638 TransListElem *lastTrans;
640 DEBUG__printf1(dbgRqstHdlr,"TransEnd request from processor %d",requestingPr->slaveID)
642 transStruc = &(semEnv->transactionStrucs[ semReq->transID ]);
644 //make sure transaction ended in same VP as started it.
645 if( transStruc->VPCurrentlyExecuting != requestingPr )
646 {
647 VMS_PI__throw_exception( "trans ended in diff VP", requestingPr, NULL );
648 }
650 //make sure nesting is correct -- last ID entered should == this ID
651 semData = requestingPr->semanticData;
652 lastTrans = semData->lastTransEntered;
653 if( lastTrans->transID != semReq->transID )
654 {
655 VMS_PI__throw_exception( "trans incorrectly nested", requestingPr, NULL );
656 }
658 semData->lastTransEntered = semData->lastTransEntered->nextTrans;
661 waitingPr = readPrivQ( transStruc->waitingVPQ );
662 transStruc->VPCurrentlyExecuting = waitingPr;
664 if( waitingPr != NULL )
665 resume_slaveVP( waitingPr, semEnv );
667 resume_slaveVP( requestingPr, semEnv );
668 }