VCilk_PluginFns.c @ 9:5131f941f42c

update to newer VMS var names
author: Nina Engelhardt <nengel@mailbox.tu-berlin.de>
date:   Tue, 09 Jul 2013 13:52:57 +0200
parents: f0ec8652cbf4
/*
 * Copyright 2010 OpenSourceCodeStewardshipFoundation
 *
 * Licensed under BSD
 */

#include <stdio.h>
#include <stdlib.h>

#include "Queue_impl/PrivateQueue.h"
#include "VCilk.h"
//===========================================================================
inline void
handleSync( SlaveVP *requestingPr, VCilkSemEnv *semEnv );

inline void
handleMalloc( VCilkSemReq *semReq, SlaveVP *requestingPr,
              VCilkSemEnv *semEnv );

inline void
handleFree( VCilkSemReq *semReq, SlaveVP *requestingPr,
            VCilkSemEnv *semEnv );

inline void
handleDissipate( SlaveVP *requestingPr, VCilkSemEnv *semEnv );

inline void
handleSpawn( VMSReqst *req, SlaveVP *requestingPr, VCilkSemEnv *semEnv );

inline void
dispatchSemReq( VMSReqst *req, SlaveVP *requestingPr, VCilkSemEnv *semEnv );

inline void
handleTransEnd( VCilkSemReq *semReq, SlaveVP *requestingPr,
                VCilkSemEnv *semEnv );

inline void
handleTransStart( VCilkSemReq *semReq, SlaveVP *requestingPr,
                  VCilkSemEnv *semEnv );

inline void
handleAtomic( VCilkSemReq *semReq, SlaveVP *requestingPr,
              VCilkSemEnv *semEnv );

inline void
handleStartFnSingleton( VCilkSemReq *semReq, SlaveVP *requestingPr,
                        VCilkSemEnv *semEnv );

inline void
handleEndFnSingleton( VCilkSemReq *semReq, SlaveVP *requestingPr,
                      VCilkSemEnv *semEnv );

inline void
handleStartDataSingleton( VCilkSemReq *semReq, SlaveVP *requestingPr,
                          VCilkSemEnv *semEnv );

inline void
handleEndDataSingleton( VCilkSemReq *semReq, SlaveVP *requestingPr,
                        VCilkSemEnv *semEnv );

inline void
resume_procr( SlaveVP *procr, VCilkSemEnv *semEnv );

//===========================================================================
//============================== Scheduler ==================================
//
/*For VCilk, scheduling a slave simply takes the next ready slave off the
 * ready-to-go queue belonging to the core the slot is on.
 *If the ready-to-go queue is empty, then there is nothing to schedule into
 * that slot -- return NULL to let the Master loop know scheduling that
 * slave failed.
 */
SlaveVP *
VCilk__schedule_virt_procr( void *_semEnv, AnimSlot *slot )
 { SlaveVP     *schedPr;
   VCilkSemEnv *semEnv;
   int          coreNum = slot->coreSlotIsOn;

   semEnv  = (VCilkSemEnv *)_semEnv;

   schedPr = readPrivQ( semEnv->readyVPQs[coreNum] );
   //Note: using a non-blocking queue -- it returns NULL if the queue is empty

   return( schedPr );
 }
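
/* A minimal sketch of how a master loop might drive this scheduler -- purely
 * illustrative; animate_in_slot and the loop structure are assumptions for
 * illustration, not part of the VMS API:
 *
 *   SlaveVP *pr = VCilk__schedule_virt_procr( semEnv, slot );
 *   if( pr != NULL )
 *      animate_in_slot( pr, slot ); //hypothetical -- VMS animates the slave
 *   //else: this core's ready queue was empty -- retry the slot later
 */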
//=========================== Request Handler =============================
//
/*Pulls each request queued on the requesting slave and dispatches it.
 *Semantic requests go to dispatchSemReq; a create request spawns a new
 * Processor, with the function to run in the Processor, and initial data;
 * dissipate retires the requesting slave; and VMS-semantic requests are
 * handed back to VMS.
 */
void
VCilk__Request_Handler( SlaveVP *requestingPr, void *_semEnv )
 { VCilkSemEnv *semEnv;
   VMSReqst    *req;

   semEnv = (VCilkSemEnv *)_semEnv;

   req = VMS_PI__take_next_request_out_of( requestingPr );

   while( req != NULL )
    {
      switch( req->reqType )
       { case semantic:    dispatchSemReq( req, requestingPr, semEnv );
            break;
         case createReq:   //create request has to come as a VMS request,
                           // to allow MasterLoop to do stuff before it gets
                           // here, and maybe also stuff after all requests
                           // are done -- however, can still attach semantic
                           // req data to the req.
            handleSpawn( req, requestingPr, semEnv );
            break;
         case dissipate:   handleDissipate( requestingPr, semEnv );
            //dissipate frees the slave's state, so must not touch its
            // request queue again -- return immediately
            return;
         case VMSSemantic: VMS_PI__handle_VMSSemReq( req, requestingPr, semEnv,
                                                     &resume_procr );
            break;
         default:
            break;
       }

      req = VMS_PI__take_next_request_out_of( requestingPr );
    } //while( req != NULL )
 }
inline void
dispatchSemReq( VMSReqst *req, SlaveVP *reqPr, VCilkSemEnv *semEnv )
 { VCilkSemReq *semReq;

   semReq = VMS_PI__take_sem_reqst_from( req );
   if( semReq == NULL ) return;

   switch( semReq->reqType )
    {
      case syncReq:             handleSync( reqPr, semEnv );
         break;
      case mallocReq:           handleMalloc( semReq, reqPr, semEnv );
         break;
      case freeReq:             handleFree( semReq, reqPr, semEnv );
         break;
      case singleton_fn_start:  handleStartFnSingleton( semReq, reqPr, semEnv );
         break;
      case singleton_fn_end:    handleEndFnSingleton( semReq, reqPr, semEnv );
         break;
      case singleton_data_start:handleStartDataSingleton( semReq, reqPr, semEnv );
         break;
      case singleton_data_end:  handleEndDataSingleton( semReq, reqPr, semEnv );
         break;
      case atomic:              handleAtomic( semReq, reqPr, semEnv );
         break;
      case trans_start:         handleTransStart( semReq, reqPr, semEnv );
         break;
      case trans_end:           handleTransEnd( semReq, reqPr, semEnv );
         break;
    }
   //NOTE: semantic request data structs are allocated on the stack in the
   // VCilk Lib calls
 }
//=========================== Request Handlers ==============================
inline void
resume_procr( SlaveVP *procr, VCilkSemEnv *semEnv )
 {
   writePrivQ( procr, semEnv->readyVPQs[ procr->coreAnimatedBy ] );
 }
/* Check whether the count of live children is zero.
 * If yes, then resume.
 * If no, then set the sync-pending flag -- the last child to dissipate will
 * do the resume (see handleDissipate).
 */
inline void
handleSync( SlaveVP *requestingPr, VCilkSemEnv *semEnv )
 {
   //Meas_startSync;
   if( ((VCilkSemData *)(requestingPr->semanticData))->numLiveChildren == 0 )
    { //no live children to wait for
      resume_procr( requestingPr, semEnv );
    }
   else
    {
      ((VCilkSemData *)(requestingPr->semanticData))->syncPending = TRUE;
    }
   //Meas_endSync;
 }
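
/* Toy sketch of the sync handshake this handler implements together with
 * handleDissipate below -- standalone and hypothetical (plain ints instead
 * of VCilkSemData, resume() standing in for resume_procr):
 *
 *   typedef struct { int numLiveChildren; int syncPending; } Toy;
 *
 *   void toySync( Toy *t )      { if( t->numLiveChildren == 0 ) resume();
 *                                 else t->syncPending = 1; } //park parent
 *   void toyDissipate( Toy *t ) { if( --t->numLiveChildren == 0 &&
 *                                     t->syncPending )
 *                                  { t->syncPending = 0; resume(); } }
 *
 * The parent is only parked when children remain, and the last child to
 * dissipate is the one that un-parks it.
 */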
/*Allocates with the VMS allocator, hands the pointer back to the requester
 * through dataRetFromReq, then resumes it.
 */
inline void
handleMalloc( VCilkSemReq *semReq, SlaveVP *requestingPr,
              VCilkSemEnv *semEnv )
 { void *ptr;

   ptr = VMS_PI__malloc( semReq->sizeToMalloc );
   requestingPr->dataRetFromReq = ptr;
   resume_procr( requestingPr, semEnv );
 }
/*Frees the pointer with the VMS allocator, then resumes the requester.
 */
inline void
handleFree( VCilkSemReq *semReq, SlaveVP *requestingPr,
            VCilkSemEnv *semEnv )
 {
   VMS_PI__free( semReq->ptrToFree );
   resume_procr( requestingPr, semEnv );
 }
//============================== VMS requests ===============================
/*Re-used in the entry-point fn.
 */
inline SlaveVP *
VCilk__create_procr_helper( TopLevelFnPtr fnPtr, void *initData,
     SlaveVP *requestingPr, VCilkSemEnv *semEnv, int32 coreToScheduleOnto )
 { SlaveVP      *newPr;
   VCilkSemData *semData;

   //This is running in the master, so use the internal version
   newPr = VMS_PI__create_slaveVP( fnPtr, initData );

   semData = VMS_PI__malloc( sizeof(VCilkSemData) );

   semData->numLiveChildren     = 0;
   semData->parentPr            = requestingPr;
   semData->syncPending         = FALSE;

   semData->highestTransEntered = -1;
   semData->lastTransEntered    = NULL;

   newPr->semanticData = semData;

   /* Increase the number of live children of the requester.
    */
   if( requestingPr != NULL ) //NULL when creating the seed procr
      ((VCilkSemData *)(requestingPr->semanticData))->numLiveChildren += 1;

   semEnv->numVirtPr += 1;

   //=================== Assign new processor to a core =====================
   #ifdef SEQUENTIAL
   newPr->coreAnimatedBy = 0;

   #else

   if( coreToScheduleOnto < 0 || coreToScheduleOnto >= NUM_CORES )
    { //out-of-range, so round-robin assignment
      newPr->coreAnimatedBy = semEnv->nextCoreToGetNewPr;

      if( semEnv->nextCoreToGetNewPr >= NUM_CORES - 1 )
         semEnv->nextCoreToGetNewPr  = 0;
      else
         semEnv->nextCoreToGetNewPr += 1;
    }
   else //core num in-range, so use it
    { newPr->coreAnimatedBy = coreToScheduleOnto;
    }
   #endif
   //========================================================================

   return newPr;
 }
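
/* Worked example of the round-robin placement above, assuming NUM_CORES == 4
 * and nextCoreToGetNewPr starting at 0: four spawns with an out-of-range
 * coreToScheduleOnto (e.g. -1) land on cores 0, 1, 2, 3, after which
 * nextCoreToGetNewPr wraps back to 0; a spawn with coreToScheduleOnto == 2
 * lands on core 2 and leaves the round-robin counter untouched.
 */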
inline void
handleSpawn( VMSReqst *req, SlaveVP *requestingPr, VCilkSemEnv *semEnv )
 { VCilkSemReq *semReq;
   SlaveVP     *newPr;

   //Meas_startSpawn;
   semReq = VMS_PI__take_sem_reqst_from( req );

   newPr = VCilk__create_procr_helper( semReq->fnPtr, semReq->initData,
                             requestingPr, semEnv, semReq->coreToSpawnOnto );

   //the caller needs a ptr to the created processor returned to it
   requestingPr->dataRetFromReq = newPr;

   resume_procr( newPr,        semEnv );
   resume_procr( requestingPr, semEnv );
   //Meas_endSpawn;
 }
/*Get the parentVP & remove the dissipator from the parent's live children.
 *If this was the last live child, check the "sync pending" flag
 *-- if set, then resume the parentVP.
 */
inline void
handleDissipate( SlaveVP *requestingPr, VCilkSemEnv *semEnv )
 {
   SlaveVP *parentPr =
      ((VCilkSemData *)(requestingPr->semanticData))->parentPr;

   if( parentPr == NULL ) //means this is the seed processor being dissipated
    { //Just act normally, except don't deal with a parent
      // VMS__Free is implemented to ignore requests to free data from
      // outside VMS, so all this processor's non-VMS allocated data will
      // remain and be cleaned up outside
    }
   else
    {
      ((VCilkSemData *)(parentPr->semanticData))->numLiveChildren -= 1;
      if( ((VCilkSemData *)(parentPr->semanticData))->numLiveChildren <= 0 )
       { //this was the last live child of the parent
         if( ((VCilkSemData *)(parentPr->semanticData))->syncPending == TRUE )
          { //was waiting for the last child to dissipate, so resume it
            ((VCilkSemData *)(parentPr->semanticData))->syncPending = FALSE;
            resume_procr( parentPr, semEnv );
          }
       }
    }

   VMS_PI__free( requestingPr->semanticData );

   //Now do the normal dissipate

   //call VMS to free_all AppVP state -- stack and so on
   VMS_PI__dissipate_slaveVP( requestingPr );

   semEnv->numVirtPr -= 1;
   if( semEnv->numVirtPr == 0 )
    { //no more work, so shutdown
      VMS_SS__shutdown();
    }
 }
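
/* Hypothetical trace of the shutdown condition, with a seed processor that
 * spawns two children (numVirtPr counts every live slave):
 *
 *   create seed      -> numVirtPr == 1
 *   seed spawns A, B -> numVirtPr == 3, seed's numLiveChildren == 2
 *   A dissipates     -> numVirtPr == 2, numLiveChildren == 1
 *   B dissipates     -> numVirtPr == 1, numLiveChildren == 0,
 *                       seed resumed if its syncPending was set
 *   seed dissipates  -> numVirtPr == 0, VMS_SS__shutdown() runs
 */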
//=================== Singletons, Atomic & Transactions =====================
//
/*Uses the ID as an index into an array of singleton structs. If the finished
 * flag is already set, resumes from the end-label. If another slave has
 * started but not finished the singleton, parks the requester on the wait
 * queue. Else, sets the started flag and resumes normally.
 */
inline void
handleStartSingleton_helper( VCilkSingleton *singleton, SlaveVP *requestingPr,
                             VCilkSemEnv *semEnv )
 {
   if( singleton->hasFinished )
    { //the code that sets the flag to true first sets the end instr addr
      requestingPr->dataRetFromReq = singleton->endInstrAddr;
      resume_procr( requestingPr, semEnv );
      return;
    }
   else if( singleton->hasBeenStarted )
    { //singleton is in-progress in a diff slave, so wait for it to finish
      writePrivQ( requestingPr, singleton->waitQ );
      return;
    }
   else
    { //hasn't been started, so this is the first attempt at the singleton
      singleton->hasBeenStarted = TRUE;
      requestingPr->dataRetFromReq = 0x0;
      resume_procr( requestingPr, semEnv );
      return;
    }
 }
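
/* Hypothetical trace of the three singleton states, with slaves A, B, C
 * hitting the same singleton:
 *
 *   A arrives first : !hasBeenStarted -> flag set, A resumed to run the body
 *   B arrives during: hasBeenStarted && !hasFinished -> B parked on waitQ
 *   A hits the end  : hasFinished set (see handleEndSingleton_helper), B
 *                     resumed with dataRetFromReq = endInstrAddr, so it
 *                     jumps past the body
 *   C arrives after : hasFinished -> C resumed straight at endInstrAddr
 */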
inline void
handleStartFnSingleton( VCilkSemReq *semReq, SlaveVP *requestingPr,
                        VCilkSemEnv *semEnv )
 { VCilkSingleton *singleton;

   singleton = &(semEnv->fnSingletons[ semReq->singletonID ]);
   handleStartSingleton_helper( singleton, requestingPr, semEnv );
 }

inline void
handleStartDataSingleton( VCilkSemReq *semReq, SlaveVP *requestingPr,
                          VCilkSemEnv *semEnv )
 { VCilkSingleton *singleton;

   if( *(semReq->singletonPtrAddr) == NULL )
    { singleton                 = VMS_PI__malloc( sizeof(VCilkSingleton) );
      singleton->waitQ          = makePrivQ();
      singleton->endInstrAddr   = 0x0;
      singleton->hasBeenStarted = FALSE;
      singleton->hasFinished    = FALSE;
      *(semReq->singletonPtrAddr) = singleton;
    }
   else
      singleton = *(semReq->singletonPtrAddr);

   handleStartSingleton_helper( singleton, requestingPr, semEnv );
 }
inline void
handleEndSingleton_helper( VCilkSingleton *singleton, SlaveVP *requestingPr,
                           VCilkSemEnv *semEnv )
 { PrivQueueStruc *waitQ;
   int32           numWaiting, i;
   SlaveVP        *resumingPr;

   if( singleton->hasFinished )
    { //by definition, only one slave should ever be able to run end
      // singleton, so if this is true, it's an error
      //VMS_PI__throw_exception( "singleton code ran twice", requestingPr, NULL );
    }

   singleton->hasFinished = TRUE;
   waitQ      = singleton->waitQ;
   numWaiting = numInPrivQ( waitQ );
   for( i = 0; i < numWaiting; i++ )
    { //they will resume inside start singleton, then jmp to end singleton
      resumingPr = readPrivQ( waitQ );
      resumingPr->dataRetFromReq = singleton->endInstrAddr;
      resume_procr( resumingPr, semEnv );
    }

   resume_procr( requestingPr, semEnv );
 }
inline void
handleEndFnSingleton( VCilkSemReq *semReq, SlaveVP *requestingPr,
                      VCilkSemEnv *semEnv )
 { VCilkSingleton *singleton;

   singleton = &(semEnv->fnSingletons[ semReq->singletonID ]);
   handleEndSingleton_helper( singleton, requestingPr, semEnv );
 }

inline void
handleEndDataSingleton( VCilkSemReq *semReq, SlaveVP *requestingPr,
                        VCilkSemEnv *semEnv )
 { VCilkSingleton *singleton;

   singleton = *(semReq->singletonPtrAddr);
   handleEndSingleton_helper( singleton, requestingPr, semEnv );
 }
/*Executes a function in the masterVP: take the function pointer out of the
 * request, call it, then resume the VP.
 */
inline void
handleAtomic( VCilkSemReq *semReq, SlaveVP *requestingPr,
              VCilkSemEnv *semEnv )
 {
   semReq->fnToExecInMaster( semReq->dataForFn );
   resume_procr( requestingPr, semEnv );
 }
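
/* Hedged usage sketch: the comment above says the function runs in the
 * masterVP; assuming requests are handled there one at a time, the function
 * needs no lock of its own. All names below are invented for illustration:
 *
 *   int32 sharedTotal = 0;
 *   void addToTotal( void *data ) { sharedTotal += *(int32 *)data; }
 *
 *   //a slave would send an atomic request with
 *   // fnToExecInMaster = &addToTotal and dataForFn = &itsLocalInt32
 */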
/*First, it looks at the VP's semantic data to see the highest transactionID
 * the VP has already entered. If the current ID is not larger, it throws an
 * exception stating there's a bug in the code.
 *Otherwise it puts the current ID there, and adds the ID to a linked list of
 * IDs entered -- the list is used to check that exits are properly ordered.
 *Next it uses the transactionID as an index into an array of transaction
 * structures.
 *If the "VPCurrentlyExecuting" field is non-null, then put the requesting VP
 * into the queue in the struct. (At some point a holder will request
 * end-transaction, which will take this VP from the queue and resume it.)
 *If NULL, then write the requester into the field and resume.
 */
inline void
handleTransStart( VCilkSemReq *semReq, SlaveVP *requestingPr,
                  VCilkSemEnv *semEnv )
 { VCilkSemData  *semData;
   TransListElem *nextTransElem;

   //check that the ordering of entering transactions is correct
   semData = requestingPr->semanticData;
   if( semData->highestTransEntered > semReq->transID )
    { //throw VMS exception, which shuts down VMS.
      VMS_PI__throw_exception( "transID smaller than prev", requestingPr, NULL );
    }

   //add this trans ID to the list of transactions entered -- checked when
   // a transaction ends
   semData->highestTransEntered = semReq->transID;
   nextTransElem = VMS_PI__malloc( sizeof(TransListElem) );
   nextTransElem->transID    = semReq->transID;
   nextTransElem->nextTrans  = semData->lastTransEntered;
   semData->lastTransEntered = nextTransElem;

   //get the structure for this transaction ID
   VCilkTrans *transStruc = &(semEnv->transactionStrucs[ semReq->transID ]);

   if( transStruc->VPCurrentlyExecuting == NULL )
    {
      transStruc->VPCurrentlyExecuting = requestingPr;
      resume_procr( requestingPr, semEnv );
    }
   else
    { //note, might make future things cleaner if save request with VP and
      // add this trans ID to the linked list when it gets out of the queue.
      // but don't need that for now, and lazy..
      writePrivQ( requestingPr, transStruc->waitingVPQ );
    }
 }
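
/* Worked example of the nesting bookkeeping: a VP that enters trans 3 and
 * then trans 7 ends up with highestTransEntered == 7 and the entered-list
 *
 *   lastTransEntered -> [7] -> [3] -> NULL
 *
 * A later request to enter trans 5 throws "transID smaller than prev",
 * since 5 < highestTransEntered.
 */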
/*Uses the trans ID to get the transaction structure from the array.
 *Looks at VPCurrentlyExecuting to be sure it's the same as the requesting VP.
 * If different, throws an exception stating there's a bug in the code.
 *Next, takes the first element off the list of entered transactions.
 * Checks that the ending transaction has the same ID as the next on the
 * list. If not, the transactions are incorrectly nested, so throws an
 * exception.
 *
 *Next, reads from the queue in the structure.
 *If it's empty, sets the VPCurrentlyExecuting field to NULL and resumes the
 * requesting VP.
 *If it gets something, sets VPCurrentlyExecuting to the VP from the queue,
 * then resumes both.
 */
inline void
handleTransEnd( VCilkSemReq *semReq, SlaveVP *requestingPr,
                VCilkSemEnv *semEnv )
 { VCilkSemData  *semData;
   SlaveVP       *waitingPr;
   VCilkTrans    *transStruc;
   TransListElem *lastTrans;

   transStruc = &(semEnv->transactionStrucs[ semReq->transID ]);

   //make sure the transaction ended in the same VP as started it
   if( transStruc->VPCurrentlyExecuting != requestingPr )
    {
      VMS_PI__throw_exception( "trans ended in diff VP", requestingPr, NULL );
    }

   //make sure nesting is correct -- last ID entered should == this ID
   semData   = requestingPr->semanticData;
   lastTrans = semData->lastTransEntered;
   if( lastTrans->transID != semReq->transID )
    {
      VMS_PI__throw_exception( "trans incorrectly nested", requestingPr, NULL );
    }

   semData->lastTransEntered = semData->lastTransEntered->nextTrans;

   waitingPr = readPrivQ( transStruc->waitingVPQ );
   transStruc->VPCurrentlyExecuting = waitingPr;

   if( waitingPr != NULL )
      resume_procr( waitingPr, semEnv );

   resume_procr( requestingPr, semEnv );
 }
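
/* Hypothetical hand-off trace for trans 7, held by VP A with B and C parked
 * on its waitingVPQ:
 *
 *   A ends 7 -> B popped, VPCurrentlyExecuting = B, A and B both resumed
 *   B ends 7 -> C popped, VPCurrentlyExecuting = C, B and C both resumed
 *   C ends 7 -> queue empty, VPCurrentlyExecuting = NULL, C resumed alone
 */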