Mercurial > cgi-bin > hgwebdir.cgi > VMS > VMS_Implementations > SSR_impls > SSR__MC_shared_impl
view SSR.c @ 83:a4bc9d8a8f11
Changed to __brch__Dev__data_tracking_assign
| author | Sean Halle <seanhalle@yahoo.com> |
|---|---|
| date | Fri, 28 Dec 2012 09:38:49 -0800 |
| parents | 494703aecda2 |
| children | ce07f1a42ddf |
line source
1 /*
2 * Copyright 2010 OpenSourceCodeStewardshipFoundation
3 *
4 * Licensed under BSD
5 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <malloc.h>

#include "Queue_impl/PrivateQueue.h"
#include "Hash_impl/PrivateHash.h"

#include "SSR.h"
#include "SSR_Counter_Recording.h"
17 //==========================================================================
/*Forward declarations -- SSR__init brings up VMS, then SSR__init_Helper
 * builds the SSR semantic environment on top of it (definitions below).
 */
void
SSR__init();

void
SSR__init_Helper();
24 //==========================================================================
27 /*TODO: Q: dealing with library f()s and DKU vs WT vs FoR
28 * (still want to do FoR, with time-lines as syntax, could be super cool)
29 * A: thinking pin the coreCtlrs for all of BLIS -- let Master arbitrate
30 * among library, DKU, WT, FoR -- all the patterns in terms of virtual
31 * processors (or equivalently work-units), so Master picks which virt procr
32 * from which portions of app (DKU, WT, FoR) onto which anim slots
33 *Might even do hierarchy of masters -- group of anim slots for each core
34 * has its own master, that keeps generated work local
35 * single-reader-single-writer sync everywhere -- no atomic primitives
36 * Might have the different assigners talk to each other, to negotiate
37 * larger-grain sharing of resources, according to predicted critical
38 * path, and expansion of work
39 */
43 //===========================================================================
46 /*These are the library functions *called in the application*
47 *
48 *There's a pattern for the outside sequential code to interact with the
49 * VMS_HW code.
50 *The VMS_HW system is inside a boundary.. every SSR system is in its
51 * own directory that contains the functions for each of the processor types.
52 * One of the processor types is the "seed" processor that starts the
53 * cascade of creating all the processors that do the work.
54 *So, in the directory is a file called "EntryPoint.c" that contains the
55 * function, named appropriately to the work performed, that the outside
56 * sequential code calls. This function follows a pattern:
57 *1) it calls SSR__init()
58 *2) it creates the initial data for the seed processor, which is passed
59 * in to the function
60 *3) it creates the seed SSR processor, with the data to start it with.
61 *4) it calls startSSRThenWaitUntilWorkDone
62 *5) it gets the returnValue from the transfer struc and returns that
63 * from the function
64 *
65 *For now, a new SSR system has to be created via SSR__init every
66 * time an entry point function is called -- later, might add letting the
67 * SSR system be created once, and let all the entry points just reuse
68 * it -- want to be as simple as possible now, and see by using what makes
69 * sense for later..
70 */
74 //===========================================================================
76 /*This is the "border crossing" function -- the thing that crosses from the
77 * outside world, into the VMS_HW world. It initializes and starts up the
78 * VMS system, then creates one processor from the specified function and
79 * puts it into the readyQ. From that point, that one function is resp.
80 * for creating all the other processors, that then create others, and so
81 * forth.
82 *When all the processors, including the seed, have dissipated, then this
83 * function returns. The results will have been written by side-effect via
84 * pointers read from, or written into initData.
85 *
86 *NOTE: no Threads should exist in the outside program that might touch
87 * any of the data reachable from initData passed in to here
88 */
89 void
90 SSR__create_seed_procr_and_do_work( TopLevelFnPtr fnPtr, void *initData )
91 { SSRSemEnv *semEnv;
92 SlaveVP *seedPr;
94 SSR__init(); //normal multi-thd
96 semEnv = _VMSMasterEnv->semanticEnv;
98 //SSR starts with one processor, which is put into initial environ,
99 // and which then calls create() to create more, thereby expanding work
100 seedPr = SSR__create_procr_helper( fnPtr, initData,
101 semEnv, semEnv->nextCoreToGetNewPr++ );
103 resume_slaveVP( seedPr, semEnv );
105 VMS_SS__start_the_work_then_wait_until_done(); //normal multi-thd
107 SSR__cleanup_after_shutdown();
108 }
111 int32
112 SSR__giveMinWorkUnitCycles( float32 percentOverhead )
113 {
114 return MIN_WORK_UNIT_CYCLES;
115 }
117 int32
118 SSR__giveIdealNumWorkUnits()
119 {
120 return NUM_ANIM_SLOTS * NUM_CORES;
121 }
123 int32
124 SSR__give_number_of_cores_to_schedule_onto()
125 {
126 return NUM_CORES;
127 }
129 /*For now, use TSC -- later, make these two macros with assembly that first
130 * saves jump point, and second jumps back several times to get reliable time
131 */
132 void
133 SSR__start_primitive()
134 { saveLowTimeStampCountInto( ((SSRSemEnv *)(_VMSMasterEnv->semanticEnv))->
135 primitiveStartTime );
136 }
138 /*Just quick and dirty for now -- make reliable later
139 * will want this to jump back several times -- to be sure cache is warm
140 * because don't want comm time included in calc-time measurement -- and
141 * also to throw out any "weird" values due to OS interrupt or TSC rollover
142 */
143 int32
144 SSR__end_primitive_and_give_cycles()
145 { int32 endTime, startTime;
146 //TODO: fix by repeating time-measurement
147 saveLowTimeStampCountInto( endTime );
148 startTime =((SSRSemEnv*)(_VMSMasterEnv->semanticEnv))->primitiveStartTime;
149 return (endTime - startTime);
150 }
152 //===========================================================================
154 /*Initializes all the data-structures for a SSR system -- but doesn't
155 * start it running yet!
156 *
157 *This runs in the main thread -- before VMS starts up
158 *
159 *This sets up the semantic layer over the VMS system
160 *
161 *First, calls VMS_Setup, then creates own environment, making it ready
162 * for creating the seed processor and then starting the work.
163 */
/*Initializes all the data structures for an SSR system -- but does not
 * start it running yet!
 *Runs in the main thread, before VMS starts up.
 *First brings up VMS itself, then layers the SSR semantic environment on
 * top, ready for creating the seed processor and starting the work.
 */
void
SSR__init()
 { //after this, _VMSMasterEnv (a global) is partially set up, and
   // VMS_int__malloc / VMS_int__free are available
   VMS_SS__init();

   SSR__init_Helper();
 }
175 void idle_fn(void* data, SlaveVP *animatingSlv){
176 while(1){
177 VMS_int__suspend_slaveVP_and_send_req(animatingSlv);
178 }
179 }
181 void
182 SSR__init_Helper()
183 { SSRSemEnv *semanticEnv;
184 PrivQueueStruc **readyVPQs;
185 int coreIdx, i, j;
187 //Hook up the semantic layer's plug-ins to the Master virt procr
188 _VMSMasterEnv->requestHandler = &SSR__Request_Handler;
189 _VMSMasterEnv->slaveAssigner = &SSR__assign_slaveVP_to_slot;
190 #ifdef HOLISTIC__TURN_ON_PERF_COUNTERS
191 _VMSMasterEnv->counterHandler = &SSR__counter_handler;
192 #endif
194 //create the semantic layer's environment (all its data) and add to
195 // the master environment
196 semanticEnv = VMS_int__malloc( sizeof( SSRSemEnv ) );
197 _VMSMasterEnv->semanticEnv = semanticEnv;
199 #ifdef HOLISTIC__TURN_ON_PERF_COUNTERS
200 SSR__init_counter_data_structs();
201 #endif
202 #ifdef IDLE_SLAVES
203 semanticEnv->shutdownInitiated = FALSE;
204 for(i=0;i<NUM_CORES;++i){
205 for(j=0;j<NUM_ANIM_SLOTS;++j){
206 semanticEnv->idlePr[i][j] = VMS_int__create_slaveVP(&idle_fn,NULL);
207 semanticEnv->idlePr[i][j]->coreAnimatedBy = i;
208 semanticEnv->idlePr[i][j]->typeOfVP = Idle;
209 }
210 }
211 #endif
212 #ifdef HOLISTIC__TURN_ON_OBSERVE_UCC
213 semanticEnv->unitList = makeListOfArrays(sizeof(Unit),128);
214 semanticEnv->ctlDependenciesList = makeListOfArrays(sizeof(Dependency),128);
215 semanticEnv->commDependenciesList = makeListOfArrays(sizeof(Dependency),128);
216 semanticEnv->dynDependenciesList = makeListOfArrays(sizeof(Dependency),128);
217 semanticEnv->singletonDependenciesList = makeListOfArrays(sizeof(Dependency),128);
218 semanticEnv->ntonGroupsInfo = makePrivDynArrayOfSize((void***)&(semanticEnv->ntonGroups),8);
220 semanticEnv->hwArcs = makeListOfArrays(sizeof(Dependency),128);
221 memset(semanticEnv->last_in_slot,0,sizeof(NUM_CORES * NUM_ANIM_SLOTS * sizeof(Unit)));
222 #endif
224 //create the ready queue, hash tables used for pairing send to receive
225 // and so forth
226 //TODO: add hash tables for pairing sends with receives, and
227 // initialize the data ownership system
228 readyVPQs = VMS_int__malloc( NUM_CORES * sizeof(PrivQueueStruc *) );
230 for( coreIdx = 0; coreIdx < NUM_CORES; coreIdx++ )
231 {
232 readyVPQs[ coreIdx ] = makeVMSQ();
233 }
235 semanticEnv->readyVPQs = readyVPQs;
237 semanticEnv->nextCoreToGetNewPr = 0;
238 semanticEnv->numSlaveVP = 0;
240 semanticEnv->commHashTbl = makeHashTable( 1<<16, &VMS_int__free );//start big
242 //TODO: bug -- turn these arrays into dyn arrays to eliminate limit
243 //semanticEnv->singletonHasBeenExecutedFlags = makeDynArrayInfo( );
244 //semanticEnv->transactionStrucs = makeDynArrayInfo( );
245 for( i = 0; i < NUM_STRUCS_IN_SEM_ENV; i++ )
246 {
247 semanticEnv->fnSingletons[i].endInstrAddr = NULL;
248 semanticEnv->fnSingletons[i].hasBeenStarted = FALSE;
249 semanticEnv->fnSingletons[i].hasFinished = FALSE;
250 semanticEnv->fnSingletons[i].waitQ = makeVMSQ();
251 semanticEnv->transactionStrucs[i].waitingVPQ = makeVMSQ();
252 }
254 VMSCommNode *
255 systemNode = VMS_SS__give_comm_hierarchy(); //this is read only!!
257 //Do something with the comm system here.. make own, faster, data
258 // structure that is used by assigner -- it can include info about
259 // measured miss rates, and structures that track the data..
260 //Next step would be to take a shot at a function call to put into the
261 // application that gives a "name" to collection of data consumed by
262 // a work-unit, and a name to the data produced.. along with the size
263 // each of those named collections.
264 //Then, can come up with a way to represent how much of each
265 // named data collection that resides in each of the caches. Keep that
266 // representation in the data structure that build from parsing the
267 // comm system returned from VMS.
268 }
271 /*Frees any memory allocated by SSR__init() then calls VMS_int__shutdown
272 */
273 void
274 SSR__cleanup_after_shutdown()
275 { SSRSemEnv *semanticEnv;
277 semanticEnv = _VMSMasterEnv->semanticEnv;
279 #ifdef HOLISTIC__TURN_ON_OBSERVE_UCC
280 //UCC
281 FILE* output;
282 int n;
283 char filename[255];
284 for(n=0;n<255;n++)
285 {
286 sprintf(filename, "./counters/UCC.%d",n);
287 output = fopen(filename,"r");
288 if(output)
289 {
290 fclose(output);
291 }else{
292 break;
293 }
294 }
295 if(n<255){
296 printf("Saving UCC to File: %s ...\n", filename);
297 output = fopen(filename,"w+");
298 if(output!=NULL){
299 set_dependency_file(output);
300 //fprintf(output,"digraph Dependencies {\n");
301 //set_dot_file(output);
302 //FIXME: first line still depends on counters being enabled, replace w/ unit struct!
303 //forAllInDynArrayDo(_VMSMasterEnv->counter_history_array_info, &print_dot_node_info );
304 forAllInListOfArraysDo(semanticEnv->unitList, &print_unit_to_file);
305 forAllInListOfArraysDo( semanticEnv->commDependenciesList, &print_comm_dependency_to_file );
306 forAllInListOfArraysDo( semanticEnv->ctlDependenciesList, &print_ctl_dependency_to_file );
307 forAllInDynArrayDo(semanticEnv->ntonGroupsInfo,&print_nton_to_file);
308 //fprintf(output,"}\n");
309 fflush(output);
311 } else
312 printf("Opening UCC file failed. Please check that folder \"counters\" exists in run directory and has write permission.\n");
313 } else {
314 printf("Could not open UCC file, please clean \"counters\" folder. (Must contain less than 255 files.)\n");
315 }
316 //Loop Graph
317 for(n=0;n<255;n++)
318 {
319 sprintf(filename, "./counters/LoopGraph.%d",n);
320 output = fopen(filename,"r");
321 if(output)
322 {
323 fclose(output);
324 }else{
325 break;
326 }
327 }
328 if(n<255){
329 printf("Saving LoopGraph to File: %s ...\n", filename);
330 output = fopen(filename,"w+");
331 if(output!=NULL){
332 set_dependency_file(output);
333 //fprintf(output,"digraph Dependencies {\n");
334 //set_dot_file(output);
335 //FIXME: first line still depends on counters being enabled, replace w/ unit struct!
336 //forAllInDynArrayDo(_VMSMasterEnv->counter_history_array_info, &print_dot_node_info );
337 forAllInListOfArraysDo( semanticEnv->unitList, &print_unit_to_file );
338 forAllInListOfArraysDo( semanticEnv->commDependenciesList, &print_comm_dependency_to_file );
339 forAllInListOfArraysDo( semanticEnv->ctlDependenciesList, &print_ctl_dependency_to_file );
340 forAllInListOfArraysDo( semanticEnv->dynDependenciesList, &print_dyn_dependency_to_file );
341 forAllInListOfArraysDo( semanticEnv->singletonDependenciesList, &print_singleton_dependency_to_file );
342 forAllInListOfArraysDo( semanticEnv->hwArcs, &print_hw_dependency_to_file );
343 //fprintf(output,"}\n");
344 fflush(output);
346 } else
347 printf("Opening LoopGraph file failed. Please check that folder \"counters\" exists in run directory and has write permission.\n");
348 } else {
349 printf("Could not open LoopGraph file, please clean \"counters\" folder. (Must contain less than 255 files.)\n");
350 }
353 freeListOfArrays(semanticEnv->unitList);
354 freeListOfArrays(semanticEnv->commDependenciesList);
355 freeListOfArrays(semanticEnv->ctlDependenciesList);
356 freeListOfArrays(semanticEnv->dynDependenciesList);
357 freeListOfArrays(semanticEnv->singletonDependenciesList);
358 #endif
359 #ifdef HOLISTIC__TURN_ON_PERF_COUNTERS
360 for(n=0;n<255;n++)
361 {
362 sprintf(filename, "./counters/Counters.%d.csv",n);
363 output = fopen(filename,"r");
364 if(output)
365 {
366 fclose(output);
367 }else{
368 break;
369 }
370 }
371 if(n<255){
372 printf("Saving Counter measurements to File: %s ...\n", filename);
373 output = fopen(filename,"w+");
374 if(output!=NULL){
375 set_counter_file(output);
376 int i;
377 for(i=0;i<NUM_CORES;i++){
378 forAllInListOfArraysDo( semanticEnv->counterList[i], &print_counter_events_to_file );
379 fflush(output);
380 }
382 } else
383 printf("Opening UCC file failed. Please check that folder \"counters\" exists in run directory and has write permission.\n");
384 } else {
385 printf("Could not open UCC file, please clean \"counters\" folder. (Must contain less than 255 files.)\n");
386 }
388 #endif
389 /* It's all allocated inside VMS's big chunk -- that's about to be freed, so
390 * nothing to do here
393 for( coreIdx = 0; coreIdx < NUM_CORES; coreIdx++ )
394 {
395 VMS_int__free( semanticEnv->readyVPQs[coreIdx]->startOfData );
396 VMS_int__free( semanticEnv->readyVPQs[coreIdx] );
397 }
398 VMS_int__free( semanticEnv->readyVPQs );
400 freeHashTable( semanticEnv->commHashTbl );
401 VMS_int__free( _VMSMasterEnv->semanticEnv );
402 */
403 VMS_SS__cleanup_at_end_of_shutdown();
404 }
407 //===========================================================================
409 /*
410 */
411 SlaveVP *
412 SSR__create_procr_with( TopLevelFnPtr fnPtr, void *initData,
413 SlaveVP *creatingPr )
414 { SSRSemReq reqData;
416 //the semantic request data is on the stack and disappears when this
417 // call returns -- it's guaranteed to remain in the VP's stack for as
418 // long as the VP is suspended.
419 reqData.reqType = 0; //know type because in a VMS create req
420 reqData.coreToAssignOnto = -1; //means round-robin assign
421 reqData.fnPtr = fnPtr;
422 reqData.initData = initData;
423 reqData.sendPr = creatingPr;
425 VMS_WL__send_create_slaveVP_req( &reqData, creatingPr );
427 return creatingPr->dataRetFromReq;
428 }
430 SlaveVP *
431 SSR__create_procr_with_affinity( TopLevelFnPtr fnPtr, void *initData,
432 SlaveVP *creatingPr, int32 coreToAssignOnto )
433 { SSRSemReq reqData;
435 //the semantic request data is on the stack and disappears when this
436 // call returns -- it's guaranteed to remain in the VP's stack for as
437 // long as the VP is suspended.
438 reqData.reqType = 0; //know type because in a VMS create req
439 reqData.coreToAssignOnto = coreToAssignOnto;
440 reqData.fnPtr = fnPtr;
441 reqData.initData = initData;
442 reqData.sendPr = creatingPr;
444 VMS_WL__send_create_slaveVP_req( &reqData, creatingPr );
446 return creatingPr->dataRetFromReq;
447 }
450 void
451 SSR__dissipate_procr( SlaveVP *procrToDissipate )
452 {
453 VMS_WL__send_dissipate_req( procrToDissipate );
454 }
457 //===========================================================================
459 void *
460 SSR__malloc_to( int32 sizeToMalloc, SlaveVP *owningPr )
461 { SSRSemReq reqData;
463 reqData.reqType = malloc_req;
464 reqData.sendPr = owningPr;
465 reqData.sizeToMalloc = sizeToMalloc;
467 VMS_WL__send_sem_request( &reqData, owningPr );
469 return owningPr->dataRetFromReq;
470 }
473 /*Sends request to Master, which does the work of freeing
474 */
475 void
476 SSR__free( void *ptrToFree, SlaveVP *owningPr )
477 { SSRSemReq reqData;
479 reqData.reqType = free_req;
480 reqData.sendPr = owningPr;
481 reqData.ptrToFree = ptrToFree;
483 VMS_WL__send_sem_request( &reqData, owningPr );
484 }
/*Transfers ownership of data from oldOwnerSlv to newOwnerPr.
 *
 *NOTE(review): currently a no-op stub -- the ownership system described in
 * the TODO below has not been implemented yet.
 */
void
SSR__transfer_ownership_of_from_to( void *data, SlaveVP *oldOwnerSlv,
                                    SlaveVP *newOwnerPr )
 {
   //TODO: put in the ownership system that automatically frees when no
   // owners of data left -- will need keeper for keeping data around when
   // future created processors might need it but don't exist yet
 }
/*Registers newOwnerSlv as an (additional) owner of data.
 *
 *NOTE(review): no-op stub -- awaits the ownership system.
 */
void
SSR__add_ownership_by_to( SlaveVP *newOwnerSlv, void *data )
 {

 }
/*Removes loserSlv from the owners of dataLosing.
 *
 *NOTE(review): no-op stub -- awaits the ownership system.
 */
void
SSR__remove_ownership_by_from( SlaveVP *loserSlv, void *dataLosing )
 {

 }
511 /*Causes the SSR system to remove internal ownership, so data won't be
512 * freed when SSR shuts down, and will persist in the external program.
513 *
514 *Must be called from the processor that currently owns the data.
515 *
516 *IMPL: Transferring ownership touches two different virtual processor's
517 * state -- which means it has to be done carefully -- the VMS rules for
518 * semantic layers say that a work-unit is only allowed to touch the
519 * virtual processor it is part of, and that only a single work-unit per
520 * virtual processor be assigned to a slave at a time. So, this has to
521 * modify the virtual processor that owns the work-unit that called this
522 * function, then create a request to have the other processor modified.
523 *However, in this case, the TO processor is the outside, and transfers
524 * are only allowed to be called by the giver-upper, so can mark caller of
525 * this function as no longer owner, and return -- done.
526 */
/*Releases SSR-internal ownership of data so it survives SSR shutdown and
 * persists in the external program (see the contract in the comment
 * above).
 *
 *NOTE(review): no-op stub -- awaits the ownership system.
 */
void
SSR__transfer_ownership_to_outside( void *data )
 {
   //TODO: removeAllOwnersFrom( data );
 }
534 //===========================================================================
536 void
537 SSR__send_of_type_to( SlaveVP *sendPr, void *msg, const int type,
538 SlaveVP *receivePr)
539 { SSRSemReq reqData;
541 reqData.receivePr = receivePr;
542 reqData.sendPr = sendPr;
543 reqData.reqType = send_type;
544 reqData.msgType = type;
545 reqData.msg = msg;
546 reqData.nextReqInHashEntry = NULL;
548 //On ownership -- remove inside the send and let ownership sit in limbo
549 // as a potential in an entry in the hash table, when this receive msg
550 // gets paired to a send, the ownership gets added to the receivePr --
551 // the next work-unit in the receivePr's trace will have ownership.
552 VMS_WL__send_sem_request( &reqData, sendPr );
554 //When come back from suspend, no longer own data reachable from msg
555 //TODO: release ownership here
556 }
558 void
559 SSR__send_from_to( void *msg, SlaveVP *sendPr, SlaveVP *receivePr )
560 { SSRSemReq reqData;
562 //hash on the receiver, 'cause always know it, but sometimes want to
563 // receive from anonymous sender
565 reqData.receivePr = receivePr;
566 reqData.sendPr = sendPr;
567 reqData.reqType = send_from_to;
568 reqData.msg = msg;
569 reqData.nextReqInHashEntry = NULL;
571 VMS_WL__send_sem_request( &reqData, sendPr );
572 }
575 //===========================================================================
577 void *
578 SSR__receive_any_to( SlaveVP *receivePr )
579 {
581 }
583 void *
584 SSR__receive_type_to( const int type, SlaveVP *receivePr )
585 { DEBUG__printf1(dbgRqstHdlr,"WL: receive type to: %d", receivePr->slaveID);
586 SSRSemReq reqData;
588 reqData.receivePr = receivePr;
589 reqData.reqType = receive_type;
590 reqData.msgType = type;
591 reqData.nextReqInHashEntry = NULL;
593 VMS_WL__send_sem_request( &reqData, receivePr );
595 return receivePr->dataRetFromReq;
596 }
600 /*Call this at point receiving virt pr wants in-coming data.
601 *
602 *The reason receivePr must call this is that it modifies the receivPr
603 * loc structure directly -- and the VMS rules state a virtual processor
604 * loc structure can only be modified by itself.
605 */
606 void *
607 SSR__receive_from_to( SlaveVP *sendPr, SlaveVP *receivePr )
608 { DEBUG__printf2(dbgRqstHdlr,"WL: receive from %d to: %d", sendPr->slaveID, receivePr->slaveID);
609 SSRSemReq reqData;
611 //hash on the receiver, 'cause always know it, but sometimes want to
612 // receive from anonymous sender
614 reqData.receivePr = receivePr;
615 reqData.sendPr = sendPr;
616 reqData.reqType = receive_from_to;
617 reqData.nextReqInHashEntry = NULL;
619 VMS_WL__send_sem_request( &reqData, receivePr );
621 return receivePr->dataRetFromReq;
622 }
625 //===========================================================================
626 //
627 /*A function singleton is a function whose body executes exactly once, on a
628 * single core, no matter how many times the fuction is called and no
629 * matter how many cores or the timing of cores calling it.
630 *
631 *A data singleton is a ticket attached to data. That ticket can be used
632 * to get the data through the function exactly once, no matter how many
633 * times the data is given to the function, and no matter the timing of
634 * trying to get the data through from different cores.
635 */
/*asm function declarations -- implemented in assembly.  From their uses
 * below: the first saves the caller's return address into the singleton
 * struct, the second overwrites the caller's return address on the stack
 * with the one stored in the singleton struct.
 */
void asm_save_ret_to_singleton(SSRSingleton *singletonPtrAddr);
void asm_write_ret_from_singleton(SSRSingleton *singletonPtrAddr);
641 /*Fn singleton uses ID as index into array of singleton structs held in the
642 * semantic environment.
643 */
/*Begins a function singleton (body executes exactly once system-wide; see
 * the comment block above).  singletonID indexes the fnSingletons array in
 * the semantic environment.
 *
 *Suspends to the master with a singleton_fn_start request.  On resume,
 * dataRetFromReq is 0 if this caller won the race and should run the body;
 * otherwise it is non-zero (the addr of the label in the end-singleton
 * call), and the asm call rewrites this function's return address so the
 * caller resumes at the end-singleton point, skipping the body.
 */
void
SSR__start_fn_singleton( int32 singletonID, SlaveVP *animPr )
 {
   SSRSemReq reqData;

   //
   reqData.reqType     = singleton_fn_start;
   reqData.singletonID = singletonID;

   VMS_WL__send_sem_request( &reqData, animPr );
   if( animPr->dataRetFromReq ) //will be 0 or addr of label in end singleton
    {
      SSRSemEnv *semEnv = VMS_int__give_sem_env_for( animPr );
      asm_write_ret_from_singleton(&(semEnv->fnSingletons[ singletonID]));
    }
 }
661 /*Data singleton hands addr of loc holding a pointer to a singleton struct.
662 * The start_data_singleton makes the structure and puts its addr into the
663 * location.
664 */
/*Begins a data singleton.  singletonAddr is the address of a location
 * holding a pointer to a singleton struct; the master's handler creates
 * the struct and stores its address into that location on first use.
 *
 *Fast path: if the struct already exists and hasFinished, jump straight
 * to the return-address rewrite without suspending (the goto enters the
 * if-body at the label -- legal C, intentional here).
 *Otherwise suspends with a singleton_data_start request; on resume,
 * dataRetFromReq is 0 for the winner (run the body) or non-zero, in which
 * case the asm call replaces this function's return address (at 0x4(%%ebp))
 * with the one the end-singleton call saved, skipping the body.
 */
void
SSR__start_data_singleton( SSRSingleton **singletonAddr, SlaveVP *animPr )
 {
   SSRSemReq reqData;

   if( *singletonAddr && (*singletonAddr)->hasFinished )
      goto JmpToEndSingleton;

   reqData.reqType          = singleton_data_start;
   reqData.singletonPtrAddr = singletonAddr;

   VMS_WL__send_sem_request( &reqData, animPr );
   if( animPr->dataRetFromReq ) //either 0 or end singleton's return addr
    { //Assembly code changes the return addr on the stack to the one
      // saved into the singleton by the end-singleton-fn
      //The return addr is at 0x4(%%ebp)
      JmpToEndSingleton:
      asm_write_ret_from_singleton(*singletonAddr);
    }
   //now, simply return
   //will exit either from the start singleton call or the end-singleton call
 }
688 /*Uses ID as index into array of flags. If flag already set, resumes from
689 * end-label. Else, sets flag and resumes normally.
690 *
691 *Note, this call cannot be inlined because the instr addr at the label
692 * inside is shared by all invocations of a given singleton ID.
693 */
/*Ends a function singleton.  Uses ID as index into the fnSingletons array.
 * If the singleton was already completed, resumes from the end-label;
 * else records completion and resumes normally.
 *
 *Note, this call cannot be inlined because the instr addr at the label
 * inside is shared by all invocations of a given singleton ID.
 *
 *NOTE(review): this calls asm_write_ret_from_singleton where the matching
 * data-singleton end calls asm_save_ret_to_singleton -- confirm against
 * the asm implementations that the write (vs save) here is intended.
 */
void
SSR__end_fn_singleton( int32 singletonID, SlaveVP *animPr )
 {
   SSRSemReq reqData;

   //don't need this addr until after at least one singleton has reached
   // this function
   SSRSemEnv *semEnv = VMS_int__give_sem_env_for( animPr );
   asm_write_ret_from_singleton(&(semEnv->fnSingletons[ singletonID]));

   reqData.reqType     = singleton_fn_end;
   reqData.singletonID = singletonID;

   VMS_WL__send_sem_request( &reqData, animPr );

   EndSingletonInstrAddr:
   return;
 }
/*Ends a data singleton.  Saves this call's return address into the
 * singleton struct (so later start-calls can jump straight here), then
 * notifies the master that the singleton has finished.
 */
void
SSR__end_data_singleton( SSRSingleton **singletonPtrAddr, SlaveVP *animPr )
 {
   SSRSemReq reqData;

   //don't need this addr until after singleton struct has reached
   // this function for first time
   //do assembly that saves the return addr of this fn call into the
   // data singleton -- that data-singleton can only be given to exactly
   // one instance in the code of this function. However, can use this
   // function in different places for different data-singletons.
   // (*(singletonAddr))->endInstrAddr = &&EndDataSingletonInstrAddr;

   asm_save_ret_to_singleton(*singletonPtrAddr);

   reqData.reqType          = singleton_data_end;
   reqData.singletonPtrAddr = singletonPtrAddr;

   VMS_WL__send_sem_request( &reqData, animPr );
 }
735 /*This executes the function in the masterVP, so it executes in isolation
736 * from any other copies -- only one copy of the function can ever execute
737 * at a time.
738 *
739 *It suspends to the master, and the request handler takes the function
740 * pointer out of the request and calls it, then resumes the VP.
741 *Only very short functions should be called this way -- for longer-running
742 * isolation, use transaction-start and transaction-end, which run the code
743 * between as work-code.
744 */
745 void
746 SSR__animate_short_fn_in_isolation( PtrToAtomicFn ptrToFnToExecInMaster,
747 void *data, SlaveVP *animPr )
748 {
749 SSRSemReq reqData;
751 //
752 reqData.reqType = atomic;
753 reqData.fnToExecInMaster = ptrToFnToExecInMaster;
754 reqData.dataForFn = data;
756 VMS_WL__send_sem_request( &reqData, animPr );
757 }
760 /*This suspends to the master.
761 *First, it looks at the VP's data, to see the highest transactionID that VP
762 * already has entered. If the current ID is not larger, it throws an
763 * exception stating a bug in the code. Otherwise it puts the current ID
764 * there, and adds the ID to a linked list of IDs entered -- the list is
765 * used to check that exits are properly ordered.
766 *Next it is uses transactionID as index into an array of transaction
767 * structures.
768 *If the "VP_currently_executing" field is non-null, then put requesting VP
769 * into queue in the struct. (At some point a holder will request
770 * end-transaction, which will take this VP from the queue and resume it.)
771 *If NULL, then write requesting into the field and resume.
772 */
773 void
774 SSR__start_transaction( int32 transactionID, SlaveVP *animPr )
775 {
776 SSRSemReq reqData;
778 //
779 reqData.sendPr = animPr;
780 reqData.reqType = trans_start;
781 reqData.transID = transactionID;
783 VMS_WL__send_sem_request( &reqData, animPr );
784 }
786 /*This suspends to the master, then uses transactionID as index into an
787 * array of transaction structures.
788 *It looks at VP_currently_executing to be sure it's same as requesting VP.
789 * If different, throws an exception, stating there's a bug in the code.
790 *Next it looks at the queue in the structure.
791 *If it's empty, it sets VP_currently_executing field to NULL and resumes.
792 *If something in, gets it, sets VP_currently_executing to that VP, then
793 * resumes both.
794 */
795 void
796 SSR__end_transaction( int32 transactionID, SlaveVP *animPr )
797 {
798 SSRSemReq reqData;
800 //
801 reqData.sendPr = animPr;
802 reqData.reqType = trans_end;
803 reqData.transID = transactionID;
805 VMS_WL__send_sem_request( &reqData, animPr );
806 }
