VMS/VMS_Implementations/VOMP_impls/VOMP__MC_shared_impl

view VOMP.c @ 1:21cf36019f0d

Partially converted SSR to VOMP -- start of changes
author Some Random Person <seanhalle@yahoo.com>
date Thu, 24 May 2012 08:57:24 -0700
parents b311282ec174
children
line source
1 /*
2 * Copyright 2010 OpenSourceCodeStewardshipFoundation
3 *
4 * Licensed under BSD
5 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <malloc.h>

#include "Queue_impl/PrivateQueue.h"
#include "Hash_impl/PrivateHash.h"

#include "VOMP.h"
#include "VOMP_Counter_Recording.h"
17 //==========================================================================
//========================== forward declarations ==========================
void
VOMP__init();

void
VOMP__init_Helper();
24 //==========================================================================
28 //===========================================================================
31 /*These are the library functions *called in the application*
32 *
33 *There's a pattern for the outside sequential code to interact with the
34 * language code.
35 *The language system is inside a boundary.. every VOMP application is in its
36 * own directory that contains the functions for each of the processor types.
37 * One of the processor types is the "seed" processor that starts the
38 * cascade of creating all the processors that do the work.
39 *So, in the directory is a file called "EntryPoint.c" that contains the
40 * function, named appropriately to the work performed, that the outside
41 * sequential code calls. This function follows a pattern:
42 *1) it calls VOMP__init()
43 *2) it creates the initial data for the seed processor, which is passed
44 * in to the function
45 *3) it creates the seed VOMP processor, with the data to start it with.
46 *4) it calls startVOMPThenWaitUntilWorkDone
 *5) it gets the returnValue from the transfer struct and returns that
 *   value from the function
49 *
50 *For now, a new VOMP system has to be created via VOMP__init every
51 * time an entry point function is called -- later, might add letting the
52 * VOMP system be created once, and let all the entry points just reuse
53 * it -- want to be as simple as possible now, and see by using what makes
54 * sense for later..
55 */
59 //===========================================================================
61 /*This is the "border crossing" function -- the thing that crosses from the
62 * outside world, into the VMS_HW world. It initializes and starts up the
63 * VMS system, then creates one processor from the specified function and
64 * puts it into the readyQ. From that point, that one function is resp.
65 * for creating all the other processors, that then create others, and so
66 * forth.
67 *When all the processors, including the seed, have dissipated, then this
68 * function returns. The results will have been written by side-effect via
69 * pointers read from, or written into initData.
70 *
71 *NOTE: no Threads should exist in the outside program that might touch
72 * any of the data reachable from initData passed in to here
73 */
74 void
75 VOMP__create_seed_procr_and_do_work( TopLevelFnPtr fnPtr, void *initData )
76 { VOMPSemEnv *semEnv;
77 SlaveVP *seedPr;
79 VOMP__init(); //normal multi-thd
81 semEnv = _VMSMasterEnv->semanticEnv;
83 //VOMP starts with one processor, which is put into initial environ,
84 // and which then calls create() to create more, thereby expanding work
85 seedPr = VOMP__create_procr_helper( fnPtr, initData,
86 semEnv, semEnv->nextCoreToGetNewPr++ );
88 resume_slaveVP( seedPr, semEnv );
90 VMS_SS__start_the_work_then_wait_until_done(); //normal multi-thd
92 VOMP__cleanup_after_shutdown();
93 }
96 int32
97 VOMP__giveMinWorkUnitCycles( float32 percentOverhead )
98 {
99 return MIN_WORK_UNIT_CYCLES;
100 }
102 int32
103 VOMP__giveIdealNumWorkUnits()
104 {
105 return NUM_ANIM_SLOTS * NUM_CORES;
106 }
108 int32
109 VOMP__give_number_of_cores_to_schedule_onto()
110 {
111 return NUM_CORES;
112 }
114 /*For now, use TSC -- later, make these two macros with assembly that first
115 * saves jump point, and second jumps back several times to get reliable time
116 */
117 void
118 VOMP__start_primitive()
119 { saveLowTimeStampCountInto( ((VOMPSemEnv *)(_VMSMasterEnv->semanticEnv))->
120 primitiveStartTime );
121 }
123 /*Just quick and dirty for now -- make reliable later
124 * will want this to jump back several times -- to be sure cache is warm
125 * because don't want comm time included in calc-time measurement -- and
126 * also to throw out any "weird" values due to OS interrupt or TSC rollover
127 */
128 int32
129 VOMP__end_primitive_and_give_cycles()
130 { int32 endTime, startTime;
131 //TODO: fix by repeating time-measurement
132 saveLowTimeStampCountInto( endTime );
133 startTime =((VOMPSemEnv*)(_VMSMasterEnv->semanticEnv))->primitiveStartTime;
134 return (endTime - startTime);
135 }
137 //===========================================================================
139 /*Initializes all the data-structures for a VOMP system -- but doesn't
140 * start it running yet!
141 *
142 *This runs in the main thread -- before VMS starts up
143 *
144 *This sets up the semantic layer over the VMS system
145 *
146 *First, calls VMS_Setup, then creates own environment, making it ready
147 * for creating the seed processor and then starting the work.
148 */
void
VOMP__init()
 {
   VMS_SS__init();
   //the global _VMSMasterEnv is now partially set up, and
   // VMS_int__malloc / VMS_int__free are available from here on
   VOMP__init_Helper();
 }
160 void idle_fn(void* data, SlaveVP *animatingSlv){
161 while(1){
162 VMS_int__suspend_slaveVP_and_send_req(animatingSlv);
163 }
164 }
166 void
167 VOMP__init_Helper()
168 { VOMPSemEnv *semanticEnv;
169 PrivQueueStruc **readyVPQs;
170 int coreIdx, i, j;
172 //Hook up the semantic layer's plug-ins to the Master virt procr
173 _VMSMasterEnv->requestHandler = &VOMP__Request_Handler;
174 _VMSMasterEnv->slaveAssigner = &VOMP__assign_slaveVP_to_slot;
175 #ifdef HOLISTIC__TURN_ON_PERF_COUNTERS
176 _VMSMasterEnv->counterHandler = &VOMP__counter_handler;
177 #endif
179 //create the semantic layer's environment (all its data) and add to
180 // the master environment
181 semanticEnv = VMS_int__malloc( sizeof( VOMPSemEnv ) );
182 _VMSMasterEnv->semanticEnv = semanticEnv;
184 #ifdef HOLISTIC__TURN_ON_PERF_COUNTERS
185 VOMP__init_counter_data_structs();
186 #endif
187 semanticEnv->shutdownInitiated = FALSE;
188 for(i=0;i<NUM_CORES;++i){
189 for(j=0;j<NUM_ANIM_SLOTS;++j){
190 semanticEnv->idlePr[i][j] = VMS_int__create_slaveVP(&idle_fn,NULL);
191 semanticEnv->idlePr[i][j]->coreAnimatedBy = i;
192 }
193 }
195 #ifdef HOLISTIC__TURN_ON_OBSERVE_UCC
196 semanticEnv->unitList = makeListOfArrays(sizeof(Unit),128);
197 semanticEnv->ctlDependenciesList = makeListOfArrays(sizeof(Dependency),128);
198 semanticEnv->commDependenciesList = makeListOfArrays(sizeof(Dependency),128);
199 semanticEnv->dynDependenciesList = makeListOfArrays(sizeof(Dependency),128);
200 semanticEnv->ntonGroupsInfo = makePrivDynArrayOfSize((void***)&(semanticEnv->ntonGroups),8);
202 semanticEnv->hwArcs = makeListOfArrays(sizeof(Dependency),128);
203 memset(semanticEnv->last_in_slot,0,sizeof(NUM_CORES * NUM_ANIM_SLOTS * sizeof(Unit)));
204 #endif
206 //create the ready queue, hash tables used for pairing send to receive
207 // and so forth
208 //TODO: add hash tables for pairing sends with receives, and
209 // initialize the data ownership system
210 readyVPQs = VMS_int__malloc( NUM_CORES * sizeof(PrivQueueStruc *) );
212 for( coreIdx = 0; coreIdx < NUM_CORES; coreIdx++ )
213 {
214 readyVPQs[ coreIdx ] = makeVMSQ();
215 }
217 semanticEnv->readyVPQs = readyVPQs;
219 semanticEnv->nextCoreToGetNewPr = 0;
220 semanticEnv->numSlaveVP = 0;
222 semanticEnv->commHashTbl = makeHashTable( 1<<16, &VMS_int__free );//start big
224 //TODO: bug -- turn these arrays into dyn arrays to eliminate limit
225 //semanticEnv->singletonHasBeenExecutedFlags = makeDynArrayInfo( );
226 //semanticEnv->transactionStrucs = makeDynArrayInfo( );
227 for( i = 0; i < NUM_STRUCS_IN_SEM_ENV; i++ )
228 {
229 semanticEnv->fnSingletons[i].endInstrAddr = NULL;
230 semanticEnv->fnSingletons[i].hasBeenStarted = FALSE;
231 semanticEnv->fnSingletons[i].hasFinished = FALSE;
232 semanticEnv->fnSingletons[i].waitQ = makeVMSQ();
233 semanticEnv->transactionStrucs[i].waitingVPQ = makeVMSQ();
234 }
235 }
238 /*Frees any memory allocated by VOMP__init() then calls VMS_int__shutdown
239 */
240 void
241 VOMP__cleanup_after_shutdown()
242 { VOMPSemEnv *semanticEnv;
244 semanticEnv = _VMSMasterEnv->semanticEnv;
246 #ifdef HOLISTIC__TURN_ON_OBSERVE_UCC
247 //UCC
248 FILE* output;
249 int n;
250 char filename[255];
251 for(n=0;n<255;n++)
252 {
253 sprintf(filename, "./counters/UCC.%d",n);
254 output = fopen(filename,"r");
255 if(output)
256 {
257 fclose(output);
258 }else{
259 break;
260 }
261 }
262 if(n<255){
263 printf("Saving UCC to File: %s ...\n", filename);
264 output = fopen(filename,"w+");
265 if(output!=NULL){
266 set_dependency_file(output);
267 //fprintf(output,"digraph Dependencies {\n");
268 //set_dot_file(output);
269 //FIXME: first line still depends on counters being enabled, replace w/ unit struct!
270 //forAllInDynArrayDo(_VMSMasterEnv->counter_history_array_info, &print_dot_node_info );
271 forAllInListOfArraysDo(semanticEnv->unitList, &print_unit_to_file);
272 forAllInListOfArraysDo( semanticEnv->commDependenciesList, &print_comm_dependency_to_file );
273 forAllInListOfArraysDo( semanticEnv->ctlDependenciesList, &print_ctl_dependency_to_file );
274 forAllInDynArrayDo(semanticEnv->ntonGroupsInfo,&print_nton_to_file);
275 //fprintf(output,"}\n");
276 fflush(output);
278 } else
279 printf("Opening UCC file failed. Please check that folder \"counters\" exists in run directory and has write permission.\n");
280 } else {
281 printf("Could not open UCC file, please clean \"counters\" folder. (Must contain less than 255 files.)\n");
282 }
283 //Loop Graph
284 for(n=0;n<255;n++)
285 {
286 sprintf(filename, "./counters/LoopGraph.%d",n);
287 output = fopen(filename,"r");
288 if(output)
289 {
290 fclose(output);
291 }else{
292 break;
293 }
294 }
295 if(n<255){
296 printf("Saving LoopGraph to File: %s ...\n", filename);
297 output = fopen(filename,"w+");
298 if(output!=NULL){
299 set_dependency_file(output);
300 //fprintf(output,"digraph Dependencies {\n");
301 //set_dot_file(output);
302 //FIXME: first line still depends on counters being enabled, replace w/ unit struct!
303 //forAllInDynArrayDo(_VMSMasterEnv->counter_history_array_info, &print_dot_node_info );
304 forAllInListOfArraysDo( semanticEnv->unitList, &print_unit_to_file );
305 forAllInListOfArraysDo( semanticEnv->commDependenciesList, &print_comm_dependency_to_file );
306 forAllInListOfArraysDo( semanticEnv->ctlDependenciesList, &print_ctl_dependency_to_file );
307 forAllInListOfArraysDo( semanticEnv->dynDependenciesList, &print_dyn_dependency_to_file );
308 forAllInListOfArraysDo( semanticEnv->hwArcs, &print_hw_dependency_to_file );
309 //fprintf(output,"}\n");
310 fflush(output);
312 } else
313 printf("Opening LoopGraph file failed. Please check that folder \"counters\" exists in run directory and has write permission.\n");
314 } else {
315 printf("Could not open LoopGraph file, please clean \"counters\" folder. (Must contain less than 255 files.)\n");
316 }
319 freeListOfArrays(semanticEnv->unitList);
320 freeListOfArrays(semanticEnv->commDependenciesList);
321 freeListOfArrays(semanticEnv->ctlDependenciesList);
322 freeListOfArrays(semanticEnv->dynDependenciesList);
324 #endif
325 #ifdef HOLISTIC__TURN_ON_PERF_COUNTERS
326 for(n=0;n<255;n++)
327 {
328 sprintf(filename, "./counters/Counters.%d.csv",n);
329 output = fopen(filename,"r");
330 if(output)
331 {
332 fclose(output);
333 }else{
334 break;
335 }
336 }
337 if(n<255){
338 printf("Saving Counter measurements to File: %s ...\n", filename);
339 output = fopen(filename,"w+");
340 if(output!=NULL){
341 set_counter_file(output);
342 int i;
343 for(i=0;i<NUM_CORES;i++){
344 forAllInListOfArraysDo( semanticEnv->counterList[i], &print_counter_events_to_file );
345 fflush(output);
346 }
348 } else
349 printf("Opening UCC file failed. Please check that folder \"counters\" exists in run directory and has write permission.\n");
350 } else {
351 printf("Could not open UCC file, please clean \"counters\" folder. (Must contain less than 255 files.)\n");
352 }
354 #endif
355 /* It's all allocated inside VMS's big chunk -- that's about to be freed, so
356 * nothing to do here
359 for( coreIdx = 0; coreIdx < NUM_CORES; coreIdx++ )
360 {
361 VMS_int__free( semanticEnv->readyVPQs[coreIdx]->startOfData );
362 VMS_int__free( semanticEnv->readyVPQs[coreIdx] );
363 }
364 VMS_int__free( semanticEnv->readyVPQs );
366 freeHashTable( semanticEnv->commHashTbl );
367 VMS_int__free( _VMSMasterEnv->semanticEnv );
368 */
369 VMS_SS__cleanup_at_end_of_shutdown();
370 }
373 //===========================================================================
375 /*
376 */
377 SlaveVP *
378 VOMP__create_procr_with( TopLevelFnPtr fnPtr, void *initData,
379 SlaveVP *creatingPr )
380 { VOMPSemReq reqData;
382 //the semantic request data is on the stack and disappears when this
383 // call returns -- it's guaranteed to remain in the VP's stack for as
384 // long as the VP is suspended.
385 reqData.reqType = 0; //know type because in a VMS create req
386 reqData.coreToAssignOnto = -1; //means round-robin assign
387 reqData.fnPtr = fnPtr;
388 reqData.initData = initData;
389 reqData.sendPr = creatingPr;
391 VMS_WL__send_create_slaveVP_req( &reqData, creatingPr );
393 return creatingPr->dataRetFromReq;
394 }
396 SlaveVP *
397 VOMP__create_procr_with_affinity( TopLevelFnPtr fnPtr, void *initData,
398 SlaveVP *creatingPr, int32 coreToAssignOnto )
399 { VOMPSemReq reqData;
401 //the semantic request data is on the stack and disappears when this
402 // call returns -- it's guaranteed to remain in the VP's stack for as
403 // long as the VP is suspended.
404 reqData.reqType = 0; //know type because in a VMS create req
405 reqData.coreToAssignOnto = coreToAssignOnto;
406 reqData.fnPtr = fnPtr;
407 reqData.initData = initData;
408 reqData.sendPr = creatingPr;
410 VMS_WL__send_create_slaveVP_req( &reqData, creatingPr );
412 return creatingPr->dataRetFromReq;
413 }
416 void
417 VOMP__dissipate_procr( SlaveVP *procrToDissipate )
418 {
419 VMS_WL__send_dissipate_req( procrToDissipate );
420 }
423 //===========================================================================
425 void *
426 VOMP__malloc_to( int32 sizeToMalloc, SlaveVP *owningPr )
427 { VOMPSemReq reqData;
429 reqData.reqType = malloc_req;
430 reqData.sendPr = owningPr;
431 reqData.sizeToMalloc = sizeToMalloc;
433 VMS_WL__send_sem_request( &reqData, owningPr );
435 return owningPr->dataRetFromReq;
436 }
439 /*Sends request to Master, which does the work of freeing
440 */
441 void
442 VOMP__free( void *ptrToFree, SlaveVP *owningPr )
443 { VOMPSemReq reqData;
445 reqData.reqType = free_req;
446 reqData.sendPr = owningPr;
447 reqData.ptrToFree = ptrToFree;
449 VMS_WL__send_sem_request( &reqData, owningPr );
450 }
453 void
454 VOMP__transfer_ownership_of_from_to( void *data, SlaveVP *oldOwnerSlv,
455 SlaveVP *newOwnerPr )
456 {
457 //TODO: put in the ownership system that automatically frees when no
458 // owners of data left -- will need keeper for keeping data around when
459 // future created processors might need it but don't exist yet
460 }
463 void
464 VOMP__add_ownership_by_to( SlaveVP *newOwnerSlv, void *data )
465 {
467 }
470 void
471 VOMP__remove_ownership_by_from( SlaveVP *loserSlv, void *dataLosing )
472 {
474 }
477 /*Causes the VOMP system to remove internal ownership, so data won't be
478 * freed when VOMP shuts down, and will persist in the external program.
479 *
480 *Must be called from the processor that currently owns the data.
481 *
482 *IMPL: Transferring ownership touches two different virtual processor's
483 * state -- which means it has to be done carefully -- the VMS rules for
484 * semantic layers say that a work-unit is only allowed to touch the
485 * virtual processor it is part of, and that only a single work-unit per
486 * virtual processor be assigned to a slave at a time. So, this has to
487 * modify the virtual processor that owns the work-unit that called this
488 * function, then create a request to have the other processor modified.
489 *However, in this case, the TO processor is the outside, and transfers
490 * are only allowed to be called by the giver-upper, so can mark caller of
491 * this function as no longer owner, and return -- done.
492 */
void
VOMP__transfer_ownership_to_outside( void *data )
 {
   //TODO: removeAllOwnersFrom( data );
 }
500 //===========================================================================
502 void
503 VOMP__send_of_type_to( SlaveVP *sendPr, void *msg, const int type,
504 SlaveVP *receivePr)
505 { VOMPSemReq reqData;
507 reqData.receivePr = receivePr;
508 reqData.sendPr = sendPr;
509 reqData.reqType = send_type;
510 reqData.msgType = type;
511 reqData.msg = msg;
512 reqData.nextReqInHashEntry = NULL;
514 //On ownership -- remove inside the send and let ownership sit in limbo
515 // as a potential in an entry in the hash table, when this receive msg
516 // gets paired to a send, the ownership gets added to the receivePr --
517 // the next work-unit in the receivePr's trace will have ownership.
518 VMS_WL__send_sem_request( &reqData, sendPr );
520 //When come back from suspend, no longer own data reachable from msg
521 //TODO: release ownership here
522 }
524 void
525 VOMP__send_from_to( void *msg, SlaveVP *sendPr, SlaveVP *receivePr )
526 { VOMPSemReq reqData;
528 //hash on the receiver, 'cause always know it, but sometimes want to
529 // receive from anonymous sender
531 reqData.receivePr = receivePr;
532 reqData.sendPr = sendPr;
533 reqData.reqType = send_from_to;
534 reqData.msg = msg;
535 reqData.nextReqInHashEntry = NULL;
537 VMS_WL__send_sem_request( &reqData, sendPr );
538 }
541 //===========================================================================
543 void *
544 VOMP__receive_any_to( SlaveVP *receivePr )
545 {
547 }
549 void *
550 VOMP__receive_type_to( const int type, SlaveVP *receivePr )
551 { DEBUG__printf1(dbgRqstHdlr,"WL: receive type to: %d", receivePr->slaveID);
552 VOMPSemReq reqData;
554 reqData.receivePr = receivePr;
555 reqData.reqType = receive_type;
556 reqData.msgType = type;
557 reqData.nextReqInHashEntry = NULL;
559 VMS_WL__send_sem_request( &reqData, receivePr );
561 return receivePr->dataRetFromReq;
562 }
566 /*Call this at point receiving virt pr wants in-coming data.
567 *
568 *The reason receivePr must call this is that it modifies the receivPr
569 * loc structure directly -- and the VMS rules state a virtual processor
570 * loc structure can only be modified by itself.
571 */
572 void *
573 VOMP__receive_from_to( SlaveVP *sendPr, SlaveVP *receivePr )
574 { DEBUG__printf2(dbgRqstHdlr,"WL: receive from %d to: %d", sendPr->slaveID, receivePr->slaveID);
575 VOMPSemReq reqData;
577 //hash on the receiver, 'cause always know it, but sometimes want to
578 // receive from anonymous sender
580 reqData.receivePr = receivePr;
581 reqData.sendPr = sendPr;
582 reqData.reqType = receive_from_to;
583 reqData.nextReqInHashEntry = NULL;
585 VMS_WL__send_sem_request( &reqData, receivePr );
587 return receivePr->dataRetFromReq;
588 }
591 //===========================================================================
592 //
593 /*A function singleton is a function whose body executes exactly once, on a
594 * single core, no matter how many times the fuction is called and no
595 * matter how many cores or the timing of cores calling it.
596 *
597 *A data singleton is a ticket attached to data. That ticket can be used
598 * to get the data through the function exactly once, no matter how many
599 * times the data is given to the function, and no matter the timing of
600 * trying to get the data through from different cores.
601 */
603 /*asm function declarations*/
604 void asm_save_ret_to_singleton(VOMPSingleton *singletonPtrAddr);
605 void asm_write_ret_from_singleton(VOMPSingleton *singletonPtrAddr);
607 /*Fn singleton uses ID as index into array of singleton structs held in the
608 * semantic environment.
609 */
610 void
611 VOMP__start_fn_singleton( int32 singletonID, SlaveVP *animPr )
612 {
613 VOMPSemReq reqData;
615 //
616 reqData.reqType = singleton_fn_start;
617 reqData.singletonID = singletonID;
619 VMS_WL__send_sem_request( &reqData, animPr );
620 if( animPr->dataRetFromReq ) //will be 0 or addr of label in end singleton
621 {
622 VOMPSemEnv *semEnv = VMS_int__give_sem_env_for( animPr );
623 asm_write_ret_from_singleton(&(semEnv->fnSingletons[ singletonID]));
624 }
625 }
627 /*Data singleton hands addr of loc holding a pointer to a singleton struct.
628 * The start_data_singleton makes the structure and puts its addr into the
629 * location.
630 */
631 void
632 VOMP__start_data_singleton( VOMPSingleton **singletonAddr, SlaveVP *animPr )
633 {
634 VOMPSemReq reqData;
636 if( *singletonAddr && (*singletonAddr)->hasFinished )
637 goto JmpToEndSingleton;
639 reqData.reqType = singleton_data_start;
640 reqData.singletonPtrAddr = singletonAddr;
642 VMS_WL__send_sem_request( &reqData, animPr );
643 if( animPr->dataRetFromReq ) //either 0 or end singleton's return addr
644 { //Assembly code changes the return addr on the stack to the one
645 // saved into the singleton by the end-singleton-fn
646 //The return addr is at 0x4(%%ebp)
647 JmpToEndSingleton:
648 asm_write_ret_from_singleton(*singletonAddr);
649 }
650 //now, simply return
651 //will exit either from the start singleton call or the end-singleton call
652 }
654 /*Uses ID as index into array of flags. If flag already set, resumes from
655 * end-label. Else, sets flag and resumes normally.
656 *
657 *Note, this call cannot be inlined because the instr addr at the label
658 * inside is shared by all invocations of a given singleton ID.
659 */
660 void
661 VOMP__end_fn_singleton( int32 singletonID, SlaveVP *animPr )
662 {
663 VOMPSemReq reqData;
665 //don't need this addr until after at least one singleton has reached
666 // this function
667 VOMPSemEnv *semEnv = VMS_int__give_sem_env_for( animPr );
668 asm_write_ret_from_singleton(&(semEnv->fnSingletons[ singletonID]));
670 reqData.reqType = singleton_fn_end;
671 reqData.singletonID = singletonID;
673 VMS_WL__send_sem_request( &reqData, animPr );
675 EndSingletonInstrAddr:
676 return;
677 }
679 void
680 VOMP__end_data_singleton( VOMPSingleton **singletonPtrAddr, SlaveVP *animPr )
681 {
682 VOMPSemReq reqData;
684 //don't need this addr until after singleton struct has reached
685 // this function for first time
686 //do assembly that saves the return addr of this fn call into the
687 // data singleton -- that data-singleton can only be given to exactly
688 // one instance in the code of this function. However, can use this
689 // function in different places for different data-singletons.
690 // (*(singletonAddr))->endInstrAddr = &&EndDataSingletonInstrAddr;
693 asm_save_ret_to_singleton(*singletonPtrAddr);
695 reqData.reqType = singleton_data_end;
696 reqData.singletonPtrAddr = singletonPtrAddr;
698 VMS_WL__send_sem_request( &reqData, animPr );
699 }
701 /*This executes the function in the masterVP, so it executes in isolation
702 * from any other copies -- only one copy of the function can ever execute
703 * at a time.
704 *
705 *It suspends to the master, and the request handler takes the function
706 * pointer out of the request and calls it, then resumes the VP.
707 *Only very short functions should be called this way -- for longer-running
708 * isolation, use transaction-start and transaction-end, which run the code
709 * between as work-code.
710 */
711 void
712 VOMP__animate_short_fn_in_isolation( PtrToAtomicFn ptrToFnToExecInMaster,
713 void *data, SlaveVP *animPr )
714 {
715 VOMPSemReq reqData;
717 //
718 reqData.reqType = atomic;
719 reqData.fnToExecInMaster = ptrToFnToExecInMaster;
720 reqData.dataForFn = data;
722 VMS_WL__send_sem_request( &reqData, animPr );
723 }
726 /*This suspends to the master.
727 *First, it looks at the VP's data, to see the highest transactionID that VP
728 * already has entered. If the current ID is not larger, it throws an
729 * exception stating a bug in the code. Otherwise it puts the current ID
730 * there, and adds the ID to a linked list of IDs entered -- the list is
731 * used to check that exits are properly ordered.
732 *Next it is uses transactionID as index into an array of transaction
733 * structures.
734 *If the "VP_currently_executing" field is non-null, then put requesting VP
735 * into queue in the struct. (At some point a holder will request
736 * end-transaction, which will take this VP from the queue and resume it.)
737 *If NULL, then write requesting into the field and resume.
738 */
739 void
740 VOMP__start_transaction( int32 transactionID, SlaveVP *animPr )
741 {
742 VOMPSemReq reqData;
744 //
745 reqData.sendPr = animPr;
746 reqData.reqType = trans_start;
747 reqData.transID = transactionID;
749 VMS_WL__send_sem_request( &reqData, animPr );
750 }
752 /*This suspends to the master, then uses transactionID as index into an
753 * array of transaction structures.
754 *It looks at VP_currently_executing to be sure it's same as requesting VP.
755 * If different, throws an exception, stating there's a bug in the code.
756 *Next it looks at the queue in the structure.
757 *If it's empty, it sets VP_currently_executing field to NULL and resumes.
758 *If something in, gets it, sets VP_currently_executing to that VP, then
759 * resumes both.
760 */
761 void
762 VOMP__end_transaction( int32 transactionID, SlaveVP *animPr )
763 {
764 VOMPSemReq reqData;
766 //
767 reqData.sendPr = animPr;
768 reqData.reqType = trans_end;
769 reqData.transID = transactionID;
771 VMS_WL__send_sem_request( &reqData, animPr );
772 }