view VSs.c @ 14:b2bc97318262

bug fix -- always turn SlotTaskSlv into ExtraTaskSlv inside request handler
author Sean Halle <seanhalle@yahoo.com>
date Thu, 23 Aug 2012 01:27:26 -0700
parents f56e3beac86b
children 459055db7fc0
line source
1 /*
2 * Copyright 2010 OpenSourceCodeStewardshipFoundation
3 *
4 * Licensed under BSD
5 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>   //memset
#include <malloc.h>

#include "Queue_impl/PrivateQueue.h"
#include "Hash_impl/PrivateHash.h"

#include "VSs.h"
#include "Measurement/VSs_Counter_Recording.h"
17 //==========================================================================
19 void
20 VSs__init();
22 void
23 VSs__init_Helper();
24 //==========================================================================
28 //===========================================================================
31 /*These are the library functions *called in the application*
32 *
33 *There's a pattern for the outside sequential code to interact with the
34 * VMS_HW code.
35 *The VMS_HW system is inside a boundary.. every VSs system is in its
36 * own directory that contains the functions for each of the processor types.
37 * One of the processor types is the "seed" processor that starts the
38 * cascade of creating all the processors that do the work.
39 *So, in the directory is a file called "EntryPoint.c" that contains the
40 * function, named appropriately to the work performed, that the outside
41 * sequential code calls. This function follows a pattern:
42 *1) it calls VSs__init()
43 *2) it creates the initial data for the seed processor, which is passed
44 * in to the function
45 *3) it creates the seed VSs processor, with the data to start it with.
46 *4) it calls startVSsThenWaitUntilWorkDone
 *5) it gets the returnValue from the transfer struct and returns that
48 * from the function
49 *
50 *For now, a new VSs system has to be created via VSs__init every
51 * time an entry point function is called -- later, might add letting the
52 * VSs system be created once, and let all the entry points just reuse
53 * it -- want to be as simple as possible now, and see by using what makes
54 * sense for later..
55 */
59 //===========================================================================
61 /*This is the "border crossing" function -- the thing that crosses from the
62 * outside world, into the VMS_HW world. It initializes and starts up the
63 * VMS system, then creates one processor from the specified function and
64 * puts it into the readyQ. From that point, that one function is resp.
65 * for creating all the other processors, that then create others, and so
66 * forth.
67 *When all the processors, including the seed, have dissipated, then this
68 * function returns. The results will have been written by side-effect via
69 * pointers read from, or written into initData.
70 *
71 *NOTE: no Threads should exist in the outside program that might touch
72 * any of the data reachable from initData passed in to here
73 */
/*Entry point from the outside sequential world: initializes VMS, creates
 * the seed slave, queues it, then blocks until every slave (including the
 * seed) has dissipated.  Results come back by side-effect through pointers
 * reachable from initData (see the ownership note in the comment above).
 */
void
VSs__create_seed_slave_and_do_work( TopLevelFnPtr fnPtr, void *initData )
 { VSsSemEnv   *semEnv;
   SlaveVP     *seedSlv;
   VSsSemData  *semData;
   VSsTaskStub *threadTaskStub, *parentTaskStub;

   VSs__init();      //normal multi-thd -- sets up _VMSMasterEnv->semanticEnv

   semEnv = _VMSMasterEnv->semanticEnv;

   //VSs starts with one processor, which is put into initial environ,
   // and which then calls create() to create more, thereby expanding work
   seedSlv = VSs__create_slave_helper( fnPtr, initData,
                                       semEnv, semEnv->nextCoreToGetNewSlv++ );

   //seed slave is a thread slave, so make a thread's task stub for it
   // and then make another to stand for the seed's parent task.  Make
   // the parent be already ended, and have one child (the seed).  This
   // will make the dissipate handler do the right thing when the seed
   // is dissipated.
   threadTaskStub = create_thread_task_stub( initData );
   parentTaskStub = create_thread_task_stub( NULL );
   parentTaskStub->isEnded = TRUE;
   parentTaskStub->numLiveChildThreads = 1; //so dissipate works for seed
   threadTaskStub->parentTaskStub = parentTaskStub;

   semData = (VSsSemData *)seedSlv->semanticData;
   //seedVP is a thread, so has a permanent task
   semData->needsTaskAssigned = FALSE;
   semData->taskStub          = threadTaskStub;
   semData->slaveType         = ThreadSlv;

   resume_slaveVP( seedSlv, semEnv ); //returns right away, just queues Slv

   VMS_SS__start_the_work_then_wait_until_done(); //blocks until all work done

   VSs__cleanup_after_shutdown();
 }
115 int32
116 VSs__giveMinWorkUnitCycles( float32 percentOverhead )
117 {
118 return MIN_WORK_UNIT_CYCLES;
119 }
121 int32
122 VSs__giveIdealNumWorkUnits()
123 {
124 return NUM_ANIM_SLOTS * NUM_CORES;
125 }
127 int32
128 VSs__give_number_of_cores_to_schedule_onto()
129 {
130 return NUM_CORES;
131 }
133 /*For now, use TSC -- later, make these two macros with assembly that first
134 * saves jump point, and second jumps back several times to get reliable time
135 */
136 void
137 VSs__start_primitive()
138 { saveLowTimeStampCountInto( ((VSsSemEnv *)(_VMSMasterEnv->semanticEnv))->
139 primitiveStartTime );
140 }
142 /*Just quick and dirty for now -- make reliable later
143 * will want this to jump back several times -- to be sure cache is warm
144 * because don't want comm time included in calc-time measurement -- and
145 * also to throw out any "weird" values due to OS interrupt or TSC rollover
146 */
147 int32
148 VSs__end_primitive_and_give_cycles()
149 { int32 endTime, startTime;
150 //TODO: fix by repeating time-measurement
151 saveLowTimeStampCountInto( endTime );
152 startTime =((VSsSemEnv*)(_VMSMasterEnv->semanticEnv))->primitiveStartTime;
153 return (endTime - startTime);
154 }
156 //===========================================================================
158 /*Initializes all the data-structures for a VSs system -- but doesn't
159 * start it running yet!
160 *
161 *This runs in the main thread -- before VMS starts up
162 *
163 *This sets up the semantic layer over the VMS system
164 *
165 *First, calls VMS_Setup, then creates own environment, making it ready
166 * for creating the seed processor and then starting the work.
167 */
/*Sets up the semantic layer over the VMS system -- runs in the main
 * thread, before VMS starts.  Does not start the work running.
 */
void
VSs__init()
{
    //after VMS_SS__init, _VMSMasterEnv (a global) is partially set up and
    // VMS_int__malloc / VMS_int__free are available
    VMS_SS__init();
    VSs__init_Helper();
}
179 void idle_fn(void* data, SlaveVP *animatingSlv){
180 while(1){
181 VMS_int__suspend_slaveVP_and_send_req(animatingSlv);
182 }
183 }
185 void
186 VSs__init_Helper()
187 { VSsSemEnv *semanticEnv;
188 int32 i, coreNum, slotNum;
189 VSsSemData *semData;
191 //Hook up the semantic layer's plug-ins to the Master virt procr
192 _VMSMasterEnv->requestHandler = &VSs__Request_Handler;
193 _VMSMasterEnv->slaveAssigner = &VSs__assign_slaveVP_to_slot;
194 #ifdef HOLISTIC__TURN_ON_PERF_COUNTERS
195 _VMSMasterEnv->counterHandler = &VSs__counter_handler;
196 #endif
198 //create the semantic layer's environment (all its data) and add to
199 // the master environment
200 semanticEnv = VMS_int__malloc( sizeof( VSsSemEnv ) );
201 _VMSMasterEnv->semanticEnv = semanticEnv;
203 #ifdef HOLISTIC__TURN_ON_PERF_COUNTERS
204 VSs__init_counter_data_structs();
205 #endif
207 semanticEnv->shutdownInitiated = FALSE;
208 semanticEnv->coreIsDone = VMS_int__malloc( NUM_CORES * sizeof( bool32 ) );
209 //For each animation slot, there is an idle slave, and an initial
210 // slave assigned as the current-task-slave. Create them here.
211 SlaveVP *idleSlv, *slotTaskSlv;
212 for( coreNum = 0; coreNum < NUM_CORES; coreNum++ )
213 { semanticEnv->coreIsDone[coreNum] = FALSE; //use during shutdown
215 for( slotNum = 0; slotNum < NUM_ANIM_SLOTS; ++slotNum )
216 { idleSlv = VSs__create_slave_helper( &idle_fn, NULL, semanticEnv, 0);
217 idleSlv->coreAnimatedBy = coreNum;
218 idleSlv->animSlotAssignedTo =
219 _VMSMasterEnv->allAnimSlots[coreNum][slotNum];
220 semanticEnv->idleSlv[coreNum][slotNum] = idleSlv;
222 slotTaskSlv = VSs__create_slave_helper( &idle_fn, NULL, semanticEnv, 0);
223 slotTaskSlv->coreAnimatedBy = coreNum;
224 slotTaskSlv->animSlotAssignedTo =
225 _VMSMasterEnv->allAnimSlots[coreNum][slotNum];
227 semData = slotTaskSlv->semanticData;
228 semData->needsTaskAssigned = TRUE;
229 semData->slaveType = SlotTaskSlv;
230 semanticEnv->slotTaskSlvs[coreNum][slotNum] = slotTaskSlv;
231 }
232 }
234 //create the ready queues, hash tables used for matching and so forth
235 semanticEnv->slavesReadyToResumeQ = makeVMSQ();
236 semanticEnv->freeExtraTaskSlvQ = makeVMSQ();
237 semanticEnv->taskReadyQ = makeVMSQ();
239 semanticEnv->argPtrHashTbl = makeHashTable32( 16, &VMS_int__free );
240 semanticEnv->commHashTbl = makeHashTable32( 16, &VMS_int__free );
242 semanticEnv->nextCoreToGetNewSlv = 0;
245 //TODO: bug -- turn these arrays into dyn arrays to eliminate limit
246 //semanticEnv->singletonHasBeenExecutedFlags = makeDynArrayInfo( );
247 //semanticEnv->transactionStrucs = makeDynArrayInfo( );
248 for( i = 0; i < NUM_STRUCS_IN_SEM_ENV; i++ )
249 {
250 semanticEnv->fnSingletons[i].endInstrAddr = NULL;
251 semanticEnv->fnSingletons[i].hasBeenStarted = FALSE;
252 semanticEnv->fnSingletons[i].hasFinished = FALSE;
253 semanticEnv->fnSingletons[i].waitQ = makeVMSQ();
254 semanticEnv->transactionStrucs[i].waitingVPQ = makeVMSQ();
255 }
257 semanticEnv->numLiveExtraTaskSlvs = 0; //must be last
258 semanticEnv->numLiveThreadSlvs = 1; //must be last, count the seed
260 #ifdef HOLISTIC__TURN_ON_OBSERVE_UCC
261 semanticEnv->unitList = makeListOfArrays(sizeof(Unit),128);
262 semanticEnv->ctlDependenciesList = makeListOfArrays(sizeof(Dependency),128);
263 semanticEnv->commDependenciesList = makeListOfArrays(sizeof(Dependency),128);
264 semanticEnv->dynDependenciesList = makeListOfArrays(sizeof(Dependency),128);
265 semanticEnv->ntonGroupsInfo = makePrivDynArrayOfSize((void***)&(semanticEnv->ntonGroups),8);
267 semanticEnv->hwArcs = makeListOfArrays(sizeof(Dependency),128);
268 memset(semanticEnv->last_in_slot,0,sizeof(NUM_CORES * NUM_ANIM_SLOTS * sizeof(Unit)));
269 #endif
270 }
273 /*Frees any memory allocated by VSs__init() then calls VMS_int__shutdown
274 */
275 void
276 VSs__cleanup_after_shutdown()
277 { VSsSemEnv *semanticEnv;
279 semanticEnv = _VMSMasterEnv->semanticEnv;
281 #ifdef HOLISTIC__TURN_ON_OBSERVE_UCC
282 //UCC
283 FILE* output;
284 int n;
285 char filename[255];
286 for(n=0;n<255;n++)
287 {
288 sprintf(filename, "./counters/UCC.%d",n);
289 output = fopen(filename,"r");
290 if(output)
291 {
292 fclose(output);
293 }else{
294 break;
295 }
296 }
297 if(n<255){
298 printf("Saving UCC to File: %s ...\n", filename);
299 output = fopen(filename,"w+");
300 if(output!=NULL){
301 set_dependency_file(output);
302 //fprintf(output,"digraph Dependencies {\n");
303 //set_dot_file(output);
304 //FIXME: first line still depends on counters being enabled, replace w/ unit struct!
305 //forAllInDynArrayDo(_VMSMasterEnv->counter_history_array_info, &print_dot_node_info );
306 forAllInListOfArraysDo(semanticEnv->unitList, &print_unit_to_file);
307 forAllInListOfArraysDo( semanticEnv->commDependenciesList, &print_comm_dependency_to_file );
308 forAllInListOfArraysDo( semanticEnv->ctlDependenciesList, &print_ctl_dependency_to_file );
309 forAllInDynArrayDo(semanticEnv->ntonGroupsInfo,&print_nton_to_file);
310 //fprintf(output,"}\n");
311 fflush(output);
313 } else
314 printf("Opening UCC file failed. Please check that folder \"counters\" exists in run directory and has write permission.\n");
315 } else {
316 printf("Could not open UCC file, please clean \"counters\" folder. (Must contain less than 255 files.)\n");
317 }
318 //Loop Graph
319 for(n=0;n<255;n++)
320 {
321 sprintf(filename, "./counters/LoopGraph.%d",n);
322 output = fopen(filename,"r");
323 if(output)
324 {
325 fclose(output);
326 }else{
327 break;
328 }
329 }
330 if(n<255){
331 printf("Saving LoopGraph to File: %s ...\n", filename);
332 output = fopen(filename,"w+");
333 if(output!=NULL){
334 set_dependency_file(output);
335 //fprintf(output,"digraph Dependencies {\n");
336 //set_dot_file(output);
337 //FIXME: first line still depends on counters being enabled, replace w/ unit struct!
338 //forAllInDynArrayDo(_VMSMasterEnv->counter_history_array_info, &print_dot_node_info );
339 forAllInListOfArraysDo( semanticEnv->unitList, &print_unit_to_file );
340 forAllInListOfArraysDo( semanticEnv->commDependenciesList, &print_comm_dependency_to_file );
341 forAllInListOfArraysDo( semanticEnv->ctlDependenciesList, &print_ctl_dependency_to_file );
342 forAllInListOfArraysDo( semanticEnv->dynDependenciesList, &print_dyn_dependency_to_file );
343 forAllInListOfArraysDo( semanticEnv->hwArcs, &print_hw_dependency_to_file );
344 //fprintf(output,"}\n");
345 fflush(output);
347 } else
348 printf("Opening LoopGraph file failed. Please check that folder \"counters\" exists in run directory and has write permission.\n");
349 } else {
350 printf("Could not open LoopGraph file, please clean \"counters\" folder. (Must contain less than 255 files.)\n");
351 }
354 freeListOfArrays(semanticEnv->unitList);
355 freeListOfArrays(semanticEnv->commDependenciesList);
356 freeListOfArrays(semanticEnv->ctlDependenciesList);
357 freeListOfArrays(semanticEnv->dynDependenciesList);
359 #endif
360 #ifdef HOLISTIC__TURN_ON_PERF_COUNTERS
361 for(n=0;n<255;n++)
362 {
363 sprintf(filename, "./counters/Counters.%d.csv",n);
364 output = fopen(filename,"r");
365 if(output)
366 {
367 fclose(output);
368 }else{
369 break;
370 }
371 }
372 if(n<255){
373 printf("Saving Counter measurements to File: %s ...\n", filename);
374 output = fopen(filename,"w+");
375 if(output!=NULL){
376 set_counter_file(output);
377 int i;
378 for(i=0;i<NUM_CORES;i++){
379 forAllInListOfArraysDo( semanticEnv->counterList[i], &print_counter_events_to_file );
380 fflush(output);
381 }
383 } else
384 printf("Opening UCC file failed. Please check that folder \"counters\" exists in run directory and has write permission.\n");
385 } else {
386 printf("Could not open UCC file, please clean \"counters\" folder. (Must contain less than 255 files.)\n");
387 }
389 #endif
390 /* It's all allocated inside VMS's big chunk -- that's about to be freed, so
391 * nothing to do here
394 for( coreIdx = 0; coreIdx < NUM_CORES; coreIdx++ )
395 {
396 VMS_int__free( semanticEnv->readyVPQs[coreIdx]->startOfData );
397 VMS_int__free( semanticEnv->readyVPQs[coreIdx] );
398 }
399 VMS_int__free( semanticEnv->readyVPQs );
401 freeHashTable( semanticEnv->commHashTbl );
402 VMS_int__free( _VMSMasterEnv->semanticEnv );
403 */
404 VMS_SS__cleanup_at_end_of_shutdown();
405 }
408 //===========================================================================
410 SlaveVP *
411 VSs__create_thread( TopLevelFnPtr fnPtr, void *initData,
412 SlaveVP *creatingThd )
413 { VSsSemReq reqData;
415 //the semantic request data is on the stack and disappears when this
416 // call returns -- it's guaranteed to remain in the VP's stack for as
417 // long as the VP is suspended.
418 reqData.reqType = 0; //know type because in a VMS create req
419 reqData.fnPtr = fnPtr;
420 reqData.initData = initData;
421 reqData.callingSlv = creatingThd;
423 VMS_WL__send_create_slaveVP_req( &reqData, creatingThd );
425 return creatingThd->dataRetFromReq;
426 }
428 /*This is always the last thing done in the code animated by a thread VP.
429 * Normally, this would be the last line of the thread's top level function.
430 * But, if the thread exits from any point, it has to do so by calling
431 * this.
432 *
433 *It simply sends a dissipate request, which handles all the state cleanup.
434 */
435 void
436 VSs__end_thread( SlaveVP *thdToEnd )
437 { VSsSemData *semData;
439 VMS_WL__send_dissipate_req( thdToEnd );
440 }
444 //===========================================================================
447 //======================= task submit and end ==============================
448 /*
449 */
450 void
451 VSs__submit_task( VSsTaskType *taskType, void *args, SlaveVP *animSlv)
452 { VSsSemReq reqData;
454 reqData.reqType = submit_task;
456 reqData.taskType = taskType;
457 reqData.args = args;
458 reqData.callingSlv = animSlv;
460 reqData.taskID = NULL;
462 VMS_WL__send_sem_request( &reqData, animSlv );
463 }
465 inline int32 *
466 VSs__create_taskID_of_size( int32 numInts, SlaveVP *animSlv )
467 { int32 *taskID;
469 taskID = VMS_WL__malloc( sizeof(int32) + numInts * sizeof(int32) );
470 taskID[0] = numInts;
471 return taskID;
472 }
474 void
475 VSs__submit_task_with_ID( VSsTaskType *taskType, void *args, int32 *taskID,
476 SlaveVP *animSlv)
477 { VSsSemReq reqData;
479 reqData.reqType = submit_task;
481 reqData.taskType = taskType;
482 reqData.args = args;
483 reqData.taskID = taskID;
484 reqData.callingSlv = animSlv;
486 VMS_WL__send_sem_request( &reqData, animSlv );
487 }
490 /*This call is the last to happen in every task. It causes the slave to
491 * suspend and get the next task out of the task-queue. Notice there is no
492 * assigner here.. only one slave, no slave ReadyQ, and so on..
493 *Can either make the assigner take the next task out of the taskQ, or can
494 * leave all as it is, and make task-end take the next task.
495 *Note: this fits the case in the new VMS for no-context tasks, so will use
496 * the built-in taskQ of new VMS, and should be local and much faster.
497 *
498 *The task-stub is saved in the animSlv, so the request handler will get it
499 * from there, along with the task-type which has arg types, and so on..
500 *
501 * NOTE: if want, don't need to send the animating SlaveVP around..
502 * instead, can make a single slave per core, and coreCtrlr looks up the
503 * slave from having the core number.
504 *
505 *But, to stay compatible with all the other VMS languages, leave it in..
506 */
507 void
508 VSs__end_task( SlaveVP *animSlv )
509 { VSsSemReq reqData;
511 reqData.reqType = end_task;
512 reqData.callingSlv = animSlv;
514 VMS_WL__send_sem_request( &reqData, animSlv );
515 }
518 void
519 VSs__taskwait(SlaveVP *animSlv)
520 {
521 VSsSemReq reqData;
523 reqData.reqType = taskwait;
524 reqData.callingSlv = animSlv;
526 VMS_WL__send_sem_request( &reqData, animSlv );
527 }
531 //========================== send and receive ============================
532 //
534 inline int32 *
535 VSs__give_self_taskID( SlaveVP *animSlv )
536 {
537 return ((VSsSemData*)animSlv->semanticData)->taskStub->taskID;
538 }
540 //================================ send ===================================
542 void
543 VSs__send_of_type_to( void *msg, const int32 type, int32 *receiverID,
544 SlaveVP *senderSlv )
545 { VSsSemReq reqData;
547 reqData.reqType = send_type_to;
549 reqData.msg = msg;
550 reqData.msgType = type;
551 reqData.receiverID = receiverID;
552 reqData.senderSlv = senderSlv;
554 reqData.nextReqInHashEntry = NULL;
556 VMS_WL__send_sem_request( &reqData, senderSlv );
558 //When come back from suspend, no longer own data reachable from msg
559 }
561 void
562 VSs__send_from_to( void *msg, int32 *senderID, int32 *receiverID, SlaveVP *senderSlv )
563 { VSsSemReq reqData;
565 reqData.reqType = send_from_to;
567 reqData.msg = msg;
568 reqData.senderID = senderID;
569 reqData.receiverID = receiverID;
570 reqData.senderSlv = senderSlv;
572 reqData.nextReqInHashEntry = NULL;
574 VMS_WL__send_sem_request( &reqData, senderSlv );
575 }
578 //================================ receive ================================
580 /*The "type" version of send and receive creates a many-to-one relationship.
581 * The sender is anonymous, and many sends can stack up, waiting to be
582 * received. The same receiver can also have send from-to's
583 * waiting for it, and those will be kept separate from the "type"
584 * messages.
585 */
586 void *
587 VSs__receive_type_to( const int32 type, int32* receiverID, SlaveVP *receiverSlv )
588 { DEBUG__printf1(dbgRqstHdlr,"WL: receive type to %d",receiverID[1] );
589 VSsSemReq reqData;
591 reqData.reqType = receive_type_to;
593 reqData.msgType = type;
594 reqData.receiverID = receiverID;
595 reqData.receiverSlv = receiverSlv;
597 reqData.nextReqInHashEntry = NULL;
599 VMS_WL__send_sem_request( &reqData, receiverSlv );
601 return receiverSlv->dataRetFromReq;
602 }
606 /*Call this at the point a receiving task wants in-coming data.
607 * Use this from-to form when know senderID -- it makes a direct channel
608 * between sender and receiver.
609 */
610 void *
611 VSs__receive_from_to( int32 *senderID, int32 *receiverID, SlaveVP *receiverSlv )
612 {
613 VSsSemReq reqData;
615 reqData.reqType = receive_from_to;
617 reqData.senderID = senderID;
618 reqData.receiverID = receiverID;
619 reqData.receiverSlv = receiverSlv;
621 reqData.nextReqInHashEntry = NULL;
622 DEBUG__printf2(dbgRqstHdlr,"WL: receive from %d to: %d", reqData.senderID[1], reqData.receiverID[1]);
624 VMS_WL__send_sem_request( &reqData, receiverSlv );
626 return receiverSlv->dataRetFromReq;
627 }
632 //==========================================================================
633 //
634 /*A function singleton is a function whose body executes exactly once, on a
635 * single core, no matter how many times the fuction is called and no
636 * matter how many cores or the timing of cores calling it.
637 *
638 *A data singleton is a ticket attached to data. That ticket can be used
639 * to get the data through the function exactly once, no matter how many
640 * times the data is given to the function, and no matter the timing of
641 * trying to get the data through from different cores.
642 */
644 /*asm function declarations*/
645 void asm_save_ret_to_singleton(VSsSingleton *singletonPtrAddr);
646 void asm_write_ret_from_singleton(VSsSingleton *singletonPtrAddr);
/*Fn singleton uses ID as index into array of singleton structs held in the
 * semantic environment.
 *The request handler returns 0 into dataRetFromReq when this caller wins
 * the right to execute the singleton's body; otherwise it returns the
 * end-singleton label's address, and the assembly helper rewrites this
 * call's return address so execution resumes past the body (per the
 * mechanism described in VSs__start_data_singleton below).
 */
void
VSs__start_fn_singleton( int32 singletonID, SlaveVP *animSlv )
{
   VSsSemReq reqData;

   //
   reqData.reqType = singleton_fn_start;
   reqData.singletonID = singletonID;

   VMS_WL__send_sem_request( &reqData, animSlv );
   if( animSlv->dataRetFromReq ) //will be 0 or addr of label in end singleton
    {
      VSsSemEnv *semEnv = VMS_int__give_sem_env_for( animSlv );
      asm_write_ret_from_singleton(&(semEnv->fnSingletons[ singletonID]));
    }
}
/*Data singleton hands addr of loc holding a pointer to a singleton struct.
 * The start_data_singleton makes the structure and puts its addr into the
 * location.
 *Fast path: if the singleton has already finished, jump straight to the
 * saved end-singleton return point without suspending to the master.
 */
void
VSs__start_data_singleton( VSsSingleton **singletonAddr, SlaveVP *animSlv )
{
   VSsSemReq reqData;

   if( *singletonAddr && (*singletonAddr)->hasFinished )
      goto JmpToEndSingleton;

   reqData.reqType = singleton_data_start;
   reqData.singletonPtrAddr = singletonAddr;

   VMS_WL__send_sem_request( &reqData, animSlv );
   if( animSlv->dataRetFromReq ) //either 0 or end singleton's return addr
    { //Assembly code changes the return addr on the stack to the one
      // saved into the singleton by the end-singleton-fn
      //The return addr is at 0x4(%%ebp)
      JmpToEndSingleton:
      asm_write_ret_from_singleton(*singletonAddr);
    }
   //now, simply return
   //will exit either from the start singleton call or the end-singleton call
}
/*Uses ID as index into array of flags.  If flag already set, resumes from
 * end-label.  Else, sets flag and resumes normally.
 *
 *Note, this call cannot be inlined because the instr addr at the label
 * inside is shared by all invocations of a given singleton ID.
 *
 *NOTE(review): the parallel data-singleton version (VSs__end_data_singleton)
 * calls asm_save_ret_to_singleton() at its end-call, but this one calls
 * asm_write_ret_from_singleton() -- confirm that *writing* rather than
 * *saving* the return addr is really intended here.
 */
void
VSs__end_fn_singleton( int32 singletonID, SlaveVP *animSlv )
{
   VSsSemReq reqData;

   //don't need this addr until after at least one singleton has reached
   // this function
   VSsSemEnv *semEnv = VMS_int__give_sem_env_for( animSlv );
   asm_write_ret_from_singleton(&(semEnv->fnSingletons[ singletonID]));

   reqData.reqType = singleton_fn_end;
   reqData.singletonID = singletonID;

   VMS_WL__send_sem_request( &reqData, animSlv );

EndSingletonInstrAddr:
   return;
}
/*Ends a data singleton: saves this call's return address into the
 * singleton struct (so later start calls can jump straight here), then
 * notifies the master via a singleton_data_end request.
 */
void
VSs__end_data_singleton( VSsSingleton **singletonPtrAddr, SlaveVP *animSlv )
{
   VSsSemReq reqData;

   //don't need this addr until after singleton struct has reached
   // this function for first time
   //do assembly that saves the return addr of this fn call into the
   // data singleton -- that data-singleton can only be given to exactly
   // one instance in the code of this function.  However, can use this
   // function in different places for different data-singletons.
   // (*(singletonAddr))->endInstrAddr = &&EndDataSingletonInstrAddr;

   asm_save_ret_to_singleton(*singletonPtrAddr);

   reqData.reqType = singleton_data_end;
   reqData.singletonPtrAddr = singletonPtrAddr;

   VMS_WL__send_sem_request( &reqData, animSlv );
}
742 /*This executes the function in the masterVP, so it executes in isolation
743 * from any other copies -- only one copy of the function can ever execute
744 * at a time.
745 *
746 *It suspends to the master, and the request handler takes the function
747 * pointer out of the request and calls it, then resumes the VP.
748 *Only very short functions should be called this way -- for longer-running
749 * isolation, use transaction-start and transaction-end, which run the code
750 * between as work-code.
751 */
752 void
753 VSs__animate_short_fn_in_isolation( PtrToAtomicFn ptrToFnToExecInMaster,
754 void *data, SlaveVP *animSlv )
755 {
756 VSsSemReq reqData;
758 //
759 reqData.reqType = atomic;
760 reqData.fnToExecInMaster = ptrToFnToExecInMaster;
761 reqData.dataForFn = data;
763 VMS_WL__send_sem_request( &reqData, animSlv );
764 }
767 /*This suspends to the master.
768 *First, it looks at the VP's data, to see the highest transactionID that VP
769 * already has entered. If the current ID is not larger, it throws an
770 * exception stating a bug in the code. Otherwise it puts the current ID
771 * there, and adds the ID to a linked list of IDs entered -- the list is
772 * used to check that exits are properly ordered.
773 *Next it is uses transactionID as index into an array of transaction
774 * structures.
775 *If the "VP_currently_executing" field is non-null, then put requesting VP
776 * into queue in the struct. (At some point a holder will request
777 * end-transaction, which will take this VP from the queue and resume it.)
778 *If NULL, then write requesting into the field and resume.
779 */
780 void
781 VSs__start_transaction( int32 transactionID, SlaveVP *animSlv )
782 {
783 VSsSemReq reqData;
785 //
786 reqData.callingSlv = animSlv;
787 reqData.reqType = trans_start;
788 reqData.transID = transactionID;
790 VMS_WL__send_sem_request( &reqData, animSlv );
791 }
793 /*This suspends to the master, then uses transactionID as index into an
794 * array of transaction structures.
795 *It looks at VP_currently_executing to be sure it's same as requesting VP.
796 * If different, throws an exception, stating there's a bug in the code.
797 *Next it looks at the queue in the structure.
798 *If it's empty, it sets VP_currently_executing field to NULL and resumes.
799 *If something in, gets it, sets VP_currently_executing to that VP, then
800 * resumes both.
801 */
802 void
803 VSs__end_transaction( int32 transactionID, SlaveVP *animSlv )
804 {
805 VSsSemReq reqData;
807 //
808 reqData.callingSlv = animSlv;
809 reqData.reqType = trans_end;
810 reqData.transID = transactionID;
812 VMS_WL__send_sem_request( &reqData, animSlv );
813 }
815 //======================== Internal ==================================
816 /*
817 */
818 SlaveVP *
819 VSs__create_slave_with( TopLevelFnPtr fnPtr, void *initData,
820 SlaveVP *creatingSlv )
821 { VSsSemReq reqData;
823 //the semantic request data is on the stack and disappears when this
824 // call returns -- it's guaranteed to remain in the VP's stack for as
825 // long as the VP is suspended.
826 reqData.reqType = 0; //know type because in a VMS create req
827 reqData.coreToAssignOnto = -1; //means round-robin assign
828 reqData.fnPtr = fnPtr;
829 reqData.initData = initData;
830 reqData.callingSlv = creatingSlv;
832 VMS_WL__send_create_slaveVP_req( &reqData, creatingSlv );
834 return creatingSlv->dataRetFromReq;
835 }
837 SlaveVP *
838 VSs__create_slave_with_affinity( TopLevelFnPtr fnPtr, void *initData,
839 SlaveVP *creatingSlv, int32 coreToAssignOnto )
840 { VSsSemReq reqData;
842 //the semantic request data is on the stack and disappears when this
843 // call returns -- it's guaranteed to remain in the VP's stack for as
844 // long as the VP is suspended.
845 reqData.reqType = create_slave_w_aff; //not used, May 2012
846 reqData.coreToAssignOnto = coreToAssignOnto;
847 reqData.fnPtr = fnPtr;
848 reqData.initData = initData;
849 reqData.callingSlv = creatingSlv;
851 VMS_WL__send_create_slaveVP_req( &reqData, creatingSlv );
853 return creatingSlv->dataRetFromReq;
854 }