view VSs.c @ 32:552192f088a2

Merge
author Sean Halle <seanhalle@yahoo.com>
date Mon, 04 Mar 2013 04:16:12 -0800
parents b787a5234406
children 227db52cbd93
line source
1 /*
2 * Copyright 2010 OpenSourceCodeStewardshipFoundation
3 *
4 * Licensed under BSD
5 */
7 #include <stdio.h>
8 #include <stdlib.h>
9 #include <malloc.h>
11 #include "Queue_impl/PrivateQueue.h"
12 #include "Hash_impl/PrivateHash.h"
14 #include "VSs.h"
15 #include "Measurement/VSs_Counter_Recording.h"
17 //==========================================================================
19 void
20 VSs__init();
22 void
23 VSs__init_Helper();
24 //==========================================================================
28 //===========================================================================
31 /*These are the library functions *called in the application*
32 *
33 *There's a pattern for the outside sequential code to interact with the
34 * VMS_HW code.
35 *The VMS_HW system is inside a boundary.. every VSs system is in its
36 * own directory that contains the functions for each of the processor types.
37 * One of the processor types is the "seed" processor that starts the
38 * cascade of creating all the processors that do the work.
39 *So, in the directory is a file called "EntryPoint.c" that contains the
40 * function, named appropriately to the work performed, that the outside
41 * sequential code calls. This function follows a pattern:
42 *1) it calls VSs__init()
43 *2) it creates the initial data for the seed processor, which is passed
44 * in to the function
45 *3) it creates the seed VSs processor, with the data to start it with.
46 *4) it calls startVSsThenWaitUntilWorkDone
45 *5) it gets the returnValue from the transfer struct and returns that
48 * from the function
49 *
50 *For now, a new VSs system has to be created via VSs__init every
51 * time an entry point function is called -- later, might add letting the
52 * VSs system be created once, and let all the entry points just reuse
53 * it -- want to be as simple as possible now, and see by using what makes
54 * sense for later..
55 */
59 //===========================================================================
61 /*This is the "border crossing" function -- the thing that crosses from the
62 * outside world, into the VMS_HW world. It initializes and starts up the
63 * VMS system, then creates one processor from the specified function and
64 * puts it into the readyQ. From that point, that one function is resp.
65 * for creating all the other processors, that then create others, and so
66 * forth.
67 *When all the processors, including the seed, have dissipated, then this
68 * function returns. The results will have been written by side-effect via
69 * pointers read from, or written into initData.
70 *
71 *NOTE: no Threads should exist in the outside program that might touch
72 * any of the data reachable from initData passed in to here
73 */
74 void
75 VSs__create_seed_slave_and_do_work( TopLevelFnPtr fnPtr, void *initData )
76 { VSsSemEnv *semEnv;
77 SlaveVP *seedSlv;
78 VSsSemData *semData;
79 VSsTaskStub *threadTaskStub, *parentTaskStub;
80 int32* taskID;
82 VSs__init(); //normal multi-thd
84 semEnv = _VMSMasterEnv->semanticEnv;
86 //VSs starts with one processor, which is put into initial environ,
87 // and which then calls create() to create more, thereby expanding work
88 seedSlv = VSs__create_slave_helper( fnPtr, initData,
89 semEnv, semEnv->nextCoreToGetNewSlv++ );
91 //seed slave is a thread slave, so make a thread's task stub for it
92 // and then make another to stand for the seed's parent task. Make
93 // the parent be already ended, and have one child (the seed). This
94 // will make the dissipate handler do the right thing when the seed
95 // is dissipated.
96 threadTaskStub = create_thread_task_stub( initData );
97 parentTaskStub = create_thread_task_stub( NULL );
98 parentTaskStub->isEnded = TRUE;
99 parentTaskStub->numLiveChildThreads = 1; //so dissipate works for seed
100 threadTaskStub->parentTaskStub = parentTaskStub;
101 threadTaskStub->slaveAssignedTo = seedSlv;
103 taskID = VMS_WL__malloc(2 * sizeof(int32) );
104 taskID[0] = 1;
105 taskID[1] = -1;
106 threadTaskStub->taskID = taskID;
108 semData = (VSsSemData *)seedSlv->semanticData;
109 //seedVP is a thread, so has a permanent task
110 semData->needsTaskAssigned = FALSE;
111 semData->taskStub = threadTaskStub;
112 semData->slaveType = ThreadSlv;
114 resume_slaveVP( seedSlv, semEnv ); //returns right away, just queues Slv
116 VMS_SS__start_the_work_then_wait_until_done(); //normal multi-thd
118 VSs__cleanup_after_shutdown();
119 }
122 int32
123 VSs__giveMinWorkUnitCycles( float32 percentOverhead )
124 {
125 return MIN_WORK_UNIT_CYCLES;
126 }
128 int32
129 VSs__giveIdealNumWorkUnits()
130 {
131 return NUM_ANIM_SLOTS * NUM_CORES;
132 }
134 int32
135 VSs__give_number_of_cores_to_schedule_onto()
136 {
137 return NUM_CORES;
138 }
140 /*For now, use TSC -- later, make these two macros with assembly that first
141 * saves jump point, and second jumps back several times to get reliable time
142 */
143 void
144 VSs__start_primitive()
145 { saveLowTimeStampCountInto( ((VSsSemEnv *)(_VMSMasterEnv->semanticEnv))->
146 primitiveStartTime );
147 }
149 /*Just quick and dirty for now -- make reliable later
150 * will want this to jump back several times -- to be sure cache is warm
151 * because don't want comm time included in calc-time measurement -- and
152 * also to throw out any "weird" values due to OS interrupt or TSC rollover
153 */
154 int32
155 VSs__end_primitive_and_give_cycles()
156 { int32 endTime, startTime;
157 //TODO: fix by repeating time-measurement
158 saveLowTimeStampCountInto( endTime );
159 startTime =((VSsSemEnv*)(_VMSMasterEnv->semanticEnv))->primitiveStartTime;
160 return (endTime - startTime);
161 }
163 //===========================================================================
165 /*Initializes all the data-structures for a VSs system -- but doesn't
166 * start it running yet!
167 *
168 *This runs in the main thread -- before VMS starts up
169 *
170 *This sets up the semantic layer over the VMS system
171 *
172 *First, calls VMS_Setup, then creates own environment, making it ready
173 * for creating the seed processor and then starting the work.
174 */
/*Initializes all data-structures for a VSs system without starting it.
 *Runs in the main thread, before VMS starts up: first sets up VMS
 * itself, then layers the VSs semantic environment on top.
 */
void
VSs__init()
 {
   VMS_SS__init();
      //_VMSMasterEnv (global) is now partially set up; from here on,
      // VMS_int__malloc and VMS_int__free are available
   VSs__init_Helper();
 }
186 void idle_fn(void* data, SlaveVP *animatingSlv){
187 while(1){
188 VMS_int__suspend_slaveVP_and_send_req(animatingSlv);
189 }
190 }
192 void
193 VSs__init_Helper()
194 { VSsSemEnv *semanticEnv;
195 int32 i, coreNum, slotNum;
196 VSsSemData *semData;
198 //Hook up the semantic layer's plug-ins to the Master virt procr
199 _VMSMasterEnv->requestHandler = &VSs__Request_Handler;
200 _VMSMasterEnv->slaveAssigner = &VSs__assign_slaveVP_to_slot;
202 //create the semantic layer's environment (all its data) and add to
203 // the master environment
204 semanticEnv = VMS_int__malloc( sizeof( VSsSemEnv ) );
205 _VMSMasterEnv->semanticEnv = semanticEnv;
207 #ifdef HOLISTIC__TURN_ON_PERF_COUNTERS
208 _VMSMasterEnv->counterHandler = &VSs__counter_handler;
209 VSs__init_counter_data_structs();
210 #endif
212 semanticEnv->shutdownInitiated = FALSE;
213 semanticEnv->coreIsDone = VMS_int__malloc( NUM_CORES * sizeof( bool32 ) );
214 semanticEnv->numCoresDone = 0;
215 //For each animation slot, there is an idle slave, and an initial
216 // slave assigned as the current-task-slave. Create them here.
217 SlaveVP *idleSlv, *slotTaskSlv;
218 for( coreNum = 0; coreNum < NUM_CORES; coreNum++ )
219 { semanticEnv->coreIsDone[coreNum] = FALSE; //use during shutdown
221 for( slotNum = 0; slotNum < NUM_ANIM_SLOTS; ++slotNum )
222 {
223 #ifdef IDLE_SLAVES
224 idleSlv = VSs__create_slave_helper( &idle_fn, NULL, semanticEnv, 0);
225 idleSlv->coreAnimatedBy = coreNum;
226 idleSlv->animSlotAssignedTo =
227 _VMSMasterEnv->allAnimSlots[coreNum][slotNum];
228 semanticEnv->idleSlv[coreNum][slotNum] = idleSlv;
229 #endif
231 slotTaskSlv = VSs__create_slave_helper( &idle_fn, NULL, semanticEnv, 0);
232 slotTaskSlv->coreAnimatedBy = coreNum;
233 slotTaskSlv->animSlotAssignedTo =
234 _VMSMasterEnv->allAnimSlots[coreNum][slotNum];
236 semData = slotTaskSlv->semanticData;
237 semData->needsTaskAssigned = TRUE;
238 semData->slaveType = SlotTaskSlv;
239 semanticEnv->slotTaskSlvs[coreNum][slotNum] = slotTaskSlv;
240 }
241 }
243 //create the ready queues, hash tables used for matching and so forth
244 semanticEnv->slavesReadyToResumeQ = makeVMSQ();
245 semanticEnv->freeExtraTaskSlvQ = makeVMSQ();
246 semanticEnv->taskReadyQ = makeVMSQ();
248 semanticEnv->argPtrHashTbl = makeHashTable32( 16, &VMS_int__free );
249 semanticEnv->commHashTbl = makeHashTable32( 16, &VMS_int__free );
251 semanticEnv->nextCoreToGetNewSlv = 0;
253 semanticEnv->numInFlightTasks = 0;
254 semanticEnv->deferredSubmitsQ = makeVMSQ();
255 #ifdef EXTERNAL_SCHEDULER
256 VSs__init_ext_scheduler();
257 #endif
258 //TODO: bug -- turn these arrays into dyn arrays to eliminate limit
259 //semanticEnv->singletonHasBeenExecutedFlags = makeDynArrayInfo( );
260 //semanticEnv->transactionStrucs = makeDynArrayInfo( );
261 for( i = 0; i < NUM_STRUCS_IN_SEM_ENV; i++ )
262 {
263 semanticEnv->fnSingletons[i].endInstrAddr = NULL;
264 semanticEnv->fnSingletons[i].hasBeenStarted = FALSE;
265 semanticEnv->fnSingletons[i].hasFinished = FALSE;
266 semanticEnv->fnSingletons[i].waitQ = makeVMSQ();
267 semanticEnv->transactionStrucs[i].waitingVPQ = makeVMSQ();
268 semanticEnv->criticalSection[i].isOccupied = FALSE;
269 semanticEnv->criticalSection[i].waitQ = makeVMSQ();
270 #ifdef HOLISTIC__TURN_ON_PERF_COUNTERS
271 semanticEnv->criticalSection[i].previous.vp = 0;
272 semanticEnv->criticalSection[i].previous.task = 0;
273 #endif
274 }
276 semanticEnv->numLiveExtraTaskSlvs = 0; //must be last
277 semanticEnv->numLiveThreadSlvs = 1; //must be last, counts the seed
279 #ifdef HOLISTIC__TURN_ON_OBSERVE_UCC
280 semanticEnv->unitList = makeListOfArrays(sizeof(Unit),128);
281 semanticEnv->ctlDependenciesList = makeListOfArrays(sizeof(Dependency),128);
282 semanticEnv->commDependenciesList = makeListOfArrays(sizeof(Dependency),128);
283 semanticEnv->dynDependenciesList = makeListOfArrays(sizeof(Dependency),128);
284 semanticEnv->dataDependenciesList = makeListOfArrays(sizeof(Dependency),128);
285 semanticEnv->singletonDependenciesList = makeListOfArrays(sizeof(Dependency),128);
286 semanticEnv->warDependenciesList = makeListOfArrays(sizeof(Dependency),128);
287 semanticEnv->ntonGroupsInfo = makePrivDynArrayOfSize((void***)&(semanticEnv->ntonGroups),8);
289 semanticEnv->hwArcs = makeListOfArrays(sizeof(Dependency),128);
290 memset(semanticEnv->last_in_slot,0,sizeof(NUM_CORES * NUM_ANIM_SLOTS * sizeof(Unit)));
291 #endif
292 }
295 /*Frees any memory allocated by VSs__init() then calls VMS_int__shutdown
296 */
297 void
298 VSs__cleanup_after_shutdown()
299 { VSsSemEnv *semanticEnv;
301 semanticEnv = _VMSMasterEnv->semanticEnv;
303 #ifdef HOLISTIC__TURN_ON_OBSERVE_UCC
304 FILE* output;
305 int n;
306 char filename[255];
307 //UCC
308 for(n=0;n<255;n++)
309 {
310 sprintf(filename, "./counters/UCC.%d",n);
311 output = fopen(filename,"r");
312 if(output)
313 {
314 fclose(output);
315 }else{
316 break;
317 }
318 }
319 if(n<255){
320 printf("Saving UCC to File: %s ...\n", filename);
321 output = fopen(filename,"w+");
322 if(output!=NULL){
323 set_dependency_file(output);
324 //fprintf(output,"digraph Dependencies {\n");
325 //set_dot_file(output);
326 //FIXME: first line still depends on counters being enabled, replace w/ unit struct!
327 //forAllInDynArrayDo(_VMSMasterEnv->counter_history_array_info, &print_dot_node_info );
328 forAllInListOfArraysDo(semanticEnv->unitList, &print_unit_to_file);
329 forAllInListOfArraysDo( semanticEnv->commDependenciesList, &print_comm_dependency_to_file );
330 forAllInListOfArraysDo( semanticEnv->ctlDependenciesList, &print_ctl_dependency_to_file );
331 forAllInListOfArraysDo( semanticEnv->dataDependenciesList, &print_data_dependency_to_file );
332 forAllInListOfArraysDo( semanticEnv->singletonDependenciesList, &print_singleton_dependency_to_file );
333 forAllInListOfArraysDo( semanticEnv->warDependenciesList, &print_war_dependency_to_file );
334 forAllInDynArrayDo(semanticEnv->ntonGroupsInfo,&print_nton_to_file);
335 //fprintf(output,"}\n");
336 fflush(output);
338 } else
339 printf("Opening UCC file failed. Please check that folder \"counters\" exists in run directory and has write permission.\n");
340 } else {
341 printf("Could not open UCC file, please clean \"counters\" folder. (Must contain less than 255 files.)\n");
342 }
343 //Loop Graph
344 for(n=0;n<255;n++)
345 {
346 sprintf(filename, "./counters/LoopGraph.%d",n);
347 output = fopen(filename,"r");
348 if(output)
349 {
350 fclose(output);
351 }else{
352 break;
353 }
354 }
355 if(n<255){
356 printf("Saving LoopGraph to File: %s ...\n", filename);
357 output = fopen(filename,"w+");
358 if(output!=NULL){
359 set_dependency_file(output);
360 //fprintf(output,"digraph Dependencies {\n");
361 //set_dot_file(output);
362 //FIXME: first line still depends on counters being enabled, replace w/ unit struct!
363 //forAllInDynArrayDo(_VMSMasterEnv->counter_history_array_info, &print_dot_node_info );
364 forAllInListOfArraysDo( semanticEnv->unitList, &print_unit_to_file );
365 forAllInListOfArraysDo( semanticEnv->commDependenciesList, &print_comm_dependency_to_file );
366 forAllInListOfArraysDo( semanticEnv->ctlDependenciesList, &print_ctl_dependency_to_file );
367 forAllInListOfArraysDo( semanticEnv->dataDependenciesList, &print_data_dependency_to_file );
368 forAllInListOfArraysDo( semanticEnv->singletonDependenciesList, &print_singleton_dependency_to_file );
369 forAllInListOfArraysDo( semanticEnv->dynDependenciesList, &print_dyn_dependency_to_file );
370 forAllInListOfArraysDo( semanticEnv->warDependenciesList, &print_war_dependency_to_file );
371 forAllInListOfArraysDo( semanticEnv->hwArcs, &print_hw_dependency_to_file );
372 //fprintf(output,"}\n");
373 fflush(output);
375 } else
376 printf("Opening LoopGraph file failed. Please check that folder \"counters\" exists in run directory and has write permission.\n");
377 } else {
378 printf("Could not open LoopGraph file, please clean \"counters\" folder. (Must contain less than 255 files.)\n");
379 }
382 freeListOfArrays(semanticEnv->unitList);
383 freeListOfArrays(semanticEnv->commDependenciesList);
384 freeListOfArrays(semanticEnv->ctlDependenciesList);
385 freeListOfArrays(semanticEnv->dynDependenciesList);
386 freeListOfArrays(semanticEnv->dataDependenciesList);
387 freeListOfArrays(semanticEnv->warDependenciesList);
388 freeListOfArrays(semanticEnv->singletonDependenciesList);
389 freeListOfArrays(semanticEnv->hwArcs);
391 #endif
392 #ifdef HOLISTIC__TURN_ON_PERF_COUNTERS
393 FILE* output2;
394 int n2;
395 char filename2[255];
396 for(n2=0;n2<255;n2++)
397 {
398 sprintf(filename2, "./counters/Counters.%d.csv",n2);
399 output2 = fopen(filename2,"r");
400 if(output2)
401 {
402 fclose(output2);
403 }else{
404 break;
405 }
406 }
407 if(n2<255){
408 printf("Saving Counter measurements to File: %s ...\n", filename2);
409 output2 = fopen(filename2,"w+");
410 if(output2!=NULL){
411 set_counter_file(output2);
412 int i;
413 for(i=0;i<NUM_CORES;i++){
414 forAllInListOfArraysDo( semanticEnv->counterList[i], &print_counter_events_to_file );
415 fflush(output2);
416 }
418 } else
419 printf("Opening UCC file failed. Please check that folder \"counters\" exists in run directory and has write permission.\n");
420 } else {
421 printf("Could not open UCC file, please clean \"counters\" folder. (Must contain less than 255 files.)\n");
422 }
424 #endif
425 /* It's all allocated inside VMS's big chunk -- that's about to be freed, so
426 * nothing to do here */
427 /*
428 int coreIdx, slotIdx;
429 SlaveVP* slotSlv;
430 for (coreIdx = 0; coreIdx < NUM_CORES; coreIdx++) {
431 for (slotIdx = 0; slotIdx < NUM_ANIM_SLOTS; slotIdx++) {
432 slotSlv = semanticEnv->slotTaskSlvs[coreIdx][slotIdx];
433 VMS_int__free(slotSlv->semanticData);
434 VMS_int__free( slotSlv->startOfStack );
435 VMS_int__free( slotSlv );
436 #ifdef IDLE_SLAVES
437 slotSlv = semanticEnv->idleSlv[coreIdx][slotIdx];
438 VMS_int__free(slotSlv->semanticData);
439 VMS_int__free( slotSlv->startOfStack );
440 VMS_int__free( slotSlv );
441 #endif
442 }
443 }
445 freePrivQ(semanticEnv->freeExtraTaskSlvQ);
446 freePrivQ(semanticEnv->slavesReadyToResumeQ);
447 freePrivQ(semanticEnv->taskReadyQ);
448 freeHashTable( semanticEnv->argPtrHashTbl );
449 freeHashTable( semanticEnv->commHashTbl );
450 VMS_int__free( _VMSMasterEnv->semanticEnv );
451 */
452 VMS_SS__cleanup_at_end_of_shutdown();
453 }
456 //===========================================================================
458 SlaveVP *
459 VSs__create_thread( TopLevelFnPtr fnPtr, void *initData,
460 SlaveVP *creatingThd )
461 { VSsSemReq reqData;
463 //the semantic request data is on the stack and disappears when this
464 // call returns -- it's guaranteed to remain in the VP's stack for as
465 // long as the VP is suspended.
466 reqData.reqType = 0; //know type because in a VMS create req
467 reqData.fnPtr = fnPtr;
468 reqData.initData = initData;
469 reqData.callingSlv = creatingThd;
471 VMS_WL__send_create_slaveVP_req( &reqData, creatingThd );
473 return creatingThd->dataRetFromReq;
474 }
476 /*This is always the last thing done in the code animated by a thread VP.
477 * Normally, this would be the last line of the thread's top level function.
478 * But, if the thread exits from any point, it has to do so by calling
479 * this.
480 *
481 *It simply sends a dissipate request, which handles all the state cleanup.
482 */
483 void
484 VSs__end_thread( SlaveVP *thdToEnd )
485 {
487 VMS_WL__send_dissipate_req( thdToEnd );
488 }
492 //===========================================================================
495 //======================= task submit and end ==============================
496 /*
497 */
498 void
499 VSs__submit_task( VSsTaskType *taskType, void *args, SlaveVP *animSlv)
500 { VSsSemReq reqData;
502 reqData.reqType = submit_task;
504 reqData.taskType = taskType;
505 reqData.args = args;
506 reqData.callingSlv = animSlv;
508 reqData.taskID = NULL;
510 VMS_WL__send_sem_request( &reqData, animSlv );
511 }
513 int32 *
514 VSs__create_taskID_of_size( int32 numInts, SlaveVP *animSlv )
515 { int32 *taskID;
517 taskID = VMS_WL__malloc( sizeof(int32) + numInts * sizeof(int32) );
518 taskID[0] = numInts;
519 return taskID;
520 }
522 void
523 VSs__submit_task_with_ID( VSsTaskType *taskType, void *args, int32 *taskID,
524 SlaveVP *animSlv)
525 { VSsSemReq reqData;
527 reqData.reqType = submit_task;
529 reqData.taskType = taskType;
530 reqData.args = args;
531 reqData.taskID = taskID;
532 reqData.callingSlv = animSlv;
534 VMS_WL__send_sem_request( &reqData, animSlv );
535 }
538 /*This call is the last to happen in every task. It causes the slave to
539 * suspend and get the next task out of the task-queue. Notice there is no
540 * assigner here.. only one slave, no slave ReadyQ, and so on..
541 *Can either make the assigner take the next task out of the taskQ, or can
542 * leave all as it is, and make task-end take the next task.
543 *Note: this fits the case in the new VMS for no-context tasks, so will use
544 * the built-in taskQ of new VMS, and should be local and much faster.
545 *
546 *The task-stub is saved in the animSlv, so the request handler will get it
547 * from there, along with the task-type which has arg types, and so on..
548 *
549 * NOTE: if want, don't need to send the animating SlaveVP around..
550 * instead, can make a single slave per core, and coreCtrlr looks up the
551 * slave from having the core number.
552 *
553 *But, to stay compatible with all the other VMS languages, leave it in..
554 */
555 void
556 VSs__end_task( SlaveVP *animSlv )
557 { VSsSemReq reqData;
559 reqData.reqType = end_task;
560 reqData.callingSlv = animSlv;
562 VMS_WL__send_sem_request( &reqData, animSlv );
563 }
566 void
567 VSs__taskwait(SlaveVP *animSlv)
568 {
569 VSsSemReq reqData;
571 reqData.reqType = taskwait;
572 reqData.callingSlv = animSlv;
574 VMS_WL__send_sem_request( &reqData, animSlv );
575 }
577 void
578 VSs__taskwait_on(SlaveVP *animSlv,void* ptr){
579 VSsSemReq reqData;
581 reqData.reqType = taskwait_on;
582 reqData.callingSlv = animSlv;
584 reqData.args = ptr;
586 VMS_WL__send_sem_request( &reqData, animSlv );
587 }
589 void
590 VSs__start_critical(SlaveVP *animSlv,int32 name){
591 VSsSemReq reqData;
593 reqData.reqType = critical_start;
594 reqData.callingSlv = animSlv;
596 reqData.criticalID = name;
598 VMS_WL__send_sem_request( &reqData, animSlv );
599 }
601 void
602 VSs__end_critical(SlaveVP *animSlv,int32 name){
603 VSsSemReq reqData;
605 reqData.reqType = critical_end;
606 reqData.callingSlv = animSlv;
608 reqData.criticalID = name;
610 VMS_WL__send_sem_request( &reqData, animSlv );
611 }
613 //========================== send and receive ============================
614 //
616 int32 *
617 VSs__give_self_taskID( SlaveVP *animSlv )
618 {
619 return ((VSsSemData*)animSlv->semanticData)->taskStub->taskID;
620 }
622 //================================ send ===================================
624 void
625 VSs__send_of_type_to( void *msg, const int32 type, int32 *receiverID,
626 SlaveVP *senderSlv )
627 { VSsSemReq reqData;
629 reqData.reqType = send_type_to;
631 reqData.msg = msg;
632 reqData.msgType = type;
633 reqData.receiverID = receiverID;
634 reqData.senderSlv = senderSlv;
636 reqData.nextReqInHashEntry = NULL;
638 VMS_WL__send_sem_request( &reqData, senderSlv );
640 //When come back from suspend, no longer own data reachable from msg
641 }
643 void
644 VSs__send_from_to( void *msg, int32 *senderID, int32 *receiverID, SlaveVP *senderSlv )
645 { VSsSemReq reqData;
647 reqData.reqType = send_from_to;
649 reqData.msg = msg;
650 reqData.senderID = senderID;
651 reqData.receiverID = receiverID;
652 reqData.senderSlv = senderSlv;
654 reqData.nextReqInHashEntry = NULL;
656 VMS_WL__send_sem_request( &reqData, senderSlv );
657 }
660 //================================ receive ================================
662 /*The "type" version of send and receive creates a many-to-one relationship.
663 * The sender is anonymous, and many sends can stack up, waiting to be
664 * received. The same receiver can also have send from-to's
665 * waiting for it, and those will be kept separate from the "type"
666 * messages.
667 */
668 void *
669 VSs__receive_type_to( const int32 type, int32* receiverID, SlaveVP *receiverSlv )
670 { DEBUG__printf1(dbgRqstHdlr,"WL: receive type to %d",receiverID[1] );
671 VSsSemReq reqData;
673 reqData.reqType = receive_type_to;
675 reqData.msgType = type;
676 reqData.receiverID = receiverID;
677 reqData.receiverSlv = receiverSlv;
679 reqData.nextReqInHashEntry = NULL;
681 VMS_WL__send_sem_request( &reqData, receiverSlv );
683 return receiverSlv->dataRetFromReq;
684 }
688 /*Call this at the point a receiving task wants in-coming data.
689 * Use this from-to form when know senderID -- it makes a direct channel
690 * between sender and receiver.
691 */
692 void *
693 VSs__receive_from_to( int32 *senderID, int32 *receiverID, SlaveVP *receiverSlv )
694 {
695 VSsSemReq reqData;
697 reqData.reqType = receive_from_to;
699 reqData.senderID = senderID;
700 reqData.receiverID = receiverID;
701 reqData.receiverSlv = receiverSlv;
703 reqData.nextReqInHashEntry = NULL;
704 DEBUG__printf2(dbgRqstHdlr,"WL: receive from %d to: %d", reqData.senderID[1], reqData.receiverID[1]);
706 VMS_WL__send_sem_request( &reqData, receiverSlv );
708 return receiverSlv->dataRetFromReq;
709 }
714 //==========================================================================
715 //
716 /*A function singleton is a function whose body executes exactly once, on a
717 * single core, no matter how many times the fuction is called and no
718 * matter how many cores or the timing of cores calling it.
719 *
720 *A data singleton is a ticket attached to data. That ticket can be used
721 * to get the data through the function exactly once, no matter how many
722 * times the data is given to the function, and no matter the timing of
723 * trying to get the data through from different cores.
724 */
726 /*asm function declarations*/
727 void asm_save_ret_to_singleton(VSsSingleton *singletonPtrAddr);
728 void asm_write_ret_from_singleton(VSsSingleton *singletonPtrAddr);
/*Fn singleton uses ID as index into array of singleton structs held in the
 * semantic environment.
 *Sends a singleton_fn_start request. The handler puts either 0 (first
 * time through -- execute the body) or the address saved by the matching
 * VSs__end_fn_singleton into dataRetFromReq. In the non-zero case the
 * asm helper rewrites this call's return address so control lands just
 * past the end-singleton call, skipping the singleton body.
 */
void
VSs__start_fn_singleton( int32 singletonID, SlaveVP *animSlv )
 {
   VSsSemReq reqData;

   reqData.reqType = singleton_fn_start;
   reqData.singletonID = singletonID;

   VMS_WL__send_sem_request( &reqData, animSlv );
   if( animSlv->dataRetFromReq ) //will be 0 or addr of label in end singleton
    { //singleton already ran -- jump past its body via the asm helper,
      // which rewrites the return address on the stack
      VSsSemEnv *semEnv = VMS_int__give_sem_env_for( animSlv );
      asm_write_ret_from_singleton(&(semEnv->fnSingletons[ singletonID]));
    }
 }
/*Data singleton hands addr of loc holding a pointer to a singleton struct.
 * The start_data_singleton makes the structure and puts its addr into the
 * location.
 *If the singleton has already finished, skips the request entirely and
 * jumps straight to the return-address-rewrite (the goto below).
 */
void
VSs__start_data_singleton( VSsSingleton **singletonAddr, SlaveVP *animSlv )
 {
   VSsSemReq reqData;

      //fast path: singleton struct exists and already completed
   if( *singletonAddr && (*singletonAddr)->hasFinished )
      goto JmpToEndSingleton;

   reqData.reqType = singleton_data_start;
   reqData.singletonPtrAddr = singletonAddr;

   VMS_WL__send_sem_request( &reqData, animSlv );
   if( animSlv->dataRetFromReq ) //either 0 or end singleton's return addr
    { //Assembly code changes the return addr on the stack to the one
      // saved into the singleton by the end-singleton-fn
      //The return addr is at 0x4(%%ebp)   (x86-specific -- NOTE(review):
      // confirm this is still accurate for the target ABI)
JmpToEndSingleton:
      asm_write_ret_from_singleton(*singletonAddr);
    }
   //now, simply return
   //will exit either from the start singleton call or the end-singleton call
 }
/*Uses ID as index into array of flags. If flag already set, resumes from
 * end-label. Else, sets flag and resumes normally.
 *
 *Note, this call cannot be inlined because the instr addr at the label
 * inside is shared by all invocations of a given singleton ID.
 */
void
VSs__end_fn_singleton( int32 singletonID, SlaveVP *animSlv )
 {
   VSsSemReq reqData;

      //don't need this addr until after at least one singleton has reached
      // this function
      //NOTE(review): by symmetry with VSs__end_data_singleton (which calls
      // asm_save_ret_to_singleton here), this looks like it may be meant
      // to SAVE this call's return address rather than write one -- confirm
      // against the asm helpers' definitions
   VSsSemEnv *semEnv = VMS_int__give_sem_env_for( animSlv );
   asm_write_ret_from_singleton(&(semEnv->fnSingletons[ singletonID]));

   reqData.reqType = singleton_fn_end;
   reqData.singletonID = singletonID;

   VMS_WL__send_sem_request( &reqData, animSlv );

   //EndSingletonInstrAddr:
   return;
 }
/*Marks the end of a data singleton: saves this call's return address into
 * the singleton struct, then sends the singleton_data_end request.
 */
void
VSs__end_data_singleton( VSsSingleton **singletonPtrAddr, SlaveVP *animSlv )
 {
   VSsSemReq reqData;

      //don't need this addr until after singleton struct has reached
      // this function for first time
      //assembly saves the return addr of this fn call into the
      // data singleton -- that data-singleton can only be given to exactly
      // one instance in the code of this function. However, can use this
      // function in different places for different data-singletons.
      // (*(singletonAddr))->endInstrAddr = &&EndDataSingletonInstrAddr;
   asm_save_ret_to_singleton(*singletonPtrAddr);

   reqData.reqType = singleton_data_end;
   reqData.singletonPtrAddr = singletonPtrAddr;

   VMS_WL__send_sem_request( &reqData, animSlv );
 }
824 /*This executes the function in the masterVP, so it executes in isolation
825 * from any other copies -- only one copy of the function can ever execute
826 * at a time.
827 *
828 *It suspends to the master, and the request handler takes the function
829 * pointer out of the request and calls it, then resumes the VP.
830 *Only very short functions should be called this way -- for longer-running
831 * isolation, use transaction-start and transaction-end, which run the code
832 * between as work-code.
833 */
834 void
835 VSs__animate_short_fn_in_isolation( PtrToAtomicFn ptrToFnToExecInMaster,
836 void *data, SlaveVP *animSlv )
837 {
838 VSsSemReq reqData;
840 //
841 reqData.reqType = atomic;
842 reqData.fnToExecInMaster = ptrToFnToExecInMaster;
843 reqData.dataForFn = data;
845 VMS_WL__send_sem_request( &reqData, animSlv );
846 }
849 /*This suspends to the master.
850 *First, it looks at the VP's data, to see the highest transactionID that VP
851 * already has entered. If the current ID is not larger, it throws an
852 * exception stating a bug in the code. Otherwise it puts the current ID
853 * there, and adds the ID to a linked list of IDs entered -- the list is
854 * used to check that exits are properly ordered.
855 *Next it is uses transactionID as index into an array of transaction
856 * structures.
857 *If the "VP_currently_executing" field is non-null, then put requesting VP
858 * into queue in the struct. (At some point a holder will request
859 * end-transaction, which will take this VP from the queue and resume it.)
860 *If NULL, then write requesting into the field and resume.
861 */
862 void
863 VSs__start_transaction( int32 transactionID, SlaveVP *animSlv )
864 {
865 VSsSemReq reqData;
867 //
868 reqData.callingSlv = animSlv;
869 reqData.reqType = trans_start;
870 reqData.transID = transactionID;
872 VMS_WL__send_sem_request( &reqData, animSlv );
873 }
875 /*This suspends to the master, then uses transactionID as index into an
876 * array of transaction structures.
877 *It looks at VP_currently_executing to be sure it's same as requesting VP.
878 * If different, throws an exception, stating there's a bug in the code.
879 *Next it looks at the queue in the structure.
880 *If it's empty, it sets VP_currently_executing field to NULL and resumes.
881 *If something in, gets it, sets VP_currently_executing to that VP, then
882 * resumes both.
883 */
884 void
885 VSs__end_transaction( int32 transactionID, SlaveVP *animSlv )
886 {
887 VSsSemReq reqData;
889 //
890 reqData.callingSlv = animSlv;
891 reqData.reqType = trans_end;
892 reqData.transID = transactionID;
894 VMS_WL__send_sem_request( &reqData, animSlv );
895 }
897 //======================== Internal ==================================
898 /*
899 */
900 SlaveVP *
901 VSs__create_slave_with( TopLevelFnPtr fnPtr, void *initData,
902 SlaveVP *creatingSlv )
903 { VSsSemReq reqData;
905 //the semantic request data is on the stack and disappears when this
906 // call returns -- it's guaranteed to remain in the VP's stack for as
907 // long as the VP is suspended.
908 reqData.reqType = 0; //know type because in a VMS create req
909 reqData.coreToAssignOnto = -1; //means round-robin assign
910 reqData.fnPtr = fnPtr;
911 reqData.initData = initData;
912 reqData.callingSlv = creatingSlv;
914 VMS_WL__send_create_slaveVP_req( &reqData, creatingSlv );
916 return creatingSlv->dataRetFromReq;
917 }
919 SlaveVP *
920 VSs__create_slave_with_affinity( TopLevelFnPtr fnPtr, void *initData,
921 SlaveVP *creatingSlv, int32 coreToAssignOnto )
922 { VSsSemReq reqData;
924 //the semantic request data is on the stack and disappears when this
925 // call returns -- it's guaranteed to remain in the VP's stack for as
926 // long as the VP is suspended.
927 reqData.reqType = create_slave_w_aff; //not used, May 2012
928 reqData.coreToAssignOnto = coreToAssignOnto;
929 reqData.fnPtr = fnPtr;
930 reqData.initData = initData;
931 reqData.callingSlv = creatingSlv;
933 VMS_WL__send_create_slaveVP_req( &reqData, creatingSlv );
935 return creatingSlv->dataRetFromReq;
936 }