view VSs.c @ 22:b787a5234406

add task throttle
author Nina Engelhardt <nengel@mailbox.tu-berlin.de>
date Thu, 27 Dec 2012 12:27:45 +0100
parents feea343d202f
children 3787df8b95f9
line source
/*
 * Copyright 2010 OpenSourceCodeStewardshipFoundation
 *
 * Licensed under BSD
 */

#include <stdio.h>
#include <stdlib.h>
#include <malloc.h>

#include "Queue_impl/PrivateQueue.h"
#include "Hash_impl/PrivateHash.h"

#include "VSs.h"
#include "Measurement/VSs_Counter_Recording.h"

//==========================================================================
void
VSs__init();

void
VSs__init_Helper();
//==========================================================================

//===========================================================================
/*These are the library functions *called in the application*
 *
 *There's a pattern for the outside sequential code to interact with the
 * VMS_HW code.
 *The VMS_HW system is inside a boundary.. every VSs system is in its
 * own directory that contains the functions for each of the processor types.
 * One of the processor types is the "seed" processor that starts the
 * cascade of creating all the processors that do the work.
 *So, in the directory is a file called "EntryPoint.c" that contains the
 * function, named appropriately to the work performed, that the outside
 * sequential code calls. This function follows a pattern:
 *1) it calls VSs__init()
 *2) it creates the initial data for the seed processor, which is passed
 *   in to the function
 *3) it creates the seed VSs processor, with the data to start it with.
 *4) it calls startVSsThenWaitUntilWorkDone
 *5) it gets the returnValue from the transfer struct and returns that
 *   from the function
 * (an illustrative sketch of such an entry-point function follows this
 *  comment)
 *
 *For now, a new VSs system has to be created via VSs__init every
 * time an entry point function is called -- later, might add letting the
 * VSs system be created once, and let all the entry points just reuse
 * it -- want to be as simple as possible now, and see by using what makes
 * sense for later..
 */
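/*Illustrative sketch only (not part of this file): a hypothetical
 * EntryPoint.c function following the five-step pattern above. The
 * application-specific names (MyAppParams, MyApp__seed_fn, MyApp__do_work)
 * are invented for illustration; here steps 1 through 4 are covered by
 * VSs__create_seed_slave_and_do_work(), defined below, which itself calls
 * VSs__init(), creates the seed slave, starts the work, and waits.
 */
#if 0
typedef struct
 { void *inputData;     //read by the seed processor
   int32 returnValue;   //written by side-effect before the work completes
 }
MyAppParams;

void MyApp__seed_fn( void *initData, SlaveVP *animatingSlv );  //seed's top-level fn

int32
MyApp__do_work( void *inputData )
 { MyAppParams params;

   params.inputData   = inputData;
   params.returnValue = 0;

   //steps 1 to 4: init VSs, create the seed slave, start work, wait for
   // all created processors to dissipate
   VSs__create_seed_slave_and_do_work( &MyApp__seed_fn, &params );

   //step 5: result was written into the transfer struct by side-effect
   return params.returnValue;
 }
#endif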
//===========================================================================

/*This is the "border crossing" function -- the thing that crosses from the
 * outside world, into the VMS_HW world. It initializes and starts up the
 * VMS system, then creates one processor from the specified function and
 * puts it into the readyQ. From that point, that one function is responsible
 * for creating all the other processors, that then create others, and so
 * forth.
 *When all the processors, including the seed, have dissipated, then this
 * function returns. The results will have been written by side-effect via
 * pointers read from, or written into, initData.
 *
 *NOTE: no threads should exist in the outside program that might touch
 * any of the data reachable from initData passed in to here.
 */
void
VSs__create_seed_slave_and_do_work( TopLevelFnPtr fnPtr, void *initData )
 { VSsSemEnv   *semEnv;
   SlaveVP     *seedSlv;
   VSsSemData  *semData;
   VSsTaskStub *threadTaskStub, *parentTaskStub;

   VSs__init();   //normal multi-thd

   semEnv = _VMSMasterEnv->semanticEnv;

   //VSs starts with one processor, which is put into initial environ,
   // and which then calls create() to create more, thereby expanding work
   seedSlv = VSs__create_slave_helper( fnPtr, initData,
                                       semEnv, semEnv->nextCoreToGetNewSlv++ );

   //seed slave is a thread slave, so make a thread's task stub for it
   // and then make another to stand for the seed's parent task. Make
   // the parent be already ended, and have one child (the seed). This
   // will make the dissipate handler do the right thing when the seed
   // is dissipated.
   threadTaskStub = create_thread_task_stub( initData );
   parentTaskStub = create_thread_task_stub( NULL );
   parentTaskStub->isEnded = TRUE;
   parentTaskStub->numLiveChildThreads = 1;   //so dissipate works for seed
   threadTaskStub->parentTaskStub = parentTaskStub;
   threadTaskStub->slaveAssignedTo = seedSlv;

   semData = (VSsSemData *)seedSlv->semanticData;
   //seedVP is a thread, so has a permanent task
   semData->needsTaskAssigned = FALSE;
   semData->taskStub = threadTaskStub;
   semData->slaveType = ThreadSlv;

   resume_slaveVP( seedSlv, semEnv );   //returns right away, just queues Slv

   VMS_SS__start_the_work_then_wait_until_done();   //normal multi-thd

   VSs__cleanup_after_shutdown();
 }
int32
VSs__giveMinWorkUnitCycles( float32 percentOverhead )
 {
   return MIN_WORK_UNIT_CYCLES;
 }

int32
VSs__giveIdealNumWorkUnits()
 {
   return NUM_ANIM_SLOTS * NUM_CORES;
 }

int32
VSs__give_number_of_cores_to_schedule_onto()
 {
   return NUM_CORES;
 }

/*For now, use TSC -- later, make these two macros with assembly that first
 * saves jump point, and second jumps back several times to get reliable time
 */
void
VSs__start_primitive()
 { saveLowTimeStampCountInto( ((VSsSemEnv *)(_VMSMasterEnv->semanticEnv))->
                              primitiveStartTime );
 }

/*Just quick and dirty for now -- make reliable later
 * will want this to jump back several times -- to be sure cache is warm
 * because don't want comm time included in calc-time measurement -- and
 * also to throw out any "weird" values due to OS interrupt or TSC rollover
 */
int32
VSs__end_primitive_and_give_cycles()
 { int32 endTime, startTime;
   //TODO: fix by repeating time-measurement
   saveLowTimeStampCountInto( endTime );
   startTime = ((VSsSemEnv *)(_VMSMasterEnv->semanticEnv))->primitiveStartTime;
   return (endTime - startTime);
 }
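/*Illustrative sketch only: measuring the cycles taken by a short piece of
 * work-code with the two primitives above, then comparing against the
 * minimum work-unit size reported by the language. do_one_work_unit() is an
 * invented placeholder for application work-code.
 */
#if 0
   int32 cyclesForUnit;

   VSs__start_primitive();   //records the low word of the TSC
   do_one_work_unit();       //the code being measured
   cyclesForUnit = VSs__end_primitive_and_give_cycles();

   if( cyclesForUnit < VSs__giveMinWorkUnitCycles( 0.05 ) )
      printf( "work unit too small to amortize scheduling overhead\n" );
#endif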
//===========================================================================

/*Initializes all the data-structures for a VSs system -- but doesn't
 * start it running yet!
 *
 *This runs in the main thread -- before VMS starts up
 *
 *This sets up the semantic layer over the VMS system
 *
 *First, calls VMS_Setup, then creates own environment, making it ready
 * for creating the seed processor and then starting the work.
 */
void
VSs__init()
 {
   VMS_SS__init();
   //masterEnv, a global var, now is partially set up by init_VMS
   // after this, have VMS_int__malloc and VMS_int__free available

   VSs__init_Helper();
 }
void idle_fn( void *data, SlaveVP *animatingSlv )
 { while(1)
    { VMS_int__suspend_slaveVP_and_send_req( animatingSlv );
    }
 }
void
VSs__init_Helper()
 { VSsSemEnv  *semanticEnv;
   int32       i, coreNum, slotNum;
   VSsSemData *semData;

   //Hook up the semantic layer's plug-ins to the Master virt procr
   _VMSMasterEnv->requestHandler = &VSs__Request_Handler;
   _VMSMasterEnv->slaveAssigner  = &VSs__assign_slaveVP_to_slot;

   //create the semantic layer's environment (all its data) and add to
   // the master environment
   semanticEnv = VMS_int__malloc( sizeof( VSsSemEnv ) );
   _VMSMasterEnv->semanticEnv = semanticEnv;

#ifdef HOLISTIC__TURN_ON_PERF_COUNTERS
   _VMSMasterEnv->counterHandler = &VSs__counter_handler;
   VSs__init_counter_data_structs();
#endif

   semanticEnv->shutdownInitiated = FALSE;
   semanticEnv->coreIsDone = VMS_int__malloc( NUM_CORES * sizeof( bool32 ) );
   semanticEnv->numCoresDone = 0;

   //For each animation slot, there is an idle slave, and an initial
   // slave assigned as the current-task-slave. Create them here.
   SlaveVP *idleSlv, *slotTaskSlv;
   for( coreNum = 0; coreNum < NUM_CORES; coreNum++ )
    { semanticEnv->coreIsDone[coreNum] = FALSE;   //use during shutdown

      for( slotNum = 0; slotNum < NUM_ANIM_SLOTS; ++slotNum )
       {
       #ifdef IDLE_SLAVES
         idleSlv = VSs__create_slave_helper( &idle_fn, NULL, semanticEnv, 0 );
         idleSlv->coreAnimatedBy = coreNum;
         idleSlv->animSlotAssignedTo =
            _VMSMasterEnv->allAnimSlots[coreNum][slotNum];
         semanticEnv->idleSlv[coreNum][slotNum] = idleSlv;
       #endif

         slotTaskSlv = VSs__create_slave_helper( &idle_fn, NULL, semanticEnv, 0 );
         slotTaskSlv->coreAnimatedBy = coreNum;
         slotTaskSlv->animSlotAssignedTo =
            _VMSMasterEnv->allAnimSlots[coreNum][slotNum];

         semData = slotTaskSlv->semanticData;
         semData->needsTaskAssigned = TRUE;
         semData->slaveType = SlotTaskSlv;
         semanticEnv->slotTaskSlvs[coreNum][slotNum] = slotTaskSlv;
       }
    }

   //create the ready queues, hash tables used for matching and so forth
   semanticEnv->slavesReadyToResumeQ = makeVMSQ();
   semanticEnv->freeExtraTaskSlvQ    = makeVMSQ();
   semanticEnv->taskReadyQ           = makeVMSQ();

   semanticEnv->argPtrHashTbl = makeHashTable32( 16, &VMS_int__free );
   semanticEnv->commHashTbl   = makeHashTable32( 16, &VMS_int__free );

   semanticEnv->nextCoreToGetNewSlv = 0;

   semanticEnv->numInFlightTasks = 0;
   semanticEnv->deferredSubmitsQ = makeVMSQ();
#ifdef EXTERNAL_SCHEDULER
   VSs__init_ext_scheduler();
#endif

   //TODO: bug -- turn these arrays into dyn arrays to eliminate limit
   //semanticEnv->singletonHasBeenExecutedFlags = makeDynArrayInfo( );
   //semanticEnv->transactionStrucs = makeDynArrayInfo( );
   for( i = 0; i < NUM_STRUCS_IN_SEM_ENV; i++ )
    {
      semanticEnv->fnSingletons[i].endInstrAddr   = NULL;
      semanticEnv->fnSingletons[i].hasBeenStarted = FALSE;
      semanticEnv->fnSingletons[i].hasFinished    = FALSE;
      semanticEnv->fnSingletons[i].waitQ          = makeVMSQ();
      semanticEnv->transactionStrucs[i].waitingVPQ = makeVMSQ();
      semanticEnv->criticalSection[i].isOccupied  = FALSE;
      semanticEnv->criticalSection[i].waitQ       = makeVMSQ();
    #ifdef HOLISTIC__TURN_ON_PERF_COUNTERS
      semanticEnv->criticalSection[i].previous.vp   = 0;
      semanticEnv->criticalSection[i].previous.task = 0;
    #endif
    }

   semanticEnv->numLiveExtraTaskSlvs = 0;   //must be last
   semanticEnv->numLiveThreadSlvs    = 1;   //must be last, counts the seed

#ifdef HOLISTIC__TURN_ON_OBSERVE_UCC
   semanticEnv->unitList = makeListOfArrays( sizeof(Unit), 128 );
   semanticEnv->ctlDependenciesList  = makeListOfArrays( sizeof(Dependency), 128 );
   semanticEnv->commDependenciesList = makeListOfArrays( sizeof(Dependency), 128 );
   semanticEnv->dynDependenciesList  = makeListOfArrays( sizeof(Dependency), 128 );
   semanticEnv->dataDependenciesList = makeListOfArrays( sizeof(Dependency), 128 );
   semanticEnv->singletonDependenciesList = makeListOfArrays( sizeof(Dependency), 128 );
   semanticEnv->warDependenciesList  = makeListOfArrays( sizeof(Dependency), 128 );
   semanticEnv->ntonGroupsInfo = makePrivDynArrayOfSize( (void***)&(semanticEnv->ntonGroups), 8 );

   semanticEnv->hwArcs = makeListOfArrays( sizeof(Dependency), 128 );
   memset( semanticEnv->last_in_slot, 0, NUM_CORES * NUM_ANIM_SLOTS * sizeof(Unit) );
#endif
 }
/*Frees any memory allocated by VSs__init() then calls VMS_int__shutdown
 */
void
VSs__cleanup_after_shutdown()
 { VSsSemEnv *semanticEnv;

   semanticEnv = _VMSMasterEnv->semanticEnv;

#ifdef HOLISTIC__TURN_ON_OBSERVE_UCC
   FILE *output;
   int   n;
   char  filename[255];
   //UCC
   for( n = 0; n < 255; n++ )
    {
      sprintf( filename, "./counters/UCC.%d", n );
      output = fopen( filename, "r" );
      if( output )
       {
         fclose( output );
       }else{
         break;
       }
    }
   if( n < 255 ){
      printf( "Saving UCC to File: %s ...\n", filename );
      output = fopen( filename, "w+" );
      if( output != NULL ){
         set_dependency_file( output );
         //fprintf(output,"digraph Dependencies {\n");
         //set_dot_file(output);
         //FIXME: first line still depends on counters being enabled, replace w/ unit struct!
         //forAllInDynArrayDo(_VMSMasterEnv->counter_history_array_info, &print_dot_node_info );
         forAllInListOfArraysDo( semanticEnv->unitList, &print_unit_to_file );
         forAllInListOfArraysDo( semanticEnv->commDependenciesList, &print_comm_dependency_to_file );
         forAllInListOfArraysDo( semanticEnv->ctlDependenciesList, &print_ctl_dependency_to_file );
         forAllInListOfArraysDo( semanticEnv->dataDependenciesList, &print_data_dependency_to_file );
         forAllInListOfArraysDo( semanticEnv->singletonDependenciesList, &print_singleton_dependency_to_file );
         forAllInListOfArraysDo( semanticEnv->warDependenciesList, &print_war_dependency_to_file );
         forAllInDynArrayDo( semanticEnv->ntonGroupsInfo, &print_nton_to_file );
         //fprintf(output,"}\n");
         fflush( output );

      } else
         printf( "Opening UCC file failed. Please check that folder \"counters\" exists in run directory and has write permission.\n" );
   } else {
      printf( "Could not open UCC file, please clean \"counters\" folder. (Must contain less than 255 files.)\n" );
   }
   //Loop Graph
   for( n = 0; n < 255; n++ )
    {
      sprintf( filename, "./counters/LoopGraph.%d", n );
      output = fopen( filename, "r" );
      if( output )
       {
         fclose( output );
       }else{
         break;
       }
    }
   if( n < 255 ){
      printf( "Saving LoopGraph to File: %s ...\n", filename );
      output = fopen( filename, "w+" );
      if( output != NULL ){
         set_dependency_file( output );
         //fprintf(output,"digraph Dependencies {\n");
         //set_dot_file(output);
         //FIXME: first line still depends on counters being enabled, replace w/ unit struct!
         //forAllInDynArrayDo(_VMSMasterEnv->counter_history_array_info, &print_dot_node_info );
         forAllInListOfArraysDo( semanticEnv->unitList, &print_unit_to_file );
         forAllInListOfArraysDo( semanticEnv->commDependenciesList, &print_comm_dependency_to_file );
         forAllInListOfArraysDo( semanticEnv->ctlDependenciesList, &print_ctl_dependency_to_file );
         forAllInListOfArraysDo( semanticEnv->dataDependenciesList, &print_data_dependency_to_file );
         forAllInListOfArraysDo( semanticEnv->singletonDependenciesList, &print_singleton_dependency_to_file );
         forAllInListOfArraysDo( semanticEnv->dynDependenciesList, &print_dyn_dependency_to_file );
         forAllInListOfArraysDo( semanticEnv->warDependenciesList, &print_war_dependency_to_file );
         forAllInListOfArraysDo( semanticEnv->hwArcs, &print_hw_dependency_to_file );
         //fprintf(output,"}\n");
         fflush( output );

      } else
         printf( "Opening LoopGraph file failed. Please check that folder \"counters\" exists in run directory and has write permission.\n" );
   } else {
      printf( "Could not open LoopGraph file, please clean \"counters\" folder. (Must contain less than 255 files.)\n" );
   }

   freeListOfArrays( semanticEnv->unitList );
   freeListOfArrays( semanticEnv->commDependenciesList );
   freeListOfArrays( semanticEnv->ctlDependenciesList );
   freeListOfArrays( semanticEnv->dynDependenciesList );
   freeListOfArrays( semanticEnv->dataDependenciesList );
   freeListOfArrays( semanticEnv->warDependenciesList );
   freeListOfArrays( semanticEnv->singletonDependenciesList );
   freeListOfArrays( semanticEnv->hwArcs );

#endif
#ifdef HOLISTIC__TURN_ON_PERF_COUNTERS
   FILE *output2;
   int   n2;
   char  filename2[255];
   for( n2 = 0; n2 < 255; n2++ )
    {
      sprintf( filename2, "./counters/Counters.%d.csv", n2 );
      output2 = fopen( filename2, "r" );
      if( output2 )
       {
         fclose( output2 );
       }else{
         break;
       }
    }
   if( n2 < 255 ){
      printf( "Saving Counter measurements to File: %s ...\n", filename2 );
      output2 = fopen( filename2, "w+" );
      if( output2 != NULL ){
         set_counter_file( output2 );
         int i;
         for( i = 0; i < NUM_CORES; i++ ){
            forAllInListOfArraysDo( semanticEnv->counterList[i], &print_counter_events_to_file );
            fflush( output2 );
         }
      } else
         printf( "Opening Counters file failed. Please check that folder \"counters\" exists in run directory and has write permission.\n" );
   } else {
      printf( "Could not open Counters file, please clean \"counters\" folder. (Must contain less than 255 files.)\n" );
   }
#endif
   /* It's all allocated inside VMS's big chunk -- that's about to be freed,
    * so nothing to do here */
   /*
   int coreIdx, slotIdx;
   SlaveVP *slotSlv;
   for( coreIdx = 0; coreIdx < NUM_CORES; coreIdx++ ) {
      for( slotIdx = 0; slotIdx < NUM_ANIM_SLOTS; slotIdx++ ) {
         slotSlv = semanticEnv->slotTaskSlvs[coreIdx][slotIdx];
         VMS_int__free( slotSlv->semanticData );
         VMS_int__free( slotSlv->startOfStack );
         VMS_int__free( slotSlv );
   #ifdef IDLE_SLAVES
         slotSlv = semanticEnv->idleSlv[coreIdx][slotIdx];
         VMS_int__free( slotSlv->semanticData );
         VMS_int__free( slotSlv->startOfStack );
         VMS_int__free( slotSlv );
   #endif
      }
   }

   freePrivQ( semanticEnv->freeExtraTaskSlvQ );
   freePrivQ( semanticEnv->slavesReadyToResumeQ );
   freePrivQ( semanticEnv->taskReadyQ );
   freeHashTable( semanticEnv->argPtrHashTbl );
   freeHashTable( semanticEnv->commHashTbl );
   VMS_int__free( _VMSMasterEnv->semanticEnv );
   */
   VMS_SS__cleanup_at_end_of_shutdown();
 }
//===========================================================================

SlaveVP *
VSs__create_thread( TopLevelFnPtr fnPtr, void *initData,
                    SlaveVP *creatingThd )
 { VSsSemReq reqData;

   //the semantic request data is on the stack and disappears when this
   // call returns -- it's guaranteed to remain in the VP's stack for as
   // long as the VP is suspended.
   reqData.reqType    = 0;   //know type because in a VMS create req
   reqData.fnPtr      = fnPtr;
   reqData.initData   = initData;
   reqData.callingSlv = creatingThd;

   VMS_WL__send_create_slaveVP_req( &reqData, creatingThd );

   return creatingThd->dataRetFromReq;
 }

/*This is always the last thing done in the code animated by a thread VP.
 * Normally, this would be the last line of the thread's top level function.
 * But, if the thread exits from any point, it has to do so by calling
 * this.
 *
 *It simply sends a dissipate request, which handles all the state cleanup.
 */
void
VSs__end_thread( SlaveVP *thdToEnd )
 {
   VMS_WL__send_dissipate_req( thdToEnd );
 }
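/*Illustrative sketch only: a hypothetical thread top-level function and the
 * call that creates it. MyThread__top_level_fn and dataForThd are invented
 * for illustration; the required shape is just "do the work, then call
 * VSs__end_thread() on every exit path".
 */
#if 0
void
MyThread__top_level_fn( void *initData, SlaveVP *animSlv )
 {
   //... the thread's work, passing animSlv to any VSs calls it makes ...

   VSs__end_thread( animSlv );   //must be the last thing the thread does
 }

//from inside an already-running thread VP animated by creatingThd:
//  SlaveVP *newThd =
//     VSs__create_thread( &MyThread__top_level_fn, dataForThd, creatingThd );
#endif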
//===========================================================================

//======================= task submit and end ==============================
/*
 */
void
VSs__submit_task( VSsTaskType *taskType, void *args, SlaveVP *animSlv )
 { VSsSemReq reqData;

   reqData.reqType = submit_task;

   reqData.taskType   = taskType;
   reqData.args       = args;
   reqData.callingSlv = animSlv;

   reqData.taskID = NULL;

   VMS_WL__send_sem_request( &reqData, animSlv );
 }
int32 *
VSs__create_taskID_of_size( int32 numInts, SlaveVP *animSlv )
 { int32 *taskID;

   taskID = VMS_WL__malloc( sizeof(int32) + numInts * sizeof(int32) );
   taskID[0] = numInts;
   return taskID;
 }
void
VSs__submit_task_with_ID( VSsTaskType *taskType, void *args, int32 *taskID,
                          SlaveVP *animSlv )
 { VSsSemReq reqData;

   reqData.reqType = submit_task;

   reqData.taskType   = taskType;
   reqData.args       = args;
   reqData.taskID     = taskID;
   reqData.callingSlv = animSlv;

   VMS_WL__send_sem_request( &reqData, animSlv );
 }
/*This call is the last to happen in every task. It causes the slave to
 * suspend and get the next task out of the task-queue. Notice there is no
 * assigner here.. only one slave, no slave ReadyQ, and so on..
 *Can either make the assigner take the next task out of the taskQ, or can
 * leave all as it is, and make task-end take the next task.
 *Note: this fits the case in the new VMS for no-context tasks, so will use
 * the built-in taskQ of new VMS, and should be local and much faster.
 *
 *The task-stub is saved in the animSlv, so the request handler will get it
 * from there, along with the task-type which has arg types, and so on..
 *
 * NOTE: if want, don't need to send the animating SlaveVP around..
 * instead, can make a single slave per core, and coreCtrlr looks up the
 * slave from having the core number.
 *
 *But, to stay compatible with all the other VMS languages, leave it in..
 */
void
VSs__end_task( SlaveVP *animSlv )
 { VSsSemReq reqData;

   reqData.reqType    = end_task;
   reqData.callingSlv = animSlv;

   VMS_WL__send_sem_request( &reqData, animSlv );
 }
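/*Illustrative sketch only: a hypothetical task function plus the code that
 * submits it and waits for completion. MyTask__fn, MyTaskArgs, and
 * myTaskType are invented for illustration; the one fixed requirement shown
 * is that every task finishes by calling VSs__end_task().
 */
#if 0
typedef struct { int32 start; int32 end; } MyTaskArgs;

void
MyTask__fn( void *args, SlaveVP *animSlv )
 { MyTaskArgs *myArgs = (MyTaskArgs *)args;

   //... do the work for the range [myArgs->start, myArgs->end) ...

   VSs__end_task( animSlv );   //last call in every task
 }

//from inside work-code animated by animSlv:
//  MyTaskArgs *args = VMS_WL__malloc( sizeof(MyTaskArgs) );
//  args->start = 0;  args->end = 100;
//  VSs__submit_task( myTaskType, args, animSlv );   //myTaskType names MyTask__fn
//  VSs__taskwait( animSlv );                        //suspend until child tasks finish
#endif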
void
VSs__taskwait( SlaveVP *animSlv )
 {
   VSsSemReq reqData;

   reqData.reqType    = taskwait;
   reqData.callingSlv = animSlv;

   VMS_WL__send_sem_request( &reqData, animSlv );
 }

void
VSs__taskwait_on( SlaveVP *animSlv, void *ptr )
 { VSsSemReq reqData;

   reqData.reqType    = taskwait_on;
   reqData.callingSlv = animSlv;

   reqData.args = ptr;

   VMS_WL__send_sem_request( &reqData, animSlv );
 }

void
VSs__start_critical( SlaveVP *animSlv, int32 name )
 { VSsSemReq reqData;

   reqData.reqType    = critical_start;
   reqData.callingSlv = animSlv;

   reqData.criticalID = name;

   VMS_WL__send_sem_request( &reqData, animSlv );
 }

void
VSs__end_critical( SlaveVP *animSlv, int32 name )
 { VSsSemReq reqData;

   reqData.reqType    = critical_end;
   reqData.callingSlv = animSlv;

   reqData.criticalID = name;

   VMS_WL__send_sem_request( &reqData, animSlv );
 }
//========================== send and receive ============================
//
int32 *
VSs__give_self_taskID( SlaveVP *animSlv )
 {
   return ((VSsSemData *)animSlv->semanticData)->taskStub->taskID;
 }
//================================ send ===================================

void
VSs__send_of_type_to( void *msg, const int32 type, int32 *receiverID,
                      SlaveVP *senderSlv )
 { VSsSemReq reqData;

   reqData.reqType = send_type_to;

   reqData.msg        = msg;
   reqData.msgType    = type;
   reqData.receiverID = receiverID;
   reqData.senderSlv  = senderSlv;

   reqData.nextReqInHashEntry = NULL;

   VMS_WL__send_sem_request( &reqData, senderSlv );

   //When come back from suspend, no longer own data reachable from msg
 }

void
VSs__send_from_to( void *msg, int32 *senderID, int32 *receiverID, SlaveVP *senderSlv )
 { VSsSemReq reqData;

   reqData.reqType = send_from_to;

   reqData.msg        = msg;
   reqData.senderID   = senderID;
   reqData.receiverID = receiverID;
   reqData.senderSlv  = senderSlv;

   reqData.nextReqInHashEntry = NULL;

   VMS_WL__send_sem_request( &reqData, senderSlv );
 }
//================================ receive ================================

/*The "type" version of send and receive creates a many-to-one relationship.
 * The sender is anonymous, and many sends can stack up, waiting to be
 * received. The same receiver can also have send from-to's
 * waiting for it, and those will be kept separate from the "type"
 * messages.
 */
void *
VSs__receive_type_to( const int32 type, int32 *receiverID, SlaveVP *receiverSlv )
 { DEBUG__printf1( dbgRqstHdlr, "WL: receive type to %d", receiverID[1] );
   VSsSemReq reqData;

   reqData.reqType = receive_type_to;

   reqData.msgType     = type;
   reqData.receiverID  = receiverID;
   reqData.receiverSlv = receiverSlv;

   reqData.nextReqInHashEntry = NULL;

   VMS_WL__send_sem_request( &reqData, receiverSlv );

   return receiverSlv->dataRetFromReq;
 }

/*Call this at the point a receiving task wants in-coming data.
 * Use this from-to form when know senderID -- it makes a direct channel
 * between sender and receiver.
 */
void *
VSs__receive_from_to( int32 *senderID, int32 *receiverID, SlaveVP *receiverSlv )
 {
   VSsSemReq reqData;

   reqData.reqType = receive_from_to;

   reqData.senderID    = senderID;
   reqData.receiverID  = receiverID;
   reqData.receiverSlv = receiverSlv;

   reqData.nextReqInHashEntry = NULL;
   DEBUG__printf2( dbgRqstHdlr, "WL: receive from %d to: %d", reqData.senderID[1], reqData.receiverID[1] );

   VMS_WL__send_sem_request( &reqData, receiverSlv );

   return receiverSlv->dataRetFromReq;
 }
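/*Illustrative sketch only: a sender task passing a message to a receiver
 * task through the from-to form. MyMsg, receiverTaskID, and senderTaskID are
 * invented for illustration, and it is assumed here that both tasks were
 * given IDs at submit time (VSs__create_taskID_of_size plus
 * VSs__submit_task_with_ID) so that taskIDs can serve as the endpoints.
 */
#if 0
typedef struct { int32 value; } MyMsg;

//in the sending task (animated by senderSlv):
   MyMsg *msg = VMS_WL__malloc( sizeof(MyMsg) );
   msg->value = 42;
   VSs__send_from_to( msg, VSs__give_self_taskID( senderSlv ), receiverTaskID,
                      senderSlv );
   //after this returns, the sender no longer owns data reachable from msg

//in the receiving task (animated by receiverSlv):
   MyMsg *in = (MyMsg *)
      VSs__receive_from_to( senderTaskID, VSs__give_self_taskID( receiverSlv ),
                            receiverSlv );
#endif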
//==========================================================================
//
/*A function singleton is a function whose body executes exactly once, on a
 * single core, no matter how many times the function is called and no
 * matter how many cores or the timing of cores calling it.
 *
 *A data singleton is a ticket attached to data. That ticket can be used
 * to get the data through the function exactly once, no matter how many
 * times the data is given to the function, and no matter the timing of
 * trying to get the data through from different cores.
 */
/*asm function declarations*/
void asm_save_ret_to_singleton( VSsSingleton *singletonPtrAddr );
void asm_write_ret_from_singleton( VSsSingleton *singletonPtrAddr );

/*Fn singleton uses ID as index into array of singleton structs held in the
 * semantic environment.
 */
void
VSs__start_fn_singleton( int32 singletonID, SlaveVP *animSlv )
 {
   VSsSemReq reqData;

   //
   reqData.reqType     = singleton_fn_start;
   reqData.singletonID = singletonID;

   VMS_WL__send_sem_request( &reqData, animSlv );
   if( animSlv->dataRetFromReq )   //will be 0 or addr of label in end singleton
    {
      VSsSemEnv *semEnv = VMS_int__give_sem_env_for( animSlv );
      asm_write_ret_from_singleton( &(semEnv->fnSingletons[ singletonID ]) );
    }
 }

/*Data singleton hands addr of loc holding a pointer to a singleton struct.
 * The start_data_singleton makes the structure and puts its addr into the
 * location.
 */
void
VSs__start_data_singleton( VSsSingleton **singletonAddr, SlaveVP *animSlv )
 {
   VSsSemReq reqData;

   if( *singletonAddr && (*singletonAddr)->hasFinished )
      goto JmpToEndSingleton;

   reqData.reqType          = singleton_data_start;
   reqData.singletonPtrAddr = singletonAddr;

   VMS_WL__send_sem_request( &reqData, animSlv );
   if( animSlv->dataRetFromReq )   //either 0 or end singleton's return addr
    { //Assembly code changes the return addr on the stack to the one
      // saved into the singleton by the end-singleton-fn
      //The return addr is at 0x4(%%ebp)
JmpToEndSingleton:
      asm_write_ret_from_singleton( *singletonAddr );
    }
   //now, simply return
   //will exit either from the start singleton call or the end-singleton call
 }

/*Uses ID as index into array of flags. If flag already set, resumes from
 * end-label. Else, sets flag and resumes normally.
 *
 *Note, this call cannot be inlined because the instr addr at the label
 * inside is shared by all invocations of a given singleton ID.
 */
void
VSs__end_fn_singleton( int32 singletonID, SlaveVP *animSlv )
 {
   VSsSemReq reqData;

   //don't need this addr until after at least one singleton has reached
   // this function
   VSsSemEnv *semEnv = VMS_int__give_sem_env_for( animSlv );
   asm_write_ret_from_singleton( &(semEnv->fnSingletons[ singletonID ]) );

   reqData.reqType     = singleton_fn_end;
   reqData.singletonID = singletonID;

   VMS_WL__send_sem_request( &reqData, animSlv );

   //EndSingletonInstrAddr:
   return;
 }
void
VSs__end_data_singleton( VSsSingleton **singletonPtrAddr, SlaveVP *animSlv )
 {
   VSsSemReq reqData;

   //don't need this addr until after singleton struct has reached
   // this function for first time
   //do assembly that saves the return addr of this fn call into the
   // data singleton -- that data-singleton can only be given to exactly
   // one instance in the code of this function. However, can use this
   // function in different places for different data-singletons.
   // (*(singletonAddr))->endInstrAddr = &&EndDataSingletonInstrAddr;

   asm_save_ret_to_singleton( *singletonPtrAddr );

   reqData.reqType          = singleton_data_end;
   reqData.singletonPtrAddr = singletonPtrAddr;

   VMS_WL__send_sem_request( &reqData, animSlv );
 }
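/*Illustrative sketch only: guarding a once-only initialization with a
 * function singleton. MY_INIT_SINGLETON_ID and init_shared_tables() are
 * invented for illustration; whichever slave reaches the pair first runs the
 * body, and every later caller skips straight past the end call.
 */
#if 0
#define MY_INIT_SINGLETON_ID  3   //index into semEnv->fnSingletons

void
init_shared_tables_once( SlaveVP *animSlv )
 {
   VSs__start_fn_singleton( MY_INIT_SINGLETON_ID, animSlv );

   init_shared_tables();   //executes exactly once, no matter how many callers

   VSs__end_fn_singleton( MY_INIT_SINGLETON_ID, animSlv );
 }
#endif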
/*This executes the function in the masterVP, so it executes in isolation
 * from any other copies -- only one copy of the function can ever execute
 * at a time.
 *
 *It suspends to the master, and the request handler takes the function
 * pointer out of the request and calls it, then resumes the VP.
 *Only very short functions should be called this way -- for longer-running
 * isolation, use transaction-start and transaction-end, which run the code
 * between as work-code.
 */
void
VSs__animate_short_fn_in_isolation( PtrToAtomicFn ptrToFnToExecInMaster,
                                    void *data, SlaveVP *animSlv )
 {
   VSsSemReq reqData;

   //
   reqData.reqType          = atomic;
   reqData.fnToExecInMaster = ptrToFnToExecInMaster;
   reqData.dataForFn        = data;

   VMS_WL__send_sem_request( &reqData, animSlv );
 }
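/*Illustrative sketch only: a short function run in isolation inside the
 * masterVP. SharedCounter, increment_shared_counter, and sharedCounter are
 * invented for illustration, and the void(*)(void*) shape is only assumed
 * here for PtrToAtomicFn; because the master runs one such function at a
 * time, no lock is needed around the increment.
 */
#if 0
typedef struct { int32 count; } SharedCounter;

void
increment_shared_counter( void *data )
 { ((SharedCounter *)data)->count += 1;
 }

//from work-code animated by animSlv:
//  VSs__animate_short_fn_in_isolation( &increment_shared_counter,
//                                      &sharedCounter, animSlv );
#endif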
/*This suspends to the master.
 *First, it looks at the VP's data, to see the highest transactionID that VP
 * already has entered. If the current ID is not larger, it throws an
 * exception stating a bug in the code. Otherwise it puts the current ID
 * there, and adds the ID to a linked list of IDs entered -- the list is
 * used to check that exits are properly ordered.
 *Next it uses transactionID as index into an array of transaction
 * structures.
 *If the "VP_currently_executing" field is non-null, then put requesting VP
 * into queue in the struct. (At some point a holder will request
 * end-transaction, which will take this VP from the queue and resume it.)
 *If NULL, then write the requesting VP into the field and resume.
 */
void
VSs__start_transaction( int32 transactionID, SlaveVP *animSlv )
 {
   VSsSemReq reqData;

   //
   reqData.callingSlv = animSlv;
   reqData.reqType    = trans_start;
   reqData.transID    = transactionID;

   VMS_WL__send_sem_request( &reqData, animSlv );
 }
/*This suspends to the master, then uses transactionID as index into an
 * array of transaction structures.
 *It looks at VP_currently_executing to be sure it's same as requesting VP.
 * If different, throws an exception, stating there's a bug in the code.
 *Next it looks at the queue in the structure.
 *If it's empty, it sets VP_currently_executing field to NULL and resumes.
 *If something in, gets it, sets VP_currently_executing to that VP, then
 * resumes both.
 */
void
VSs__end_transaction( int32 transactionID, SlaveVP *animSlv )
 {
   VSsSemReq reqData;

   //
   reqData.callingSlv = animSlv;
   reqData.reqType    = trans_end;
   reqData.transID    = transactionID;

   VMS_WL__send_sem_request( &reqData, animSlv );
 }
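/*Illustrative sketch only: bracketing longer-running isolated work-code with
 * a transaction. MY_ACCOUNTS_TRANS_ID and the account updates are invented
 * for illustration; unlike VSs__animate_short_fn_in_isolation, the code
 * between start and end runs as normal work-code on the calling slave, with
 * only one slave at a time inside a given transaction ID.
 */
#if 0
#define MY_ACCOUNTS_TRANS_ID  0   //index into semEnv->transactionStrucs

   VSs__start_transaction( MY_ACCOUNTS_TRANS_ID, animSlv );

   accounts[from] -= amount;
   accounts[to]   += amount;

   VSs__end_transaction( MY_ACCOUNTS_TRANS_ID, animSlv );
#endif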
//======================== Internal ==================================
/*
 */
SlaveVP *
VSs__create_slave_with( TopLevelFnPtr fnPtr, void *initData,
                        SlaveVP *creatingSlv )
 { VSsSemReq reqData;

   //the semantic request data is on the stack and disappears when this
   // call returns -- it's guaranteed to remain in the VP's stack for as
   // long as the VP is suspended.
   reqData.reqType          = 0;    //know type because in a VMS create req
   reqData.coreToAssignOnto = -1;   //means round-robin assign
   reqData.fnPtr            = fnPtr;
   reqData.initData         = initData;
   reqData.callingSlv       = creatingSlv;

   VMS_WL__send_create_slaveVP_req( &reqData, creatingSlv );

   return creatingSlv->dataRetFromReq;
 }
SlaveVP *
VSs__create_slave_with_affinity( TopLevelFnPtr fnPtr, void *initData,
                                 SlaveVP *creatingSlv, int32 coreToAssignOnto )
 { VSsSemReq reqData;

   //the semantic request data is on the stack and disappears when this
   // call returns -- it's guaranteed to remain in the VP's stack for as
   // long as the VP is suspended.
   reqData.reqType          = create_slave_w_aff;   //not used, May 2012
   reqData.coreToAssignOnto = coreToAssignOnto;
   reqData.fnPtr            = fnPtr;
   reqData.initData         = initData;
   reqData.callingSlv       = creatingSlv;

   VMS_WL__send_create_slaveVP_req( &reqData, creatingSlv );

   return creatingSlv->dataRetFromReq;
 }