Mercurial > cgi-bin > hgwebdir.cgi > VMS > VMS_Implementations > VSs_impls > VSs__MC_shared_impl
view VSs.c @ 38:a951b38d2cfc
remove need for end_thread()
| author | Nina Engelhardt <nengel@mailbox.tu-berlin.de> |
|---|---|
| date | Fri, 17 May 2013 17:49:49 +0200 |
| parents | c8d4f6d3c7d3 |
| children | 0715109abb08 |
line source
1 /*
2 * Copyright 2010 OpenSourceCodeStewardshipFoundation
3 *
4 * Licensed under BSD
5 */
7 #include <stdio.h>
8 #include <stdlib.h>
9 #include <malloc.h>
11 #include "Queue_impl/PrivateQueue.h"
12 #include "Hash_impl/PrivateHash.h"
14 #include "VSs.h"
15 #include "Measurement/VSs_Counter_Recording.h"
17 //==========================================================================
19 void
20 VSs__init();
22 void
23 VSs__init_Helper();
24 //==========================================================================
28 //===========================================================================
31 /*These are the library functions *called in the application*
32 *
33 *There's a pattern for the outside sequential code to interact with the
34 * VMS_HW code.
35 *The VMS_HW system is inside a boundary.. every VSs system is in its
36 * own directory that contains the functions for each of the processor types.
37 * One of the processor types is the "seed" processor that starts the
38 * cascade of creating all the processors that do the work.
39 *So, in the directory is a file called "EntryPoint.c" that contains the
40 * function, named appropriately to the work performed, that the outside
41 * sequential code calls. This function follows a pattern:
42 *1) it calls VSs__init()
43 *2) it creates the initial data for the seed processor, which is passed
44 * in to the function
45 *3) it creates the seed VSs processor, with the data to start it with.
46 *4) it calls startVSsThenWaitUntilWorkDone
47 *5) it gets the returnValue from the transfer struc and returns that
48 * from the function
49 *
50 *For now, a new VSs system has to be created via VSs__init every
51 * time an entry point function is called -- later, might add letting the
52 * VSs system be created once, and let all the entry points just reuse
53 * it -- want to be as simple as possible now, and see by using what makes
54 * sense for later..
55 */
59 //===========================================================================
61 /*This is the "border crossing" function -- the thing that crosses from the
62 * outside world, into the VMS_HW world. It initializes and starts up the
63 * VMS system, then creates one processor from the specified function and
64 * puts it into the readyQ. From that point, that one function is resp.
65 * for creating all the other processors, that then create others, and so
66 * forth.
67 *When all the processors, including the seed, have dissipated, then this
68 * function returns. The results will have been written by side-effect via
69 * pointers read from, or written into initData.
70 *
71 *NOTE: no Threads should exist in the outside program that might touch
72 * any of the data reachable from initData passed in to here
73 */
/*Border-crossing entry point: initializes VMS, builds the seed thread-slave
 * that runs fnPtr(initData), starts the work, and returns only after all
 * slaves (including the seed) have dissipated. Results are communicated by
 * side-effect through pointers reachable from initData.
 *NOTE: no outside threads may touch data reachable from initData while this
 * runs (see the header comment above this function).
 */
void
VSs__create_seed_slave_and_do_work( TopLevelFnPtr fnPtr, void *initData )
 { VSsSemEnv *semEnv;
   SlaveVP *seedSlv;
   VSsSemData *semData;
   VSsTaskStub *threadTaskStub, *parentTaskStub;
   int32* taskID;

   VSs__init(); //normal multi-thd

   semEnv = _VMSMasterEnv->semanticEnv;

      //VSs starts with one processor, which is put into initial environ,
      // and which then calls create() to create more, thereby expanding work
   seedSlv = VSs__create_slave_helper( &VSs__run_thread , fnPtr, initData,
                                    semEnv, semEnv->nextCoreToGetNewSlv++ );
      //NB: this assumes that after VSs_init() nextCoreToGetNewSlv is still 0,
      // and also that there is more than 1 core.

      //seed slave is a thread slave, so make a thread's task stub for it
      // and then make another to stand for the seed's parent task. Make
      // the parent be already ended, and have one child (the seed). This
      // will make the dissipate handler do the right thing when the seed
      // is dissipated.
   threadTaskStub = create_thread_task_stub( initData );
   parentTaskStub = create_thread_task_stub( NULL );
   parentTaskStub->isEnded = TRUE;
   parentTaskStub->numLiveChildThreads = 1; //so dissipate works for seed
   threadTaskStub->parentTaskStub = parentTaskStub;
   threadTaskStub->slaveAssignedTo = seedSlv;

      //seed's taskID is the two ints {1, -1}
      // NOTE(review): presumably "length 1, first element -1" to match the
      // VSs__create_taskID_of_size layout -- confirm against request handler
   taskID = VMS_WL__malloc(2 * sizeof(int32) );
   taskID[0] = 1;
   taskID[1] = -1;
   threadTaskStub->taskID = taskID;

   semData = (VSsSemData *)seedSlv->semanticData;
      //seedVP is a thread, so has a permanent task
   semData->needsTaskAssigned = FALSE;
   semData->taskStub = threadTaskStub;
   semData->slaveType = ThreadSlv;

   resume_slaveVP( seedSlv, semEnv ); //returns right away, just queues Slv

   VMS_SS__start_the_work_then_wait_until_done(); //normal multi-thd

   VSs__cleanup_after_shutdown();
 }
123 int32
124 VSs__giveMinWorkUnitCycles( float32 percentOverhead )
125 {
126 return MIN_WORK_UNIT_CYCLES;
127 }
129 int32
130 VSs__giveIdealNumWorkUnits()
131 {
132 return NUM_ANIM_SLOTS * NUM_CORES;
133 }
135 int32
136 VSs__give_number_of_cores_to_schedule_onto()
137 {
138 return NUM_CORES;
139 }
/*For now, use TSC -- later, make these two macros with assembly that first
 * saves jump point, and second jumps back several times to get reliable time
 */
/*Records the low 32 bits of the time-stamp counter into the semantic env's
 * primitiveStartTime, marking the start of a primitive-timing measurement.
 * Paired with VSs__end_primitive_and_give_cycles().
 */
void
VSs__start_primitive()
 { saveLowTimeStampCountInto( ((VSsSemEnv *)(_VMSMasterEnv->semanticEnv))->
                              primitiveStartTime );
 }
/*Just quick and dirty for now -- make reliable later
 * will want this to jump back several times -- to be sure cache is warm
 * because don't want comm time included in calc-time measurement -- and
 * also to throw out any "weird" values due to OS interrupt or TSC rollover
 */
/*Returns elapsed cycles since the matching VSs__start_primitive() call,
 * from the low 32 bits of the TSC. A single sample: rollover or an OS
 * interrupt between the two reads can make the value wrong (see above).
 */
int32
VSs__end_primitive_and_give_cycles()
 { int32 endTime, startTime;
   //TODO: fix by repeating time-measurement
   saveLowTimeStampCountInto( endTime );
   startTime =((VSsSemEnv*)(_VMSMasterEnv->semanticEnv))->primitiveStartTime;
   return (endTime - startTime);
 }
164 //===========================================================================
166 /*Initializes all the data-structures for a VSs system -- but doesn't
167 * start it running yet!
168 *
169 *This runs in the main thread -- before VMS starts up
170 *
171 *This sets up the semantic layer over the VMS system
172 *
173 *First, calls VMS_Setup, then creates own environment, making it ready
174 * for creating the seed processor and then starting the work.
175 */
/*Initializes all the data-structures for a VSs system -- but doesn't start
 * it running yet. Runs in the main thread, before VMS starts up.
 */
void
VSs__init()
 {
   VMS_SS__init(); //sets up _VMSMasterEnv; VMS_int__malloc/free usable after
      //masterEnv, a global var, now is partially set up by init_VMS
      // after this, have VMS_int__malloc and VMS_int__free available
   VSs__init_Helper(); //layers the VSs semantic environment on top
 }
187 void idle_fn(void* data){
188 while(1){
189 VMS_int__suspend_slaveVP_and_send_req(currVP);
190 }
191 }
193 void
194 VSs__init_Helper()
195 { VSsSemEnv *semanticEnv;
196 int32 i, coreNum, slotNum;
197 VSsSemData *semData;
199 //Hook up the semantic layer's plug-ins to the Master virt procr
200 _VMSMasterEnv->requestHandler = &VSs__Request_Handler;
201 _VMSMasterEnv->slaveAssigner = &VSs__assign_slaveVP_to_slot;
203 //create the semantic layer's environment (all its data) and add to
204 // the master environment
205 semanticEnv = VMS_int__malloc( sizeof( VSsSemEnv ) );
206 _VMSMasterEnv->semanticEnv = semanticEnv;
208 #ifdef HOLISTIC__TURN_ON_PERF_COUNTERS
209 _VMSMasterEnv->counterHandler = &VSs__counter_handler;
210 VSs__init_counter_data_structs();
211 #endif
213 //semanticEnv->shutdownInitiated = FALSE;
214 semanticEnv->coreIsDone = VMS_int__malloc( NUM_CORES * sizeof( bool32 ) );
215 semanticEnv->numCoresDone = 0;
216 //For each animation slot, there is an idle slave, and an initial
217 // slave assigned as the current-task-slave. Create them here.
218 SlaveVP *idleSlv, *slotTaskSlv;
219 for( coreNum = 0; coreNum < NUM_CORES; coreNum++ )
220 { semanticEnv->coreIsDone[coreNum] = FALSE; //use during shutdown
222 for( slotNum = 0; slotNum < NUM_ANIM_SLOTS; ++slotNum )
223 {
224 #ifdef IDLE_SLAVES
225 idleSlv = VSs__create_slave_helper( &VSs__run_thread, &idle_fn, NULL, semanticEnv, 0);
226 idleSlv->coreAnimatedBy = coreNum;
227 idleSlv->animSlotAssignedTo =
228 _VMSMasterEnv->allAnimSlots[coreNum][slotNum];
229 semanticEnv->idleSlv[coreNum][slotNum] = idleSlv;
230 #endif
232 slotTaskSlv = VSs__create_slave_helper(&VSs__run_thread, &idle_fn, NULL, semanticEnv, 0);
233 slotTaskSlv->coreAnimatedBy = coreNum;
234 slotTaskSlv->animSlotAssignedTo =
235 _VMSMasterEnv->allAnimSlots[coreNum][slotNum];
237 semData = slotTaskSlv->semanticData;
238 semData->needsTaskAssigned = TRUE;
239 semData->slaveType = SlotTaskSlv;
240 semanticEnv->slotTaskSlvs[coreNum][slotNum] = slotTaskSlv;
241 }
242 }
244 //create the ready queues, hash tables used for matching and so forth
245 semanticEnv->slavesReadyToResumeQ = makeVMSQ();
246 semanticEnv->freeExtraTaskSlvQ = makeVMSQ();
247 semanticEnv->taskReadyQ = makeVMSQ();
249 semanticEnv->argPtrHashTbl = makeHashTable32( 20, &free_pointer_entry );
250 semanticEnv->commHashTbl = makeHashTable32( 16, &VMS_int__free );
252 semanticEnv->nextCoreToGetNewSlv = 0;
254 semanticEnv->numInFlightTasks = 0;
255 semanticEnv->deferredSubmitsQ = makeVMSQ();
256 #ifdef EXTERNAL_SCHEDULER
257 VSs__init_ext_scheduler();
258 #endif
259 //TODO: bug -- turn these arrays into dyn arrays to eliminate limit
260 //semanticEnv->singletonHasBeenExecutedFlags = makeDynArrayInfo( );
261 //semanticEnv->transactionStrucs = makeDynArrayInfo( );
262 for( i = 0; i < NUM_STRUCS_IN_SEM_ENV; i++ )
263 {
264 semanticEnv->fnSingletons[i].endInstrAddr = NULL;
265 semanticEnv->fnSingletons[i].hasBeenStarted = FALSE;
266 semanticEnv->fnSingletons[i].hasFinished = FALSE;
267 semanticEnv->fnSingletons[i].waitQ = makeVMSQ();
268 semanticEnv->transactionStrucs[i].waitingVPQ = makeVMSQ();
269 semanticEnv->criticalSection[i].isOccupied = FALSE;
270 semanticEnv->criticalSection[i].waitQ = makeVMSQ();
271 #ifdef HOLISTIC__TURN_ON_PERF_COUNTERS
272 semanticEnv->criticalSection[i].previous.vp = 0;
273 semanticEnv->criticalSection[i].previous.task = 0;
274 #endif
275 }
277 semanticEnv->numLiveExtraTaskSlvs = 0; //must be last
278 semanticEnv->numLiveThreadSlvs = 1; //must be last, counts the seed
280 #ifdef HOLISTIC__TURN_ON_OBSERVE_UCC
281 semanticEnv->unitList = makeListOfArrays(sizeof(Unit),128);
282 semanticEnv->ctlDependenciesList = makeListOfArrays(sizeof(Dependency),128);
283 semanticEnv->commDependenciesList = makeListOfArrays(sizeof(Dependency),128);
284 semanticEnv->dynDependenciesList = makeListOfArrays(sizeof(Dependency),128);
285 semanticEnv->dataDependenciesList = makeListOfArrays(sizeof(Dependency),128);
286 semanticEnv->singletonDependenciesList = makeListOfArrays(sizeof(Dependency),128);
287 semanticEnv->warDependenciesList = makeListOfArrays(sizeof(Dependency),128);
288 semanticEnv->ntonGroupsInfo = makePrivDynArrayOfSize((void***)&(semanticEnv->ntonGroups),8);
290 semanticEnv->hwArcs = makeListOfArrays(sizeof(Dependency),128);
291 memset(semanticEnv->last_in_slot,0,sizeof(NUM_CORES * NUM_ANIM_SLOTS * sizeof(Unit)));
292 #endif
293 }
296 /*Frees any memory allocated by VSs__init() then calls VMS_int__shutdown
297 */
298 void
299 VSs__cleanup_after_shutdown()
300 { VSsSemEnv *semanticEnv;
302 semanticEnv = _VMSMasterEnv->semanticEnv;
304 #ifdef HOLISTIC__TURN_ON_OBSERVE_UCC
305 FILE* output;
306 int n;
307 char filename[255];
308 //UCC
309 for(n=0;n<255;n++)
310 {
311 sprintf(filename, "./counters/UCC.%d",n);
312 output = fopen(filename,"r");
313 if(output)
314 {
315 fclose(output);
316 }else{
317 break;
318 }
319 }
320 if(n<255){
321 printf("Saving UCC to File: %s ...\n", filename);
322 output = fopen(filename,"w+");
323 if(output!=NULL){
324 set_dependency_file(output);
325 //fprintf(output,"digraph Dependencies {\n");
326 //set_dot_file(output);
327 //FIXME: first line still depends on counters being enabled, replace w/ unit struct!
328 //forAllInDynArrayDo(_VMSMasterEnv->counter_history_array_info, &print_dot_node_info );
329 forAllInListOfArraysDo(semanticEnv->unitList, &print_unit_to_file);
330 forAllInListOfArraysDo( semanticEnv->commDependenciesList, &print_comm_dependency_to_file );
331 forAllInListOfArraysDo( semanticEnv->ctlDependenciesList, &print_ctl_dependency_to_file );
332 forAllInListOfArraysDo( semanticEnv->dataDependenciesList, &print_data_dependency_to_file );
333 forAllInListOfArraysDo( semanticEnv->singletonDependenciesList, &print_singleton_dependency_to_file );
334 forAllInListOfArraysDo( semanticEnv->warDependenciesList, &print_war_dependency_to_file );
335 forAllInDynArrayDo(semanticEnv->ntonGroupsInfo,&print_nton_to_file);
336 //fprintf(output,"}\n");
337 fflush(output);
339 } else
340 printf("Opening UCC file failed. Please check that folder \"counters\" exists in run directory and has write permission.\n");
341 } else {
342 printf("Could not open UCC file, please clean \"counters\" folder. (Must contain less than 255 files.)\n");
343 }
344 //Loop Graph
345 for(n=0;n<255;n++)
346 {
347 sprintf(filename, "./counters/LoopGraph.%d",n);
348 output = fopen(filename,"r");
349 if(output)
350 {
351 fclose(output);
352 }else{
353 break;
354 }
355 }
356 if(n<255){
357 printf("Saving LoopGraph to File: %s ...\n", filename);
358 output = fopen(filename,"w+");
359 if(output!=NULL){
360 set_dependency_file(output);
361 //fprintf(output,"digraph Dependencies {\n");
362 //set_dot_file(output);
363 //FIXME: first line still depends on counters being enabled, replace w/ unit struct!
364 //forAllInDynArrayDo(_VMSMasterEnv->counter_history_array_info, &print_dot_node_info );
365 forAllInListOfArraysDo( semanticEnv->unitList, &print_unit_to_file );
366 forAllInListOfArraysDo( semanticEnv->commDependenciesList, &print_comm_dependency_to_file );
367 forAllInListOfArraysDo( semanticEnv->ctlDependenciesList, &print_ctl_dependency_to_file );
368 forAllInListOfArraysDo( semanticEnv->dataDependenciesList, &print_data_dependency_to_file );
369 forAllInListOfArraysDo( semanticEnv->singletonDependenciesList, &print_singleton_dependency_to_file );
370 forAllInListOfArraysDo( semanticEnv->dynDependenciesList, &print_dyn_dependency_to_file );
371 forAllInListOfArraysDo( semanticEnv->warDependenciesList, &print_war_dependency_to_file );
372 forAllInListOfArraysDo( semanticEnv->hwArcs, &print_hw_dependency_to_file );
373 //fprintf(output,"}\n");
374 fflush(output);
376 } else
377 printf("Opening LoopGraph file failed. Please check that folder \"counters\" exists in run directory and has write permission.\n");
378 } else {
379 printf("Could not open LoopGraph file, please clean \"counters\" folder. (Must contain less than 255 files.)\n");
380 }
383 freeListOfArrays(semanticEnv->unitList);
384 freeListOfArrays(semanticEnv->commDependenciesList);
385 freeListOfArrays(semanticEnv->ctlDependenciesList);
386 freeListOfArrays(semanticEnv->dynDependenciesList);
387 freeListOfArrays(semanticEnv->dataDependenciesList);
388 freeListOfArrays(semanticEnv->warDependenciesList);
389 freeListOfArrays(semanticEnv->singletonDependenciesList);
390 freeListOfArrays(semanticEnv->hwArcs);
392 #endif
393 #ifdef HOLISTIC__TURN_ON_PERF_COUNTERS
394 FILE* output2;
395 int n2;
396 char filename2[255];
397 for(n2=0;n2<255;n2++)
398 {
399 sprintf(filename2, "./counters/Counters.%d.csv",n2);
400 output2 = fopen(filename2,"r");
401 if(output2)
402 {
403 fclose(output2);
404 }else{
405 break;
406 }
407 }
408 if(n2<255){
409 printf("Saving Counter measurements to File: %s ...\n", filename2);
410 output2 = fopen(filename2,"w+");
411 if(output2!=NULL){
412 set_counter_file(output2);
413 int i;
414 for(i=0;i<NUM_CORES;i++){
415 forAllInListOfArraysDo( semanticEnv->counterList[i], &print_counter_events_to_file );
416 fflush(output2);
417 }
419 } else
420 printf("Opening UCC file failed. Please check that folder \"counters\" exists in run directory and has write permission.\n");
421 } else {
422 printf("Could not open UCC file, please clean \"counters\" folder. (Must contain less than 255 files.)\n");
423 }
425 #endif
426 /* It's all allocated inside VMS's big chunk -- that's about to be freed, so
427 * nothing to do here */
428 //_VMSMasterEnv->shutdownInitiated = TRUE;
429 int coreIdx, slotIdx;
430 SlaveVP* slotSlv;
431 for (coreIdx = 0; coreIdx < NUM_CORES; coreIdx++) {
432 for (slotIdx = 0; slotIdx < NUM_ANIM_SLOTS; slotIdx++) {
433 slotSlv = semanticEnv->slotTaskSlvs[coreIdx][slotIdx];
434 VMS_int__free(slotSlv->semanticData);
435 VMS_int__dissipate_slaveVP(slotSlv);
436 #ifdef IDLE_SLAVES
437 slotSlv = semanticEnv->idleSlv[coreIdx][slotIdx];
438 VMS_int__free(slotSlv->semanticData);
439 VMS_int__dissipate_slaveVP(slotSlv);
440 #endif
441 }
442 }
443 int i;
444 for (i = 0; i < NUM_STRUCS_IN_SEM_ENV; i++) {
445 freePrivQ(semanticEnv->fnSingletons[i].waitQ);
446 freePrivQ(semanticEnv->transactionStrucs[i].waitingVPQ);
447 freePrivQ(semanticEnv->criticalSection[i].waitQ);
448 }
450 freePrivQ(semanticEnv->freeExtraTaskSlvQ);
451 freePrivQ(semanticEnv->slavesReadyToResumeQ);
452 freePrivQ(semanticEnv->taskReadyQ);
453 freePrivQ(semanticEnv->deferredSubmitsQ);
454 freeHashTable(semanticEnv->argPtrHashTbl);
455 freeHashTable(semanticEnv->commHashTbl);
456 VMS_int__free(semanticEnv->coreIsDone);
457 VMS_int__free(_VMSMasterEnv->semanticEnv);
459 VMS_SS__cleanup_at_end_of_shutdown();
460 }
463 //===========================================================================
465 SlaveVP *
466 VSs__create_thread( TopLevelFnPtr fnPtr, void *initData,
467 SlaveVP *creatingThd )
468 { VSsSemReq reqData;
470 //the semantic request data is on the stack and disappears when this
471 // call returns -- it's guaranteed to remain in the VP's stack for as
472 // long as the VP is suspended.
473 reqData.reqType = 0; //know type because in a VMS create req
474 reqData.fnPtr = fnPtr;
475 reqData.initData = initData;
476 reqData.callingSlv = creatingThd;
478 VMS_WL__send_create_slaveVP_req( &reqData, creatingThd );
480 return creatingThd->dataRetFromReq;
481 }
/*This is always the last thing done in the code animated by a thread VP.
 * Normally, this would be the last line of the thread's top level function.
 * But, if the thread exits from any point, it has to do so by calling
 * this.
 *
 *It simply sends a dissipate request, which handles all the state cleanup.
 */
void
VSs__end_thread()
 {
   VMS_WL__send_dissipate_req( currVP ); //does not return to this VP
 }
497 void VSs__run_thread(TopLevelFnPtr fnPtr, void *initData){
498 (*fnPtr)(initData);
499 VSs__end_thread();
500 }
502 //===========================================================================
505 //======================= task submit and end ==============================
506 /*
507 */
508 void
509 VSs__submit_task( VSsTaskType *taskType, void *args)
510 { VSsSemReq reqData;
512 reqData.reqType = submit_task;
514 reqData.taskType = taskType;
515 reqData.args = args;
516 reqData.callingSlv = currVP;
518 reqData.taskID = NULL;
520 VMS_WL__send_sem_request( &reqData, currVP );
521 }
523 int32 *
524 VSs__create_taskID_of_size( int32 numInts)
525 { int32 *taskID;
527 taskID = VMS_WL__malloc( sizeof(int32) + numInts * sizeof(int32) );
528 taskID[0] = numInts;
529 return taskID;
530 }
532 void
533 VSs__submit_task_with_ID( VSsTaskType *taskType, void *args, int32 *taskID)
534 { VSsSemReq reqData;
536 reqData.reqType = submit_task;
538 reqData.taskType = taskType;
539 reqData.args = args;
540 reqData.taskID = taskID;
541 reqData.callingSlv = currVP;
543 VMS_WL__send_sem_request( &reqData, currVP );
544 }
547 /*This call is the last to happen in every task. It causes the slave to
548 * suspend and get the next task out of the task-queue. Notice there is no
549 * assigner here.. only one slave, no slave ReadyQ, and so on..
550 *Can either make the assigner take the next task out of the taskQ, or can
551 * leave all as it is, and make task-end take the next task.
552 *Note: this fits the case in the new VMS for no-context tasks, so will use
553 * the built-in taskQ of new VMS, and should be local and much faster.
554 *
555 *The task-stub is saved in the animSlv, so the request handler will get it
556 * from there, along with the task-type which has arg types, and so on..
557 *
558 * NOTE: if want, don't need to send the animating SlaveVP around..
559 * instead, can make a single slave per core, and coreCtrlr looks up the
560 * slave from having the core number.
561 *
562 *But, to stay compatible with all the other VMS languages, leave it in..
563 */
564 void
565 VSs__end_task()
566 { VSsSemReq reqData;
568 reqData.reqType = end_task;
569 reqData.callingSlv = currVP;
571 VMS_WL__send_sem_request( &reqData, currVP );
572 }
574 void VSs__run_task(TopLevelFnPtr fnPtr, void *initData){
575 (*fnPtr)(initData);
576 VSs__end_task();
577 }
579 void
580 VSs__taskwait()
581 {
582 VSsSemReq reqData;
584 reqData.reqType = taskwait;
585 reqData.callingSlv = currVP;
587 VMS_WL__send_sem_request( &reqData, currVP );
588 }
590 void
591 VSs__taskwait_on(void* ptr){
592 VSsSemReq reqData;
594 reqData.reqType = taskwait_on;
595 reqData.callingSlv = currVP;
597 reqData.args = ptr;
599 VMS_WL__send_sem_request( &reqData, currVP );
600 }
602 void
603 VSs__start_critical(int32 name){
604 VSsSemReq reqData;
606 reqData.reqType = critical_start;
607 reqData.callingSlv = currVP;
609 reqData.criticalID = name;
611 VMS_WL__send_sem_request( &reqData, currVP );
612 }
614 void
615 VSs__end_critical(int32 name){
616 VSsSemReq reqData;
618 reqData.reqType = critical_end;
619 reqData.callingSlv = currVP;
621 reqData.criticalID = name;
623 VMS_WL__send_sem_request( &reqData, currVP );
624 }
626 //========================== send and receive ============================
627 //
629 int32 *
630 VSs__give_self_taskID()
631 {
632 return ((VSsSemData*)currVP->semanticData)->taskStub->taskID;
633 }
635 //================================ send ===================================
637 void
638 VSs__send_of_type_to( void *msg, const int32 type, int32 *receiverID)
639 { VSsSemReq reqData;
641 reqData.reqType = send_type_to;
643 reqData.msg = msg;
644 reqData.msgType = type;
645 reqData.receiverID = receiverID;
646 reqData.senderSlv = currVP;
648 reqData.nextReqInHashEntry = NULL;
650 VMS_WL__send_sem_request( &reqData, currVP );
652 //When come back from suspend, no longer own data reachable from msg
653 }
655 void
656 VSs__send_from_to( void *msg, int32 *senderID, int32 *receiverID)
657 { VSsSemReq reqData;
659 reqData.reqType = send_from_to;
661 reqData.msg = msg;
662 reqData.senderID = senderID;
663 reqData.receiverID = receiverID;
664 reqData.senderSlv = currVP;
666 reqData.nextReqInHashEntry = NULL;
668 VMS_WL__send_sem_request( &reqData, currVP );
669 }
672 //================================ receive ================================
674 /*The "type" version of send and receive creates a many-to-one relationship.
675 * The sender is anonymous, and many sends can stack up, waiting to be
676 * received. The same receiver can also have send from-to's
677 * waiting for it, and those will be kept separate from the "type"
678 * messages.
679 */
680 void *
681 VSs__receive_type_to( const int32 type, int32* receiverID )
682 { DEBUG__printf1(dbgRqstHdlr,"WL: receive type to %d",receiverID[1] );
683 VSsSemReq reqData;
685 reqData.reqType = receive_type_to;
687 reqData.msgType = type;
688 reqData.receiverID = receiverID;
689 reqData.receiverSlv = currVP;
691 reqData.nextReqInHashEntry = NULL;
693 VMS_WL__send_sem_request( &reqData, currVP );
695 return currVP->dataRetFromReq;
696 }
700 /*Call this at the point a receiving task wants in-coming data.
701 * Use this from-to form when know senderID -- it makes a direct channel
702 * between sender and receiver.
703 */
704 void *
705 VSs__receive_from_to( int32 *senderID, int32 *receiverID)
706 {
707 VSsSemReq reqData;
709 reqData.reqType = receive_from_to;
711 reqData.senderID = senderID;
712 reqData.receiverID = receiverID;
713 reqData.receiverSlv = currVP;
715 reqData.nextReqInHashEntry = NULL;
716 DEBUG__printf2(dbgRqstHdlr,"WL: receive from %d to: %d", reqData.senderID[1], reqData.receiverID[1]);
718 VMS_WL__send_sem_request( &reqData, currVP );
720 return currVP->dataRetFromReq;
721 }
726 //==========================================================================
727 //
728 /*A function singleton is a function whose body executes exactly once, on a
729 * single core, no matter how many times the fuction is called and no
730 * matter how many cores or the timing of cores calling it.
731 *
732 *A data singleton is a ticket attached to data. That ticket can be used
733 * to get the data through the function exactly once, no matter how many
734 * times the data is given to the function, and no matter the timing of
735 * trying to get the data through from different cores.
736 */
738 /*asm function declarations*/
739 void asm_save_ret_to_singleton(VSsSingleton *singletonPtrAddr);
740 void asm_write_ret_from_singleton(VSsSingleton *singletonPtrAddr);
742 /*Fn singleton uses ID as index into array of singleton structs held in the
743 * semantic environment.
744 */
/*Begins a function singleton identified by singletonID (index into the
 * semantic env's fnSingletons array). If the singleton body has already
 * been executed, the handler returns non-zero and the assembly helper
 * rewrites this call's return address to the end-singleton's saved address,
 * skipping the body.
 *NOTE(review): reqData.callingSlv is not set here -- presumably the
 * singleton_fn_start handler does not read it; confirm against the handler.
 */
void
VSs__start_fn_singleton( int32 singletonID)
 {
   VSsSemReq reqData;

   reqData.reqType = singleton_fn_start;
   reqData.singletonID = singletonID;

   VMS_WL__send_sem_request( &reqData, currVP );
   if( currVP->dataRetFromReq ) //will be 0 or addr of label in end singleton
    {
      VSsSemEnv *semEnv = VMS_int__give_sem_env_for( currVP );
      asm_write_ret_from_singleton(&(semEnv->fnSingletons[ singletonID]));
    }
 }
762 /*Data singleton hands addr of loc holding a pointer to a singleton struct.
763 * The start_data_singleton makes the structure and puts its addr into the
764 * location.
765 */
/*Begins a data singleton. singletonAddr is the address of a location
 * holding a pointer to a singleton struct; the handler creates the struct
 * and stores its address there on first use.
 *If the singleton has already finished (fast-path check, or non-zero
 * dataRetFromReq from the handler), the assembly helper rewrites this
 * call's return address to the one saved by end-data-singleton, skipping
 * the protected body.
 */
void
VSs__start_data_singleton( VSsSingleton **singletonAddr )
 {
   VSsSemReq reqData;

      //fast path: already finished, skip the request entirely
   if( *singletonAddr && (*singletonAddr)->hasFinished )
      goto JmpToEndSingleton;

   reqData.reqType = singleton_data_start;
   reqData.singletonPtrAddr = singletonAddr;

   VMS_WL__send_sem_request( &reqData, currVP );
   if( currVP->dataRetFromReq ) //either 0 or end singleton's return addr
    { //Assembly code changes the return addr on the stack to the one
      // saved into the singleton by the end-singleton-fn
      //The return addr is at 0x4(%%ebp)
      JmpToEndSingleton:
      asm_write_ret_from_singleton(*singletonAddr);
    }
   //now, simply return
   //will exit either from the start singleton call or the end-singleton call
 }
789 /*Uses ID as index into array of flags. If flag already set, resumes from
790 * end-label. Else, sets flag and resumes normally.
791 *
792 *Note, this call cannot be inlined because the instr addr at the label
793 * inside is shared by all invocations of a given singleton ID.
794 */
/*Ends a function singleton: records this call's surroundings into the
 * singleton struct so later VSs__start_fn_singleton calls can jump straight
 * here, then notifies the handler (which resumes any VPs waiting for the
 * singleton to finish).
 *Note, this call cannot be inlined because the instr addr at the label
 * inside is shared by all invocations of a given singleton ID.
 *NOTE(review): this calls asm_write_ret_from_singleton, while
 * VSs__end_data_singleton calls asm_save_ret_to_singleton -- confirm the
 * write- vs save-variant is intended here.
 */
void
VSs__end_fn_singleton( int32 singletonID )
 {
   VSsSemReq reqData;

      //don't need this addr until after at least one singleton has reached
      // this function
   VSsSemEnv *semEnv = VMS_int__give_sem_env_for( currVP );
   asm_write_ret_from_singleton(&(semEnv->fnSingletons[ singletonID]));

   reqData.reqType = singleton_fn_end;
   reqData.singletonID = singletonID;

   VMS_WL__send_sem_request( &reqData, currVP );

   return;
 }
/*Ends a data singleton: saves this call's return address into the
 * singleton struct (so start-data-singleton can jump straight past the
 * protected body next time), then notifies the handler.
 *A given data-singleton can only be given to exactly one instance in the
 * code of this function, but this function may be used in different places
 * for different data-singletons.
 */
void
VSs__end_data_singleton( VSsSingleton **singletonPtrAddr )
 {
   VSsSemReq reqData;

      //don't need this addr until after singleton struct has reached
      // this function for first time
      //do assembly that saves the return addr of this fn call into the
      // data singleton
   asm_save_ret_to_singleton(*singletonPtrAddr);

   reqData.reqType = singleton_data_end;
   reqData.singletonPtrAddr = singletonPtrAddr;

   VMS_WL__send_sem_request( &reqData, currVP );
 }
836 /*This executes the function in the masterVP, so it executes in isolation
837 * from any other copies -- only one copy of the function can ever execute
838 * at a time.
839 *
840 *It suspends to the master, and the request handler takes the function
841 * pointer out of the request and calls it, then resumes the VP.
842 *Only very short functions should be called this way -- for longer-running
843 * isolation, use transaction-start and transaction-end, which run the code
844 * between as work-code.
845 */
846 void
847 VSs__animate_short_fn_in_isolation( PtrToAtomicFn ptrToFnToExecInMaster,
848 void *data )
849 {
850 VSsSemReq reqData;
852 //
853 reqData.reqType = atomic;
854 reqData.fnToExecInMaster = ptrToFnToExecInMaster;
855 reqData.dataForFn = data;
857 VMS_WL__send_sem_request( &reqData, currVP );
858 }
861 /*This suspends to the master.
862 *First, it looks at the VP's data, to see the highest transactionID that VP
863 * already has entered. If the current ID is not larger, it throws an
864 * exception stating a bug in the code. Otherwise it puts the current ID
865 * there, and adds the ID to a linked list of IDs entered -- the list is
866 * used to check that exits are properly ordered.
867 *Next it is uses transactionID as index into an array of transaction
868 * structures.
869 *If the "VP_currently_executing" field is non-null, then put requesting VP
870 * into queue in the struct. (At some point a holder will request
871 * end-transaction, which will take this VP from the queue and resume it.)
872 *If NULL, then write requesting into the field and resume.
873 */
874 void
875 VSs__start_transaction( int32 transactionID )
876 {
877 VSsSemReq reqData;
879 //
880 reqData.callingSlv = currVP;
881 reqData.reqType = trans_start;
882 reqData.transID = transactionID;
884 VMS_WL__send_sem_request( &reqData, currVP );
885 }
887 /*This suspends to the master, then uses transactionID as index into an
888 * array of transaction structures.
889 *It looks at VP_currently_executing to be sure it's same as requesting VP.
890 * If different, throws an exception, stating there's a bug in the code.
891 *Next it looks at the queue in the structure.
892 *If it's empty, it sets VP_currently_executing field to NULL and resumes.
893 *If something in, gets it, sets VP_currently_executing to that VP, then
894 * resumes both.
895 */
896 void
897 VSs__end_transaction( int32 transactionID )
898 {
899 VSsSemReq reqData;
901 //
902 reqData.callingSlv = currVP;
903 reqData.reqType = trans_end;
904 reqData.transID = transactionID;
906 VMS_WL__send_sem_request( &reqData, currVP );
907 }
909 //======================== Internal ==================================
910 /*
911 */
912 SlaveVP *
913 VSs__create_slave_with( TopLevelFnPtr fnPtr, void *initData,
914 SlaveVP *creatingSlv )
915 { VSsSemReq reqData;
917 //the semantic request data is on the stack and disappears when this
918 // call returns -- it's guaranteed to remain in the VP's stack for as
919 // long as the VP is suspended.
920 reqData.reqType = 0; //know type because in a VMS create req
921 reqData.coreToAssignOnto = -1; //means round-robin assign
922 reqData.fnPtr = fnPtr;
923 reqData.initData = initData;
924 reqData.callingSlv = creatingSlv;
926 VMS_WL__send_create_slaveVP_req( &reqData, creatingSlv );
928 return creatingSlv->dataRetFromReq;
929 }
931 SlaveVP *
932 VSs__create_slave_with_affinity( TopLevelFnPtr fnPtr, void *initData,
933 SlaveVP *creatingSlv, int32 coreToAssignOnto )
934 { VSsSemReq reqData;
936 //the semantic request data is on the stack and disappears when this
937 // call returns -- it's guaranteed to remain in the VP's stack for as
938 // long as the VP is suspended.
939 reqData.reqType = create_slave_w_aff; //not used, May 2012
940 reqData.coreToAssignOnto = coreToAssignOnto;
941 reqData.fnPtr = fnPtr;
942 reqData.initData = initData;
943 reqData.callingSlv = creatingSlv;
945 VMS_WL__send_create_slaveVP_req( &reqData, creatingSlv );
947 return creatingSlv->dataRetFromReq;
948 }
//return value of the user's program main, captured by __entry_point and
// handed back by the replacement main() below
int __main_ret;

/*Seed-slave top-level function: unpacks the wrapped argc/argv and runs the
 * user's (renamed) program main, capturing its return value.
 *NOTE(review): double-underscore names are reserved identifiers in C; kept
 * as-is because they are part of the established wrapper interface.
 */
void __entry_point(void* _args) {
    __main_args* args = (__main_args*) _args;
    __main_ret = __program_main(args->argc, args->argv);
}
957 #undef main
959 int main(int argc, char** argv) {
960 __main_args args;
961 args.argc = argc;
962 args.argv = argv;
963 VSs__create_seed_slave_and_do_work(__entry_point, (void*) &args);
964 return __main_ret;
965 }
