annotate VSs.c @ 40:df464a215387

add implementations of (some) nanos api functions
author Nina Engelhardt <nengel@mailbox.tu-berlin.de>
date Mon, 03 Jun 2013 18:49:19 +0200
parents a951b38d2cfc
children 8733d1299c3a
rev   line source
seanhalle@0 1 /*
seanhalle@0 2 * Copyright 2010 OpenSourceCodeStewardshipFoundation
seanhalle@0 3 *
seanhalle@0 4 * Licensed under BSD
seanhalle@0 5 */
seanhalle@0 6
seanhalle@0 7 #include <stdio.h>
seanhalle@0 8 #include <stdlib.h>
seanhalle@0 9 #include <malloc.h>
seanhalle@0 10
seanhalle@0 11 #include "Queue_impl/PrivateQueue.h"
seanhalle@0 12 #include "Hash_impl/PrivateHash.h"
seanhalle@0 13
seanhalle@2 14 #include "VSs.h"
seanhalle@3 15 #include "Measurement/VSs_Counter_Recording.h"
seanhalle@0 16
seanhalle@0 17 //==========================================================================
seanhalle@0 18
/*Forward declarations -- both are defined later in this file.
 * VSs__init sets up VMS plus the VSs semantic environment;
 * VSs__init_Helper does the VSs-specific part.
 */
void
VSs__init();

void
VSs__init_Helper();
seanhalle@0 24 //==========================================================================
seanhalle@0 25
seanhalle@0 26
seanhalle@0 27
seanhalle@0 28 //===========================================================================
seanhalle@0 29
seanhalle@0 30
seanhalle@0 31 /*These are the library functions *called in the application*
seanhalle@0 32 *
seanhalle@0 33 *There's a pattern for the outside sequential code to interact with the
seanhalle@0 34 * VMS_HW code.
seanhalle@2 35 *The VMS_HW system is inside a boundary.. every VSs system is in its
seanhalle@0 36 * own directory that contains the functions for each of the processor types.
seanhalle@0 37 * One of the processor types is the "seed" processor that starts the
seanhalle@0 38 * cascade of creating all the processors that do the work.
seanhalle@0 39 *So, in the directory is a file called "EntryPoint.c" that contains the
seanhalle@0 40 * function, named appropriately to the work performed, that the outside
seanhalle@0 41 * sequential code calls. This function follows a pattern:
seanhalle@2 42 *1) it calls VSs__init()
seanhalle@0 43 *2) it creates the initial data for the seed processor, which is passed
seanhalle@0 44 * in to the function
seanhalle@2 45 *3) it creates the seed VSs processor, with the data to start it with.
seanhalle@2 46 *4) it calls startVSsThenWaitUntilWorkDone
seanhalle@0 47 *5) it gets the returnValue from the transfer struc and returns that
seanhalle@0 48 * from the function
seanhalle@0 49 *
seanhalle@2 50 *For now, a new VSs system has to be created via VSs__init every
seanhalle@0 51 * time an entry point function is called -- later, might add letting the
seanhalle@2 52 * VSs system be created once, and let all the entry points just reuse
seanhalle@0 53 * it -- want to be as simple as possible now, and see by using what makes
seanhalle@0 54 * sense for later..
seanhalle@0 55 */
seanhalle@0 56
seanhalle@0 57
seanhalle@0 58
seanhalle@0 59 //===========================================================================
seanhalle@0 60
seanhalle@0 61 /*This is the "border crossing" function -- the thing that crosses from the
seanhalle@0 62 * outside world, into the VMS_HW world. It initializes and starts up the
seanhalle@0 63 * VMS system, then creates one processor from the specified function and
seanhalle@0 64 * puts it into the readyQ. From that point, that one function is resp.
seanhalle@0 65 * for creating all the other processors, that then create others, and so
seanhalle@0 66 * forth.
seanhalle@0 67 *When all the processors, including the seed, have dissipated, then this
seanhalle@0 68 * function returns. The results will have been written by side-effect via
seanhalle@0 69 * pointers read from, or written into initData.
seanhalle@0 70 *
seanhalle@0 71 *NOTE: no Threads should exist in the outside program that might touch
seanhalle@0 72 * any of the data reachable from initData passed in to here
seanhalle@0 73 */
/*Border-crossing entry point: initializes the VSs system, creates the
 * seed thread-slave that animates fnPtr(initData), queues it, then runs
 * VMS until every slave has dissipated. Results come back by side-effect
 * through data reachable from initData. Cleans up VSs state before
 * returning.
 *
 *fnPtr    -- top-level function the seed thread runs
 *initData -- opaque data handed to fnPtr; also used to build the seed's
 *            task stub
 */
void
VSs__create_seed_slave_and_do_work( TopLevelFnPtr fnPtr, void *initData )
 { VSsSemEnv *semEnv;
   SlaveVP *seedSlv;
   VSsSemData *semData;
   VSsTaskStub *threadTaskStub, *parentTaskStub;
   int32* taskID;

   VSs__init(); //normal multi-thd

   semEnv = _VMSMasterEnv->semanticEnv;

   //VSs starts with one processor, which is put into initial environ,
   // and which then calls create() to create more, thereby expanding work
   seedSlv = VSs__create_slave_helper( &VSs__run_thread, fnPtr, initData, semEnv, semEnv->nextCoreToGetNewSlv++ );
   //NB: this assumes that after VSs_init() nextCoreToGetNewSlv is still 0,
   // and also that there is more than 1 core.

   //seed slave is a thread slave, so make a thread's task stub for it
   // and then make another to stand for the seed's parent task. Make
   // the parent be already ended, and have one child (the seed). This
   // will make the dissipate handler do the right thing when the seed
   // is dissipated.
   threadTaskStub = create_thread_task_stub( initData );
   parentTaskStub = create_thread_task_stub( NULL );
   parentTaskStub->isEnded = TRUE;
   parentTaskStub->numLiveChildThreads = 1; //so dissipate works for seed
   threadTaskStub->parentTaskStub = parentTaskStub;
   threadTaskStub->slaveAssignedTo = seedSlv;

   //hand-built ID for the seed: slot 0 = 1, slot 1 = -1
   // NOTE(review): layout differs from VSs__create_taskID_of_size, which
   // puts the count in slot 0 -- confirm the seed-ID convention
   taskID = VMS_WL__malloc( 2 * sizeof(int32) );
   taskID[0] = 1;
   taskID[1] = -1;
   threadTaskStub->taskID = taskID;

   semData = (VSsSemData *)seedSlv->semanticData;
   //seedVP is a thread, so has a permanent task
   semData->needsTaskAssigned = FALSE;
   semData->taskStub = threadTaskStub;
   semData->slaveType = ThreadSlv;

   resume_slaveVP( seedSlv, semEnv ); //returns right away, just queues Slv

   VMS_SS__start_the_work_then_wait_until_done(); //normal multi-thd

   VSs__cleanup_after_shutdown();
 }
seanhalle@0 121
seanhalle@0 122
seanhalle@0 123 int32
seanhalle@2 124 VSs__giveMinWorkUnitCycles( float32 percentOverhead )
seanhalle@0 125 {
seanhalle@0 126 return MIN_WORK_UNIT_CYCLES;
seanhalle@0 127 }
seanhalle@0 128
seanhalle@0 129 int32
seanhalle@2 130 VSs__giveIdealNumWorkUnits()
seanhalle@0 131 {
seanhalle@0 132 return NUM_ANIM_SLOTS * NUM_CORES;
seanhalle@0 133 }
seanhalle@0 134
seanhalle@0 135 int32
seanhalle@2 136 VSs__give_number_of_cores_to_schedule_onto()
seanhalle@0 137 {
seanhalle@0 138 return NUM_CORES;
seanhalle@0 139 }
seanhalle@0 140
/*For now, use TSC -- later, make these two macros with assembly that first
 * saves jump point, and second jumps back several times to get reliable time
 *
 *Records the current low word of the time-stamp counter into the
 * semantic environment's primitiveStartTime, to be paired with
 * VSs__end_primitive_and_give_cycles.
 */
void
VSs__start_primitive()
 { saveLowTimeStampCountInto( ((VSsSemEnv *)(_VMSMasterEnv->semanticEnv))->
                              primitiveStartTime );
 }
seanhalle@0 149
/*Just quick and dirty for now -- make reliable later
 * will want this to jump back several times -- to be sure cache is warm
 * because don't want comm time included in calc-time measurement -- and
 * also to throw out any "weird" values due to OS interrupt or TSC rollover
 *
 *Returns the elapsed cycles since the matching VSs__start_primitive call,
 * as the difference of the two low-word TSC samples (subject to rollover,
 * per the TODO).
 */
int32
VSs__end_primitive_and_give_cycles()
 { int32 endTime, startTime;
   //TODO: fix by repeating time-measurement
   saveLowTimeStampCountInto( endTime );
   startTime = ((VSsSemEnv*)(_VMSMasterEnv->semanticEnv))->primitiveStartTime;
   return (endTime - startTime);
 }
seanhalle@0 163
seanhalle@0 164 //===========================================================================
seanhalle@0 165
seanhalle@2 166 /*Initializes all the data-structures for a VSs system -- but doesn't
seanhalle@0 167 * start it running yet!
seanhalle@0 168 *
seanhalle@0 169 *This runs in the main thread -- before VMS starts up
seanhalle@0 170 *
seanhalle@0 171 *This sets up the semantic layer over the VMS system
seanhalle@0 172 *
seanhalle@0 173 *First, calls VMS_Setup, then creates own environment, making it ready
seanhalle@0 174 * for creating the seed processor and then starting the work.
seanhalle@0 175 */
/*Brings up the core VMS system, then layers the VSs semantic
 * environment on top of it. Runs in the main thread, before any
 * slave is animated.
 */
void
VSs__init()
 {
   //after this call _VMSMasterEnv exists, and VMS_int__malloc /
   // VMS_int__free are available for building the semantic env
   VMS_SS__init();

   //now construct everything VSs-specific
   VSs__init_Helper();
 }
seanhalle@0 185
seanhalle@0 186
nengel@37 187 void idle_fn(void* data){
seanhalle@0 188 while(1){
nengel@37 189 VMS_int__suspend_slaveVP_and_send_req(currVP);
seanhalle@0 190 }
seanhalle@0 191 }
seanhalle@0 192
seanhalle@0 193 void
seanhalle@2 194 VSs__init_Helper()
seanhalle@2 195 { VSsSemEnv *semanticEnv;
seanhalle@6 196 int32 i, coreNum, slotNum;
seanhalle@10 197 VSsSemData *semData;
seanhalle@0 198
seanhalle@0 199 //Hook up the semantic layer's plug-ins to the Master virt procr
seanhalle@2 200 _VMSMasterEnv->requestHandler = &VSs__Request_Handler;
seanhalle@2 201 _VMSMasterEnv->slaveAssigner = &VSs__assign_slaveVP_to_slot;
seanhalle@0 202
seanhalle@0 203 //create the semantic layer's environment (all its data) and add to
seanhalle@0 204 // the master environment
seanhalle@2 205 semanticEnv = VMS_int__malloc( sizeof( VSsSemEnv ) );
seanhalle@0 206 _VMSMasterEnv->semanticEnv = semanticEnv;
seanhalle@0 207
seanhalle@0 208 #ifdef HOLISTIC__TURN_ON_PERF_COUNTERS
seanhalle@15 209 _VMSMasterEnv->counterHandler = &VSs__counter_handler;
seanhalle@2 210 VSs__init_counter_data_structs();
seanhalle@0 211 #endif
seanhalle@3 212
nengel@33 213 //semanticEnv->shutdownInitiated = FALSE;
seanhalle@3 214 semanticEnv->coreIsDone = VMS_int__malloc( NUM_CORES * sizeof( bool32 ) );
nengel@22 215 semanticEnv->numCoresDone = 0;
seanhalle@6 216 //For each animation slot, there is an idle slave, and an initial
seanhalle@6 217 // slave assigned as the current-task-slave. Create them here.
seanhalle@10 218 SlaveVP *idleSlv, *slotTaskSlv;
seanhalle@6 219 for( coreNum = 0; coreNum < NUM_CORES; coreNum++ )
seanhalle@6 220 { semanticEnv->coreIsDone[coreNum] = FALSE; //use during shutdown
seanhalle@6 221
seanhalle@6 222 for( slotNum = 0; slotNum < NUM_ANIM_SLOTS; ++slotNum )
nengel@22 223 {
nengel@22 224 #ifdef IDLE_SLAVES
nengel@38 225 idleSlv = VSs__create_slave_helper( &VSs__run_thread, &idle_fn, NULL, semanticEnv, 0);
seanhalle@6 226 idleSlv->coreAnimatedBy = coreNum;
seanhalle@9 227 idleSlv->animSlotAssignedTo =
seanhalle@9 228 _VMSMasterEnv->allAnimSlots[coreNum][slotNum];
seanhalle@6 229 semanticEnv->idleSlv[coreNum][slotNum] = idleSlv;
nengel@22 230 #endif
seanhalle@6 231
nengel@38 232 slotTaskSlv = VSs__create_slave_helper(&VSs__run_thread, &idle_fn, NULL, semanticEnv, 0);
seanhalle@10 233 slotTaskSlv->coreAnimatedBy = coreNum;
seanhalle@10 234 slotTaskSlv->animSlotAssignedTo =
seanhalle@9 235 _VMSMasterEnv->allAnimSlots[coreNum][slotNum];
seanhalle@10 236
seanhalle@10 237 semData = slotTaskSlv->semanticData;
seanhalle@10 238 semData->needsTaskAssigned = TRUE;
seanhalle@10 239 semData->slaveType = SlotTaskSlv;
seanhalle@10 240 semanticEnv->slotTaskSlvs[coreNum][slotNum] = slotTaskSlv;
seanhalle@0 241 }
seanhalle@3 242 }
seanhalle@0 243
seanhalle@6 244 //create the ready queues, hash tables used for matching and so forth
seanhalle@6 245 semanticEnv->slavesReadyToResumeQ = makeVMSQ();
seanhalle@9 246 semanticEnv->freeExtraTaskSlvQ = makeVMSQ();
seanhalle@6 247 semanticEnv->taskReadyQ = makeVMSQ();
seanhalle@0 248
nengel@33 249 semanticEnv->argPtrHashTbl = makeHashTable32( 20, &free_pointer_entry );
seanhalle@4 250 semanticEnv->commHashTbl = makeHashTable32( 16, &VMS_int__free );
nengel@39 251 semanticEnv->criticalHashTbl = makeHashTable32( 16, &VMS_int__free );
seanhalle@6 252
seanhalle@6 253 semanticEnv->nextCoreToGetNewSlv = 0;
seanhalle@6 254
nengel@22 255 semanticEnv->numInFlightTasks = 0;
nengel@22 256 semanticEnv->deferredSubmitsQ = makeVMSQ();
nengel@18 257 #ifdef EXTERNAL_SCHEDULER
nengel@18 258 VSs__init_ext_scheduler();
nengel@18 259 #endif
seanhalle@0 260 //TODO: bug -- turn these arrays into dyn arrays to eliminate limit
seanhalle@0 261 //semanticEnv->singletonHasBeenExecutedFlags = makeDynArrayInfo( );
seanhalle@0 262 //semanticEnv->transactionStrucs = makeDynArrayInfo( );
seanhalle@0 263 for( i = 0; i < NUM_STRUCS_IN_SEM_ENV; i++ )
seanhalle@0 264 {
seanhalle@0 265 semanticEnv->fnSingletons[i].endInstrAddr = NULL;
seanhalle@0 266 semanticEnv->fnSingletons[i].hasBeenStarted = FALSE;
seanhalle@0 267 semanticEnv->fnSingletons[i].hasFinished = FALSE;
seanhalle@0 268 semanticEnv->fnSingletons[i].waitQ = makeVMSQ();
seanhalle@0 269 semanticEnv->transactionStrucs[i].waitingVPQ = makeVMSQ();
seanhalle@0 270 }
seanhalle@6 271
seanhalle@8 272 semanticEnv->numLiveExtraTaskSlvs = 0; //must be last
seanhalle@15 273 semanticEnv->numLiveThreadSlvs = 1; //must be last, counts the seed
seanhalle@6 274
seanhalle@6 275 #ifdef HOLISTIC__TURN_ON_OBSERVE_UCC
seanhalle@6 276 semanticEnv->unitList = makeListOfArrays(sizeof(Unit),128);
seanhalle@6 277 semanticEnv->ctlDependenciesList = makeListOfArrays(sizeof(Dependency),128);
seanhalle@6 278 semanticEnv->commDependenciesList = makeListOfArrays(sizeof(Dependency),128);
seanhalle@6 279 semanticEnv->dynDependenciesList = makeListOfArrays(sizeof(Dependency),128);
nengel@16 280 semanticEnv->dataDependenciesList = makeListOfArrays(sizeof(Dependency),128);
nengel@16 281 semanticEnv->singletonDependenciesList = makeListOfArrays(sizeof(Dependency),128);
nengel@17 282 semanticEnv->warDependenciesList = makeListOfArrays(sizeof(Dependency),128);
seanhalle@6 283 semanticEnv->ntonGroupsInfo = makePrivDynArrayOfSize((void***)&(semanticEnv->ntonGroups),8);
seanhalle@6 284
seanhalle@6 285 semanticEnv->hwArcs = makeListOfArrays(sizeof(Dependency),128);
seanhalle@6 286 memset(semanticEnv->last_in_slot,0,sizeof(NUM_CORES * NUM_ANIM_SLOTS * sizeof(Unit)));
seanhalle@6 287 #endif
seanhalle@0 288 }
seanhalle@0 289
seanhalle@0 290
seanhalle@2 291 /*Frees any memory allocated by VSs__init() then calls VMS_int__shutdown
seanhalle@0 292 */
seanhalle@0 293 void
seanhalle@2 294 VSs__cleanup_after_shutdown()
seanhalle@2 295 { VSsSemEnv *semanticEnv;
seanhalle@0 296
seanhalle@0 297 semanticEnv = _VMSMasterEnv->semanticEnv;
nengel@20 298
nengel@20 299 #ifdef HOLISTIC__TURN_ON_OBSERVE_UCC
seanhalle@0 300 FILE* output;
seanhalle@0 301 int n;
nengel@20 302 char filename[255];
nengel@18 303 //UCC
seanhalle@0 304 for(n=0;n<255;n++)
seanhalle@0 305 {
seanhalle@0 306 sprintf(filename, "./counters/UCC.%d",n);
seanhalle@0 307 output = fopen(filename,"r");
seanhalle@0 308 if(output)
seanhalle@0 309 {
seanhalle@0 310 fclose(output);
seanhalle@0 311 }else{
seanhalle@0 312 break;
seanhalle@0 313 }
seanhalle@0 314 }
seanhalle@0 315 if(n<255){
seanhalle@0 316 printf("Saving UCC to File: %s ...\n", filename);
seanhalle@0 317 output = fopen(filename,"w+");
seanhalle@0 318 if(output!=NULL){
seanhalle@0 319 set_dependency_file(output);
seanhalle@0 320 //fprintf(output,"digraph Dependencies {\n");
seanhalle@0 321 //set_dot_file(output);
seanhalle@0 322 //FIXME: first line still depends on counters being enabled, replace w/ unit struct!
seanhalle@0 323 //forAllInDynArrayDo(_VMSMasterEnv->counter_history_array_info, &print_dot_node_info );
seanhalle@0 324 forAllInListOfArraysDo(semanticEnv->unitList, &print_unit_to_file);
seanhalle@0 325 forAllInListOfArraysDo( semanticEnv->commDependenciesList, &print_comm_dependency_to_file );
seanhalle@0 326 forAllInListOfArraysDo( semanticEnv->ctlDependenciesList, &print_ctl_dependency_to_file );
nengel@16 327 forAllInListOfArraysDo( semanticEnv->dataDependenciesList, &print_data_dependency_to_file );
nengel@16 328 forAllInListOfArraysDo( semanticEnv->singletonDependenciesList, &print_singleton_dependency_to_file );
nengel@17 329 forAllInListOfArraysDo( semanticEnv->warDependenciesList, &print_war_dependency_to_file );
seanhalle@0 330 forAllInDynArrayDo(semanticEnv->ntonGroupsInfo,&print_nton_to_file);
seanhalle@0 331 //fprintf(output,"}\n");
seanhalle@0 332 fflush(output);
seanhalle@0 333
seanhalle@0 334 } else
seanhalle@0 335 printf("Opening UCC file failed. Please check that folder \"counters\" exists in run directory and has write permission.\n");
seanhalle@0 336 } else {
seanhalle@0 337 printf("Could not open UCC file, please clean \"counters\" folder. (Must contain less than 255 files.)\n");
seanhalle@0 338 }
seanhalle@0 339 //Loop Graph
seanhalle@0 340 for(n=0;n<255;n++)
seanhalle@0 341 {
seanhalle@0 342 sprintf(filename, "./counters/LoopGraph.%d",n);
seanhalle@0 343 output = fopen(filename,"r");
seanhalle@0 344 if(output)
seanhalle@0 345 {
seanhalle@0 346 fclose(output);
seanhalle@0 347 }else{
seanhalle@0 348 break;
seanhalle@0 349 }
seanhalle@0 350 }
seanhalle@0 351 if(n<255){
seanhalle@0 352 printf("Saving LoopGraph to File: %s ...\n", filename);
seanhalle@0 353 output = fopen(filename,"w+");
seanhalle@0 354 if(output!=NULL){
seanhalle@0 355 set_dependency_file(output);
seanhalle@0 356 //fprintf(output,"digraph Dependencies {\n");
seanhalle@0 357 //set_dot_file(output);
seanhalle@0 358 //FIXME: first line still depends on counters being enabled, replace w/ unit struct!
seanhalle@0 359 //forAllInDynArrayDo(_VMSMasterEnv->counter_history_array_info, &print_dot_node_info );
seanhalle@0 360 forAllInListOfArraysDo( semanticEnv->unitList, &print_unit_to_file );
seanhalle@0 361 forAllInListOfArraysDo( semanticEnv->commDependenciesList, &print_comm_dependency_to_file );
seanhalle@0 362 forAllInListOfArraysDo( semanticEnv->ctlDependenciesList, &print_ctl_dependency_to_file );
nengel@16 363 forAllInListOfArraysDo( semanticEnv->dataDependenciesList, &print_data_dependency_to_file );
nengel@16 364 forAllInListOfArraysDo( semanticEnv->singletonDependenciesList, &print_singleton_dependency_to_file );
seanhalle@0 365 forAllInListOfArraysDo( semanticEnv->dynDependenciesList, &print_dyn_dependency_to_file );
nengel@17 366 forAllInListOfArraysDo( semanticEnv->warDependenciesList, &print_war_dependency_to_file );
seanhalle@0 367 forAllInListOfArraysDo( semanticEnv->hwArcs, &print_hw_dependency_to_file );
seanhalle@0 368 //fprintf(output,"}\n");
seanhalle@0 369 fflush(output);
seanhalle@0 370
seanhalle@0 371 } else
seanhalle@0 372 printf("Opening LoopGraph file failed. Please check that folder \"counters\" exists in run directory and has write permission.\n");
seanhalle@0 373 } else {
seanhalle@0 374 printf("Could not open LoopGraph file, please clean \"counters\" folder. (Must contain less than 255 files.)\n");
seanhalle@0 375 }
seanhalle@0 376
seanhalle@0 377
seanhalle@0 378 freeListOfArrays(semanticEnv->unitList);
seanhalle@0 379 freeListOfArrays(semanticEnv->commDependenciesList);
seanhalle@0 380 freeListOfArrays(semanticEnv->ctlDependenciesList);
seanhalle@0 381 freeListOfArrays(semanticEnv->dynDependenciesList);
nengel@16 382 freeListOfArrays(semanticEnv->dataDependenciesList);
nengel@17 383 freeListOfArrays(semanticEnv->warDependenciesList);
nengel@17 384 freeListOfArrays(semanticEnv->singletonDependenciesList);
nengel@17 385 freeListOfArrays(semanticEnv->hwArcs);
seanhalle@0 386
seanhalle@0 387 #endif
nengel@20 388 #ifdef HOLISTIC__TURN_ON_PERF_COUNTERS
nengel@20 389 FILE* output2;
nengel@20 390 int n2;
nengel@20 391 char filename2[255];
nengel@20 392 for(n2=0;n2<255;n2++)
seanhalle@0 393 {
nengel@20 394 sprintf(filename2, "./counters/Counters.%d.csv",n2);
nengel@20 395 output2 = fopen(filename2,"r");
nengel@20 396 if(output2)
seanhalle@0 397 {
nengel@20 398 fclose(output2);
seanhalle@0 399 }else{
seanhalle@0 400 break;
seanhalle@0 401 }
seanhalle@0 402 }
nengel@20 403 if(n2<255){
nengel@20 404 printf("Saving Counter measurements to File: %s ...\n", filename2);
nengel@20 405 output2 = fopen(filename2,"w+");
nengel@20 406 if(output2!=NULL){
nengel@20 407 set_counter_file(output2);
seanhalle@0 408 int i;
seanhalle@0 409 for(i=0;i<NUM_CORES;i++){
seanhalle@0 410 forAllInListOfArraysDo( semanticEnv->counterList[i], &print_counter_events_to_file );
nengel@20 411 fflush(output2);
seanhalle@0 412 }
seanhalle@0 413
seanhalle@0 414 } else
seanhalle@0 415 printf("Opening UCC file failed. Please check that folder \"counters\" exists in run directory and has write permission.\n");
seanhalle@0 416 } else {
seanhalle@0 417 printf("Could not open UCC file, please clean \"counters\" folder. (Must contain less than 255 files.)\n");
seanhalle@0 418 }
seanhalle@0 419
seanhalle@0 420 #endif
nengel@22 421 /* It's all allocated inside VMS's big chunk -- that's about to be freed, so
nengel@22 422 * nothing to do here */
nengel@33 423 //_VMSMasterEnv->shutdownInitiated = TRUE;
nengel@22 424 int coreIdx, slotIdx;
nengel@22 425 SlaveVP* slotSlv;
nengel@22 426 for (coreIdx = 0; coreIdx < NUM_CORES; coreIdx++) {
nengel@22 427 for (slotIdx = 0; slotIdx < NUM_ANIM_SLOTS; slotIdx++) {
nengel@22 428 slotSlv = semanticEnv->slotTaskSlvs[coreIdx][slotIdx];
nengel@22 429 VMS_int__free(slotSlv->semanticData);
nengel@33 430 VMS_int__dissipate_slaveVP(slotSlv);
nengel@22 431 #ifdef IDLE_SLAVES
nengel@22 432 slotSlv = semanticEnv->idleSlv[coreIdx][slotIdx];
nengel@22 433 VMS_int__free(slotSlv->semanticData);
nengel@33 434 VMS_int__dissipate_slaveVP(slotSlv);
nengel@22 435 #endif
nengel@22 436 }
nengel@22 437 }
nengel@33 438 int i;
nengel@33 439 for (i = 0; i < NUM_STRUCS_IN_SEM_ENV; i++) {
nengel@33 440 freePrivQ(semanticEnv->fnSingletons[i].waitQ);
nengel@33 441 freePrivQ(semanticEnv->transactionStrucs[i].waitingVPQ);
nengel@33 442 }
seanhalle@0 443
nengel@22 444 freePrivQ(semanticEnv->freeExtraTaskSlvQ);
nengel@22 445 freePrivQ(semanticEnv->slavesReadyToResumeQ);
nengel@22 446 freePrivQ(semanticEnv->taskReadyQ);
nengel@33 447 freePrivQ(semanticEnv->deferredSubmitsQ);
nengel@33 448 freeHashTable(semanticEnv->argPtrHashTbl);
nengel@33 449 freeHashTable(semanticEnv->commHashTbl);
nengel@39 450 freeHashTable(semanticEnv->criticalHashTbl);
nengel@33 451 VMS_int__free(semanticEnv->coreIsDone);
nengel@33 452 VMS_int__free(_VMSMasterEnv->semanticEnv);
nengel@33 453
nengel@33 454 VMS_SS__cleanup_at_end_of_shutdown();
nengel@33 455 }
seanhalle@0 456
seanhalle@0 457
seanhalle@0 458 //===========================================================================
seanhalle@0 459
seanhalle@2 460 SlaveVP *
seanhalle@7 461 VSs__create_thread( TopLevelFnPtr fnPtr, void *initData,
seanhalle@7 462 SlaveVP *creatingThd )
seanhalle@2 463 { VSsSemReq reqData;
seanhalle@0 464
seanhalle@0 465 //the semantic request data is on the stack and disappears when this
seanhalle@0 466 // call returns -- it's guaranteed to remain in the VP's stack for as
seanhalle@0 467 // long as the VP is suspended.
seanhalle@0 468 reqData.reqType = 0; //know type because in a VMS create req
seanhalle@0 469 reqData.fnPtr = fnPtr;
seanhalle@0 470 reqData.initData = initData;
seanhalle@7 471 reqData.callingSlv = creatingThd;
seanhalle@0 472
seanhalle@7 473 VMS_WL__send_create_slaveVP_req( &reqData, creatingThd );
seanhalle@0 474
seanhalle@7 475 return creatingThd->dataRetFromReq;
seanhalle@0 476 }
seanhalle@0 477
/*This is always the last thing done in the code animated by a thread VP.
 * Normally, this would be the last line of the thread's top level function.
 * But, if the thread exits from any point, it has to do so by calling
 * this.
 *
 *It simply sends a dissipate request, which handles all the state cleanup.
 */
void
VSs__end_thread()
 {
   //dissipate the calling slave; does not return to the caller
   VMS_WL__send_dissipate_req( currVP );
 }
seanhalle@0 491
nengel@38 492 void VSs__run_thread(TopLevelFnPtr fnPtr, void *initData){
nengel@38 493 (*fnPtr)(initData);
nengel@38 494 VSs__end_thread();
nengel@38 495 }
seanhalle@10 496
seanhalle@0 497 //===========================================================================
seanhalle@0 498
seanhalle@0 499
seanhalle@4 500 //======================= task submit and end ==============================
nengel@39 501
seanhalle@4 502 /*
seanhalle@2 503 */
nengel@39 504 void VSs__submit_task(VSsTaskType *taskType, void *args, void* deps) {
nengel@39 505 VSsSemReq reqData;
seanhalle@0 506
nengel@39 507 reqData.reqType = submit_task;
nengel@39 508
nengel@39 509 reqData.taskType = taskType;
nengel@39 510 reqData.args = args;
nengel@39 511 reqData.deps = deps;
nengel@39 512 reqData.callingSlv = currVP;
nengel@39 513
nengel@39 514 reqData.taskID = NULL;
nengel@39 515
nengel@39 516 VMS_WL__send_sem_request(&reqData, currVP);
nengel@39 517 }
seanhalle@0 518
nengel@20 519 int32 *
nengel@37 520 VSs__create_taskID_of_size( int32 numInts)
seanhalle@4 521 { int32 *taskID;
seanhalle@4 522
seanhalle@4 523 taskID = VMS_WL__malloc( sizeof(int32) + numInts * sizeof(int32) );
seanhalle@4 524 taskID[0] = numInts;
seanhalle@4 525 return taskID;
nengel@39 526 }
seanhalle@4 527
nengel@39 528 void VSs__submit_task_with_ID(VSsTaskType *taskType, void *args, void* deps, int32 *taskID) {
nengel@39 529 VSsSemReq reqData;
seanhalle@4 530
nengel@39 531 reqData.reqType = submit_task;
nengel@39 532
nengel@39 533 reqData.taskType = taskType;
nengel@39 534 reqData.args = args;
nengel@39 535 reqData.deps = deps;
nengel@39 536 reqData.taskID = taskID;
nengel@39 537 reqData.callingSlv = currVP;
nengel@39 538
nengel@39 539 VMS_WL__send_sem_request(&reqData, currVP);
nengel@39 540 }
seanhalle@4 541
seanhalle@4 542
seanhalle@4 543 /*This call is the last to happen in every task. It causes the slave to
seanhalle@2 544 * suspend and get the next task out of the task-queue. Notice there is no
seanhalle@2 545 * assigner here.. only one slave, no slave ReadyQ, and so on..
seanhalle@2 546 *Can either make the assigner take the next task out of the taskQ, or can
seanhalle@2 547 * leave all as it is, and make task-end take the next task.
seanhalle@2 548 *Note: this fits the case in the new VMS for no-context tasks, so will use
seanhalle@2 549 * the built-in taskQ of new VMS, and should be local and much faster.
seanhalle@2 550 *
seanhalle@2 551 *The task-stub is saved in the animSlv, so the request handler will get it
seanhalle@2 552 * from there, along with the task-type which has arg types, and so on..
seanhalle@4 553 *
seanhalle@4 554 * NOTE: if want, don't need to send the animating SlaveVP around..
seanhalle@4 555 * instead, can make a single slave per core, and coreCtrlr looks up the
seanhalle@4 556 * slave from having the core number.
seanhalle@4 557 *
seanhalle@4 558 *But, to stay compatible with all the other VMS languages, leave it in..
seanhalle@0 559 */
seanhalle@0 560 void
nengel@37 561 VSs__end_task()
seanhalle@2 562 { VSsSemReq reqData;
seanhalle@0 563
seanhalle@2 564 reqData.reqType = end_task;
nengel@37 565 reqData.callingSlv = currVP;
seanhalle@2 566
nengel@37 567 VMS_WL__send_sem_request( &reqData, currVP );
seanhalle@0 568 }
seanhalle@0 569
nengel@38 570 void VSs__run_task(TopLevelFnPtr fnPtr, void *initData){
nengel@38 571 (*fnPtr)(initData);
nengel@38 572 VSs__end_task();
nengel@38 573 }
seanhalle@4 574
nengel@5 575 void
nengel@37 576 VSs__taskwait()
nengel@5 577 {
nengel@5 578 VSsSemReq reqData;
nengel@5 579
nengel@5 580 reqData.reqType = taskwait;
nengel@37 581 reqData.callingSlv = currVP;
nengel@5 582
nengel@37 583 VMS_WL__send_sem_request( &reqData, currVP );
nengel@5 584 }
nengel@5 585
nengel@21 586 void
nengel@37 587 VSs__taskwait_on(void* ptr){
nengel@21 588 VSsSemReq reqData;
nengel@5 589
nengel@21 590 reqData.reqType = taskwait_on;
nengel@37 591 reqData.callingSlv = currVP;
nengel@21 592
nengel@21 593 reqData.args = ptr;
nengel@21 594
nengel@37 595 VMS_WL__send_sem_request( &reqData, currVP );
nengel@21 596 }
nengel@21 597
nengel@21 598 void
nengel@39 599 VSs__start_critical(void* lock){
nengel@21 600 VSsSemReq reqData;
nengel@21 601
nengel@21 602 reqData.reqType = critical_start;
nengel@37 603 reqData.callingSlv = currVP;
nengel@21 604
nengel@39 605 reqData.criticalID = lock;
nengel@21 606
nengel@37 607 VMS_WL__send_sem_request( &reqData, currVP );
nengel@21 608 }
nengel@21 609
nengel@21 610 void
nengel@39 611 VSs__end_critical(void* lock){
nengel@21 612 VSsSemReq reqData;
nengel@21 613
nengel@21 614 reqData.reqType = critical_end;
nengel@37 615 reqData.callingSlv = currVP;
nengel@21 616
nengel@39 617 reqData.criticalID = lock;
nengel@21 618
nengel@37 619 VMS_WL__send_sem_request( &reqData, currVP );
nengel@21 620 }
nengel@5 621
seanhalle@4 622 //========================== send and receive ============================
seanhalle@4 623 //
seanhalle@4 624
nengel@20 625 int32 *
nengel@37 626 VSs__give_self_taskID()
seanhalle@4 627 {
nengel@37 628 return ((VSsSemData*)currVP->semanticData)->taskStub->taskID;
seanhalle@4 629 }
seanhalle@4 630
seanhalle@4 631 //================================ send ===================================
seanhalle@4 632
seanhalle@4 633 void
nengel@37 634 VSs__send_of_type_to( void *msg, const int32 type, int32 *receiverID)
seanhalle@4 635 { VSsSemReq reqData;
seanhalle@4 636
seanhalle@4 637 reqData.reqType = send_type_to;
seanhalle@4 638
seanhalle@4 639 reqData.msg = msg;
seanhalle@4 640 reqData.msgType = type;
seanhalle@4 641 reqData.receiverID = receiverID;
nengel@37 642 reqData.senderSlv = currVP;
seanhalle@4 643
seanhalle@4 644 reqData.nextReqInHashEntry = NULL;
seanhalle@4 645
nengel@37 646 VMS_WL__send_sem_request( &reqData, currVP );
seanhalle@4 647
seanhalle@4 648 //When come back from suspend, no longer own data reachable from msg
seanhalle@4 649 }
seanhalle@4 650
seanhalle@4 651 void
nengel@37 652 VSs__send_from_to( void *msg, int32 *senderID, int32 *receiverID)
seanhalle@4 653 { VSsSemReq reqData;
seanhalle@4 654
seanhalle@4 655 reqData.reqType = send_from_to;
seanhalle@4 656
seanhalle@4 657 reqData.msg = msg;
seanhalle@4 658 reqData.senderID = senderID;
seanhalle@4 659 reqData.receiverID = receiverID;
nengel@37 660 reqData.senderSlv = currVP;
seanhalle@4 661
seanhalle@4 662 reqData.nextReqInHashEntry = NULL;
seanhalle@4 663
nengel@37 664 VMS_WL__send_sem_request( &reqData, currVP );
seanhalle@4 665 }
seanhalle@4 666
seanhalle@4 667
seanhalle@4 668 //================================ receive ================================
seanhalle@4 669
seanhalle@4 670 /*The "type" version of send and receive creates a many-to-one relationship.
seanhalle@4 671 * The sender is anonymous, and many sends can stack up, waiting to be
seanhalle@4 672 * received. The same receiver can also have send from-to's
seanhalle@4 673 * waiting for it, and those will be kept separate from the "type"
seanhalle@4 674 * messages.
seanhalle@4 675 */
seanhalle@4 676 void *
nengel@37 677 VSs__receive_type_to( const int32 type, int32* receiverID )
seanhalle@4 678 { DEBUG__printf1(dbgRqstHdlr,"WL: receive type to %d",receiverID[1] );
seanhalle@4 679 VSsSemReq reqData;
seanhalle@4 680
seanhalle@4 681 reqData.reqType = receive_type_to;
seanhalle@4 682
seanhalle@4 683 reqData.msgType = type;
seanhalle@4 684 reqData.receiverID = receiverID;
nengel@37 685 reqData.receiverSlv = currVP;
seanhalle@4 686
seanhalle@4 687 reqData.nextReqInHashEntry = NULL;
seanhalle@4 688
nengel@37 689 VMS_WL__send_sem_request( &reqData, currVP );
seanhalle@4 690
nengel@37 691 return currVP->dataRetFromReq;
seanhalle@4 692 }
seanhalle@4 693
seanhalle@4 694
seanhalle@4 695
seanhalle@4 696 /*Call this at the point a receiving task wants in-coming data.
seanhalle@4 697 * Use this from-to form when know senderID -- it makes a direct channel
seanhalle@4 698 * between sender and receiver.
seanhalle@4 699 */
seanhalle@4 700 void *
nengel@37 701 VSs__receive_from_to( int32 *senderID, int32 *receiverID)
seanhalle@4 702 {
seanhalle@4 703 VSsSemReq reqData;
seanhalle@4 704
seanhalle@4 705 reqData.reqType = receive_from_to;
seanhalle@4 706
seanhalle@4 707 reqData.senderID = senderID;
seanhalle@4 708 reqData.receiverID = receiverID;
nengel@37 709 reqData.receiverSlv = currVP;
seanhalle@4 710
seanhalle@4 711 reqData.nextReqInHashEntry = NULL;
seanhalle@4 712 DEBUG__printf2(dbgRqstHdlr,"WL: receive from %d to: %d", reqData.senderID[1], reqData.receiverID[1]);
seanhalle@4 713
nengel@37 714 VMS_WL__send_sem_request( &reqData, currVP );
seanhalle@4 715
nengel@37 716 return currVP->dataRetFromReq;
seanhalle@4 717 }
seanhalle@4 718
seanhalle@4 719
seanhalle@4 720
seanhalle@4 721
seanhalle@2 722 //==========================================================================
seanhalle@0 723 //
seanhalle@0 724 /*A function singleton is a function whose body executes exactly once, on a
 * single core, no matter how many times the function is called and no
seanhalle@0 726 * matter how many cores or the timing of cores calling it.
seanhalle@0 727 *
seanhalle@0 728 *A data singleton is a ticket attached to data. That ticket can be used
seanhalle@0 729 * to get the data through the function exactly once, no matter how many
seanhalle@0 730 * times the data is given to the function, and no matter the timing of
seanhalle@0 731 * trying to get the data through from different cores.
seanhalle@0 732 */
seanhalle@0 733
seanhalle@0 734 /*asm function declarations*/
seanhalle@2 735 void asm_save_ret_to_singleton(VSsSingleton *singletonPtrAddr);
seanhalle@2 736 void asm_write_ret_from_singleton(VSsSingleton *singletonPtrAddr);
seanhalle@0 737
seanhalle@0 738 /*Fn singleton uses ID as index into array of singleton structs held in the
seanhalle@0 739 * semantic environment.
seanhalle@0 740 */
/*Starts a function singleton: the code between this call and the matching
 * VSs__end_fn_singleton executes exactly once per singletonID, regardless
 * of how many slaves call it or their timing.
 *singletonID indexes the fnSingletons array held in the semantic env.
 *dataRetFromReq is 0 on the one call allowed through; otherwise it is the
 * end-singleton's saved return address (per the inline comment below), and
 * the asm helper rewrites this call's return address on the stack so the
 * caller resumes just past the end-singleton -- skipping the body.
 */
void
VSs__start_fn_singleton( int32 singletonID)
{
    VSsSemReq reqData;

    //suspend to master, which checks/updates the singleton's state
    reqData.reqType     = singleton_fn_start;
    reqData.singletonID = singletonID;

    VMS_WL__send_sem_request( &reqData, currVP );
    if( currVP->dataRetFromReq ) //will be 0 or addr of label in end singleton
    {
        VSsSemEnv *semEnv = VMS_int__give_sem_env_for( currVP );
        asm_write_ret_from_singleton(&(semEnv->fnSingletons[ singletonID]));
    }
}
seanhalle@0 757
seanhalle@0 758 /*Data singleton hands addr of loc holding a pointer to a singleton struct.
seanhalle@0 759 * The start_data_singleton makes the structure and puts its addr into the
seanhalle@0 760 * location.
seanhalle@0 761 */
/*Starts a data singleton. singletonAddr is the address of a location
 * holding a pointer to the singleton struct; the request handler creates
 * the struct and stores its address there (per the header comment above).
 *Fast path: if the struct already exists and hasFinished is set, jump
 * straight to the end-singleton return address without suspending.
 *Otherwise suspend with singleton_data_start; dataRetFromReq comes back 0
 * (body may run) or the end-singleton's return addr (body is skipped via
 * the asm return-address rewrite).
 */
void
VSs__start_data_singleton( VSsSingleton **singletonAddr )
{
    VSsSemReq reqData;

    //fast path -- singleton already completed, skip the request round-trip
    if( *singletonAddr && (*singletonAddr)->hasFinished )
        goto JmpToEndSingleton;

    reqData.reqType          = singleton_data_start;
    reqData.singletonPtrAddr = singletonAddr;

    VMS_WL__send_sem_request( &reqData, currVP );
    if( currVP->dataRetFromReq ) //either 0 or end singleton's return addr
    { //Assembly code changes the return addr on the stack to the one
      // saved into the singleton by the end-singleton-fn
      //The return addr is at 0x4(%%ebp)
      //NOTE: goto target lives inside this if-body on purpose -- the fast
      // path above re-uses the same asm rewrite
        JmpToEndSingleton:
        asm_write_ret_from_singleton(*singletonAddr);
    }
    //now, simply return
    //will exit either from the start singleton call or the end-singleton call
}
seanhalle@0 784
seanhalle@0 785 /*Uses ID as index into array of flags. If flag already set, resumes from
seanhalle@0 786 * end-label. Else, sets flag and resumes normally.
seanhalle@0 787 *
seanhalle@0 788 *Note, this call cannot be inlined because the instr addr at the label
seanhalle@0 789 * inside is shared by all invocations of a given singleton ID.
seanhalle@0 790 */
/*Ends a function singleton. Uses ID as index into the fnSingletons array.
 *Cannot be inlined: the instruction address captured here is shared by all
 * invocations of a given singleton ID (per the header comment above).
 *
 *NOTE(review): this calls asm_write_ret_from_singleton, but the preceding
 * comment says "don't need this addr until after..." and the data-singleton
 * twin (VSs__end_data_singleton) calls asm_save_ret_to_singleton at the
 * same point to SAVE this call's return address into the singleton.
 * Suspected copy/paste error -- confirm intended asm helper against the
 * asm implementations, which are outside this view.
 */
void
VSs__end_fn_singleton( int32 singletonID )
{
    VSsSemReq reqData;

    //don't need this addr until after at least one singleton has reached
    // this function
    VSsSemEnv *semEnv = VMS_int__give_sem_env_for( currVP );
    asm_write_ret_from_singleton(&(semEnv->fnSingletons[ singletonID]));

    //suspend to master so it can mark the singleton finished and release
    // any VPs waiting on it
    reqData.reqType     = singleton_fn_end;
    reqData.singletonID = singletonID;

    VMS_WL__send_sem_request( &reqData, currVP );

    //EndSingletonInstrAddr:
    return;
}
seanhalle@0 809
/*Ends a data singleton. Saves this call's return address into the
 * singleton struct, so later start-data-singleton calls can jump straight
 * here, then suspends to the master with a singleton_data_end request.
 *A given data-singleton may pass through exactly one code instance of this
 * function, though the function itself can serve many different
 * data-singletons (see inline comment below).
 */
void
VSs__end_data_singleton( VSsSingleton **singletonPtrAddr )
{
    VSsSemReq reqData;

    //don't need this addr until after singleton struct has reached
    // this function for first time
    //do assembly that saves the return addr of this fn call into the
    // data singleton -- that data-singleton can only be given to exactly
    // one instance in the code of this function. However, can use this
    // function in different places for different data-singletons.
    // (*(singletonAddr))->endInstrAddr = &&EndDataSingletonInstrAddr;


    asm_save_ret_to_singleton(*singletonPtrAddr);

    reqData.reqType          = singleton_data_end;
    reqData.singletonPtrAddr = singletonPtrAddr;

    VMS_WL__send_sem_request( &reqData, currVP );
}
seanhalle@0 831
seanhalle@0 832 /*This executes the function in the masterVP, so it executes in isolation
seanhalle@0 833 * from any other copies -- only one copy of the function can ever execute
seanhalle@0 834 * at a time.
seanhalle@0 835 *
seanhalle@0 836 *It suspends to the master, and the request handler takes the function
seanhalle@0 837 * pointer out of the request and calls it, then resumes the VP.
seanhalle@0 838 *Only very short functions should be called this way -- for longer-running
seanhalle@0 839 * isolation, use transaction-start and transaction-end, which run the code
seanhalle@0 840 * between as work-code.
seanhalle@0 841 */
seanhalle@0 842 void
seanhalle@2 843 VSs__animate_short_fn_in_isolation( PtrToAtomicFn ptrToFnToExecInMaster,
nengel@37 844 void *data )
seanhalle@0 845 {
seanhalle@2 846 VSsSemReq reqData;
seanhalle@0 847
seanhalle@0 848 //
seanhalle@0 849 reqData.reqType = atomic;
seanhalle@0 850 reqData.fnToExecInMaster = ptrToFnToExecInMaster;
seanhalle@0 851 reqData.dataForFn = data;
seanhalle@0 852
nengel@37 853 VMS_WL__send_sem_request( &reqData, currVP );
seanhalle@0 854 }
seanhalle@0 855
seanhalle@0 856
seanhalle@0 857 /*This suspends to the master.
seanhalle@0 858 *First, it looks at the VP's data, to see the highest transactionID that VP
seanhalle@0 859 * already has entered. If the current ID is not larger, it throws an
seanhalle@0 860 * exception stating a bug in the code. Otherwise it puts the current ID
seanhalle@0 861 * there, and adds the ID to a linked list of IDs entered -- the list is
seanhalle@0 862 * used to check that exits are properly ordered.
seanhalle@0 863 *Next it is uses transactionID as index into an array of transaction
seanhalle@0 864 * structures.
seanhalle@0 865 *If the "VP_currently_executing" field is non-null, then put requesting VP
seanhalle@0 866 * into queue in the struct. (At some point a holder will request
seanhalle@0 867 * end-transaction, which will take this VP from the queue and resume it.)
seanhalle@0 868 *If NULL, then write requesting into the field and resume.
seanhalle@0 869 */
seanhalle@0 870 void
nengel@37 871 VSs__start_transaction( int32 transactionID )
seanhalle@0 872 {
seanhalle@2 873 VSsSemReq reqData;
seanhalle@0 874
seanhalle@0 875 //
nengel@37 876 reqData.callingSlv = currVP;
seanhalle@0 877 reqData.reqType = trans_start;
seanhalle@0 878 reqData.transID = transactionID;
seanhalle@0 879
nengel@37 880 VMS_WL__send_sem_request( &reqData, currVP );
seanhalle@0 881 }
seanhalle@0 882
seanhalle@0 883 /*This suspends to the master, then uses transactionID as index into an
seanhalle@0 884 * array of transaction structures.
seanhalle@0 885 *It looks at VP_currently_executing to be sure it's same as requesting VP.
seanhalle@0 886 * If different, throws an exception, stating there's a bug in the code.
seanhalle@0 887 *Next it looks at the queue in the structure.
seanhalle@0 888 *If it's empty, it sets VP_currently_executing field to NULL and resumes.
 *If the queue is non-empty, it takes one VP out, sets VP_currently_executing to that VP, then
seanhalle@0 890 * resumes both.
seanhalle@0 891 */
seanhalle@0 892 void
nengel@37 893 VSs__end_transaction( int32 transactionID )
seanhalle@0 894 {
seanhalle@2 895 VSsSemReq reqData;
seanhalle@0 896
seanhalle@0 897 //
nengel@37 898 reqData.callingSlv = currVP;
seanhalle@0 899 reqData.reqType = trans_end;
seanhalle@0 900 reqData.transID = transactionID;
seanhalle@0 901
nengel@37 902 VMS_WL__send_sem_request( &reqData, currVP );
seanhalle@0 903 }
seanhalle@7 904
seanhalle@7 905 //======================== Internal ==================================
seanhalle@7 906 /*
seanhalle@7 907 */
seanhalle@7 908 SlaveVP *
seanhalle@7 909 VSs__create_slave_with( TopLevelFnPtr fnPtr, void *initData,
seanhalle@7 910 SlaveVP *creatingSlv )
seanhalle@7 911 { VSsSemReq reqData;
seanhalle@7 912
seanhalle@7 913 //the semantic request data is on the stack and disappears when this
seanhalle@7 914 // call returns -- it's guaranteed to remain in the VP's stack for as
seanhalle@7 915 // long as the VP is suspended.
seanhalle@7 916 reqData.reqType = 0; //know type because in a VMS create req
seanhalle@7 917 reqData.coreToAssignOnto = -1; //means round-robin assign
seanhalle@7 918 reqData.fnPtr = fnPtr;
seanhalle@7 919 reqData.initData = initData;
seanhalle@7 920 reqData.callingSlv = creatingSlv;
seanhalle@7 921
seanhalle@7 922 VMS_WL__send_create_slaveVP_req( &reqData, creatingSlv );
seanhalle@7 923
seanhalle@7 924 return creatingSlv->dataRetFromReq;
seanhalle@7 925 }
seanhalle@7 926
seanhalle@7 927 SlaveVP *
seanhalle@7 928 VSs__create_slave_with_affinity( TopLevelFnPtr fnPtr, void *initData,
seanhalle@7 929 SlaveVP *creatingSlv, int32 coreToAssignOnto )
seanhalle@7 930 { VSsSemReq reqData;
seanhalle@7 931
seanhalle@7 932 //the semantic request data is on the stack and disappears when this
seanhalle@7 933 // call returns -- it's guaranteed to remain in the VP's stack for as
seanhalle@7 934 // long as the VP is suspended.
seanhalle@7 935 reqData.reqType = create_slave_w_aff; //not used, May 2012
seanhalle@7 936 reqData.coreToAssignOnto = coreToAssignOnto;
seanhalle@7 937 reqData.fnPtr = fnPtr;
seanhalle@7 938 reqData.initData = initData;
seanhalle@7 939 reqData.callingSlv = creatingSlv;
seanhalle@7 940
seanhalle@7 941 VMS_WL__send_create_slaveVP_req( &reqData, creatingSlv );
seanhalle@7 942
seanhalle@7 943 return creatingSlv->dataRetFromReq;
seanhalle@7 944 }
seanhalle@7 945
nengel@37 946 int __main_ret;
nengel@37 947
nengel@37 948 void __entry_point(void* _args) {
nengel@37 949 __main_args* args = (__main_args*) _args;
nengel@37 950 __main_ret = __program_main(args->argc, args->argv);
nengel@37 951 }
nengel@37 952
//Undo the macro rename of the application's main so the real process
// entry point can be defined here (presumably VSs.h #defines main to
// __program_main -- TODO confirm; the header is outside this view).
#undef main

//Real process entry point: bundles argc/argv, installs __entry_point as
// the seed slave's work, then returns the application's exit status.
// NOTE(review): returning __main_ret is only correct if
// VSs__create_seed_slave_and_do_work blocks until all work, including
// __entry_point, has finished -- confirm against its implementation.
int main(int argc, char** argv) {
    __main_args args;
    args.argc = argc;
    args.argv = argv;
    VSs__create_seed_slave_and_do_work(__entry_point, (void*) &args);
    return __main_ret;
}