# HG changeset patch # User Nina Engelhardt # Date 1362576753 -3600 # Node ID 227db52cbd9395826b2ccc8ce1c7e351b07dd616 # Parent 3787df8b95f92691a86520c344bb94c3eabe67d9 VSs working diff -r 3787df8b95f9 -r 227db52cbd93 VSs.c --- a/VSs.c Fri Feb 01 17:18:57 2013 +0100 +++ b/VSs.c Wed Mar 06 14:32:33 2013 +0100 @@ -209,7 +209,7 @@ VSs__init_counter_data_structs(); #endif - semanticEnv->shutdownInitiated = FALSE; + //semanticEnv->shutdownInitiated = FALSE; semanticEnv->coreIsDone = VMS_int__malloc( NUM_CORES * sizeof( bool32 ) ); semanticEnv->numCoresDone = 0; //For each animation slot, there is an idle slave, and an initial @@ -245,7 +245,7 @@ semanticEnv->freeExtraTaskSlvQ = makeVMSQ(); semanticEnv->taskReadyQ = makeVMSQ(); - semanticEnv->argPtrHashTbl = makeHashTable32( 16, &VMS_int__free ); + semanticEnv->argPtrHashTbl = makeHashTable32( 20, &free_pointer_entry ); semanticEnv->commHashTbl = makeHashTable32( 16, &VMS_int__free ); semanticEnv->nextCoreToGetNewSlv = 0; @@ -424,33 +424,39 @@ #endif /* It's all allocated inside VMS's big chunk -- that's about to be freed, so * nothing to do here */ -/* + //_VMSMasterEnv->shutdownInitiated = TRUE; int coreIdx, slotIdx; SlaveVP* slotSlv; for (coreIdx = 0; coreIdx < NUM_CORES; coreIdx++) { for (slotIdx = 0; slotIdx < NUM_ANIM_SLOTS; slotIdx++) { slotSlv = semanticEnv->slotTaskSlvs[coreIdx][slotIdx]; VMS_int__free(slotSlv->semanticData); - VMS_int__free( slotSlv->startOfStack ); - VMS_int__free( slotSlv ); + VMS_int__dissipate_slaveVP(slotSlv); #ifdef IDLE_SLAVES slotSlv = semanticEnv->idleSlv[coreIdx][slotIdx]; VMS_int__free(slotSlv->semanticData); - VMS_int__free( slotSlv->startOfStack ); - VMS_int__free( slotSlv ); + VMS_int__dissipate_slaveVP(slotSlv); #endif } } + int i; + for (i = 0; i < NUM_STRUCS_IN_SEM_ENV; i++) { + freePrivQ(semanticEnv->fnSingletons[i].waitQ); + freePrivQ(semanticEnv->transactionStrucs[i].waitingVPQ); + freePrivQ(semanticEnv->criticalSection[i].waitQ); + } 
freePrivQ(semanticEnv->freeExtraTaskSlvQ); freePrivQ(semanticEnv->slavesReadyToResumeQ); freePrivQ(semanticEnv->taskReadyQ); - freeHashTable( semanticEnv->argPtrHashTbl ); - freeHashTable( semanticEnv->commHashTbl ); - VMS_int__free( _VMSMasterEnv->semanticEnv ); - */ - VMS_SS__cleanup_at_end_of_shutdown(); - } + freePrivQ(semanticEnv->deferredSubmitsQ); + freeHashTable(semanticEnv->argPtrHashTbl); + freeHashTable(semanticEnv->commHashTbl); + VMS_int__free(semanticEnv->coreIsDone); + VMS_int__free(_VMSMasterEnv->semanticEnv); + + VMS_SS__cleanup_at_end_of_shutdown(); +} //=========================================================================== diff -r 3787df8b95f9 -r 227db52cbd93 VSs.h --- a/VSs.h Fri Feb 01 17:18:57 2013 +0100 +++ b/VSs.h Wed Mar 06 14:32:33 2013 +0100 @@ -14,6 +14,7 @@ #include "VMS_impl/VMS.h" #include "Measurement/dependency.h" +void free_pointer_entry(void* ptrEntry); /* Switch for Nexus support * Note: nexus incompatible with holistic recording (constraints not accessible) * But counter recording still functional, can build constraintless display @@ -78,25 +79,25 @@ } VSsPointerEntry; -typedef struct - { - void **args; //ctld args must come first, as ptrs - VSsTaskType *taskType; - int32 *taskID; - int32 numBlockingProp; - SlaveVP *slaveAssignedTo; //only valid before end task (thread) - VSsPointerEntry **ptrEntries; - void* parentTaskStub; - int32 numLiveChildTasks; - int32 numLiveChildThreads; - bool32 isWaitingForChildTasksToEnd; - bool32 isWaitingForChildThreadsToEnd; - bool32 isEnded; - #ifdef HOLISTIC__TURN_ON_OBSERVE_UCC - Unit parentUnit; - Unit firstOfTask; - #endif - } +typedef struct { + void **args; //ctld args must come first, as ptrs + VSsTaskType *taskType; + int32 *taskID; + int32 numBlockingProp; + SlaveVP *slaveAssignedTo; //only valid before end task (thread) + VSsPointerEntry **ptrEntries; + void* parentTaskStub; + int32 numLiveChildTasks; + int32 numLiveChildThreads; + bool32 isWaitingForChildTasksToEnd; + bool32 
isWaitingForChildThreadsToEnd; + bool32 isEnded; + int *argsMask; +#ifdef HOLISTIC__TURN_ON_OBSERVE_UCC + Unit parentUnit; + Unit firstOfTask; +#endif +} VSsTaskStub; @@ -253,7 +254,7 @@ #ifdef IDLE_SLAVES SlaveVP* idleSlv[NUM_CORES][NUM_ANIM_SLOTS]; #endif - int shutdownInitiated; + //int shutdownInitiated; } VSsSemEnv; diff -r 3787df8b95f9 -r 227db52cbd93 VSs_PluginFns.c --- a/VSs_PluginFns.c Fri Feb 01 17:18:57 2013 +0100 +++ b/VSs_PluginFns.c Wed Mar 06 14:32:33 2013 +0100 @@ -66,7 +66,7 @@ slotNum = slot->slotIdx; semEnv = (VSsSemEnv *) _semEnv; - + //Check for suspended slaves that are ready to resume returnSlv = readPrivQ(semEnv->slavesReadyToResumeQ); if (returnSlv != NULL) //Yes, have a slave, so return it. @@ -130,22 +130,22 @@ semEnv->numCoresDone += 1; semEnv->coreIsDone[coreNum] = TRUE; #ifdef DEBUG__TURN_ON_SEQUENTIAL_MODE - semEnv->shutdownInitiated = TRUE; + _VMSMasterEnv->shutdownInitiated = TRUE; #else if (semEnv->numCoresDone == NUM_CORES) { //means no cores have work, and none can generate more - semEnv->shutdownInitiated = TRUE; + _VMSMasterEnv->shutdownInitiated = TRUE; } #endif } } else if (_VMSMasterEnv->numAnimatedSlaves == 0){ DEBUG__printf(TRUE,"Deadlock detected"); - semEnv->shutdownInitiated = TRUE; + _VMSMasterEnv->shutdownInitiated = TRUE; } //return NULL.. no task and none to resume returnSlv = NULL; //except if shutdown has been initiated by this or other core - if (semEnv->shutdownInitiated) { + if (_VMSMasterEnv->shutdownInitiated) { returnSlv = VMS_SS__create_shutdown_slave(); } goto ReturnTheSlv; //don't need, but completes pattern @@ -155,12 +155,14 @@ ReturnTheSlv: //Nina, doing gotos to here should help with holistic.. 
- +if(returnSlv){ +DEBUG__printf_w_task(dbgRqstHdlr,((VSsSemData*)returnSlv->semanticData)->taskStub,"scheduled"); +} #ifdef IDLE_SLAVES if (!returnSlv) { returnSlv = semEnv->idlePr[coreNum][slotNum]; - if (semEnv->shutdownInitiated) { + if (_VMSMasterEnv->shutdownInitiated) { returnSlv = VMS_SS__create_shutdown_slave(); } } @@ -283,7 +285,7 @@ break; case trans_end: handleTransEnd(semReq, reqSlv, semEnv); break; - default: VMS_PI__throw_exception("Unknown request type", reqSlv, NULL); + default: VMS_PI__throw_exception("Unknown request type\n", reqSlv, NULL); break; } } @@ -411,6 +413,7 @@ newStub->slaveAssignedTo = NULL; //set later newStub->taskType = IS_A_THREAD; newStub->ptrEntries = NULL; + newStub->argsMask = NULL; newStub->args = initData; newStub->numLiveChildTasks = 0; newStub->numLiveChildThreads = 0; @@ -419,7 +422,7 @@ newStub->isWaitingForChildThreadsToEnd = FALSE; newStub->taskID = NULL; newStub->isEnded = FALSE; - + return newStub; } diff -r 3787df8b95f9 -r 227db52cbd93 VSs_Request_Handlers.c --- a/VSs_Request_Handlers.c Fri Feb 01 17:18:57 2013 +0100 +++ b/VSs_Request_Handlers.c Wed Mar 06 14:32:33 2013 +0100 @@ -81,7 +81,12 @@ return newEntry; } -void free_pointer_entry(VSsPointerEntry* ptrEntry){ +void free_pointer_entry(void* _ptrEntry) { + VSsPointerEntry* ptrEntry = (VSsPointerEntry*)_ptrEntry; + int entriesStillInQ = ptrEntry->waitersQ->numWrites - ptrEntry->waitersQ->numReads; + if (entriesStillInQ) { + DEBUG__printf(dbgRqstHdlr, "Deleting Queue with %d entries still remaining", entriesStillInQ); + } freePrivQ(ptrEntry->waitersQ); VMS_int__free(ptrEntry); } @@ -108,6 +113,8 @@ newStub->isEnded = FALSE; newStub->taskID = NULL; newStub->parentTaskStub = NULL; + newStub->argsMask = VMS_int__malloc(sizeof(int) * taskType->numCtldArgs); + memset(newStub->argsMask, 0, sizeof(int) * taskType->numCtldArgs); //Copy the arg-pointers.. can be more arguments than just the ones // that StarSs uses to control ordering of task execution. 
memcpy(newArgs, args, taskType->sizeOfArgs); @@ -128,6 +135,37 @@ return newCarrier; } + //check for identical pointers in args -- mask all but one copy +//if one of pointers is writer, non-masked arg must be writer +int mask_duplicates(VSsTaskStub *taskStub){ + int argNum, i; + int numUniqueArgs = 0; + VSsTaskType* taskType = taskStub->taskType; + void **args = taskStub->args; + + for(argNum = 0; argNum < taskType->numCtldArgs; argNum++){ + //if already masked, don't need to check again + if(taskStub->argsMask[argNum]) continue; + + int unmasked = argNum; + for(i=argNum+1; i<taskType->numCtldArgs; i++){ + if(args[argNum] == args[i]){ + if(taskType->argTypes[i] == WRITER){ + taskStub->argsMask[unmasked] = TRUE; + unmasked = i; + } else { + taskStub->argsMask[i] = TRUE; + } + } + } + } + + for(i=0; i<taskType->numCtldArgs; i++){ + if(!taskStub->argsMask[i]) numUniqueArgs++; + } + return numUniqueArgs; +} + void do_submit(VSsSemReq *semReq, VSsSemEnv *semEnv){ uint32 key[5]; HashEntry *rawHashEntry; //has char *, but use with uint32 * @@ -140,6 +178,8 @@ HashTable * argPtrHashTbl = semEnv->argPtrHashTbl; + int32 argNum; + if(!semReq) { DEBUG__printf(dbgRqstHdlr,"***submitted Req is null***\n") return;} @@ -156,7 +196,11 @@ args = semReq->args; taskType = semReq->taskType; taskStub = create_task_stub(taskType, args); //copies arg ptrs - taskStub->numBlockingProp = taskType->numCtldArgs; + //check for identical pointers in args -- mask all but one copy + //if one of pointers is writer, non-masked arg must be writer + int numUniqueArgs = mask_duplicates(taskStub); + + taskStub->numBlockingProp = numUniqueArgs; taskStub->taskID = semReq->taskID; //may be NULL VSsSemData* @@ -178,8 +222,11 @@ *Processing an argument means getting the hash of the pointer. Then, * looking up the hash entry. (If none, create one). 
*/ - int32 argNum; + for (argNum = 0; argNum < taskType->numCtldArgs; argNum++) { + //only process unmasked args + if(taskStub->argsMask[argNum]) continue; + key[0] = 4; //two 32b values in key *((uint64*) & key[1]) = (uint64) args[argNum]; //write 64b into two 32b *((uint64*) & key[3]) = (uint64) taskStub->parentTaskStub ; @@ -210,15 +257,15 @@ * task-stub into the readyQ. At the same time, increment * the hash-entry's count of enabled and non-finished readers.*/ taskStub->numBlockingProp -= 1; - DEBUG__printf_w_task(dbgRqstHdlr, taskStub, "taking ptrEntry %p (read)", ptrEntry); + DEBUG__printf_w_task(dbgSS, taskStub, "taking ptrEntry %p (read)", ptrEntry); if (taskStub->numBlockingProp == 0) { writePrivQ(taskStub, semEnv->taskReadyQ); - DEBUG__printf_w_task(dbgRqstHdlr, taskStub, "ready (dependencies fulfilled)"); + DEBUG__printf_w_task(dbgSS, taskStub, "ready (dependencies fulfilled)"); } ptrEntry->numEnabledNonDoneReaders += 1; } else { /*Otherwise, the reader is put into the hash-entry's Q of * waiters*/ - DEBUG__printf_w_task(dbgRqstHdlr, taskStub, "getting in line for ptrEntry %p (read)", ptrEntry); + DEBUG__printf_w_task(dbgSS, taskStub, "getting in line for ptrEntry %p (read)", ptrEntry); taskCarrier = create_task_carrier(taskStub, argNum, READER); writePrivQ(taskCarrier, ptrEntry->waitersQ); } @@ -232,14 +279,14 @@ * task-stub. 
If the count is zero, then put the task-stub * into the readyQ.*/ taskStub->numBlockingProp -= 1; - DEBUG__printf_w_task(dbgRqstHdlr,taskStub,"taking ptrEntry %p (write)",ptrEntry); + DEBUG__printf_w_task(dbgSS,taskStub,"taking ptrEntry %p (write)",ptrEntry); if (taskStub->numBlockingProp == 0) { - DEBUG__printf_w_task(dbgRqstHdlr, taskStub, "ready (dependencies fulfilled)"); + DEBUG__printf_w_task(dbgSS, taskStub, "ready (dependencies fulfilled)"); writePrivQ(taskStub, semEnv->taskReadyQ); } ptrEntry->hasEnabledNonFinishedWriter = TRUE; } else {/*Otherwise, put the writer into the entry's Q of waiters.*/ - DEBUG__printf_w_task(dbgRqstHdlr,taskStub,"getting in line for ptrEntry %p (write)",ptrEntry); + DEBUG__printf_w_task(dbgSS,taskStub,"getting in line for ptrEntry %p (write)",ptrEntry); taskCarrier = create_task_carrier(taskStub, argNum, WRITER); writePrivQ(taskCarrier, ptrEntry->waitersQ); } @@ -360,6 +407,7 @@ //Eventually task_end will put the slave into the freeExtraTaskSlvQ replaceWithNewSlotSlvIfNeeded(semReq->callingSlv, semEnv); + DEBUG__printf_w_task(dbgRqstHdlr, ((VSsSemData*)semReq->callingSlv->semanticData)->taskStub, "submit req for "); #ifdef DEBUG__TURN_ON_DEBUG_PRINT if (dbgRqstHdlr) { if (semReq->taskID) { @@ -374,7 +422,6 @@ } } #endif - DEBUG__printf(dbgRqstHdlr, "submit req from slaveID %d", semReq->callingSlv->slaveID); // Throttle if too many tasks @@ -487,28 +534,16 @@ */ int32 argNum; for (argNum = 0; argNum < endingTaskType->numCtldArgs; argNum++) { - /* commented out 'cause remembering entry ptr when create stub - key[0] = 2; //says are 2 32b values in key - *( (uint64*)&key[1] ) = args[argNum]; //write 64b ptr into two 32b - - /*If the hash entry was chained, put it at the - * start of the chain. (Means no-longer-used pointers accumulate - * at end of chain, decide garbage collection later) - */ - /*NOTE: don't do hash lookups here, instead, have a pointer to the - * hash entry inside task-stub, put there during task creation. 
- rawHashEntry = getEntryFromTable32( key, ptrHashTbl ); - ptrEntry = (VSsPointerEntry *)rawHashEntry->content; - if( ptrEntry == NULL ) - VMS_App__throw_exception("hash entry NULL", NULL, NULL); - */ + //only process unmasked args + if(endingTaskStub->argsMask[argNum]) continue; + ptrEntry = ptrEntries[argNum]; /*check if the ending task was reader of this arg*/ if (endingTaskType->argTypes[argNum] == READER) { /*then decrement the enabled and non-finished reader-count in * the hash-entry. */ ptrEntry->numEnabledNonDoneReaders -= 1; - DEBUG__printf(dbgRqstHdlr,"Releasing read on ptrEntry %p",ptrEntry) + DEBUG__printf(dbgSS,"Releasing read on ptrEntry %p",ptrEntry) #ifdef HOLISTIC__TURN_ON_OBSERVE_UCC Unit u; u.vp = semReq->callingSlv->slaveID; @@ -526,11 +561,17 @@ /*If the count becomes zero, then take the next entry from the Q. *It should be a writer, or else there's a bug in this algorithm.*/ if (ptrEntry->numEnabledNonDoneReaders == 0) { - DEBUG__printf(dbgRqstHdlr,"ptrEntry %p now free",ptrEntry) + DEBUG__printf(dbgSS,"ptrEntry %p now free",ptrEntry) waitingTaskCarrier = readPrivQ(ptrEntry->waitersQ); if (waitingTaskCarrier == NULL) { //TODO: looks safe to delete the ptr entry at this point - DEBUG__printf(dbgRqstHdlr,"no waiting writer found for ptrEntry %p\n",ptrEntry) - //free_pointer_entry(ptrEntry); +/* + uint32 key[5]; + key[0] = 4; //two 32b values in key + *((uint64*) & key[1]) = (uint64) args[argNum]; //write 64b into two 32b + *((uint64*) & key[3]) = (uint64) endingTaskStub->parentTaskStub; + deleteEntryFromTable32(key, semEnv->argPtrHashTbl); + free_pointer_entry(ptrEntry); +*/ continue; //next iter of loop } if (waitingTaskCarrier->isReader) @@ -545,12 +586,12 @@ * task-stub. 
If the count has reached zero, then put the * task-stub into the readyQ.*/ waitingTaskStub->numBlockingProp -= 1; - DEBUG__printf_w_task(dbgRqstHdlr,waitingTaskStub,"taking ptrEntry %p (write)",ptrEntry); + DEBUG__printf_w_task(dbgSS,waitingTaskStub,"taking ptrEntry %p (write)",ptrEntry); if (waitingTaskStub->numBlockingProp == 0) { - DEBUG__printf_w_task(dbgRqstHdlr,waitingTaskStub,"ready (dependencies fulfilled)"); + DEBUG__printf_w_task(dbgSS,waitingTaskStub,"ready (dependencies fulfilled)"); writePrivQ(waitingTaskStub, semEnv->taskReadyQ); } else { - DEBUG__printf_w_task(dbgRqstHdlr,waitingTaskStub,"still blocked on %d args",waitingTaskStub->numBlockingProp); + DEBUG__printf_w_task(dbgSS,waitingTaskStub,"still blocked on %d args",waitingTaskStub->numBlockingProp); } } @@ -590,29 +631,34 @@ ptrEntry->lastWriter.task = semReq->callingSlv->assignCount; #endif - DEBUG__printf(dbgRqstHdlr,"Releasing write on ptrEntry %p; ",ptrEntry) + DEBUG__printf(dbgSS,"Releasing write on ptrEntry %p; ",ptrEntry) /*Take the next waiter from the hash-entry's Q.*/ waitingTaskCarrier = readPrivQ(ptrEntry->waitersQ); if (waitingTaskCarrier == NULL) { //TODO: looks safe to delete ptr entry at this point - DEBUG__printf(dbgRqstHdlr,"no waiting task on ptrEntry %p; deleting",ptrEntry); - //free_pointer_entry(ptrEntry); - //NOPE, still tasks around that kept the pointer... +/* + uint32 key[5]; + key[0] = 4; //two 32b values in key + *((uint64*) & key[1]) = (uint64) args[argNum]; //write 64b into two 32b + *((uint64*) & key[3]) = (uint64) endingTaskStub->parentTaskStub; + deleteEntryFromTable32(key, semEnv->argPtrHashTbl); + free_pointer_entry(ptrEntry); +*/ continue; //go to next iter of loop, done here. 
} waitingTaskStub = waitingTaskCarrier->taskStub; /*If task is a writer of this hash-entry's pointer*/ if (!waitingTaskCarrier->isReader) { /* then turn the flag back on.*/ - DEBUG__printf_w_task(dbgRqstHdlr,waitingTaskStub,"taking ptrEntry %p (write)",ptrEntry); + DEBUG__printf_w_task(dbgSS,waitingTaskStub,"taking ptrEntry %p (write)",ptrEntry); ptrEntry->hasEnabledNonFinishedWriter = TRUE; /*Decrement the writer's blocking-propendent-count in task-stub * If it becomes zero, then put the task-stub into the readyQ.*/ waitingTaskStub->numBlockingProp -= 1; if (waitingTaskStub->numBlockingProp == 0) { - DEBUG__printf_w_task(dbgRqstHdlr,waitingTaskStub,"ready (dependencies fulfilled)"); + DEBUG__printf_w_task(dbgSS,waitingTaskStub,"ready (dependencies fulfilled)"); writePrivQ(waitingTaskStub, semEnv->taskReadyQ); } else { - DEBUG__printf_w_task(dbgRqstHdlr,waitingTaskStub,"still blocked on %d args; ",waitingTaskStub->numBlockingProp); + DEBUG__printf_w_task(dbgSS,waitingTaskStub,"still blocked on %d args; ",waitingTaskStub->numBlockingProp); } VMS_PI__free(waitingTaskCarrier); } else { /*Waiting task is a reader, so do a loop, of all waiting readers @@ -621,12 +667,12 @@ * readers.*/ //deal with tasks suspended by taskwait_on here - these don't count as a dependency but are otherwise treated like readers if(waitingTaskCarrier->isSuspended){ - DEBUG__printf_w_task(dbgRqstHdlr, waitingTaskStub, "taskwaiting on ptr %p resumed; ", ptrEntry); + DEBUG__printf_w_task(dbgSS, waitingTaskStub, "taskwaiting on ptr %p resumed; ", ptrEntry); resume_slaveVP(waitingTaskStub->slaveAssignedTo, semEnv); } else { ptrEntry->numEnabledNonDoneReaders += 1; - DEBUG__printf_w_task(dbgRqstHdlr, waitingTaskStub, "now on ptrEntry %p (read)",ptrEntry); + DEBUG__printf_w_task(dbgSS, waitingTaskStub, "now on ptrEntry %p (read)",ptrEntry); //if(waitingTaskStub->taskID) { DEBUG__printf2(dbgRqstHdlr,"Reader %d now on ptrEntry %p; ",waitingTaskStub->taskID[1],ptrEntry) } //else 
{DEBUG__printf2(dbgRqstHdlr,"Reader %p now on ptrEntry %p; ",waitingTaskStub,ptrEntry)} /*Decrement the blocking propendents count of the reader's @@ -635,10 +681,10 @@ waitingTaskStub->numBlockingProp -= 1; if (waitingTaskStub->numBlockingProp == 0) { - DEBUG__printf_w_task(dbgRqstHdlr, waitingTaskStub, "ready (dependencies fulfilled)"); + DEBUG__printf_w_task(dbgSS, waitingTaskStub, "ready (dependencies fulfilled)"); writePrivQ(waitingTaskStub, semEnv->taskReadyQ); } else { - DEBUG__printf_w_task(dbgRqstHdlr,waitingTaskStub,"still blocked on %d args",waitingTaskStub->numBlockingProp); + DEBUG__printf_w_task(dbgSS,waitingTaskStub,"still blocked on %d args",waitingTaskStub->numBlockingProp); } } //if-else, suspended or normal reader //discard carrier @@ -684,11 +730,18 @@ } void -free_task_stub(VSsTaskStub *stubToFree) - { if(stubToFree->ptrEntries != NULL ) //a thread stub has NULL entry - { VMS_PI__free( stubToFree->ptrEntries ); +free_task_stub(VSsTaskStub *stubToFree) { + if (stubToFree->ptrEntries != NULL) //a thread stub has NULL entry + { + VMS_PI__free(stubToFree->ptrEntries); } - VMS_PI__free( stubToFree ); + if (stubToFree->argsMask != NULL) { + VMS_PI__free(stubToFree->argsMask); + } + if(stubToFree->taskID != NULL) { //TaskID is handed from user most of the time, not sure if overreaching (but why would you want to keep it?) 
+ VMS_PI__free(stubToFree->taskID); + } + VMS_PI__free(stubToFree); } //========================== Task Comm handlers =========================== @@ -1093,8 +1146,7 @@ semData->slaveType = ExtraTaskSlv; } -void -handleTaskwait(VSsSemReq *semReq, SlaveVP *requestingSlv, VSsSemEnv *semEnv) { +void handleTaskwait(VSsSemReq *semReq, SlaveVP *requestingSlv, VSsSemEnv *semEnv) { VSsTaskStub* requestingTaskStub; VSsSemData* semData; DEBUG__printf1(dbgRqstHdlr, "Taskwait request from processor %d", @@ -1103,14 +1155,12 @@ semData = (VSsSemData *) semReq->callingSlv->semanticData; requestingTaskStub = semData->taskStub; + replaceWithNewSlotSlvIfNeeded(requestingSlv, semEnv); + if (semData->taskStub->numLiveChildTasks == 0) { //nobody to wait for, resume resume_slaveVP(requestingSlv, semEnv); } else //have to wait, replace requester with new slot slv & mark waiting { - if (semData->slaveType == SlotTaskSlv) { - replaceWithNewSlotSlvIfNeeded( requestingSlv, semEnv ); - } - requestingTaskStub->isWaitingForChildTasksToEnd = TRUE; } } @@ -1128,6 +1178,7 @@ semData = (VSsSemData *) semReq->callingSlv->semanticData; requestingTaskStub = semData->taskStub; + replaceWithNewSlotSlvIfNeeded(requestingSlv, semEnv); void* ptr = semReq->args; @@ -1163,16 +1214,18 @@ } } -void handleCriticalStart(VSsSemReq *semReq, SlaveVP *requestingSlv, VSsSemEnv *semEnv){ +void handleCriticalStart(VSsSemReq *semReq, SlaveVP *requestingSlv, VSsSemEnv *semEnv) { VSsSemData* semData; int32 criticalID; DEBUG__printf1(dbgRqstHdlr, "CriticalStart request from processor %d", - requestingSlv->slaveID) + requestingSlv->slaveID); + + replaceWithNewSlotSlvIfNeeded(requestingSlv, semEnv); semData = (VSsSemData *) semReq->callingSlv->semanticData; - + criticalID = semReq->criticalID; - if(!semEnv->criticalSection[criticalID].isOccupied){ + if (!semEnv->criticalSection[criticalID].isOccupied) { semEnv->criticalSection[criticalID].isOccupied = TRUE; resume_slaveVP(requestingSlv, semEnv); } else { @@ -1185,7 +1238,9 
@@ SlaveVP *waitingSlv; int32 criticalID; DEBUG__printf1(dbgRqstHdlr, "CriticalEnd request from processor %d", - requestingSlv->slaveID) + requestingSlv->slaveID); + + replaceWithNewSlotSlvIfNeeded(requestingSlv, semEnv); semData = (VSsSemData *) semReq->callingSlv->semanticData; @@ -1202,22 +1257,22 @@ /* */ -void -handleMalloc(VSsSemReq *semReq, SlaveVP *requestingSlv, VSsSemEnv *semEnv) { +void handleMalloc(VSsSemReq *semReq, SlaveVP *requestingSlv, VSsSemEnv *semEnv) { void *ptr; - DEBUG__printf1(dbgRqstHdlr, "Malloc request from processor %d", requestingSlv->slaveID) + replaceWithNewSlotSlvIfNeeded(requestingSlv, semEnv); + + DEBUG__printf1(dbgRqstHdlr, "Malloc request from processor %d", requestingSlv->slaveID); ptr = VMS_PI__malloc(semReq->sizeToMalloc); requestingSlv->dataRetFromReq = ptr; resume_slaveVP(requestingSlv, semEnv); } - /* */ -void -handleFree(VSsSemReq *semReq, SlaveVP *requestingSlv, VSsSemEnv *semEnv) { - DEBUG__printf1(dbgRqstHdlr, "Free request from processor %d", requestingSlv->slaveID) +void handleFree(VSsSemReq *semReq, SlaveVP *requestingSlv, VSsSemEnv *semEnv) { + DEBUG__printf1(dbgRqstHdlr, "Free request from processor %d", requestingSlv->slaveID); + replaceWithNewSlotSlvIfNeeded(requestingSlv, semEnv); VMS_PI__free(semReq->ptrToFree); resume_slaveVP(requestingSlv, semEnv); } @@ -1232,6 +1287,7 @@ void handleStartSingleton_helper(VSsSingleton *singleton, SlaveVP *reqstingSlv, VSsSemEnv *semEnv) { + replaceWithNewSlotSlvIfNeeded(reqstingSlv, semEnv); if (singleton->hasFinished) { //the code that sets the flag to true first sets the end instr addr reqstingSlv->dataRetFromReq = singleton->endInstrAddr; #ifdef HOLISTIC__TURN_ON_OBSERVE_UCC @@ -1294,7 +1350,7 @@ // so if this is true, is an error ERROR("singleton code ran twice"); } - + replaceWithNewSlotSlvIfNeeded(requestingSlv, semEnv); singleton->hasFinished = TRUE; #ifdef HOLISTIC__TURN_ON_OBSERVE_UCC singleton->executingUnit.vp = requestingSlv->slaveID; @@ -1347,7 +1403,8 @@ */ void 
handleAtomic(VSsSemReq *semReq, SlaveVP *requestingSlv, VSsSemEnv *semEnv) { - DEBUG__printf1(dbgRqstHdlr, "Atomic request from processor %d", requestingSlv->slaveID) + DEBUG__printf1(dbgRqstHdlr, "Atomic request from processor %d", requestingSlv->slaveID); + replaceWithNewSlotSlvIfNeeded(requestingSlv, semEnv); semReq->fnToExecInMaster(semReq->dataForFn); resume_slaveVP(requestingSlv, semEnv); } @@ -1372,8 +1429,8 @@ VSsSemData *semData; TransListElem *nextTransElem; - DEBUG__printf1(dbgRqstHdlr, "TransStart request from processor %d", requestingSlv->slaveID) - + DEBUG__printf1(dbgRqstHdlr, "TransStart request from processor %d", requestingSlv->slaveID); + replaceWithNewSlotSlvIfNeeded(requestingSlv, semEnv); //check ordering of entering transactions is correct semData = requestingSlv->semanticData; if (semData->highestTransEntered > semReq->transID) { //throw VMS exception, which shuts down VMS. @@ -1420,7 +1477,7 @@ SlaveVP *waitingSlv; VSsTrans *transStruc; TransListElem *lastTrans; - + replaceWithNewSlotSlvIfNeeded(requestingSlv, semEnv); DEBUG__printf1(dbgRqstHdlr, "TransEnd request from processor %d", requestingSlv->slaveID) transStruc = &(semEnv->transactionStrucs[ semReq->transID ]);