VMS/VMS_Implementations/VSs_impls/VSs__MC_shared_impl

view VSs_PluginFns.c @ 5:8188c5b4bfd7

implemented taskwait
author Nina Engelhardt <nengel@mailbox.tu-berlin.de>
date Fri, 13 Jul 2012 17:35:49 +0200
parents 13af59ed7ea5
children 1780f6b00e3d
line source
1 /*
2 * Copyright 2010 OpenSourceCodeStewardshipFoundation
3 *
4 * Licensed under BSD
5 */
7 #include <stdio.h>
8 #include <stdlib.h>
10 #include "Queue_impl/PrivateQueue.h"
11 #include "VSs.h"
12 #include "VSs_Request_Handlers.h"
14 //=========================== Local Fn Prototypes ===========================
15 void
16 resume_slaveVP( SlaveVP *slave, VSsSemEnv *semEnv );
18 void
19 handleSemReq( VMSReqst *req, SlaveVP *requestingSlv, VSsSemEnv *semEnv );
21 void
22 handleDissipate( SlaveVP *requestingSlv, VSsSemEnv *semEnv );
24 void
25 handleCreate( VMSReqst *req, SlaveVP *requestingSlv, VSsSemEnv *semEnv );
27 //============================== Assigner ==================================
28 //
29 /*For VSs, assigning a slave simply takes the next work-unit off the
30 * ready-to-go work-unit queue and assigns it to the offered slot.
31 *If the ready-to-go work-unit queue is empty, then nothing to assign
32 * to the animation slot -- return FALSE to let Master loop know assigning
33 * that slot failed.
34 */
35 SlaveVP *
36 VSs__assign_slaveVP_to_slot( void *_semEnv, AnimSlot *slot )
37 { SlaveVP *assignSlv;
38 VSsSemEnv *semEnv;
39 VSsSemData *semData;
40 int32 coreNum, slotNum;
42 coreNum = slot->coreSlotIsOn;
43 slotNum = slot->slotIdx;
45 semEnv = (VSsSemEnv *)_semEnv;
47 /*At this point, could do an optimization -- have one slave for each slot
48 * and make it ALWAYS the one to assign to that slot -- so there is no
49 * read fromQ. However, going to keep this compatible with other
50 * languages, like VOMP and SSR. So, leave the normal slave fetch
51 * from readyQ. For example, allows SSR constructs, to create extra
52 * slaves, and send communications direction between them, while still
53 * having the StarSs-style spawning of tasks.. so one of the tasks
54 * can now suspend and do more interesting things.. means keep a pool
55 * of slaves, and take one from pool when a task suspends.
56 */
57 //TODO: fix false sharing in array
58 assignSlv = readPrivQ( semEnv->readyVPQs[coreNum] );
59 if( assignSlv == NULL )
60 { //make a new slave to animate
61 //This happens for the first task on the core and when all available
62 // slaves are blocked by constructs like send, or mutex, and so on..
63 assignSlv = VSs__create_slave_helper( NULL, NULL, semEnv, coreNum );
64 }
65 semData = (VSsSemData *)assignSlv->semanticData;
66 //slave could be resuming a task in progress, check for this
67 if( semData->needsTaskAssigned )
68 { //no, not resuming, needs a task..
69 VSsTaskStub *newTaskStub;
70 SlaveVP *extraSlv;
71 newTaskStub = readPrivQ( semEnv->taskReadyQ );
72 if( newTaskStub == NULL )
73 { //No task, so slave unused, so put it back and return "no-slave"
74 //But first check if have extra free slaves
75 extraSlv = readPrivQ( semEnv->readyVPQs[coreNum] );
76 if( extraSlv == NULL )
77 { //means no tasks and no slave on this core can generate more
78 //TODO: false sharing
79 if( semEnv->coreIsDone[coreNum] == FALSE)
80 { semEnv->numCoresDone += 1;
81 semEnv->coreIsDone[coreNum] = TRUE;
82 #ifdef DEBUG__TURN_ON_SEQUENTIAL_MODE
83 semEnv->shutdownInitiated = TRUE;
84 #else
85 if( semEnv->numCoresDone == NUM_CORES )
86 { //means no cores have work, and none can generate more
87 semEnv->shutdownInitiated = TRUE;
88 }
89 #endif
90 }
91 //put slave back into Q and return NULL
92 writePrivQ( assignSlv, semEnv->readyVPQs[coreNum] );
93 assignSlv = NULL;
94 //except if shutdown has been initiated by this or other core
95 if(semEnv->shutdownInitiated)
96 { assignSlv = VMS_SS__create_shutdown_slave();
97 }
98 }
99 else //extra slave exists, but no tasks for either slave
100 { if(((VSsSemData *)extraSlv->semanticData)->needsTaskAssigned == TRUE)
101 { //means have two slaves need tasks -- redundant, kill one
102 handleDissipate( extraSlv, semEnv );
103 //then put other back into Q and return NULL
104 writePrivQ( assignSlv, semEnv->readyVPQs[coreNum] );
105 assignSlv = NULL;
106 }
107 else
108 { //extra slave has work -- so take it instead
109 writePrivQ( assignSlv, semEnv->readyVPQs[coreNum] );
110 assignSlv = extraSlv;
111 //semData = (VSsSemData *)assignSlv->semanticData; Don't use
112 }
113 }
114 }
115 else //have a new task for the slave.
116 { //point slave to task's function, and mark slave as having task
117 VMS_int__reset_slaveVP_to_TopLvlFn( assignSlv,
118 newTaskStub->taskType->fn, newTaskStub->args );
119 semData->taskStub = newTaskStub;
120 newTaskStub->slaveAssignedTo = assignSlv;
121 semData->needsTaskAssigned = FALSE;
122 }
123 } //outcome: 1)slave didn't need a new task 2)slave just pointed at one
124 // 3)no tasks, so slave NULL
126 #ifdef HOLISTIC__TURN_ON_OBSERVE_UCC
127 if( assignSlv == NULL )
128 { assignSlv = semEnv->idleSlv[coreNum][slotNum];
129 if(semEnv->shutdownInitiated)
130 { assignSlv = VMS_SS__create_shutdown_slave();
131 }
132 //things that would normally happen in resume(), but these VPs
133 // never go there
134 assignSlv->assignCount++; //Somewhere here!
135 Unit newu;
136 newu.vp = assignSlv->slaveID;
137 newu.task = assignSlv->assignCount;
138 addToListOfArrays(Unit,newu,semEnv->unitList);
140 if (assignSlv->assignCount > 1)
141 { Dependency newd;
142 newd.from_vp = assignSlv->slaveID;
143 newd.from_task = assignSlv->assignCount - 1;
144 newd.to_vp = assignSlv->slaveID;
145 newd.to_task = assignSlv->assignCount;
146 addToListOfArrays(Dependency, newd ,semEnv->ctlDependenciesList);
147 }
148 }
149 #endif
150 #ifdef HOLISTIC__TURN_ON_OBSERVE_UCC
151 if( assignSlv != NULL )
152 { //assignSlv->numTimesAssigned++;
153 Unit prev_in_slot =
154 semEnv->last_in_slot[coreNum * NUM_ANIM_SLOTS + slotNum];
155 if(prev_in_slot.vp != 0)
156 { Dependency newd;
157 newd.from_vp = prev_in_slot.vp;
158 newd.from_task = prev_in_slot.task;
159 newd.to_vp = assignSlv->slaveID;
160 newd.to_task = assignSlv->assignCount;
161 addToListOfArrays(Dependency,newd,semEnv->hwArcs);
162 }
163 prev_in_slot.vp = assignSlv->slaveID;
164 prev_in_slot.task = assignSlv->assignCount;
165 semEnv->last_in_slot[coreNum * NUM_ANIM_SLOTS + slotNum] =
166 prev_in_slot;
167 }
168 #endif
169 return( assignSlv );
170 }
173 //=========================== Request Handler ============================
174 //
175 /*
176 */
177 void
178 VSs__Request_Handler( SlaveVP *requestingSlv, void *_semEnv )
179 { VSsSemEnv *semEnv;
180 VMSReqst *req;
182 semEnv = (VSsSemEnv *)_semEnv;
184 req = VMS_PI__take_next_request_out_of( requestingSlv );
186 while( req != NULL )
187 {
188 switch( req->reqType )
189 { case semantic: handleSemReq( req, requestingSlv, semEnv);
190 break;
191 case createReq: handleCreate( req, requestingSlv, semEnv);
192 break;
193 case dissipate: handleDissipate( requestingSlv, semEnv);
194 break;
195 case VMSSemantic: VMS_PI__handle_VMSSemReq(req, requestingSlv, semEnv,
196 (ResumeSlvFnPtr) &resume_slaveVP);
197 break;
198 default:
199 break;
200 }
202 req = VMS_PI__take_next_request_out_of( requestingSlv );
203 } //while( req != NULL )
205 }
208 void
209 handleSemReq( VMSReqst *req, SlaveVP *reqSlv, VSsSemEnv *semEnv )
210 { VSsSemReq *semReq;
212 semReq = VMS_PI__take_sem_reqst_from(req);
213 if( semReq == NULL ) return;
214 switch( semReq->reqType ) //sem handlers are all in other file
215 {
216 case submit_task: handleSubmitTask( semReq, semEnv);
217 break;
218 case end_task: handleEndTask( semReq, semEnv);
219 break;
220 case send_type_to: handleSendTypeTo( semReq, semEnv);
221 break;
222 case send_from_to: handleSendFromTo( semReq, semEnv);
223 break;
224 case receive_type_to: handleReceiveTypeTo(semReq, semEnv);
225 break;
226 case receive_from_to: handleReceiveFromTo(semReq, semEnv);
227 break;
229 //====================================================================
230 case taskwait: handleTaskwait(semReq, reqSlv, semEnv);
231 break;
232 case malloc_req: handleMalloc( semReq, reqSlv, semEnv);
233 break;
234 case free_req: handleFree( semReq, reqSlv, semEnv);
235 break;
236 case singleton_fn_start: handleStartFnSingleton(semReq, reqSlv, semEnv);
237 break;
238 case singleton_fn_end: handleEndFnSingleton( semReq, reqSlv, semEnv);
239 break;
240 case singleton_data_start:handleStartDataSingleton(semReq,reqSlv,semEnv);
241 break;
242 case singleton_data_end: handleEndDataSingleton(semReq, reqSlv, semEnv);
243 break;
244 case atomic: handleAtomic( semReq, reqSlv, semEnv);
245 break;
246 case trans_start: handleTransStart( semReq, reqSlv, semEnv);
247 break;
248 case trans_end: handleTransEnd( semReq, reqSlv, semEnv);
249 break;
250 }
251 }
//=========================== VMS Request Handlers ==============================
/*SlaveVP dissipate (NOT task-end!)
 *Releases the slave's VSs semantic data, then asks VMS to reclaim all of
 * the slave's remaining state (stack and so on).  After this call the
 * SlaveVP pointer must not be used again.
 */
void
handleDissipate( SlaveVP *requestingSlv, VSsSemEnv *semEnv )
 {
    DEBUG__printf1(dbgRqstHdlr,"Dissipate request from processor %d",requestingSlv->slaveID)
    //free any semantic data allocated to the virt procr
    //NOTE(review): semData->threadInfo (malloc'd in VSs__create_slave_helper)
    // is not freed here -- looks like a leak, unless ownership transfers to
    // another structure (e.g. a task stub); verify against the other
    // handlers before changing.
    VMS_PI__free( requestingSlv->semanticData );

    //Now, call VMS to free_all AppVP state -- stack and so on
    VMS_PI__dissipate_slaveVP( requestingSlv );
 }
269 /*Re-use this in the entry-point fn
270 */
271 SlaveVP *
272 VSs__create_slave_helper( TopLevelFnPtr fnPtr, void *initData,
273 VSsSemEnv *semEnv, int32 coreToAssignOnto )
274 { SlaveVP *newSlv;
275 VSsSemData *semData;
277 //This is running in master, so use internal version
278 newSlv = VMS_PI__create_slaveVP( fnPtr, initData );
280 semEnv->numSlaveVP += 1;
282 semData = VMS_PI__malloc( sizeof(VSsSemData) );
283 semData->highestTransEntered = -1;
284 semData->lastTransEntered = NULL;
285 semData->needsTaskAssigned = TRUE;
287 semData->threadInfo = VMS_PI__malloc( sizeof(VSsThreadInfo) );
288 semData->threadInfo->isWaiting = FALSE;
289 semData->threadInfo->numChildTasks = 0;
290 semData->threadInfo->parent = NULL;
291 semData->threadInfo->parentIsTask = FALSE;
292 semData->threadInfo->slaveAssignedTo = newSlv;
294 newSlv->semanticData = semData;
296 //=================== Assign new processor to a core =====================
297 #ifdef DEBUG__TURN_ON_SEQUENTIAL_MODE
298 newSlv->coreAnimatedBy = 0;
300 #else
302 if(coreToAssignOnto < 0 || coreToAssignOnto >= NUM_CORES )
303 { //out-of-range, so round-robin assignment
304 newSlv->coreAnimatedBy = semEnv->nextCoreToGetNewSlv;
306 if( semEnv->nextCoreToGetNewSlv >= NUM_CORES - 1 )
307 semEnv->nextCoreToGetNewSlv = 0;
308 else
309 semEnv->nextCoreToGetNewSlv += 1;
310 }
311 else //core num in-range, so use it
312 { newSlv->coreAnimatedBy = coreToAssignOnto;
313 }
314 #endif
315 //========================================================================
317 return newSlv;
318 }
320 /*SlaveVP create (NOT task create!)
321 */
322 void
323 handleCreate( VMSReqst *req, SlaveVP *requestingSlv, VSsSemEnv *semEnv )
324 { VSsSemReq *semReq;
325 SlaveVP *newSlv;
328 semReq = VMS_PI__take_sem_reqst_from( req );
330 newSlv = VSs__create_slave_helper( semReq->fnPtr, semReq->initData, semEnv,
331 semReq->coreToAssignOnto );
333 ((VSsSemData*)newSlv->semanticData)->threadInfo->parent = requestingSlv;
335 DEBUG__printf2(dbgRqstHdlr,"Create from: %d, new VP: %d", requestingSlv->slaveID, newSlv->slaveID)
337 #ifdef HOLISTIC__TURN_ON_OBSERVE_UCC
338 Dependency newd;
339 newd.from_vp = requestingSlv->slaveID;
340 newd.from_task = requestingSlv->assignCount;
341 newd.to_vp = newSlv->slaveID;
342 newd.to_task = 1;
343 //addToListOfArraysDependency(newd,semEnv->commDependenciesList);
344 addToListOfArrays(Dependency,newd,semEnv->commDependenciesList);
345 #endif
347 //For VSs, caller needs ptr to created processor returned to it
348 requestingSlv->dataRetFromReq = newSlv;
350 resume_slaveVP( requestingSlv, semEnv );
351 resume_slaveVP( newSlv, semEnv );
352 }
355 //=========================== Helper ==============================
356 void
357 resume_slaveVP( SlaveVP *slave, VSsSemEnv *semEnv )
358 {
359 #ifdef HOLISTIC__TURN_ON_PERF_COUNTERS
360 /*
361 int lastRecordIdx = slave->counter_history_array_info->numInArray -1;
362 CounterRecord* lastRecord = slave->counter_history[lastRecordIdx];
363 saveLowTimeStampCountInto(lastRecord->unblocked_timestamp);
364 */
365 #endif
366 #ifdef HOLISTIC__TURN_ON_OBSERVE_UCC
367 slave->assignCount++; //Somewhere here!
368 Unit newu;
369 newu.vp = slave->slaveID;
370 newu.task = slave->assignCount;
371 addToListOfArrays(Unit,newu,semEnv->unitList);
373 if (slave->assignCount > 1){
374 Dependency newd;
375 newd.from_vp = slave->slaveID;
376 newd.from_task = slave->assignCount - 1;
377 newd.to_vp = slave->slaveID;
378 newd.to_task = slave->assignCount;
379 addToListOfArrays(Dependency, newd ,semEnv->ctlDependenciesList);
380 }
381 #endif
382 writePrivQ( slave, semEnv->readyVPQs[ slave->coreAnimatedBy] );
383 }