VMS/VMS_Implementations/Vthread_impls/Vthread_MC_shared_impl

view Vthread__PluginFns.c @ 29:b94dc57e4455

refactored many files -- chgd names, moved code around -- doesn't compile
author Some Random Person <seanhalle@yahoo.com>
date Wed, 09 May 2012 13:24:19 -0700
/*
 * Copyright 2010 OpenSourceCodeStewardshipFoundation
 *
 * Licensed under BSD
 */

#include <stdio.h>
#include <stdlib.h>
#include <malloc.h>

#include "C_Libraries/Queue_impl/PrivateQueue.h"
#include "Vthread.h"
#include "Vthread_Request_Handlers.h"
#include "Vthread_helper.h"

//=========================== Local Fn Prototypes ===========================

inline void
handleSemReq( VMSReqst *req, SlaveVP *requestingSlv, VthdSemEnv *semEnv );

inline void
handleDissipate( SlaveVP *requestingSlv, VthdSemEnv *semEnv );

inline void
handleCreate( VMSReqst *req, SlaveVP *requestingSlv, VthdSemEnv *semEnv );

//resume_slaveVP is defined in the Helper section at the bottom of this file,
// but the request handlers above it use it, so declare it here
inline void
resume_slaveVP( SlaveVP *procr, VthdSemEnv *semEnv );


//============================== Scheduler ==================================
//
/*For Vthread, scheduling simply takes the next ready slave VP off the
 * ready-to-go queue belonging to the requested core and returns it.
 *If that queue is empty, there is nothing to schedule onto the core --
 * return NULL to let the Master loop know that scheduling this core failed.
 */
char __Scheduler[] = "FIFO Scheduler"; //Global variable for name in saved histogram

SlaveVP *
Vthread__schedule_slaveVP( void *_semEnv, int coreNum )
 { SlaveVP    *schedSlv;
   VthdSemEnv *semEnv;

   semEnv   = (VthdSemEnv *)_semEnv;

   schedSlv = readPrivQ( semEnv->readySlvQs[coreNum] );
    //Note: using a non-blocking queue -- it returns NULL if the queue is empty

   return( schedSlv );
 }

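/*Illustrative sketch only -- not part of the original file.  It shows the
 * round trip a slave makes through this plugin: resume_slaveVP() (defined in
 * the Helper section below) puts the slave onto its core's ready queue, and
 * the Master later pulls it back out by calling Vthread__schedule_slaveVP()
 * for that core.  The function name is made up for the illustration.
 */
void
sketch_resume_then_schedule( VthdSemEnv *semEnv, SlaveVP *slv )
 { SlaveVP *next;

   resume_slaveVP( slv, semEnv ); //enqueues onto readySlvQs[slv->coreAnimatedBy]

    //later, the Master asks this plugin for the next slave ready on that core
   next = Vthread__schedule_slaveVP( semEnv, slv->coreAnimatedBy );
    //next == slv if that core's ready queue was otherwise empty (FIFO order)
 }
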
//=========================== Request Handler =============================
//
/*Requests come in four kinds: semantic requests (the mutex, condition
 * variable, singleton, atomic, transaction, and malloc/free operations
 * dispatched in handleSemReq below), create requests, dissipate requests,
 * and VMS-level requests, which are passed through to VMS itself.
 *Each handler either resumes the requesting slave right away, by putting it
 * back onto a ready-to-go queue, or leaves it suspended until a later
 * request (such as a mutex unlock or a condition signal) resumes it.
 */
void
Vthread__Request_Handler( SlaveVP *requestingSlv, void *_semEnv )
 { VthdSemEnv *semEnv;
   VMSReqst   *req;

   semEnv = (VthdSemEnv *)_semEnv;

   req = VMS_PI__take_next_request_out_of( requestingSlv );

   while( req != NULL )
    {
      switch( req->reqType )
       { case semantic:    handleSemReq(  req, requestingSlv, semEnv);
          break;
         case createReq:   handleCreate(  req, requestingSlv, semEnv);
          break;
         case dissipate:   handleDissipate(    requestingSlv, semEnv);
          break;
         case VMSSemantic: VMS_PI__handle_VMSSemReq( req, requestingSlv, semEnv,
                                               (ResumeSlvFnPtr)&resume_slaveVP);
          break;
         default:
          break;
       }

      req = VMS_PI__take_next_request_out_of( requestingSlv );
    } //while( req != NULL )
 }

inline void
handleSemReq( VMSReqst *req, SlaveVP *reqSlv, VthdSemEnv *semEnv )
 { VthdSemReq *semReq;

   semReq = VMS_PI__take_sem_reqst_from( req );
   if( semReq == NULL ) return;
   switch( semReq->reqType )
    {
      case make_mutex:           handleMakeMutex(   semReq,         semEnv);
       break;
      case mutex_lock:           handleMutexLock(   semReq,         semEnv);
       break;
      case mutex_unlock:         handleMutexUnlock( semReq,         semEnv);
       break;
      case make_cond:            handleMakeCond(    semReq,         semEnv);
       break;
      case cond_wait:            handleCondWait(    semReq,         semEnv);
       break;
      case cond_signal:          handleCondSignal(  semReq,         semEnv);
       break;
      case malloc_req:           handleMalloc(      semReq, reqSlv, semEnv);
       break;
      case free_req:             handleFree(        semReq, reqSlv, semEnv);
       break;
      case singleton_fn_start:   handleStartFnSingleton(   semReq, reqSlv, semEnv);
       break;
      case singleton_fn_end:     handleEndFnSingleton(     semReq, reqSlv, semEnv);
       break;
      case singleton_data_start: handleStartDataSingleton( semReq, reqSlv, semEnv);
       break;
      case singleton_data_end:   handleEndDataSingleton(   semReq, reqSlv, semEnv);
       break;
      case atomic:               handleAtomic(      semReq, reqSlv, semEnv);
       break;
      case trans_start:          handleTransStart(  semReq, reqSlv, semEnv);
       break;
      case trans_end:            handleTransEnd(    semReq, reqSlv, semEnv);
       break;
    }
 }

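/*Illustrative sketch only -- not part of the original file.  The real
 * semantic handlers named in the switch above live in
 * Vthread_Request_Handlers.c and take (semReq, semEnv); the struct and field
 * names used here (SketchMutex, isLocked, waitingSlvQ) and passing the
 * requesting slave directly are assumptions made for the illustration.  What
 * the sketch shows is the common shape of a handler: a request that can be
 * satisfied resumes the requester immediately, while one that cannot leaves
 * the requester parked in a private wait queue until a matching request
 * resumes it.
 */
typedef struct
 { int   isLocked;
   void *waitingSlvQ;  //a private queue made with the PrivateQueue facility
 }
SketchMutex;

inline void
sketch_handleMutexLock( SlaveVP *reqSlv, SketchMutex *mutex, VthdSemEnv *semEnv )
 {
   if( !mutex->isLocked )
    { mutex->isLocked = 1;
      resume_slaveVP( reqSlv, semEnv );          //lock acquired, keep running
    }
   else
      writePrivQ( reqSlv, mutex->waitingSlvQ );  //stay suspended until unlock
 }

inline void
sketch_handleMutexUnlock( SketchMutex *mutex, VthdSemEnv *semEnv )
 { SlaveVP *waiter;

   waiter = readPrivQ( mutex->waitingSlvQ );
   if( waiter != NULL )
      resume_slaveVP( waiter, semEnv );          //hand the lock to the waiter
   else
      mutex->isLocked = 0;
 }
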
//=========================== VMS Request Handlers ===========================
//
inline void
handleDissipate( SlaveVP *requestingSlv, VthdSemEnv *semEnv )
 {
    //free any semantic data allocated to the virt procr
   VMS_PI__free( requestingSlv->semanticData );

    //Now, call VMS to free_all AppSlv state -- stack and so on
   VMS_PI__dissipate_slaveVP( requestingSlv );
 }

inline void
handleCreate( VMSReqst *req, SlaveVP *requestingSlv, VthdSemEnv *semEnv )
 { VthdSemReq *semReq;
   SlaveVP    *newSlv;

      //========================= MEASUREMENT STUFF ======================
      Meas_startCreate
      //==================================================================

   semReq = VMS_PI__take_sem_reqst_from( req );

   newSlv = Vthread__create_slaveVP_helper( semReq->fnPtr, semReq->initData,
                                            semEnv, semReq->coreToScheduleOnto);

    //For Vthread, caller needs ptr to created processor returned to it
   requestingSlv->dataRetFromReq = newSlv;

   resume_slaveVP( newSlv,        semEnv );
   resume_slaveVP( requestingSlv, semEnv );

      //========================= MEASUREMENT STUFF ======================
      Meas_endCreate
      #ifdef MEAS__TIME_PLUGIN
      #ifdef MEAS__SUB_CREATE
       subIntervalFromHist( startStamp, endStamp,
                            _VMSMasterEnv->reqHdlrHighTimeHist );
      #endif
      #endif
      //==================================================================
 }

//=========================== Helper ==============================
inline void
resume_slaveVP( SlaveVP *procr, VthdSemEnv *semEnv )
 {
   writePrivQ( procr, semEnv->readySlvQs[ procr->coreAnimatedBy ] );
 }

//===========================================================================