changeset 60:3c9ed64db705 Holistic_Model

Changed branch name to Holistic_Model (from perf_ctrs), and updated for compatibility with the common_ancestor branch
author Some Random Person <seanhalle@yahoo.com>
date Fri, 09 Mar 2012 22:28:08 -0800
parents 471c89d1d545
children 33b2eb9af81a
files .hgeol DESIGN_NOTES.txt SSR.h SSR_Counter_Recording.c SSR_Counter_Recording.h SSR_PluginFns.c SSR_Request_Handlers.c SSR_Request_Handlers.h SSR_lib.c __brch__Holistic_Model dependency.c dependency.h
diffstat 12 files changed, 511 insertions(+), 471 deletions(-) [+]
line diff
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/.hgeol	Fri Mar 09 22:28:08 2012 -0800
     1.3 @@ -0,0 +1,12 @@
     1.4 +
     1.5 +[patterns]
     1.6 +**.py = native
     1.7 +**.txt = native
     1.8 +**.c = native
     1.9 +**.h = native
    1.10 +**.cpp = native
    1.11 +**.java = native
    1.12 +**.sh = native
    1.13 +**.pl = native
    1.14 +**.jpg = bin
    1.15 +**.gif = bin
     2.1 --- a/DESIGN_NOTES.txt	Fri Mar 09 19:01:21 2012 +0100
     2.2 +++ b/DESIGN_NOTES.txt	Fri Mar 09 22:28:08 2012 -0800
     2.3 @@ -1,212 +1,212 @@
     2.4 -
     2.5 
     2.6 -From e-mail to Albert, on design of app-virt-procr to core-loop animation
     2.7 
     2.8 -switch and back.
     2.9 
    2.10 -
    2.11 
    2.12 -====================
    2.13 
    2.14 -General warnings about this code:
    2.15 
    2.16 -It only compiles in GCC 4.x  (label addr and computed goto)
    2.17 
    2.18 -Has assembly for x86  32bit
    2.19 
    2.20 -
    2.21 
    2.22 -
    2.23 
    2.24 -====================
    2.25 
    2.26 -AVProcr data-struc has: stack-ptr, jump-ptr, data-ptr, slotNum, coreloop-ptr
    2.27 
    2.28 - and semantic-custom-ptr
    2.29 
    2.30 -
    2.31 
    2.32 -The VMS Creator: takes ptr to function and ptr to initial data
    2.33 
    2.34 --- creates a new AVProcr struc
    2.35 
    2.36 --- sets the jmp-ptr field to the ptr-to-function passed in
    2.37 
    2.38 --- sets the data-ptr to ptr to initial data passed in
    2.39 
    2.40 --- if this is for a suspendable virt  processor, then create a stack and set
    2.41 
    2.42 -   the stack-ptr
    2.43 
    2.44 -
    2.45 
    2.46 -VMS__create_procr( AVProcrFnPtr fnPtr, void *initialData )
    2.47 
    2.48 -{
    2.49 
    2.50 -AVProcr   newPr = malloc( sizeof(AVProcr) );
    2.51 
    2.52 -newPr->jmpPtr = fnPtr;
    2.53 
    2.54 -newPr->coreLoopDonePt = &CoreLoopDonePt; //label is in coreLoop
    2.55 
    2.56 -newPr->data = initialData;
    2.57 
    2.58 -newPr->stackPtr = createNewStack();
    2.59 
    2.60 -return newPr;
    2.61 
    2.62 -}
    2.63 
    2.64 -
    2.65 
    2.66 -The semantic layer can then add its own state in the cusom-ptr field
    2.67 
    2.68 -
    2.69 
    2.70 -The Scheduler plug-in:
    2.71 
    2.72 --- Sets slave-ptr in AVProcr, and points the slave to AVProcr
    2.73 
    2.74 --- if non-suspendable, sets the AVProcr's stack-ptr to the slave's stack-ptr
    2.75 
    2.76 -
    2.77 
    2.78 -MasterLoop:
    2.79 
    2.80 --- puts AVProcr structures onto the workQ
    2.81 
    2.82 -
    2.83 
    2.84 -CoreLoop:
    2.85 
    2.86 --- gets stack-ptr out of AVProcr and sets the core's stack-ptr to that
    2.87 
    2.88 --- gets data-ptr out of AVProcr and puts it into reg GCC uses for that param
    2.89 
    2.90 --- puts AVProcr's addr into reg GCC uses for the AVProcr-pointer param
    2.91 
    2.92 --- jumps to the addr in AVProcr's jmp-ptr field
    2.93 
    2.94 -CoreLoop()
    2.95 
    2.96 -{ while( FOREVER )
    2.97 
    2.98 - { nextPr = readQ( workQ );  //workQ is static (global) var declared volatile
    2.99 
   2.100 -   <dataPtr-param-register>       = nextPr->data;
   2.101 
   2.102 -   <AVProcrPtr-param-register> = nextPr;
   2.103 
   2.104 -   <stack-pointer register>          = nextPr->stackPtr;
   2.105 
   2.106 -   jmp nextPr->jmpPtr;
   2.107 
   2.108 -CoreLoopDonePt:   //label's addr put into AVProcr when create new one
   2.109 
   2.110 - }
   2.111 
   2.112 -}
   2.113 
   2.114 -(Note, for suspendable processors coming back from suspension, there is no
   2.115 
   2.116 - need to fill the parameter registers -- they will be discarded)
   2.117 
   2.118 -
   2.119 
   2.120 -Suspend an application-level virtual processor:
   2.121 
   2.122 -VMS__AVPSuspend( AVProcr *pr )
   2.123 
   2.124 -{
   2.125 
   2.126 -pr->jmpPtr = &ResumePt;  //label defined a few lines below
   2.127 
   2.128 -pr->slave->doneFlag = TRUE;
   2.129 
   2.130 -pr->stackPtr = <current SP reg value>;
   2.131 
   2.132 -jmp pr->coreLoopDonePt;
   2.133 
   2.134 -ResumePt: return;
   2.135 
   2.136 -}
   2.137 
   2.138 -
   2.139 
   2.140 -This works because the core loop will have switched back to this stack
   2.141 
   2.142 - before jumping to ResumePt..    also, the core loop never modifies the
   2.143 
   2.144 - stack pointer, it simply switches to whatever stack pointer is in the
   2.145 
   2.146 - next AVProcr it gets off the workQ.
   2.147 
   2.148 -
   2.149 
   2.150 -
   2.151 
   2.152 -
   2.153 
   2.154 -=============================================================================
   2.155 
   2.156 -As it is now, there's only one major unknown about GCC (first thing below
   2.157 
   2.158 -  the line),  and there are a few restrictions, the most intrusive being
   2.159 
   2.160 -  that the functions the application gives to the semantic layer have a
   2.161 
   2.162 -  pre-defined prototype -- return nothing, take a pointer to initial data
   2.163 
   2.164 -  and a pointer to an AVProcr struc, which they're not allowed to modify
   2.165 
   2.166 -  -- only pass it to semantic-lib calls.
   2.167 
   2.168 -
   2.169 
   2.170 -So, here are the assumptions, restrictions, and so forth:
   2.171 
   2.172 -===========================
   2.173 
   2.174 -Major assumption:  that GCC will do the following the same way every time:
   2.175 
   2.176 -  say the application defines a function that fits this typedef:
   2.177 
   2.178 -typedef void (*AVProcrFnPtr)  ( void *, AVProcr * );
   2.179 
   2.180 -
   2.181 
   2.182 -and let's say somewhere in the code they do this:
   2.183 
   2.184 -AVProcrFnPtr   fnPtr = &someFunc;
   2.185 
   2.186 -
   2.187 
   2.188 -then they do this:
   2.189 
   2.190 -(*fnPtr)( dataPtr, animatingVirtProcrPtr );
   2.191 
   2.192 -
   2.193 
   2.194 -Can the registers that GCC uses to pass the two pointers be predicted?
   2.195 
   2.196 - Will they always be the same registers, in every program that has the
   2.197 
   2.198 - same typedef?
   2.199 
   2.200 -If that typedef fixes, guaranteed, the registers (on x86) that GCC will use
   2.201 
   2.202 - to send the two pointers, then the rest of this solution works.
   2.203 
   2.204 -
   2.205 
   2.206 -Change in model: Instead of a virtual processor whose execution trace is
   2.207 
   2.208 - divided into work-units, replacing that with the pattern that a virtual
   2.209 
   2.210 - processor is suspended.  Which means, no more "work unit" data structure
   2.211 
   2.212 - -- instead, it's now an "Application Virtual Processor" structure
   2.213 
   2.214 - -- AVProcr -- which is given directly to the application function!
   2.215 
   2.216 -
   2.217 
   2.218 -   -- You were right, don't need slaves to be virtual processors, only need
   2.219 
   2.220 -      "scheduling buckets" -- just a way to keep track of things..
   2.221 
   2.222 -
   2.223 
   2.224 -Restrictions:
   2.225 
   2.226 --- the  "virtual entities"  created by the semantic layer must be virtual
   2.227 
   2.228 -   processors, created with a function-to-execute and initial data -- the
   2.229 
   2.230 -   function is restricted to return nothing and only take a pointer to the
   2.231 
   2.232 -   initial data plus a pointer to an AVProcr structure, which represents
   2.233 
   2.234 -   "self", the virtual processor created.  (This is the interface I showed
   2.235 
   2.236 -   you for "Hello World" semantic layer).
   2.237 
   2.238 -What this means for synchronous dataflow, is that the nodes in the graph
   2.239 
   2.240 -  are virtual processors that in turn spawn a new virtual processor for
   2.241 
   2.242 -  every "firing" of the node.  This should be fine because the function
   2.243 
   2.244 -  that the node itself is created with is a "canned" function that is part
   2.245 
   2.246 -  of the semantic layer -- the function that is spawned is the user-provided
   2.247 
   2.248 -  function.  The restriction only means that the values from the inputs to
   2.249 
   2.250 -  the node are packaged as the "initial data" given to the spawned virtual
   2.251 
   2.252 -  processor -- so the user-function has to cast a void * to the
   2.253 
   2.254 -  semantic-layer-defined structure by which it gets the inputs to the node.
   2.255 
   2.256 -
   2.257 
   2.258 --- Second restriction is that the semantic layer has to use VMS supplied
   2.259 
   2.260 -   stuff -- for example, the data structure that represents the
   2.261 
   2.262 -   application-level virtual processor is defined in VMS, and the semantic
   2.263 
   2.264 -   layer has to call a VMS function in order to suspend a virtual processor.
   2.265 
   2.266 -
   2.267 
   2.268 --- Third restriction is that the application code never do anything with
   2.269 
   2.270 -   the AVProcr structure except pass it to semantic-layer lib calls.
   2.271 
   2.272 -
   2.273 
   2.274 --- Fourth restriction is that every virtual processor must call a
   2.275 
   2.276 -   "dissipate" function as its last act -- the user-supplied
   2.277 
   2.278 -   virtual-processor function can't just end -- it has to call
   2.279 
   2.280 -   SemLib__dissipate( AVProcr ) before the closing brace.. and after the
   2.281 
   2.282 -   semantic layer is done cleaning up its own data, it has to in turn call
   2.283 
   2.284 -   VMS__disspate( AVProcr ).
   2.285 
   2.286 -
   2.287 
   2.288 --- For performance reasons, I think I want to have two different kinds of
   2.289 
   2.290 -   app-virtual processor -- suspendable ones and non-suspendable -- where
   2.291 
   2.292 -   non-suspendable are not allowed to perform any communication with other
   2.293 
   2.294 -   virtual processors, except at birth and death.  Suspendable ones, of
   2.295 
   2.296 -   course can perform communications, create other processors, and so forth
   2.297 
   2.298 -   -- all of which cause it to suspend.
   2.299 
   2.300 -The performance difference is that I need a separate stack for each
   2.301 
   2.302 -  suspendable, but non-suspendable can re-use a fixed number of stacks
   2.303 
   2.304 -  (one for each slave).
   2.305 
   2.306 -
   2.307 
   2.308 -
   2.309 
   2.310 -==================== May 29
   2.311 
   2.312 -
   2.313 
   2.314 -Qs:
   2.315 
   2.316 ---1 how to safely jump between virt processor's trace and coreloop
   2.317 
   2.318 ---2 how to set up __cdecl style stack + frame for just-born virtual processor
   2.319 
   2.320 ---3 how to switch stack-pointers + frame-pointers
   2.321 
   2.322 -
   2.323 
   2.324 -
   2.325 
   2.326 ---1:
   2.327 
   2.328 -Not sure if GCC's computed goto is safe, because modify the stack pointer
   2.329 
   2.330 -without GCC's knowledge -- although, don't use the stack in the coreloop
   2.331 
   2.332 -segment, so, actually, that should be safe!
   2.333 
   2.334 -
   2.335 
   2.336 -So, GCC has its own special C extensions, one of which gets address of label:
   2.337 
   2.338 -
   2.339 
   2.340 -void *labelAddr;
   2.341 
   2.342 -labelAddr = &&label;
   2.343 
   2.344 -goto *labelAddr;
   2.345 
   2.346 -
   2.347 
   2.348 ---2
   2.349 
   2.350 -In CoreLoop, will check whether VirtProc just born, or was suspended.
   2.351 
   2.352 -If just born, do bit of code that sets up the virtual processor's stack
   2.353 
   2.354 -and frame according to the __cdecl convention for the standard virt proc
   2.355 
   2.356 -fn typedef -- save the pointer to data and pointer to virt proc struc into
   2.357 
   2.358 -correct places in the frame
   2.359 
   2.360 -   __cdecl says, according to:
   2.361 
   2.362 -http://unixwiz.net/techtips/win32-callconv-asm.html
   2.363 
   2.364 -To do this:
   2.365 
   2.366 -push the parameters onto the stack, right most first, working backwards to
   2.367 
   2.368 - the left.
   2.369 
   2.370 -Then perform call instr, which pushes return addr onto stack.
   2.371 
   2.372 -Then callee first pushes the frame pointer, %EBP followed by placing the
   2.373 
   2.374 -then-current value of stack pointer into %EBP
   2.375 
   2.376 -push ebp
   2.377 
   2.378 -mov  ebp, esp    // ebp « esp
   2.379 
   2.380 -
   2.381 
   2.382 -Once %ebp has been changed, it can now refer directly to the function's
   2.383 
   2.384 - arguments as 8(%ebp), 12(%ebp). Note that 0(%ebp) is the old base pointer
   2.385 
   2.386 - and 4(%ebp) is the old instruction pointer.
   2.387 
   2.388 -
   2.389 
   2.390 -Then callee pushes regs it will use then adds to stack pointer the size of
   2.391 
   2.392 - its local vars.
   2.393 
   2.394 -
   2.395 
   2.396 -Stack in callee looks like this:
   2.397 
   2.398 -16(%ebp)	 - third function parameter
   2.399 
   2.400 -12(%ebp)	 - second function parameter
   2.401 
   2.402 -8(%ebp)	 - first function parameter
   2.403 
   2.404 -4(%ebp)	 - old %EIP (the function's "return address")
   2.405 
   2.406 -----------^^ State seen at first instr of callee ^^-----------
   2.407 
   2.408 -0(%ebp)	- old %EBP (previous function's base pointer)
   2.409 
   2.410 --4(%ebp)	 - save of EAX, the only reg used in function
   2.411 
   2.412 --8(%ebp)	 - first local variable
   2.413 
   2.414 --12(%ebp)	 - second local variable
   2.415 
   2.416 --16(%ebp)	 - third local variable
   2.417 
   2.418 -
   2.419 
   2.420 -
   2.421 
   2.422 ---3
   2.423 
   2.424 -It might be just as simple as two mov instrs, one for %ESP, one for %EBP..
   2.425 
   2.426 - the stack and frame pointer regs
   2.427 
   2.428 +
   2.429 +From e-mail to Albert, on design of app-virt-procr to core-loop animation
   2.430 +switch and back.
   2.431 +
   2.432 +====================
   2.433 +General warnings about this code:
   2.434 +It only compiles in GCC 4.x  (label addr and computed goto)
   2.435 +Has assembly for x86  32bit
   2.436 +
   2.437 +
   2.438 +====================
   2.439 +AVProcr data-struc has: stack-ptr, jump-ptr, data-ptr, slotNum, coreloop-ptr
   2.440 + and semantic-custom-ptr
   2.441 +
   2.442 +The VMS Creator: takes ptr to function and ptr to initial data
   2.443 +-- creates a new AVProcr struc
   2.444 +-- sets the jmp-ptr field to the ptr-to-function passed in
   2.445 +-- sets the data-ptr to ptr to initial data passed in
   2.446 +-- if this is for a suspendable virt  processor, then create a stack and set
   2.447 +   the stack-ptr
   2.448 +
   2.449 +VMS_int__create_slaveVP( AVProcrFnPtr fnPtr, void *initialData )
   2.450 +{
   2.451 +AVProcr   newSlv = malloc( sizeof(AVProcr) );
   2.452 +newSlv->jmpPtr = fnPtr;
   2.453 +newSlv->coreLoopDonePt = &CoreLoopDonePt; //label is in coreLoop
   2.454 +newSlv->data = initialData;
   2.455 +newSlv->stackPtr = createNewStack();
   2.456 +return newSlv;
   2.457 +}
   2.458 +
   2.459 +The semantic layer can then add its own state in the custom-ptr field
   2.460 +
   2.461 +The Scheduler plug-in:
   2.462 +-- Sets slave-ptr in AVProcr, and points the slave to AVProcr
   2.463 +-- if non-suspendable, sets the AVProcr's stack-ptr to the slave's stack-ptr
   2.464 +
   2.465 +MasterLoop:
   2.466 +-- puts AVProcr structures onto the workQ
   2.467 +
   2.468 +CoreLoop:
   2.469 +-- gets stack-ptr out of AVProcr and sets the core's stack-ptr to that
   2.470 +-- gets data-ptr out of AVProcr and puts it into reg GCC uses for that param
   2.471 +-- puts AVProcr's addr into reg GCC uses for the AVProcr-pointer param
   2.472 +-- jumps to the addr in AVProcr's jmp-ptr field
   2.473 +CoreLoop()
   2.474 +{ while( FOREVER )
   2.475 + { nextSlv = readQ( workQ );  //workQ is static (global) var declared volatile
   2.476 +   <dataPtr-param-register>       = nextSlv->data;
   2.477 +   <AVProcrPtr-param-register> = nextSlv;
   2.478 +   <stack-pointer register>          = nextSlv->stackPtr;
   2.479 +   jmp nextSlv->jmpPtr;
   2.480 +CoreLoopDonePt:   //label's addr put into AVProcr when create new one
   2.481 + }
   2.482 +}
   2.483 +(Note, for suspendable processors coming back from suspension, there is no
   2.484 + need to fill the parameter registers -- they will be discarded)
   2.485 +
   2.486 +Suspend an application-level virtual processor:
   2.487 +VMS_int__AVPSuspend( AVProcr *pr )
   2.488 +{
   2.489 +pr->jmpPtr = &ResumePt;  //label defined a few lines below
   2.490 +pr->slave->doneFlag = TRUE;
   2.491 +pr->stackPtr = <current SP reg value>;
   2.492 +jmp pr->coreLoopDonePt;
   2.493 +ResumePt: return;
   2.494 +}
   2.495 +
   2.496 +This works because the core loop will have switched back to this stack
   2.497 + before jumping to ResumePt..    also, the core loop never modifies the
   2.498 + stack pointer, it simply switches to whatever stack pointer is in the
   2.499 + next AVProcr it gets off the workQ.
   2.500 +
   2.501 +
   2.502 +
   2.503 +=============================================================================
   2.504 +As it is now, there's only one major unknown about GCC (first thing below
   2.505 +  the line),  and there are a few restrictions, the most intrusive being
   2.506 +  that the functions the application gives to the semantic layer have a
   2.507 +  pre-defined prototype -- return nothing, take a pointer to initial data
   2.508 +  and a pointer to an AVProcr struc, which they're not allowed to modify
   2.509 +  -- only pass it to semantic-lib calls.
   2.510 +
   2.511 +So, here are the assumptions, restrictions, and so forth:
   2.512 +===========================
   2.513 +Major assumption:  that GCC will do the following the same way every time:
   2.514 +  say the application defines a function that fits this typedef:
   2.515 +typedef void (*AVProcrFnPtr)  ( void *, AVProcr * );
   2.516 +
   2.517 +and let's say somewhere in the code they do this:
   2.518 +AVProcrFnPtr   fnPtr = &someFunc;
   2.519 +
   2.520 +then they do this:
   2.521 +(*fnPtr)( dataPtr, animatingSlaveVPPtr );
   2.522 +
   2.523 +Can the registers that GCC uses to pass the two pointers be predicted?
   2.524 + Will they always be the same registers, in every program that has the
   2.525 + same typedef?
   2.526 +If that typedef fixes, guaranteed, the registers (on x86) that GCC will use
   2.527 + to send the two pointers, then the rest of this solution works.
   2.528 +
   2.529 +Change in model: Instead of a virtual processor whose execution trace is
   2.530 + divided into work-units, replacing that with the pattern that a virtual
   2.531 + processor is suspended.  Which means, no more "work unit" data structure
   2.532 + -- instead, it's now an "Application Virtual Processor" structure
   2.533 + -- AVProcr -- which is given directly to the application function!
   2.534 +
   2.535 +   -- You were right, don't need slaves to be virtual processors, only need
   2.536 +      "scheduling buckets" -- just a way to keep track of things..
   2.537 +
   2.538 +Restrictions:
   2.539 +-- the  "virtual entities"  created by the semantic layer must be virtual
   2.540 +   processors, created with a function-to-execute and initial data -- the
   2.541 +   function is restricted to return nothing and only take a pointer to the
   2.542 +   initial data plus a pointer to an AVProcr structure, which represents
   2.543 +   "self", the virtual processor created.  (This is the interface I showed
   2.544 +   you for "Hello World" semantic layer).
   2.545 +What this means for synchronous dataflow, is that the nodes in the graph
   2.546 +  are virtual processors that in turn spawn a new virtual processor for
   2.547 +  every "firing" of the node.  This should be fine because the function
   2.548 +  that the node itself is created with is a "canned" function that is part
   2.549 +  of the semantic layer -- the function that is spawned is the user-provided
   2.550 +  function.  The restriction only means that the values from the inputs to
   2.551 +  the node are packaged as the "initial data" given to the spawned virtual
   2.552 +  processor -- so the user-function has to cast a void * to the
   2.553 +  semantic-layer-defined structure by which it gets the inputs to the node.
   2.554 +
   2.555 +-- Second restriction is that the semantic layer has to use VMS supplied
   2.556 +   stuff -- for example, the data structure that represents the
   2.557 +   application-level virtual processor is defined in VMS, and the semantic
   2.558 +   layer has to call a VMS function in order to suspend a virtual processor.
   2.559 +
   2.560 +-- Third restriction is that the application code never do anything with
   2.561 +   the AVProcr structure except pass it to semantic-layer lib calls.
   2.562 +
   2.563 +-- Fourth restriction is that every virtual processor must call a
   2.564 +   "dissipate" function as its last act -- the user-supplied
   2.565 +   virtual-processor function can't just end -- it has to call
   2.566 +   SemLib__dissipate( AVProcr ) before the closing brace.. and after the
   2.567 +   semantic layer is done cleaning up its own data, it has to in turn call
   2.568 +   VMS_int__dissipate( AVProcr ).
   2.569 +
   2.570 +-- For performance reasons, I think I want to have two different kinds of
   2.571 +   app-virtual processor -- suspendable ones and non-suspendable -- where
   2.572 +   non-suspendable are not allowed to perform any communication with other
   2.573 +   virtual processors, except at birth and death.  Suspendable ones, of
   2.574 +   course can perform communications, create other processors, and so forth
   2.575 +   -- all of which cause it to suspend.
   2.576 +The performance difference is that I need a separate stack for each
   2.577 +  suspendable, but non-suspendable can re-use a fixed number of stacks
   2.578 +  (one for each slave).
   2.579 +
   2.580 +
   2.581 +==================== May 29
   2.582 +
   2.583 +Qs:
   2.584 +--1 how to safely jump between virt processor's trace and coreloop
   2.585 +--2 how to set up __cdecl style stack + frame for just-born virtual processor
   2.586 +--3 how to switch stack-pointers + frame-pointers
   2.587 +
   2.588 +
   2.589 +--1:
   2.590 +Not sure if GCC's computed goto is safe, because modify the stack pointer
   2.591 +without GCC's knowledge -- although, don't use the stack in the coreloop
   2.592 +segment, so, actually, that should be safe!
   2.593 +
   2.594 +So, GCC has its own special C extensions, one of which gets address of label:
   2.595 +
   2.596 +void *labelAddr;
   2.597 +labelAddr = &&label;
   2.598 +goto *labelAddr;
   2.599 +
   2.600 +--2
   2.601 +In CoreLoop, will check whether VirtProc just born, or was suspended.
   2.602 +If just born, do bit of code that sets up the virtual processor's stack
   2.603 +and frame according to the __cdecl convention for the standard virt proc
   2.604 +fn typedef -- save the pointer to data and pointer to virt proc struc into
   2.605 +correct places in the frame
   2.606 +   __cdecl says, according to:
   2.607 +http://unixwiz.net/techtips/win32-callconv-asm.html
   2.608 +To do this:
   2.609 +push the parameters onto the stack, right most first, working backwards to
   2.610 + the left.
   2.611 +Then perform call instr, which pushes return addr onto stack.
   2.612 +Then callee first pushes the frame pointer, %EBP followed by placing the
   2.613 +then-current value of stack pointer into %EBP
   2.614 +push ebp
   2.615 +mov  ebp, esp    // ebp « esp
   2.616 +
   2.617 +Once %ebp has been changed, it can now refer directly to the function's
   2.618 + arguments as 8(%ebp), 12(%ebp). Note that 0(%ebp) is the old base pointer
   2.619 + and 4(%ebp) is the old instruction pointer.
   2.620 +
   2.621 +Then callee pushes regs it will use then adds to stack pointer the size of
   2.622 + its local vars.
   2.623 +
   2.624 +Stack in callee looks like this:
   2.625 +16(%ebp)	 - third function parameter
   2.626 +12(%ebp)	 - second function parameter
   2.627 +8(%ebp)	 - first function parameter
   2.628 +4(%ebp)	 - old %EIP (the function's "return address")
   2.629 +----------^^ State seen at first instr of callee ^^-----------
   2.630 +0(%ebp)	- old %EBP (previous function's base pointer)
   2.631 +-4(%ebp)	 - save of EAX, the only reg used in function
   2.632 +-8(%ebp)	 - first local variable
   2.633 +-12(%ebp)	 - second local variable
   2.634 +-16(%ebp)	 - third local variable
   2.635 +
   2.636 +
   2.637 +--3
   2.638 +It might be just as simple as two mov instrs, one for %ESP, one for %EBP..
   2.639 + the stack and frame pointer regs
     3.1 --- a/SSR.h	Fri Mar 09 19:01:21 2012 +0100
     3.2 +++ b/SSR.h	Fri Mar 09 22:28:08 2012 -0800
     3.3 @@ -9,9 +9,9 @@
     3.4  #ifndef _SSR_H
     3.5  #define	_SSR_H
     3.6  
     3.7 -#include "../../C_Libraries/Queue_impl/PrivateQueue.h"
     3.8 -#include "../../C_Libraries/Hash_impl/PrivateHash.h"
     3.9 -#include "../VMS_impl/VMS.h"
    3.10 +#include "Queue_impl/PrivateQueue.h"
    3.11 +#include "Hash_impl/PrivateHash.h"
    3.12 +#include "VMS_impl/VMS.h"
    3.13  #include "dependency.h"
    3.14  
    3.15  
    3.16 @@ -31,7 +31,7 @@
    3.17  
    3.18  typedef struct
    3.19   {
    3.20 -   VirtProcr      *VPCurrentlyExecuting;
    3.21 +   SlaveVP      *VPCurrentlyExecuting;
    3.22     PrivQueueStruc *waitingVPQ;
    3.23   }
    3.24  SSRTrans;
    3.25 @@ -69,14 +69,14 @@
    3.26  
    3.27  struct _SSRSemReq
    3.28   { enum SSRReqType    reqType;
    3.29 -   VirtProcr         *sendPr;
    3.30 -   VirtProcr         *receivePr;
    3.31 +   SlaveVP         *sendPr;
    3.32 +   SlaveVP         *receivePr;
    3.33     int32              msgType;
    3.34     void              *msg;
    3.35     SSRSemReq         *nextReqInHashEntry;
    3.36  
    3.37     void              *initData;
    3.38 -   VirtProcrFnPtr     fnPtr;
    3.39 +   TopLevelFnPtr     fnPtr;
    3.40     int32              coreToScheduleOnto;
    3.41  
    3.42     int32              sizeToMalloc;
    3.43 @@ -97,7 +97,7 @@
    3.44   {
    3.45     PrivQueueStruc **readyVPQs;
    3.46     HashTable       *commHashTbl;
    3.47 -   int32            numVirtPr;
    3.48 +   int32            numSlaveVP;
    3.49     int32            nextCoreToGetNewPr;
    3.50     int32            primitiveStartTime;
    3.51  
    3.52 @@ -105,7 +105,7 @@
    3.53     SSRSingleton     fnSingletons[NUM_STRUCS_IN_SEM_ENV];
    3.54     SSRTrans         transactionStrucs[NUM_STRUCS_IN_SEM_ENV];
    3.55     
    3.56 -   #ifdef OBSERVE_UCC
    3.57 +   #ifdef HOLISTIC__TURN_ON_OBSERVE_UCC
    3.58     ListOfArrays* unitList;
    3.59     ListOfArrays* ctlDependenciesList;
    3.60     ListOfArrays* commDependenciesList;
    3.61 @@ -116,10 +116,10 @@
    3.62     ListOfArrays* hwArcs;
    3.63     #endif
    3.64  
    3.65 -   #ifdef MEAS__PERF_COUNTERS
    3.66 +   #ifdef HOLISTIC__TURN_ON_PERF_COUNTERS
    3.67     ListOfArrays* counterList[NUM_CORES];
    3.68     #endif
    3.69 -   VirtProcr* idlePr[NUM_CORES][NUM_SCHED_SLOTS];
    3.70 +   SlaveVP* idlePr[NUM_CORES][NUM_SCHED_SLOTS];
    3.71   }
    3.72  SSRSemEnv;
    3.73  
    3.74 @@ -142,7 +142,7 @@
    3.75  //===========================================================================
    3.76  
    3.77  void
    3.78 -SSR__create_seed_procr_and_do_work( VirtProcrFnPtr fn, void *initData );
    3.79 +SSR__create_seed_procr_and_do_work( TopLevelFnPtr fn, void *initData );
    3.80  
    3.81  int32
    3.82  SSR__giveMinWorkUnitCycles( float32 percentOverhead );
    3.83 @@ -169,33 +169,33 @@
    3.84  
    3.85  //=======================
    3.86  
    3.87 -  VirtProcr *
    3.88 -SSR__create_procr_with( VirtProcrFnPtr fnPtr, void *initData,
    3.89 -                          VirtProcr *creatingPr );
    3.90 +  SlaveVP *
    3.91 +SSR__create_procr_with( TopLevelFnPtr fnPtr, void *initData,
    3.92 +                          SlaveVP *creatingSlv );
    3.93  
    3.94 -  VirtProcr *
    3.95 -SSR__create_procr_with_affinity( VirtProcrFnPtr fnPtr,    void *initData,
    3.96 -                            VirtProcr *creatingPr, int32 coreToScheduleOnto);
    3.97 +  SlaveVP *
    3.98 +SSR__create_procr_with_affinity( TopLevelFnPtr fnPtr,    void *initData,
    3.99 +                            SlaveVP *creatingPr, int32 coreToScheduleOnto);
   3.100  
   3.101  void
   3.102 -SSR__dissipate_procr( VirtProcr *procrToDissipate );
   3.103 +SSR__dissipate_procr( SlaveVP *procrToDissipate );
   3.104  
   3.105  //=======================
   3.106  void *
   3.107 -SSR__malloc_to( int numBytes, VirtProcr *ownerPr );
   3.108 +SSR__malloc_to( int numBytes, SlaveVP *ownerSlv );
   3.109  
   3.110  void
   3.111 -SSR__free( void *ptrToFree, VirtProcr *owningPr );
   3.112 +SSR__free( void *ptrToFree, SlaveVP *owningSlv );
   3.113  
   3.114  void
   3.115 -SSR__transfer_ownership_of_from_to( void *data, VirtProcr *oldOwnerPr,
   3.116 -                                                    VirtProcr *newOwnerPr );
   3.117 +SSR__transfer_ownership_of_from_to( void *data, SlaveVP *oldOwnerPr,
   3.118 +                                                    SlaveVP *newOwnerSlv );
   3.119                                                      
   3.120  void
   3.121 -SSR__add_ownership_by_to( VirtProcr *newOwnerPr, void *data );
   3.122 +SSR__add_ownership_by_to( SlaveVP *newOwnerPr, void *data );
   3.123  
   3.124  void
   3.125 -SSR__remove_ownership_by_from( VirtProcr *loserPr, void *dataLosing );
   3.126 +SSR__remove_ownership_by_from( SlaveVP *loserPr, void *dataLosing );
   3.127  
   3.128  void
   3.129  SSR__transfer_ownership_to_outside( void *dataToTransferOwnershipOf );
   3.130 @@ -204,52 +204,52 @@
   3.131  
   3.132  //=======================
   3.133  void
   3.134 -SSR__send_of_type_to( VirtProcr *sendPr, void *msg, const int type,
   3.135 -                        VirtProcr *receivePr);
   3.136 +SSR__send_of_type_to( SlaveVP *sendPr, void *msg, const int type,
   3.137 +                        SlaveVP *receivePr);
   3.138  
   3.139  void
   3.140 -SSR__send_from_to( void *msg, VirtProcr *sendPr, VirtProcr *receivePr);
   3.141 +SSR__send_from_to( void *msg, SlaveVP *sendPr, SlaveVP *receivePr);
   3.142  
   3.143  void *
   3.144 -SSR__receive_type_to( const int type, VirtProcr *receivePr );
   3.145 +SSR__receive_type_to( const int type, SlaveVP *receiveSlv );
   3.146  
   3.147  void *
   3.148 -SSR__receive_from_to( VirtProcr *sendPr, VirtProcr *receivePr );
   3.149 +SSR__receive_from_to( SlaveVP *sendPr, SlaveVP *receiveSlv );
   3.150  
   3.151  
   3.152  //======================= Concurrency Stuff ======================
   3.153  void
   3.154 -SSR__start_fn_singleton( int32 singletonID, VirtProcr *animPr );
   3.155 +SSR__start_fn_singleton( int32 singletonID, SlaveVP *animSlv );
   3.156  
   3.157  void
   3.158 -SSR__end_fn_singleton( int32 singletonID, VirtProcr *animPr );
   3.159 +SSR__end_fn_singleton( int32 singletonID, SlaveVP *animSlv );
   3.160  
   3.161  void
   3.162 -SSR__start_data_singleton( SSRSingleton **singeltonAddr, VirtProcr *animPr );
   3.163 +SSR__start_data_singleton( SSRSingleton **singeltonAddr, SlaveVP *animSlv );
   3.164  
   3.165  void
   3.166 -SSR__end_data_singleton( SSRSingleton **singletonAddr, VirtProcr *animPr );
   3.167 +SSR__end_data_singleton( SSRSingleton **singletonAddr, SlaveVP *animSlv );
   3.168  
   3.169  void
   3.170  SSR__animate_short_fn_in_isolation( PtrToAtomicFn ptrToFnToExecInMaster,
   3.171 -                                    void *data, VirtProcr *animPr );
   3.172 +                                    void *data, SlaveVP *animSlv );
   3.173  
   3.174  void
   3.175 -SSR__start_transaction( int32 transactionID, VirtProcr *animPr );
   3.176 +SSR__start_transaction( int32 transactionID, SlaveVP *animSlv );
   3.177  
   3.178  void
   3.179 -SSR__end_transaction( int32 transactionID, VirtProcr *animPr );
   3.180 +SSR__end_transaction( int32 transactionID, SlaveVP *animSlv );
   3.181  
   3.182  
   3.183  //=========================  Internal use only  =============================
   3.184  void
   3.185 -SSR__Request_Handler( VirtProcr *requestingPr, void *_semEnv );
   3.186 +SSR__Request_Handler( SlaveVP *requestingPr, void *_semEnv );
   3.187  
   3.188 -VirtProcr *
   3.189 -SSR__schedule_virt_procr( void *_semEnv, int coreNum, int slotNum );
   3.190 +SlaveVP *
   3.191 +SSR__schedule_slaveVP( void *_semEnv, int coreNum, int slotNum );
   3.192  
   3.193 -VirtProcr*
   3.194 -SSR__create_procr_helper( VirtProcrFnPtr fnPtr, void *initData,
   3.195 +SlaveVP*
   3.196 +SSR__create_procr_helper( TopLevelFnPtr fnPtr, void *initData,
   3.197                            SSRSemEnv *semEnv,    int32 coreToScheduleOnto );
   3.198  
   3.199  #endif	/* _SSR_H */
     4.1 --- a/SSR_Counter_Recording.c	Fri Mar 09 19:01:21 2012 +0100
     4.2 +++ b/SSR_Counter_Recording.c	Fri Mar 09 22:28:08 2012 -0800
     4.3 @@ -1,7 +1,14 @@
     4.4 +/*
     4.5 + * 
     4.6 + * author: Nina Engelhardt
     4.7 + */
     4.8 +
     4.9  #include "SSR_Counter_Recording.h"
    4.10 -#include "../VMS_impl/VMS.h"
    4.11 +#include "VMS_impl/VMS.h"
    4.12  #include "SSR.h"
    4.13  
    4.14 +#ifdef HOLISTIC__TURN_ON_PERF_COUNTERS
    4.15 +
    4.16  void SSR__init_counter_data_structs(){
    4.17      SSRSemEnv *semanticEnv = _VMSMasterEnv->semanticEnv;
    4.18      int i;
    4.19 @@ -21,7 +28,7 @@
    4.20      list->next_free_index++; 
    4.21  }
    4.22  
    4.23 -void SSR__counter_handler(int evt_type, int vpid, int task, VirtProcr* pr, uint64 cycles, uint64 instrs)
    4.24 +void SSR__counter_handler(int evt_type, int vpid, int task, SlaveVP* pr, uint64 cycles, uint64 instrs)
    4.25  {
    4.26      
    4.27      if (pr->isMasterVP || pr->isShutdownVP) { //Save only values for actual work
    4.28 @@ -109,4 +116,5 @@
    4.29           fprintf(counterfile,", %d",e->coreID);
    4.30       fprintf(counterfile,"\n");
    4.31       fflush(counterfile);
    4.32 -}
    4.33 \ No newline at end of file
    4.34 +}
    4.35 +#endif
     5.1 --- a/SSR_Counter_Recording.h	Fri Mar 09 19:01:21 2012 +0100
     5.2 +++ b/SSR_Counter_Recording.h	Fri Mar 09 22:28:08 2012 -0800
     5.3 @@ -8,7 +8,7 @@
     5.4  #ifndef SSR_COUNTER_RECORDING_H
     5.5  #define	SSR_COUNTER_RECORDING_H
     5.6  
     5.7 -#include "../VMS_impl/VMS.h"
     5.8 +#include "VMS_impl/VMS.h"
     5.9  
    5.10  typedef struct {
    5.11     int event_type;
    5.12 @@ -24,7 +24,7 @@
    5.13  
    5.14  void SSR__init_counter_data_structs();
    5.15  
    5.16 -void SSR__counter_handler(int evt_type, int vpid, int task, VirtProcr* pr, uint64 cycles, uint64 instrs);
    5.17 +void SSR__counter_handler(int evt_type, int vpid, int task, SlaveVP* pr, uint64 cycles, uint64 instrs);
    5.18  
    5.19  void set_counter_file(FILE* f);
    5.20  
     6.1 --- a/SSR_PluginFns.c	Fri Mar 09 19:01:21 2012 +0100
     6.2 +++ b/SSR_PluginFns.c	Fri Mar 09 22:28:08 2012 -0800
     6.3 @@ -13,16 +13,16 @@
     6.4  
     6.5  //=========================== Local Fn Prototypes ===========================
     6.6  void
     6.7 -resume_procr( VirtProcr *procr, SSRSemEnv *semEnv );
     6.8 +resume_slaveVP( SlaveVP *procr, SSRSemEnv *semEnv );
     6.9  
    6.10  void
    6.11 -handleSemReq( VMSReqst *req, VirtProcr *requestingPr, SSRSemEnv *semEnv );
    6.12 +handleSemReq( VMSReqst *req, SlaveVP *requestingPr, SSRSemEnv *semEnv );
    6.13  
    6.14  void
    6.15 -handleDissipate(             VirtProcr *requestingPr, SSRSemEnv *semEnv );
    6.16 +handleDissipate(             SlaveVP *requestingPr, SSRSemEnv *semEnv );
    6.17  
    6.18  void
    6.19 -handleCreate( VMSReqst *req, VirtProcr *requestingPr, SSRSemEnv *semEnv  );
    6.20 +handleCreate( VMSReqst *req, SlaveVP *requestingPr, SSRSemEnv *semEnv  );
    6.21  
    6.22  
    6.23  //============================== Scheduler ==================================
    6.24 @@ -35,9 +35,9 @@
    6.25   */
    6.26  char __Scheduler[] = "FIFO Scheduler"; //Gobal variable for name in saved histogram
    6.27  
    6.28 -VirtProcr *
    6.29 -SSR__schedule_virt_procr( void *_semEnv, int coreNum, int slotNum )
    6.30 - { VirtProcr   *schedPr;
    6.31 +SlaveVP *
    6.32 +SSR__schedule_slaveVP( void *_semEnv, int coreNum, int slotNum )
    6.33 + { SlaveVP   *schedPr;
    6.34     SSRSemEnv *semEnv;
    6.35  
    6.36     semEnv  = (SSRSemEnv *)_semEnv;
    6.37 @@ -47,24 +47,24 @@
    6.38    if(!schedPr){
    6.39         schedPr = semEnv->idlePr[coreNum][slotNum];
    6.40       //things that would normally happen in resume(), but these VPs never go there
    6.41 -     #ifdef OBSERVE_UCC
    6.42 +     #ifdef HOLISTIC__TURN_ON_OBSERVE_UCC
    6.43          schedPr->numTimesScheduled++; //Somewhere here!
    6.44          Unit newu;
    6.45 -        newu.vp = schedPr->procrID;
    6.46 +        newu.vp = schedPr->slaveID;
    6.47          newu.task = schedPr->numTimesScheduled;
    6.48          addToListOfArrays(Unit,newu,semEnv->unitList);
    6.49     
    6.50          if (schedPr->numTimesScheduled > 1){
    6.51                  Dependency newd;
    6.52 -                newd.from_vp = schedPr->procrID;
    6.53 +                newd.from_vp = schedPr->slaveID;
    6.54                  newd.from_task = schedPr->numTimesScheduled - 1;
    6.55 -                newd.to_vp = schedPr->procrID;
    6.56 +                newd.to_vp = schedPr->slaveID;
    6.57                  newd.to_task = schedPr->numTimesScheduled;
    6.58                  addToListOfArrays(Dependency, newd ,semEnv->ctlDependenciesList);  
    6.59          }
    6.60        #endif
    6.61     }
    6.62 -   #ifdef OBSERVE_UCC
    6.63 +   #ifdef HOLISTIC__TURN_ON_OBSERVE_UCC
    6.64     if (schedPr) {
    6.65          //schedPr->numTimesScheduled++;
    6.66          Unit prev_in_slot = semEnv->last_in_slot[coreNum * NUM_SCHED_SLOTS + slotNum];
    6.67 @@ -72,11 +72,11 @@
    6.68                  Dependency newd;
    6.69                  newd.from_vp = prev_in_slot.vp;
    6.70                  newd.from_task = prev_in_slot.task;
    6.71 -                newd.to_vp = schedPr->procrID;
    6.72 +                newd.to_vp = schedPr->slaveID;
    6.73                  newd.to_task = schedPr->numTimesScheduled;
    6.74                  addToListOfArrays(Dependency,newd,semEnv->hwArcs);   
    6.75          }
    6.76 -        prev_in_slot.vp = schedPr->procrID;
    6.77 +        prev_in_slot.vp = schedPr->slaveID;
    6.78          prev_in_slot.task = schedPr->numTimesScheduled;
    6.79          semEnv->last_in_slot[coreNum * NUM_SCHED_SLOTS + slotNum] = prev_in_slot;        
    6.80     }
    6.81 @@ -98,13 +98,13 @@
    6.82   *  Processor, and initial data.
    6.83   */
    6.84  void
    6.85 -SSR__Request_Handler( VirtProcr *requestingPr, void *_semEnv )
    6.86 +SSR__Request_Handler( SlaveVP *requestingPr, void *_semEnv )
    6.87   { SSRSemEnv *semEnv;
    6.88     VMSReqst    *req;
    6.89     
    6.90     semEnv = (SSRSemEnv *)_semEnv;
    6.91  
    6.92 -   req    = VMS__take_next_request_out_of( requestingPr );
    6.93 +   req    = VMS_PI__take_next_request_out_of( requestingPr );
    6.94  
    6.95     while( req != NULL )
    6.96      {
    6.97 @@ -115,24 +115,24 @@
    6.98              break;
    6.99           case dissipate:    handleDissipate(           requestingPr, semEnv);
   6.100              break;
   6.101 -         case VMSSemantic:  VMS__handle_VMSSemReq(req, requestingPr, semEnv,
   6.102 -                                                                  &resume_procr);
   6.103 +         case VMSSemantic:  VMS_PI__handle_VMSSemReq(req, requestingPr, semEnv,
   6.104 +                                                               &resume_slaveVP);
   6.105              break;
   6.106           default:
   6.107              break;
   6.108         }
   6.109        
   6.110 -      req = VMS__take_next_request_out_of( requestingPr );
   6.111 +      req = VMS_PI__take_next_request_out_of( requestingPr );
   6.112      } //while( req != NULL )
   6.113  
   6.114   }
   6.115  
   6.116  
   6.117  void
   6.118 -handleSemReq( VMSReqst *req, VirtProcr *reqPr, SSRSemEnv *semEnv )
   6.119 +handleSemReq( VMSReqst *req, SlaveVP *reqPr, SSRSemEnv *semEnv )
   6.120   { SSRSemReq *semReq;
   6.121  
   6.122 -   semReq = VMS__take_sem_reqst_from(req);
   6.123 +   semReq = VMS_PI__take_sem_reqst_from(req);
   6.124     if( semReq == NULL ) return;
   6.125     switch( semReq->reqType )  //sem handlers are all in other file
   6.126      {
   6.127 @@ -174,43 +174,43 @@
   6.128  //=========================== VMS Request Handlers ==============================
   6.129  //
   6.130  void
   6.131 -handleDissipate( VirtProcr *requestingPr, SSRSemEnv *semEnv )
   6.132 +handleDissipate( SlaveVP *requestingPr, SSRSemEnv *semEnv )
   6.133   {
   6.134 -    DEBUG1(dbgRqstHdlr,"Dissipate request from processor %d\n",requestingPr->procrID)
   6.135 +    DEBUG1(dbgRqstHdlr,"Dissipate request from processor %d\n",requestingPr->slaveID)
   6.136        //free any semantic data allocated to the virt procr
   6.137 -   VMS__free( requestingPr->semanticData );
   6.138 +   VMS_PI__free( requestingPr->semanticData );
   6.139  
   6.140        //Now, call VMS to free_all AppVP state -- stack and so on
   6.141 -   VMS__dissipate_procr( requestingPr );
   6.142 +   VMS_PI__dissipate_slaveVP( requestingPr );
   6.143  
   6.144 -   semEnv->numVirtPr -= 1;
   6.145 -   if( semEnv->numVirtPr == 0 )
   6.146 +   semEnv->numSlaveVP -= 1;
   6.147 +   if( semEnv->numSlaveVP == 0 )
   6.148      {    //no more work, so shutdown
   6.149 -      VMS__shutdown();
   6.150 +      VMS_SS__shutdown();
   6.151      }
   6.152   }
   6.153  
   6.154  /*Re-use this in the entry-point fn
   6.155   */
   6.156 -  VirtProcr *
   6.157 -SSR__create_procr_helper( VirtProcrFnPtr fnPtr, void *initData,
   6.158 +  SlaveVP *
   6.159 +SSR__create_procr_helper( TopLevelFnPtr fnPtr, void *initData,
   6.160                            SSRSemEnv *semEnv,    int32 coreToScheduleOnto )
   6.161 - { VirtProcr    *newPr;
   6.162 + { SlaveVP    *newPr;
   6.163     SSRSemData   *semData;
   6.164  
   6.165        //This is running in master, so use internal version
   6.166 -   newPr = VMS__create_procr( fnPtr, initData );
   6.167 +   newPr = VMS_PI__create_slaveVP( fnPtr, initData );
   6.168  
   6.169 -   semEnv->numVirtPr += 1;
   6.170 +   semEnv->numSlaveVP += 1;
   6.171  
   6.172 -   semData = VMS__malloc( sizeof(SSRSemData) );
   6.173 +   semData = VMS_PI__malloc( sizeof(SSRSemData) );
   6.174     semData->highestTransEntered = -1;
   6.175     semData->lastTransEntered    = NULL;
   6.176  
   6.177     newPr->semanticData = semData;
   6.178  
   6.179     //=================== Assign new processor to a core =====================
   6.180 -   #ifdef SEQUENTIAL
   6.181 +   #ifdef DEBUG__TURN_ON_SEQUENTIAL_MODE
   6.182     newPr->coreAnimatedBy = 0;
   6.183  
   6.184     #else
   6.185 @@ -234,24 +234,24 @@
   6.186   }
   6.187  
   6.188  void
   6.189 -handleCreate( VMSReqst *req, VirtProcr *requestingPr, SSRSemEnv *semEnv  )
   6.190 +handleCreate( VMSReqst *req, SlaveVP *requestingPr, SSRSemEnv *semEnv  )
   6.191   { SSRSemReq *semReq;
   6.192 -   VirtProcr    *newPr;
   6.193 +   SlaveVP    *newPr;
   6.194     
   6.195 -   DEBUG1(dbgRqstHdlr,"Create request from processor %d ",requestingPr->procrID)
   6.196 +   DEBUG1(dbgRqstHdlr,"Create request from processor %d ",requestingPr->slaveID)
   6.197     
   6.198 -   semReq = VMS__take_sem_reqst_from( req );
   6.199 +   semReq = VMS_PI__take_sem_reqst_from( req );
   6.200   
   6.201     newPr = SSR__create_procr_helper( semReq->fnPtr, semReq->initData, semEnv,
   6.202                                       semReq->coreToScheduleOnto );
   6.203     
   6.204 -   DEBUG1(dbgRqstHdlr,"(new VP: %d)\n",newPr->procrID)
   6.205 +   DEBUG1(dbgRqstHdlr,"(new VP: %d)\n",newPr->slaveID)
   6.206  
   6.207 -   #ifdef OBSERVE_UCC
   6.208 +   #ifdef HOLISTIC__TURN_ON_OBSERVE_UCC
   6.209     Dependency newd;
   6.210 -   newd.from_vp = requestingPr->procrID;
   6.211 +   newd.from_vp = requestingPr->slaveID;
   6.212     newd.from_task = requestingPr->numTimesScheduled;
   6.213 -   newd.to_vp = newPr->procrID;
   6.214 +   newd.to_vp = newPr->slaveID;
   6.215     newd.to_task = 1;
   6.216     //addToListOfArraysDependency(newd,semEnv->commDependenciesList);  
   6.217     addToListOfArrays(Dependency,newd,semEnv->commDependenciesList);   
   6.218 @@ -260,34 +260,34 @@
   6.219        //For SSR, caller needs ptr to created processor returned to it
   6.220     requestingPr->dataRetFromReq = newPr;
   6.221  
   6.222 -   resume_procr( newPr,        semEnv );
   6.223 -   resume_procr( requestingPr, semEnv );
   6.224 +   resume_slaveVP( newPr,        semEnv );
   6.225 +   resume_slaveVP( requestingPr, semEnv );
   6.226   }
   6.227  
   6.228  
   6.229  //=========================== Helper ==============================
   6.230  void
   6.231 -resume_procr( VirtProcr *procr, SSRSemEnv *semEnv )
   6.232 +resume_slaveVP( SlaveVP *procr, SSRSemEnv *semEnv )
   6.233   {
   6.234 -   #ifdef MEAS__PERF_COUNTERS
   6.235 +   #ifdef HOLISTIC__TURN_ON_PERF_COUNTERS
   6.236  /*
   6.237     int lastRecordIdx = procr->counter_history_array_info->numInArray -1;
   6.238     CounterRecord* lastRecord = procr->counter_history[lastRecordIdx];
   6.239     saveLowTimeStampCountInto(lastRecord->unblocked_timestamp);
   6.240  */
   6.241     #endif
   6.242 -   #ifdef OBSERVE_UCC
   6.243 +   #ifdef HOLISTIC__TURN_ON_OBSERVE_UCC
   6.244     procr->numTimesScheduled++; //Somewhere here!
   6.245     Unit newu;
   6.246 -   newu.vp = procr->procrID;
   6.247 +   newu.vp = procr->slaveID;
   6.248     newu.task = procr->numTimesScheduled;
   6.249     addToListOfArrays(Unit,newu,semEnv->unitList);
   6.250     
   6.251     if (procr->numTimesScheduled > 1){
   6.252          Dependency newd;
   6.253 -        newd.from_vp = procr->procrID;
   6.254 +        newd.from_vp = procr->slaveID;
   6.255          newd.from_task = procr->numTimesScheduled - 1;
   6.256 -        newd.to_vp = procr->procrID;
   6.257 +        newd.to_vp = procr->slaveID;
   6.258          newd.to_task = procr->numTimesScheduled;
   6.259          addToListOfArrays(Dependency, newd ,semEnv->ctlDependenciesList);  
   6.260     }
     7.1 --- a/SSR_Request_Handlers.c	Fri Mar 09 19:01:21 2012 +0100
     7.2 +++ b/SSR_Request_Handlers.c	Fri Mar 09 22:28:08 2012 -0800
     7.3 @@ -7,16 +7,16 @@
     7.4  #include <stdio.h>
     7.5  #include <stdlib.h>
     7.6  
     7.7 -#include "../VMS_impl/VMS.h"
     7.8 -#include "../../C_Libraries/Queue_impl/PrivateQueue.h"
     7.9 -#include "../../C_Libraries/Hash_impl/PrivateHash.h"
    7.10 +#include "VMS_impl/VMS.h"
    7.11 +#include "Queue_impl/PrivateQueue.h"
    7.12 +#include "Hash_impl/PrivateHash.h"
    7.13  #include "SSR.h"
    7.14  
    7.15  
    7.16  
    7.17  //=========================== Local Fn Prototypes ===========================
    7.18  void
    7.19 -resume_procr( VirtProcr *procr, SSRSemEnv *semEnv );
    7.20 +resume_slaveVP( SlaveVP *procr, SSRSemEnv *semEnv );
    7.21  
    7.22  
    7.23  
    7.24 @@ -29,7 +29,7 @@
    7.25  cloneReq( SSRSemReq *semReq )
    7.26   { SSRSemReq *clonedReq;
    7.27  
    7.28 -   clonedReq             = VMS__malloc( sizeof(SSRSemReq) );
    7.29 +   clonedReq             = VMS_PI__malloc( sizeof(SSRSemReq) );
    7.30     clonedReq->reqType    = semReq->reqType;
    7.31     clonedReq->sendPr     = semReq->sendPr;
    7.32     clonedReq->msg        = semReq->msg;
    7.33 @@ -83,13 +83,13 @@
    7.34   */
    7.35  void
    7.36  handleSendType( SSRSemReq *semReq, SSRSemEnv *semEnv )
    7.37 - { VirtProcr   *sendPr, *receivePr;
    7.38 + { SlaveVP   *sendPr, *receivePr;
    7.39     int          key[] = {0,0,0};
    7.40     SSRSemReq *waitingReq;
    7.41     HashEntry   *entry;
    7.42     HashTable   *commHashTbl = semEnv->commHashTbl;
    7.43     
    7.44 -   DEBUG1(dbgRqstHdlr,"SendType request from processor %d\n",semReq->sendPr->procrID)
    7.45 +   DEBUG1(dbgRqstHdlr,"SendType request from processor %d\n",semReq->sendPr->slaveID)
    7.46   
    7.47     receivePr = semReq->receivePr; //For "send", know both send & recv procrs
    7.48     sendPr    = semReq->sendPr;
    7.49 @@ -101,7 +101,7 @@
    7.50           //  list when multiple have the same key.
    7.51  
    7.52        //TODO: use a faster hash function -- see notes in intelligence gather
    7.53 -   key[0] = (int)receivePr->procrID;
    7.54 +   key[0] = (int)receivePr->slaveID;
    7.55     key[1] = (int)(semReq->msgType);
    7.56   //key[2] acts as the 0 that terminates the string
    7.57  
    7.58 @@ -126,11 +126,11 @@
    7.59      }
    7.60     else
    7.61      {    
    7.62 -       #ifdef OBSERVE_UCC
    7.63 +       #ifdef HOLISTIC__TURN_ON_OBSERVE_UCC
    7.64          Dependency newd;
    7.65 -        newd.from_vp = sendPr->procrID;
    7.66 +        newd.from_vp = sendPr->slaveID;
    7.67          newd.from_task = sendPr->numTimesScheduled;
    7.68 -        newd.to_vp = receivePr->procrID;
    7.69 +        newd.to_vp = receivePr->slaveID;
    7.70          newd.to_task = receivePr->numTimesScheduled +1;
    7.71          //(newd,semEnv->commDependenciesList);  
    7.72          addToListOfArrays(Dependency,newd,semEnv->dynDependenciesList);  
    7.73 @@ -142,10 +142,10 @@
    7.74              semEnv->ntonGroups[groupId] = new_NtoN(groupId);
    7.75          }
    7.76          Unit u;
    7.77 -        u.vp = sendPr->procrID;
    7.78 +        u.vp = sendPr->slaveID;
    7.79          u.task = sendPr->numTimesScheduled;
    7.80          addToListOfArrays(Unit,u,semEnv->ntonGroups[groupId]->senders);
    7.81 -        u.vp = receivePr->procrID;
    7.82 +        u.vp = receivePr->slaveID;
    7.83          u.task = receivePr->numTimesScheduled +1;
    7.84          addToListOfArrays(Unit,u,semEnv->ntonGroups[groupId]->receivers);
    7.85         #endif
    7.86 @@ -153,7 +153,7 @@
    7.87         //waiting request is a receive, so it pairs to this send
    7.88           //First, remove the waiting receive request from the entry
    7.89        entry->content = waitingReq->nextReqInHashEntry;
    7.90 -      VMS__free( waitingReq ); //Don't use contents -- so free it
    7.91 +      VMS_PI__free( waitingReq ); //Don't use contents -- so free it
    7.92        
    7.93        if( entry->content == NULL )
    7.94         {    //TODO: mod hash table to double-link, so can delete entry from
    7.95 @@ -166,8 +166,8 @@
    7.96        receivePr->dataRetFromReq = semReq->msg;
    7.97  
    7.98           //bring both processors back from suspend
    7.99 -      resume_procr( sendPr,    semEnv );
   7.100 -      resume_procr( receivePr, semEnv );
   7.101 +      resume_slaveVP( sendPr,    semEnv );
   7.102 +      resume_slaveVP( receivePr, semEnv );
   7.103  
   7.104        return;
   7.105      }
   7.106 @@ -179,20 +179,20 @@
   7.107  //TODO: combine both send handlers into single handler
   7.108  void
   7.109  handleSendFromTo( SSRSemReq *semReq, SSRSemEnv *semEnv)
   7.110 - { VirtProcr   *sendPr, *receivePr;
   7.111 + { SlaveVP   *sendPr, *receivePr;
   7.112     int          key[] = {0,0,0};
   7.113     SSRSemReq *waitingReq;
   7.114     HashEntry   *entry;
   7.115     HashTable   *commHashTbl = semEnv->commHashTbl;
   7.116  
   7.117 -   DEBUG2(dbgRqstHdlr,"SendFromTo request from processor %d to %d\n",semReq->sendPr->procrID,semReq->receivePr->procrID)
   7.118 +   DEBUG2(dbgRqstHdlr,"SendFromTo request from processor %d to %d\n",semReq->sendPr->slaveID,semReq->receivePr->slaveID)
   7.119     
   7.120     receivePr = semReq->receivePr; //For "send", know both send & recv procrs
   7.121     sendPr    = semReq->sendPr;    
   7.122     
   7.123         
   7.124 -   key[0] = (int)receivePr->procrID;
   7.125 -   key[1] = (int)sendPr->procrID;
   7.126 +   key[0] = (int)receivePr->slaveID;
   7.127 +   key[1] = (int)sendPr->slaveID;
   7.128   //key[2] acts at the 0 that terminates the string
   7.129  
   7.130     entry = giveEntryElseInsertReqst( (char *)key, semReq, commHashTbl);
   7.131 @@ -206,18 +206,18 @@
   7.132      }
   7.133     else
   7.134      {    //waiting request is a receive, so it completes pair with this send
   7.135 -      #ifdef OBSERVE_UCC
   7.136 +      #ifdef HOLISTIC__TURN_ON_OBSERVE_UCC
   7.137          Dependency newd;
   7.138 -        newd.from_vp = sendPr->procrID;
   7.139 +        newd.from_vp = sendPr->slaveID;
   7.140          newd.from_task = sendPr->numTimesScheduled;
   7.141 -        newd.to_vp = receivePr->procrID;
   7.142 +        newd.to_vp = receivePr->slaveID;
   7.143          newd.to_task = receivePr->numTimesScheduled +1;
   7.144          //addToListOfArraysDependency(newd,semEnv->commDependenciesList);  
   7.145          addToListOfArrays(Dependency,newd,semEnv->commDependenciesList);   
   7.146        #endif 
   7.147           //First, remove the waiting receive request from the entry
   7.148        entry->content = waitingReq->nextReqInHashEntry;
   7.149 -      VMS__free( waitingReq ); //Don't use contents -- so free it
   7.150 +      VMS_PI__free( waitingReq ); //Don't use contents -- so free it
   7.151        
   7.152           //can only be one waiting req for "from-to" semantics
   7.153        if( entry->content != NULL )
   7.154 @@ -231,8 +231,8 @@
   7.155        receivePr->dataRetFromReq = semReq->msg;
   7.156  
   7.157           //bring both processors back from suspend
   7.158 -      resume_procr( sendPr,    semEnv );
   7.159 -      resume_procr( receivePr, semEnv );
   7.160 +      resume_slaveVP( sendPr,    semEnv );
   7.161 +      resume_slaveVP( receivePr, semEnv );
   7.162              
   7.163        return;
   7.164      }
   7.165 @@ -277,7 +277,7 @@
   7.166  
   7.167  void
   7.168  handleReceiveType( SSRSemReq *semReq, SSRSemEnv *semEnv)
   7.169 - { VirtProcr   *sendPr, *receivePr;
   7.170 + { SlaveVP   *sendPr, *receivePr;
   7.171     int          key[] = {0,0,0};
   7.172     SSRSemReq *waitingReq;
   7.173     HashEntry   *entry;
   7.174 @@ -285,9 +285,9 @@
   7.175  
   7.176     receivePr = semReq->receivePr;
   7.177  
   7.178 -   DEBUG1(dbgRqstHdlr,"ReceiveType request from processor %d\n",receivePr->procrID)
   7.179 +   DEBUG1(dbgRqstHdlr,"ReceiveType request from processor %d\n",receivePr->slaveID)
   7.180     
   7.181 -   key[0] = (int)receivePr->procrID;
   7.182 +   key[0] = (int)receivePr->slaveID;
   7.183     key[1] = (int)(semReq->msgType);
   7.184   //key[2] acts as the 0 that terminates the string
   7.185  
   7.186 @@ -313,13 +313,13 @@
   7.187  
   7.188           //bring both processors back from suspend
   7.189        sendPr = waitingReq->sendPr;
   7.190 -      VMS__free( waitingReq );
   7.191 +      VMS_PI__free( waitingReq );
   7.192  
   7.193 -       #ifdef OBSERVE_UCC
   7.194 +       #ifdef HOLISTIC__TURN_ON_OBSERVE_UCC
   7.195          Dependency newd;
   7.196 -        newd.from_vp = sendPr->procrID;
   7.197 +        newd.from_vp = sendPr->slaveID;
   7.198          newd.from_task = sendPr->numTimesScheduled;
   7.199 -        newd.to_vp = receivePr->procrID;
   7.200 +        newd.to_vp = receivePr->slaveID;
   7.201          newd.to_task = receivePr->numTimesScheduled +1;
   7.202          //addToListOfArraysDependency(newd,semEnv->commDependenciesList);  
   7.203          addToListOfArrays(Dependency,newd,semEnv->dynDependenciesList); 
   7.204 @@ -331,16 +331,16 @@
   7.205              semEnv->ntonGroups[groupId] = new_NtoN(groupId);
   7.206          }
   7.207          Unit u;
   7.208 -        u.vp = sendPr->procrID;
   7.209 +        u.vp = sendPr->slaveID;
   7.210          u.task = sendPr->numTimesScheduled;
   7.211          addToListOfArrays(Unit,u,semEnv->ntonGroups[groupId]->senders);
   7.212 -        u.vp = receivePr->procrID;
   7.213 +        u.vp = receivePr->slaveID;
   7.214          u.task = receivePr->numTimesScheduled +1;
   7.215          addToListOfArrays(Unit,u,semEnv->ntonGroups[groupId]->receivers);
   7.216         #endif
   7.217        
   7.218 -      resume_procr( sendPr,    semEnv );
   7.219 -      resume_procr( receivePr, semEnv );
   7.220 +      resume_slaveVP( sendPr,    semEnv );
   7.221 +      resume_slaveVP( receivePr, semEnv );
   7.222  
   7.223        return;
   7.224      }
   7.225 @@ -352,19 +352,19 @@
   7.226   */
   7.227  void
   7.228  handleReceiveFromTo( SSRSemReq *semReq, SSRSemEnv *semEnv)
   7.229 - { VirtProcr   *sendPr, *receivePr;
   7.230 + { SlaveVP   *sendPr, *receivePr;
   7.231     int          key[] = {0,0,0};
   7.232     SSRSemReq *waitingReq;
   7.233     HashEntry   *entry;
   7.234     HashTable   *commHashTbl = semEnv->commHashTbl;
   7.235  
   7.236 -   DEBUG2(dbgRqstHdlr,"ReceiveFromTo request from processor %d to %d\n",semReq->sendPr->procrID,semReq->receivePr->procrID)
   7.237 +   DEBUG2(dbgRqstHdlr,"ReceiveFromTo request from processor %d to %d\n",semReq->sendPr->slaveID,semReq->receivePr->slaveID)
   7.238     
   7.239     receivePr = semReq->receivePr;
   7.240     sendPr    = semReq->sendPr;    //for receive from-to, know send procr
   7.241  
   7.242 -   key[0] = (int)receivePr->procrID;
   7.243 -   key[1] = (int)sendPr->procrID;
   7.244 +   key[0] = (int)receivePr->slaveID;
   7.245 +   key[1] = (int)sendPr->slaveID;
   7.246   //key[2] acts at the 0 that terminates the string
   7.247  
   7.248     entry = giveEntryElseInsertReqst( (char *)key, semReq, commHashTbl);
   7.249 @@ -375,11 +375,11 @@
   7.250        //At this point, know have waiting request(s) -- should be send(s)
   7.251     if( waitingReq->reqType == send_from_to )
   7.252      {    //waiting request is a send, so pair it with this receive
   7.253 -      #ifdef OBSERVE_UCC
   7.254 +      #ifdef HOLISTIC__TURN_ON_OBSERVE_UCC
   7.255          Dependency newd;
   7.256 -        newd.from_vp = sendPr->procrID;
   7.257 +        newd.from_vp = sendPr->slaveID;
   7.258          newd.from_task = sendPr->numTimesScheduled;
   7.259 -        newd.to_vp = receivePr->procrID;
   7.260 +        newd.to_vp = receivePr->slaveID;
   7.261          newd.to_task = receivePr->numTimesScheduled +1;
   7.262          //addToListOfArraysDependency(newd,semEnv->commDependenciesList);  
   7.263          addToListOfArrays(Dependency,newd,semEnv->commDependenciesList);    
   7.264 @@ -395,10 +395,10 @@
   7.265  
   7.266           //bring both processors back from suspend
   7.267        sendPr = waitingReq->sendPr;
   7.268 -      VMS__free( waitingReq );
   7.269 +      VMS_PI__free( waitingReq );
   7.270  
   7.271 -      resume_procr( sendPr,    semEnv );
   7.272 -      resume_procr( receivePr, semEnv );
   7.273 +      resume_slaveVP( sendPr,    semEnv );
   7.274 +      resume_slaveVP( receivePr, semEnv );
   7.275  
   7.276        return;
   7.277      }
   7.278 @@ -424,24 +424,24 @@
   7.279  /*
   7.280   */
   7.281  void
   7.282 -handleMalloc( SSRSemReq *semReq, VirtProcr *requestingPr, SSRSemEnv *semEnv )
   7.283 +handleMalloc( SSRSemReq *semReq, SlaveVP *requestingPr, SSRSemEnv *semEnv )
   7.284   { void *ptr;
   7.285   
   7.286 -   DEBUG1(dbgRqstHdlr,"Malloc request from processor %d\n",requestingPr->procrID)
   7.287 +   DEBUG1(dbgRqstHdlr,"Malloc request from processor %d\n",requestingPr->slaveID)
   7.288  
   7.289 -   ptr = VMS__malloc( semReq->sizeToMalloc );
   7.290 +   ptr = VMS_PI__malloc( semReq->sizeToMalloc );
   7.291     requestingPr->dataRetFromReq = ptr;
   7.292 -   resume_procr( requestingPr, semEnv );
   7.293 +   resume_slaveVP( requestingPr, semEnv );
   7.294   }
   7.295  
   7.296  /*
   7.297   */
   7.298  void
   7.299 -handleFree( SSRSemReq *semReq, VirtProcr *requestingPr, SSRSemEnv *semEnv )
   7.300 +handleFree( SSRSemReq *semReq, SlaveVP *requestingPr, SSRSemEnv *semEnv )
   7.301   {
   7.302 -    DEBUG1(dbgRqstHdlr,"Free request from processor %d\n",requestingPr->procrID)
   7.303 -   VMS__free( semReq->ptrToFree );
   7.304 -   resume_procr( requestingPr, semEnv );
   7.305 +    DEBUG1(dbgRqstHdlr,"Free request from processor %d\n",requestingPr->slaveID)
   7.306 +   VMS_PI__free( semReq->ptrToFree );
   7.307 +   resume_slaveVP( requestingPr, semEnv );
   7.308   }
   7.309  
   7.310  
   7.311 @@ -451,13 +451,13 @@
   7.312   * end-label.  Else, sets flag and resumes normally.
   7.313   */
   7.314  void inline
   7.315 -handleStartSingleton_helper( SSRSingleton *singleton, VirtProcr *reqstingPr,
   7.316 +handleStartSingleton_helper( SSRSingleton *singleton, SlaveVP *reqstingPr,
   7.317                               SSRSemEnv    *semEnv )
   7.318   {
   7.319     if( singleton->hasFinished )
   7.320      {    //the code that sets the flag to true first sets the end instr addr
   7.321        reqstingPr->dataRetFromReq = singleton->endInstrAddr;
   7.322 -      resume_procr( reqstingPr, semEnv );
   7.323 +      resume_slaveVP( reqstingPr, semEnv );
   7.324        return;
   7.325      }
   7.326     else if( singleton->hasBeenStarted )
   7.327 @@ -469,28 +469,28 @@
   7.328      {    //hasn't been started, so this is the first attempt at the singleton
   7.329        singleton->hasBeenStarted = TRUE;
   7.330        reqstingPr->dataRetFromReq = 0x0;
   7.331 -      resume_procr( reqstingPr, semEnv );
   7.332 +      resume_slaveVP( reqstingPr, semEnv );
   7.333        return;
   7.334      }
   7.335   }
   7.336  void inline
   7.337 -handleStartFnSingleton( SSRSemReq *semReq, VirtProcr *requestingPr,
   7.338 +handleStartFnSingleton( SSRSemReq *semReq, SlaveVP *requestingPr,
   7.339                        SSRSemEnv *semEnv )
   7.340   { SSRSingleton *singleton;
   7.341 - DEBUG1(dbgRqstHdlr,"StartFnSingleton request from processor %d\n",requestingPr->procrID)
   7.342 + DEBUG1(dbgRqstHdlr,"StartFnSingleton request from processor %d\n",requestingPr->slaveID)
   7.343  
   7.344     singleton = &(semEnv->fnSingletons[ semReq->singletonID ]);
   7.345     handleStartSingleton_helper( singleton, requestingPr, semEnv );
   7.346   }
   7.347  void inline
   7.348 -handleStartDataSingleton( SSRSemReq *semReq, VirtProcr *requestingPr,
   7.349 +handleStartDataSingleton( SSRSemReq *semReq, SlaveVP *requestingPr,
   7.350                        SSRSemEnv *semEnv )
   7.351   { SSRSingleton *singleton;
   7.352  
   7.353 - DEBUG1(dbgRqstHdlr,"StartDataSingleton request from processor %d\n",requestingPr->procrID)
   7.354 + DEBUG1(dbgRqstHdlr,"StartDataSingleton request from processor %d\n",requestingPr->slaveID)
   7.355     if( *(semReq->singletonPtrAddr) == NULL )
   7.356 -    { singleton                 = VMS__malloc( sizeof(SSRSingleton) );
   7.357 -      singleton->waitQ          = makeVMSPrivQ();
   7.358 +    { singleton                 = VMS_PI__malloc( sizeof(SSRSingleton) );
   7.359 +      singleton->waitQ          = makeVMSQ();
   7.360        singleton->endInstrAddr   = 0x0;
   7.361        singleton->hasBeenStarted = FALSE;
   7.362        singleton->hasFinished    = FALSE;
   7.363 @@ -503,16 +503,16 @@
   7.364  
   7.365  
   7.366  void inline
   7.367 -handleEndSingleton_helper( SSRSingleton *singleton, VirtProcr *requestingPr,
   7.368 +handleEndSingleton_helper( SSRSingleton *singleton, SlaveVP *requestingPr,
   7.369                             SSRSemEnv    *semEnv )
   7.370   { PrivQueueStruc *waitQ;
   7.371     int32           numWaiting, i;
   7.372 -   VirtProcr      *resumingPr;
   7.373 +   SlaveVP      *resumingPr;
   7.374  
   7.375     if( singleton->hasFinished )
   7.376      { //by definition, only one slave should ever be able to run end singleton
   7.377        // so if this is true, is an error
   7.378 -      //VMS__throw_exception( "singleton code ran twice", requestingPr, NULL);
   7.379 +      //VMS_PI__throw_exception( "singleton code ran twice", requestingPr, NULL);
   7.380      }
   7.381  
   7.382     singleton->hasFinished = TRUE;
   7.383 @@ -522,30 +522,30 @@
   7.384      {    //they will resume inside start singleton, then jmp to end singleton
   7.385        resumingPr = readPrivQ( waitQ );
   7.386        resumingPr->dataRetFromReq = singleton->endInstrAddr;
   7.387 -      resume_procr( resumingPr, semEnv );
   7.388 +      resume_slaveVP( resumingPr, semEnv );
   7.389      }
   7.390  
   7.391 -   resume_procr( requestingPr, semEnv );
   7.392 +   resume_slaveVP( requestingPr, semEnv );
   7.393  
   7.394  }
   7.395  void inline
   7.396 -handleEndFnSingleton( SSRSemReq *semReq, VirtProcr *requestingPr,
   7.397 +handleEndFnSingleton( SSRSemReq *semReq, SlaveVP *requestingPr,
   7.398                          SSRSemEnv *semEnv )
   7.399   {
   7.400     SSRSingleton   *singleton;
   7.401  
   7.402 -   DEBUG1(dbgRqstHdlr,"EndFnSingleton request from processor %d\n",requestingPr->procrID)
   7.403 +   DEBUG1(dbgRqstHdlr,"EndFnSingleton request from processor %d\n",requestingPr->slaveID)
   7.404     
   7.405     singleton = &(semEnv->fnSingletons[ semReq->singletonID ]);
   7.406     handleEndSingleton_helper( singleton, requestingPr, semEnv );
   7.407    }
   7.408  void inline
   7.409 -handleEndDataSingleton( SSRSemReq *semReq, VirtProcr *requestingPr,
   7.410 +handleEndDataSingleton( SSRSemReq *semReq, SlaveVP *requestingPr,
   7.411                          SSRSemEnv *semEnv )
   7.412   {
   7.413     SSRSingleton   *singleton;
   7.414  
   7.415 -   DEBUG1(dbgRqstHdlr,"EndDataSingleton request from processor %d\n",requestingPr->procrID)
   7.416 +   DEBUG1(dbgRqstHdlr,"EndDataSingleton request from processor %d\n",requestingPr->slaveID)
   7.417     
   7.418     singleton = *(semReq->singletonPtrAddr);
   7.419     handleEndSingleton_helper( singleton, requestingPr, semEnv );
   7.420 @@ -556,11 +556,11 @@
   7.421   * pointer out of the request and call it, then resume the VP.
   7.422   */
   7.423  void
   7.424 -handleAtomic( SSRSemReq *semReq, VirtProcr *requestingPr, SSRSemEnv *semEnv )
   7.425 +handleAtomic( SSRSemReq *semReq, SlaveVP *requestingPr, SSRSemEnv *semEnv )
   7.426   {
   7.427 -    DEBUG1(dbgRqstHdlr,"Atomic request from processor %d\n",requestingPr->procrID)
   7.428 +    DEBUG1(dbgRqstHdlr,"Atomic request from processor %d\n",requestingPr->slaveID)
   7.429     semReq->fnToExecInMaster( semReq->dataForFn );
   7.430 -   resume_procr( requestingPr, semEnv );
   7.431 +   resume_slaveVP( requestingPr, semEnv );
   7.432   }
   7.433  
   7.434  /*First, it looks at the VP's semantic data, to see the highest transactionID
   7.435 @@ -578,23 +578,23 @@
   7.436   *If NULL, then write requesting into the field and resume.
   7.437   */
   7.438  void
   7.439 -handleTransStart( SSRSemReq *semReq, VirtProcr *requestingPr,
   7.440 +handleTransStart( SSRSemReq *semReq, SlaveVP *requestingPr,
   7.441                    SSRSemEnv *semEnv )
   7.442   { SSRSemData *semData;
   7.443     TransListElem *nextTransElem;
   7.444  
   7.445 -   DEBUG1(dbgRqstHdlr,"TransStart request from processor %d\n",requestingPr->procrID)
   7.446 +   DEBUG1(dbgRqstHdlr,"TransStart request from processor %d\n",requestingPr->slaveID)
   7.447     
   7.448        //check ordering of entering transactions is correct
   7.449     semData = requestingPr->semanticData;
   7.450     if( semData->highestTransEntered > semReq->transID )
   7.451      {    //throw VMS exception, which shuts down VMS.
   7.452 -      VMS__throw_exception( "transID smaller than prev", requestingPr, NULL);
   7.453 +      VMS_PI__throw_exception( "transID smaller than prev", requestingPr, NULL);
   7.454      }
   7.455        //add this trans ID to the list of transactions entered -- check when
   7.456        // end a transaction
   7.457     semData->highestTransEntered = semReq->transID;
   7.458 -   nextTransElem = VMS__malloc( sizeof(TransListElem) );
   7.459 +   nextTransElem = VMS_PI__malloc( sizeof(TransListElem) );
   7.460     nextTransElem->transID = semReq->transID;
   7.461     nextTransElem->nextTrans = semData->lastTransEntered;
   7.462     semData->lastTransEntered = nextTransElem;
   7.463 @@ -606,7 +606,7 @@
   7.464     if( transStruc->VPCurrentlyExecuting == NULL )
   7.465      {
   7.466        transStruc->VPCurrentlyExecuting = requestingPr;
   7.467 -      resume_procr( requestingPr, semEnv );
   7.468 +      resume_slaveVP( requestingPr, semEnv );
   7.469      }
   7.470     else
   7.471      {    //note, might make future things cleaner if save request with VP and
   7.472 @@ -631,20 +631,20 @@
   7.473   * resume both.
   7.474   */
   7.475  void
   7.476 -handleTransEnd(SSRSemReq *semReq, VirtProcr *requestingPr, SSRSemEnv *semEnv)
   7.477 +handleTransEnd(SSRSemReq *semReq, SlaveVP *requestingPr, SSRSemEnv *semEnv)
   7.478   { SSRSemData    *semData;
   7.479 -   VirtProcr     *waitingPr;
   7.480 +   SlaveVP     *waitingPr;
   7.481     SSRTrans      *transStruc;
   7.482     TransListElem *lastTrans;
   7.483     
   7.484 -   DEBUG1(dbgRqstHdlr,"TransEnd request from processor %d\n",requestingPr->procrID)
   7.485 +   DEBUG1(dbgRqstHdlr,"TransEnd request from processor %d\n",requestingPr->slaveID)
   7.486     
   7.487     transStruc = &(semEnv->transactionStrucs[ semReq->transID ]);
   7.488  
   7.489        //make sure transaction ended in same VP as started it.
   7.490     if( transStruc->VPCurrentlyExecuting != requestingPr )
   7.491      {
   7.492 -      VMS__throw_exception( "trans ended in diff VP", requestingPr, NULL );
   7.493 +      VMS_PI__throw_exception( "trans ended in diff VP", requestingPr, NULL );
   7.494      }
   7.495  
   7.496        //make sure nesting is correct -- last ID entered should == this ID
   7.497 @@ -652,7 +652,7 @@
   7.498     lastTrans = semData->lastTransEntered;
   7.499     if( lastTrans->transID != semReq->transID )
   7.500      {
   7.501 -      VMS__throw_exception( "trans incorrectly nested", requestingPr, NULL );
   7.502 +      VMS_PI__throw_exception( "trans incorrectly nested", requestingPr, NULL );
   7.503      }
   7.504  
   7.505     semData->lastTransEntered = semData->lastTransEntered->nextTrans;
   7.506 @@ -662,7 +662,7 @@
   7.507     transStruc->VPCurrentlyExecuting = waitingPr;
   7.508  
   7.509     if( waitingPr != NULL )
   7.510 -      resume_procr( waitingPr, semEnv );
   7.511 +      resume_slaveVP( waitingPr, semEnv );
   7.512  
   7.513 -   resume_procr( requestingPr, semEnv );
   7.514 +   resume_slaveVP( requestingPr, semEnv );
   7.515   }
     8.1 --- a/SSR_Request_Handlers.h	Fri Mar 09 19:01:21 2012 +0100
     8.2 +++ b/SSR_Request_Handlers.h	Fri Mar 09 22:28:08 2012 -0800
     8.3 @@ -29,27 +29,27 @@
     8.4  inline void
     8.5  handleTransferOut( SSRSemReq *semReq, SSRSemEnv *semEnv);
     8.6  inline void
     8.7 -handleMalloc( SSRSemReq *semReq, VirtProcr *requestingPr, SSRSemEnv *semEnv);
     8.8 +handleMalloc( SSRSemReq *semReq, SlaveVP *requestingSlv, SSRSemEnv *semEnv);
     8.9  inline void
    8.10 -handleFree( SSRSemReq *semReq, VirtProcr *requestingPr, SSRSemEnv *semEnv );
    8.11 +handleFree( SSRSemReq *semReq, SlaveVP *requestingSlv, SSRSemEnv *semEnv );
    8.12  inline void
    8.13 -handleTransEnd(SSRSemReq *semReq, VirtProcr *requestingPr, SSRSemEnv*semEnv);
    8.14 +handleTransEnd(SSRSemReq *semReq, SlaveVP *requestingSlv, SSRSemEnv*semEnv);
    8.15  inline void
    8.16 -handleTransStart( SSRSemReq *semReq, VirtProcr *requestingPr,
    8.17 +handleTransStart( SSRSemReq *semReq, SlaveVP *requestingSlv,
    8.18                    SSRSemEnv *semEnv );
    8.19  inline void
    8.20 -handleAtomic( SSRSemReq *semReq, VirtProcr *requestingPr, SSRSemEnv *semEnv);
    8.21 +handleAtomic( SSRSemReq *semReq, SlaveVP *requestingSlv, SSRSemEnv *semEnv);
    8.22  inline void
    8.23 -handleStartFnSingleton( SSRSemReq *semReq, VirtProcr *reqstingPr,
    8.24 +handleStartFnSingleton( SSRSemReq *semReq, SlaveVP *reqstingSlv,
    8.25                        SSRSemEnv *semEnv );
    8.26  inline void
    8.27 -handleEndFnSingleton( SSRSemReq *semReq, VirtProcr *requestingPr,
    8.28 +handleEndFnSingleton( SSRSemReq *semReq, SlaveVP *requestingSlv,
    8.29                      SSRSemEnv *semEnv );
    8.30  inline void
    8.31 -handleStartDataSingleton( SSRSemReq *semReq, VirtProcr *reqstingPr,
    8.32 +handleStartDataSingleton( SSRSemReq *semReq, SlaveVP *reqstingSlv,
    8.33                        SSRSemEnv *semEnv );
    8.34  inline void
    8.35 -handleEndDataSingleton( SSRSemReq *semReq, VirtProcr *requestingPr,
    8.36 +handleEndDataSingleton( SSRSemReq *semReq, SlaveVP *requestingSlv,
    8.37                      SSRSemEnv *semEnv );
    8.38  
    8.39  #endif	/* _SSR_REQ_H */
     9.1 --- a/SSR_lib.c	Fri Mar 09 19:01:21 2012 +0100
     9.2 +++ b/SSR_lib.c	Fri Mar 09 22:28:08 2012 -0800
     9.3 @@ -91,11 +91,11 @@
     9.4   * any of the data reachable from initData passed in to here
     9.5   */
     9.6  void
     9.7 -SSR__create_seed_procr_and_do_work( VirtProcrFnPtr fnPtr, void *initData )
     9.8 +SSR__create_seed_procr_and_do_work( TopLevelFnPtr fnPtr, void *initData )
     9.9   { SSRSemEnv *semEnv;
    9.10 -   VirtProcr *seedPr;
    9.11 +   SlaveVP *seedPr;
    9.12  
    9.13 -   #ifdef SEQUENTIAL
    9.14 +   #ifdef DEBUG__TURN_ON_SEQUENTIAL_MODE
    9.15     SSR__init_Seq();  //debug sequential exe
    9.16     #else
    9.17     SSR__init();      //normal multi-thd
    9.18 @@ -107,12 +107,12 @@
    9.19     seedPr = SSR__create_procr_helper( fnPtr, initData,
    9.20                                        semEnv, semEnv->nextCoreToGetNewPr++ );
    9.21  
    9.22 -   resume_procr( seedPr, semEnv );
    9.23 +   resume_slaveVP( seedPr, semEnv );
    9.24     
    9.25 -   #ifdef SEQUENTIAL
    9.26 -   VMS__start_the_work_then_wait_until_done_Seq();  //debug sequential exe
    9.27 +   #ifdef DEBUG__TURN_ON_SEQUENTIAL_MODE
    9.28 +   VMS_SS__start_the_work_then_wait_until_done_Seq();  //debug sequential exe
    9.29     #else
    9.30 -   VMS__start_the_work_then_wait_until_done();      //normal multi-thd
    9.31 +   VMS_SS__start_the_work_then_wait_until_done();      //normal multi-thd
    9.32     #endif
    9.33  
    9.34     SSR__cleanup_after_shutdown();
    9.35 @@ -175,18 +175,18 @@
    9.36  void
    9.37  SSR__init()
    9.38   {
    9.39 -   VMS__init();
    9.40 +   VMS_SS__init();
    9.41        //masterEnv, a global var, now is partially set up by init_VMS
    9.42 -      // after this, have VMS__malloc and VMS__free available
    9.43 +      // after this, have VMS_int__malloc and VMS_int__free available
    9.44  
    9.45     SSR__init_Helper();
    9.46   }
    9.47  
    9.48 -#ifdef SEQUENTIAL
    9.49 +#ifdef DEBUG__TURN_ON_SEQUENTIAL_MODE
    9.50  void
    9.51  SSR__init_Seq()
    9.52   {
    9.53 -   VMS__init_Seq();
    9.54 +   VMS_SS__init_Seq();
    9.55     flushRegisters();
    9.56        //masterEnv, a global var, now is partially set up by init_VMS
    9.57  
    9.58 @@ -194,9 +194,9 @@
    9.59   }
    9.60  #endif
    9.61  
    9.62 -void idle_fn(void* data, VirtProcr *animatingPr){
    9.63 +void idle_fn(void* data, SlaveVP *animatingSlv){
    9.64      while(1){
    9.65 -        VMS__suspend_procr(animatingPr);
    9.66 +        VMS_int__suspend_slaveVP_and_send_req(animatingSlv);
    9.67      }
    9.68  }
    9.69  
    9.70 @@ -208,27 +208,27 @@
    9.71   
    9.72        //Hook up the semantic layer's plug-ins to the Master virt procr
    9.73     _VMSMasterEnv->requestHandler = &SSR__Request_Handler;
    9.74 -   _VMSMasterEnv->slaveScheduler = &SSR__schedule_virt_procr;
    9.75 -   #ifdef MEAS__PERF_COUNTERS
    9.76 +   _VMSMasterEnv->slaveAssigner = &SSR__schedule_slaveVP;
    9.77 +   #ifdef HOLISTIC__TURN_ON_PERF_COUNTERS
    9.78     _VMSMasterEnv->counterHandler = &SSR__counter_handler;
    9.79     #endif
    9.80  
    9.81        //create the semantic layer's environment (all its data) and add to
    9.82        // the master environment
    9.83 -   semanticEnv = VMS__malloc( sizeof( SSRSemEnv ) );
    9.84 +   semanticEnv = VMS_int__malloc( sizeof( SSRSemEnv ) );
    9.85     _VMSMasterEnv->semanticEnv = semanticEnv;
    9.86     
    9.87 -   #ifdef MEAS__PERF_COUNTERS
    9.88 +   #ifdef HOLISTIC__TURN_ON_PERF_COUNTERS
    9.89     SSR__init_counter_data_structs();
    9.90     #endif
    9.91     for(i=0;i<NUM_CORES;++i){
    9.92         for(j=0;j<NUM_SCHED_SLOTS;++j){
    9.93 -           semanticEnv->idlePr[i][j] = VMS__create_procr(&idle_fn,NULL);
    9.94 +           semanticEnv->idlePr[i][j] = VMS_int__create_slaveVP(&idle_fn,NULL);
    9.95             semanticEnv->idlePr[i][j]->coreAnimatedBy = i;
    9.96         }
    9.97     }
    9.98  
    9.99 -   #ifdef OBSERVE_UCC
   9.100 +   #ifdef HOLISTIC__TURN_ON_OBSERVE_UCC
   9.101     semanticEnv->unitList = makeListOfArrays(sizeof(Unit),128);
   9.102     semanticEnv->ctlDependenciesList = makeListOfArrays(sizeof(Dependency),128);
   9.103     semanticEnv->commDependenciesList = makeListOfArrays(sizeof(Dependency),128);
   9.104 @@ -243,19 +243,19 @@
   9.105        // and so forth
   9.106        //TODO: add hash tables for pairing sends with receives, and
   9.107        // initialize the data ownership system
   9.108 -   readyVPQs = VMS__malloc( NUM_CORES * sizeof(PrivQueueStruc *) );
   9.109 +   readyVPQs = VMS_int__malloc( NUM_CORES * sizeof(PrivQueueStruc *) );
   9.110  
   9.111     for( coreIdx = 0; coreIdx < NUM_CORES; coreIdx++ )
   9.112      {
   9.113 -      readyVPQs[ coreIdx ] = makeVMSPrivQ();
   9.114 +      readyVPQs[ coreIdx ] = makeVMSQ();
   9.115      }
   9.116     
   9.117     semanticEnv->readyVPQs = readyVPQs;
   9.118     
   9.119     semanticEnv->nextCoreToGetNewPr = 0;
   9.120 -   semanticEnv->numVirtPr = 0;
   9.121 +   semanticEnv->numSlaveVP = 0;
   9.122     
   9.123 -   semanticEnv->commHashTbl  = makeHashTable( 1<<16, &VMS__free );//start big
   9.124 +   semanticEnv->commHashTbl  = makeHashTable( 1<<16, &VMS_int__free );//start big
   9.125  
   9.126     //TODO: bug -- turn these arrays into dyn arrays to eliminate limit
   9.127     //semanticEnv->singletonHasBeenExecutedFlags = makeDynArrayInfo( );
   9.128 @@ -265,13 +265,13 @@
   9.129        semanticEnv->fnSingletons[i].endInstrAddr      = NULL;
   9.130        semanticEnv->fnSingletons[i].hasBeenStarted    = FALSE;
   9.131        semanticEnv->fnSingletons[i].hasFinished       = FALSE;
   9.132 -      semanticEnv->fnSingletons[i].waitQ             = makeVMSPrivQ();
   9.133 -      semanticEnv->transactionStrucs[i].waitingVPQ   = makeVMSPrivQ();
   9.134 +      semanticEnv->fnSingletons[i].waitQ             = makeVMSQ();
   9.135 +      semanticEnv->transactionStrucs[i].waitingVPQ   = makeVMSQ();
   9.136      }
   9.137   }
   9.138  
   9.139  
   9.140 -/*Frees any memory allocated by SSR__init() then calls VMS__shutdown
   9.141 +/*Frees any memory allocated by SSR__init() then calls VMS_int__shutdown
   9.142   */
   9.143  void
   9.144  SSR__cleanup_after_shutdown()
   9.145 @@ -279,7 +279,7 @@
   9.146     
   9.147     semanticEnv = _VMSMasterEnv->semanticEnv;
   9.148  
   9.149 -   #ifdef OBSERVE_UCC
   9.150 +   #ifdef HOLISTIC__TURN_ON_OBSERVE_UCC
   9.151     //UCC
   9.152     FILE* output;
   9.153     int n;
   9.154 @@ -358,7 +358,7 @@
   9.155     freeListOfArrays(semanticEnv->dynDependenciesList);
   9.156     
   9.157     #endif
   9.158 -#ifdef MEAS__PERF_COUNTERS    
   9.159 +#ifdef HOLISTIC__TURN_ON_PERF_COUNTERS    
   9.160      for(n=0;n<255;n++)
   9.161      {
   9.162          sprintf(filename, "./counters/Counters.%d.csv",n);
   9.163 @@ -394,15 +394,15 @@
   9.164  
   9.165     for( coreIdx = 0; coreIdx < NUM_CORES; coreIdx++ )
   9.166      {
   9.167 -      VMS__free( semanticEnv->readyVPQs[coreIdx]->startOfData );
   9.168 -      VMS__free( semanticEnv->readyVPQs[coreIdx] );
   9.169 +      VMS_int__free( semanticEnv->readyVPQs[coreIdx]->startOfData );
   9.170 +      VMS_int__free( semanticEnv->readyVPQs[coreIdx] );
   9.171      }
   9.172 -   VMS__free( semanticEnv->readyVPQs );
   9.173 +   VMS_int__free( semanticEnv->readyVPQs );
   9.174     
   9.175     freeHashTable( semanticEnv->commHashTbl );
   9.176 -   VMS__free( _VMSMasterEnv->semanticEnv );
   9.177 +   VMS_int__free( _VMSMasterEnv->semanticEnv );
   9.178   */
   9.179 -   VMS__cleanup_at_end_of_shutdown();
   9.180 +   VMS_SS__cleanup_at_end_of_shutdown();
   9.181   }
   9.182  
   9.183  
   9.184 @@ -410,9 +410,9 @@
   9.185  
   9.186  /*
   9.187   */
   9.188 -  VirtProcr *
   9.189 -SSR__create_procr_with( VirtProcrFnPtr fnPtr,   void *initData,
   9.190 -                        VirtProcr *creatingPr )
   9.191 +  SlaveVP *
   9.192 +SSR__create_procr_with( TopLevelFnPtr fnPtr,   void *initData,
   9.193 +                        SlaveVP *creatingPr )
   9.194   { SSRSemReq reqData;
   9.195  
   9.196        //the semantic request data is on the stack and disappears when this
   9.197 @@ -424,14 +424,14 @@
   9.198     reqData.initData           = initData;
   9.199     reqData.sendPr             = creatingPr;
   9.200  
   9.201 -   VMS__send_create_procr_req( &reqData, creatingPr );
   9.202 +   VMS_WL__send_create_slaveVP_req( &reqData, creatingPr );
   9.203  
   9.204     return creatingPr->dataRetFromReq;
   9.205   }
   9.206  
   9.207 -  VirtProcr *
   9.208 -SSR__create_procr_with_affinity( VirtProcrFnPtr fnPtr, void *initData,
   9.209 -                        VirtProcr *creatingPr,  int32  coreToScheduleOnto )
   9.210 +  SlaveVP *
   9.211 +SSR__create_procr_with_affinity( TopLevelFnPtr fnPtr, void *initData,
   9.212 +                        SlaveVP *creatingPr,  int32  coreToScheduleOnto )
   9.213   { SSRSemReq  reqData;
   9.214  
   9.215        //the semantic request data is on the stack and disappears when this
   9.216 @@ -443,30 +443,30 @@
   9.217     reqData.initData           = initData;
   9.218     reqData.sendPr             = creatingPr;
   9.219  
   9.220 -   VMS__send_create_procr_req( &reqData, creatingPr );
   9.221 +   VMS_WL__send_create_slaveVP_req( &reqData, creatingPr );
   9.222  
   9.223     return creatingPr->dataRetFromReq;
   9.224   }
   9.225  
   9.226  
   9.227    void
   9.228 -SSR__dissipate_procr( VirtProcr *procrToDissipate )
   9.229 +SSR__dissipate_procr( SlaveVP *procrToDissipate )
   9.230   {
   9.231 -   VMS__send_dissipate_req( procrToDissipate );
   9.232 +   VMS_WL__send_dissipate_req( procrToDissipate );
   9.233   }
   9.234  
   9.235  
   9.236  //===========================================================================
   9.237  
   9.238  void *
   9.239 -SSR__malloc_to( int32 sizeToMalloc, VirtProcr *owningPr )
   9.240 +SSR__malloc_to( int32 sizeToMalloc, SlaveVP *owningPr )
   9.241   { SSRSemReq reqData;
   9.242  
   9.243     reqData.reqType      = malloc_req;
   9.244     reqData.sendPr       = owningPr;
   9.245     reqData.sizeToMalloc = sizeToMalloc;
   9.246  
   9.247 -   VMS__send_sem_request( &reqData, owningPr );
   9.248 +   VMS_WL__send_sem_request( &reqData, owningPr );
   9.249  
   9.250     return owningPr->dataRetFromReq;
   9.251   }
   9.252 @@ -475,20 +475,20 @@
   9.253  /*Sends request to Master, which does the work of freeing
   9.254   */
   9.255  void
   9.256 -SSR__free( void *ptrToFree, VirtProcr *owningPr )
   9.257 +SSR__free( void *ptrToFree, SlaveVP *owningPr )
   9.258   { SSRSemReq reqData;
   9.259  
   9.260     reqData.reqType      = free_req;
   9.261     reqData.sendPr       = owningPr;
   9.262     reqData.ptrToFree    = ptrToFree;
   9.263  
   9.264 -   VMS__send_sem_request( &reqData, owningPr );
   9.265 +   VMS_WL__send_sem_request( &reqData, owningPr );
   9.266   }
   9.267  
   9.268  
   9.269  void
   9.270 -SSR__transfer_ownership_of_from_to( void *data, VirtProcr *oldOwnerPr,
   9.271 -                                                  VirtProcr *newOwnerPr )
   9.272 +SSR__transfer_ownership_of_from_to( void *data, SlaveVP *oldOwnerSlv,
   9.273 +                                                  SlaveVP *newOwnerPr )
   9.274   {
   9.275     //TODO: put in the ownership system that automatically frees when no
   9.276     // owners of data left -- will need keeper for keeping data around when
   9.277 @@ -497,14 +497,14 @@
   9.278  
   9.279  
   9.280  void
   9.281 -SSR__add_ownership_by_to( VirtProcr *newOwnerPr, void *data )
   9.282 +SSR__add_ownership_by_to( SlaveVP *newOwnerSlv, void *data )
   9.283   {
   9.284  
   9.285   }
   9.286  
   9.287  
   9.288  void
   9.289 -SSR__remove_ownership_by_from( VirtProcr *loserPr, void *dataLosing )
   9.290 +SSR__remove_ownership_by_from( SlaveVP *loserSlv, void *dataLosing )
   9.291   {
   9.292  
   9.293   }
   9.294 @@ -536,8 +536,8 @@
   9.295  //===========================================================================
   9.296  
   9.297  void
   9.298 -SSR__send_of_type_to( VirtProcr *sendPr, void *msg, const int type,
   9.299 -                        VirtProcr *receivePr)
   9.300 +SSR__send_of_type_to( SlaveVP *sendPr, void *msg, const int type,
   9.301 +                        SlaveVP *receivePr)
   9.302   { SSRSemReq  reqData;
   9.303  
   9.304     reqData.receivePr = receivePr;
   9.305 @@ -551,14 +551,14 @@
   9.306        // as a potential in an entry in the hash table, when this receive msg
   9.307        // gets paired to a send, the ownership gets added to the receivePr --
   9.308        // the next work-unit in the receivePr's trace will have ownership.
   9.309 -   VMS__send_sem_request( &reqData, sendPr );
   9.310 +   VMS_WL__send_sem_request( &reqData, sendPr );
   9.311  
   9.312        //When come back from suspend, no longer own data reachable from msg
   9.313        //TODO: release ownership here
   9.314   }
   9.315  
   9.316  void
   9.317 -SSR__send_from_to( void *msg, VirtProcr *sendPr, VirtProcr *receivePr )
   9.318 +SSR__send_from_to( void *msg, SlaveVP *sendPr, SlaveVP *receivePr )
   9.319   { SSRSemReq  reqData;
   9.320  
   9.321        //hash on the receiver, 'cause always know it, but sometimes want to
   9.322 @@ -570,20 +570,20 @@
   9.323     reqData.msg       = msg;
   9.324     reqData.nextReqInHashEntry = NULL;
   9.325  
   9.326 -   VMS__send_sem_request( &reqData, sendPr );
   9.327 +   VMS_WL__send_sem_request( &reqData, sendPr );
   9.328   }
   9.329  
   9.330  
   9.331  //===========================================================================
   9.332  
   9.333  void *
   9.334 -SSR__receive_any_to( VirtProcr *receivePr )
   9.335 +SSR__receive_any_to( SlaveVP *receivePr )
   9.336   {
   9.337  
   9.338   }
   9.339  
   9.340  void *
   9.341 -SSR__receive_type_to( const int type, VirtProcr *receivePr )
   9.342 +SSR__receive_type_to( const int type, SlaveVP *receivePr )
   9.343   { 
   9.344     SSRSemReq  reqData;
   9.345  
   9.346 @@ -592,7 +592,7 @@
   9.347     reqData.msgType   = type;
   9.348     reqData.nextReqInHashEntry = NULL;
   9.349  
   9.350 -   VMS__send_sem_request( &reqData, receivePr );
   9.351 +   VMS_WL__send_sem_request( &reqData, receivePr );
   9.352     
   9.353     return receivePr->dataRetFromReq;
   9.354   }
   9.355 @@ -606,7 +606,7 @@
   9.356   * loc structure can only be modified by itself.
   9.357   */
   9.358  void *
   9.359 -SSR__receive_from_to( VirtProcr *sendPr, VirtProcr *receivePr )
   9.360 +SSR__receive_from_to( SlaveVP *sendPr, SlaveVP *receivePr )
   9.361   { SSRSemReq  reqData;
   9.362  
   9.363        //hash on the receiver, 'cause always know it, but sometimes want to
   9.364 @@ -617,7 +617,7 @@
   9.365     reqData.reqType   = receive_from_to;
   9.366     reqData.nextReqInHashEntry = NULL;
   9.367  
   9.368 -   VMS__send_sem_request( &reqData, receivePr );
   9.369 +   VMS_WL__send_sem_request( &reqData, receivePr );
   9.370  
   9.371     return receivePr->dataRetFromReq;
   9.372   }
   9.373 @@ -643,7 +643,7 @@
   9.374   * semantic environment.
   9.375   */
   9.376  void
   9.377 -SSR__start_fn_singleton( int32 singletonID,   VirtProcr *animPr )
   9.378 +SSR__start_fn_singleton( int32 singletonID,   SlaveVP *animPr )
   9.379   {
   9.380     SSRSemReq  reqData;
   9.381  
   9.382 @@ -651,10 +651,10 @@
   9.383     reqData.reqType     = singleton_fn_start;
   9.384     reqData.singletonID = singletonID;
   9.385  
   9.386 -   VMS__send_sem_request( &reqData, animPr );
   9.387 +   VMS_WL__send_sem_request( &reqData, animPr );
   9.388     if( animPr->dataRetFromReq ) //will be 0 or addr of label in end singleton
   9.389      {
   9.390 -       SSRSemEnv *semEnv = VMS__give_sem_env_for( animPr );
   9.391 +       SSRSemEnv *semEnv = VMS_int__give_sem_env_for( animPr );
   9.392         asm_write_ret_from_singleton(&(semEnv->fnSingletons[ singletonID]));
   9.393      }
   9.394   }
   9.395 @@ -664,7 +664,7 @@
   9.396   * location.
   9.397   */
   9.398  void
   9.399 -SSR__start_data_singleton( SSRSingleton **singletonAddr,  VirtProcr *animPr )
   9.400 +SSR__start_data_singleton( SSRSingleton **singletonAddr,  SlaveVP *animPr )
   9.401   {
   9.402     SSRSemReq  reqData;
   9.403  
   9.404 @@ -674,7 +674,7 @@
   9.405     reqData.reqType          = singleton_data_start;
   9.406     reqData.singletonPtrAddr = singletonAddr;
   9.407  
   9.408 -   VMS__send_sem_request( &reqData, animPr );
   9.409 +   VMS_WL__send_sem_request( &reqData, animPr );
   9.410     if( animPr->dataRetFromReq ) //either 0 or end singleton's return addr
   9.411      {    //Assembly code changes the return addr on the stack to the one
   9.412           // saved into the singleton by the end-singleton-fn
   9.413 @@ -693,26 +693,26 @@
   9.414   * inside is shared by all invocations of a given singleton ID.
   9.415   */
   9.416  void
   9.417 -SSR__end_fn_singleton( int32 singletonID, VirtProcr *animPr )
   9.418 +SSR__end_fn_singleton( int32 singletonID, SlaveVP *animPr )
   9.419   {
   9.420     SSRSemReq  reqData;
   9.421  
   9.422        //don't need this addr until after at least one singleton has reached
   9.423        // this function
   9.424 -   SSRSemEnv *semEnv = VMS__give_sem_env_for( animPr );
   9.425 +   SSRSemEnv *semEnv = VMS_int__give_sem_env_for( animPr );
   9.426     asm_write_ret_from_singleton(&(semEnv->fnSingletons[ singletonID]));
   9.427  
   9.428     reqData.reqType     = singleton_fn_end;
   9.429     reqData.singletonID = singletonID;
   9.430  
   9.431 -   VMS__send_sem_request( &reqData, animPr );
   9.432 +   VMS_WL__send_sem_request( &reqData, animPr );
   9.433  
   9.434  EndSingletonInstrAddr:
   9.435     return;
   9.436   }
   9.437  
   9.438  void
   9.439 -SSR__end_data_singleton(  SSRSingleton **singletonPtrAddr, VirtProcr *animPr )
   9.440 +SSR__end_data_singleton(  SSRSingleton **singletonPtrAddr, SlaveVP *animPr )
   9.441   {
   9.442     SSRSemReq  reqData;
   9.443  
   9.444 @@ -730,7 +730,7 @@
   9.445     reqData.reqType          = singleton_data_end;
   9.446     reqData.singletonPtrAddr = singletonPtrAddr;
   9.447  
   9.448 -   VMS__send_sem_request( &reqData, animPr );
   9.449 +   VMS_WL__send_sem_request( &reqData, animPr );
   9.450   }
   9.451  
   9.452  /*This executes the function in the masterVP, so it executes in isolation
   9.453 @@ -745,7 +745,7 @@
   9.454   */
   9.455  void
   9.456  SSR__animate_short_fn_in_isolation( PtrToAtomicFn ptrToFnToExecInMaster,
   9.457 -                                    void *data, VirtProcr *animPr )
   9.458 +                                    void *data, SlaveVP *animPr )
   9.459   {
   9.460     SSRSemReq  reqData;
   9.461  
   9.462 @@ -754,7 +754,7 @@
   9.463     reqData.fnToExecInMaster = ptrToFnToExecInMaster;
   9.464     reqData.dataForFn        = data;
   9.465  
   9.466 -   VMS__send_sem_request( &reqData, animPr );
   9.467 +   VMS_WL__send_sem_request( &reqData, animPr );
   9.468   }
   9.469  
   9.470  
   9.471 @@ -772,7 +772,7 @@
   9.472   *If NULL, then write requesting into the field and resume.
   9.473   */
   9.474  void
   9.475 -SSR__start_transaction( int32 transactionID, VirtProcr *animPr )
   9.476 +SSR__start_transaction( int32 transactionID, SlaveVP *animPr )
   9.477   {
   9.478     SSRSemReq  reqData;
   9.479  
   9.480 @@ -781,7 +781,7 @@
   9.481     reqData.reqType     = trans_start;
   9.482     reqData.transID     = transactionID;
   9.483  
   9.484 -   VMS__send_sem_request( &reqData, animPr );
   9.485 +   VMS_WL__send_sem_request( &reqData, animPr );
   9.486   }
   9.487  
   9.488  /*This suspends to the master, then uses transactionID as index into an
   9.489 @@ -794,7 +794,7 @@
   9.490   * resumes both.
   9.491   */
   9.492  void
   9.493 -SSR__end_transaction( int32 transactionID, VirtProcr *animPr )
   9.494 +SSR__end_transaction( int32 transactionID, SlaveVP *animPr )
   9.495   {
   9.496     SSRSemReq  reqData;
   9.497  
   9.498 @@ -803,5 +803,5 @@
   9.499     reqData.reqType     = trans_end;
   9.500     reqData.transID     = transactionID;
   9.501  
   9.502 -   VMS__send_sem_request( &reqData, animPr );
   9.503 +   VMS_WL__send_sem_request( &reqData, animPr );
   9.504   }
    10.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    10.2 +++ b/__brch__Holistic_Model	Fri Mar 09 22:28:08 2012 -0800
    10.3 @@ -0,0 +1,4 @@
    10.4 +This branch is for the project structure defined Jan 2012. The #includes reflect this directory structure.
    10.5 +
    10.6 +More importantly, the MC_shared version of VMS requires a separate malloc implemented by VMS code, so this branch has modified the library to use the VMS-specific malloc.
    10.7 +
    11.1 --- a/dependency.c	Fri Mar 09 19:01:21 2012 +0100
    11.2 +++ b/dependency.c	Fri Mar 09 22:28:08 2012 -0800
    11.3 @@ -2,7 +2,7 @@
    11.4  #include "../VMS_impl/VMS.h"
    11.5  
    11.6  Dependency* new_dependency(int from_vp, int from_task, int to_vp, int to_task){
    11.7 -    Dependency* newDep = (Dependency*) VMS__malloc(sizeof(Dependency));
    11.8 +    Dependency* newDep = (Dependency*) VMS_int__malloc(sizeof(Dependency));
    11.9      if (newDep!=NULL){
   11.10          newDep->from_vp = from_vp;
   11.11          newDep->from_task = from_task;
   11.12 @@ -13,7 +13,7 @@
   11.13  }
   11.14  
   11.15  NtoN* new_NtoN(int id){
   11.16 -    NtoN* newn = (NtoN*) VMS__malloc(sizeof(NtoN));
   11.17 +    NtoN* newn = (NtoN*) VMS_int__malloc(sizeof(NtoN));
   11.18      newn->id = id;
   11.19      newn->senders = makeListOfArrays(sizeof(Unit), 64);
   11.20      newn->receivers = makeListOfArrays(sizeof(Unit), 64);
    12.1 --- a/dependency.h	Fri Mar 09 19:01:21 2012 +0100
    12.2 +++ b/dependency.h	Fri Mar 09 22:28:08 2012 -0800
    12.3 @@ -10,7 +10,7 @@
    12.4  
    12.5  
    12.6  #include <stdio.h>
    12.7 -#include "../../C_Libraries/ListOfArrays/ListOfArrays.h"
    12.8 +#include "ListOfArrays/ListOfArrays.h"
    12.9  
   12.10  typedef struct {
   12.11      int vp;