changeset 28:8b9e4c333fe6

Sequential Version -- first compile succeeded
author Me
date Mon, 26 Jul 2010 16:42:59 -0700
parents 5a2068cbc28b
children 0e008278fe3c
files CoreLoop.c CoreLoop_Seq.c VMS.c VMS.h VMS_Seq.c
diffstat 5 files changed, 148 insertions(+), 530 deletions(-) [+]
line diff
     1.1 --- a/CoreLoop.c	Mon Jul 26 15:29:26 2010 -0700
     1.2 +++ b/CoreLoop.c	Mon Jul 26 16:42:59 2010 -0700
     1.3 @@ -129,3 +129,83 @@
     1.4     CoreLoopEndPt:
     1.5     pthread_exit( NULL );
     1.6   }
     1.7 +
     1.8 +
     1.9 +
    1.10 +/*This sequential version is exactly the same as the threaded one, except it doesn't do the
    1.11 + * pin-threads part, nor the wait until setup complete part.
    1.12 + */
    1.13 +void *
    1.14 +coreLoop_Seq( void *paramsIn )
    1.15 + {
    1.16 +   VirtProcr      *currPr;
    1.17 +   VMSQueueStruc  *workQ;
    1.18 +
    1.19 +
    1.20 +      //Save addr of "end core loop" label - jump to it to shut down coreloop
    1.21 +      //To get label addr in non-gcc compiler, can trick it by making a call
    1.22 +      // to a fn that does asm that pulls the "return"
    1.23 +      // addr off the stack and stores it in a pointed-to location.
    1.24 +   _VMSMasterEnv->coreLoopShutDownPt = &&CoreLoopEndPt;
    1.25 +
    1.26 +      //Core loop has no values live upon CoreLoopStartPt except workQ
    1.27 +      // every value in the code is defined by a statement in core loop,
    1.28 +      // after the start point -- with the one exception of _VMSWorkQ
    1.29 +
    1.30 +
    1.31 +      // Get to work!  --  virt procr jumps back here when done or suspends
    1.32 +      //Note, have to restore the frame-pointer before jump to here, to get
    1.33 +      // this code to work right (workQ and so forth are frame-ptr relative)
    1.34 +CoreLoopStartPt:
    1.35 +
    1.36 +      //Get virtual processor from queue
    1.37 +      //_VMSWorkQ must be a global, static volatile var, so not kept in reg,
    1.38 +      // which forces reloading the pointer after each jmp to this point
    1.39 +   workQ  = _VMSWorkQ;
    1.40 +   currPr = (VirtProcr *) readVMSQ( workQ );
    1.41 +
    1.42 +//   printf("core %d loop procr addr: %d\n", coreLoopThdParams->coreNum, \
    1.43 +//       (int)currPr ); fflush(stdin);
    1.44 +   currPr->coreLoopStartPt = &&CoreLoopStartPt;  //to be sure.(GCC specific)
    1.45 +
    1.46 +//   currPr->coreAnimatedBy  = coreLoopThdParams->coreNum;
    1.47 +
    1.48 +      //switch to virt procr's stack and frame ptr then jump to virt procr
    1.49 +   void *stackPtr, *framePtr, *jmpPt, *coreLoopFramePtrAddr, \
    1.50 +        *coreLoopStackPtrAddr;
    1.51 +
    1.52 +   stackPtr = currPr->stackPtr;
    1.53 +   framePtr = currPr->framePtr;
    1.54 +   jmpPt    = currPr->nextInstrPt;
    1.55 +   coreLoopFramePtrAddr = &(currPr->coreLoopFramePtr);
    1.56 +   coreLoopStackPtrAddr = &(currPr->coreLoopStackPtr);
    1.57 +
    1.58 +      //Save the core loop's stack and frame pointers into virt procr struct
    1.59 +      // then switch to stack ptr and frame ptr of virt procr & jmp to it
    1.60 +      //This was a pain to get right because GCC converts the "(jmpPt)" to
    1.61 +      // frame-relative mem-op -- so generated machine code first changed the
    1.62 +      // frame pointer, then tried to jump to an addr stored on stack, which
    1.63 +      // it accessed as an offset from frame-ptr!  (wrong frame-ptr now)
    1.64 +      //Explicitly loading into eax before changing frame-ptr fixed it
    1.65 +      //Also, it turns "(currPr->coreLoopFramePtr)" into a temporary on the
    1.66 +      // stack, so "movl %%ebp, %0" saves to the temp, NOT the data-struc!
    1.67 +   asm volatile("movl %0, %%eax;      \
    1.68 +                 movl %%esp, (%%eax); \
    1.69 +                 movl %1, %%eax;      \
    1.70 +                 movl %%ebp, (%%eax); \
    1.71 +                 movl %2, %%eax;      \
    1.72 +                 movl %3, %%esp;      \
    1.73 +                 movl %4, %%ebp;      \
    1.74 +                 jmp  *%%eax"         \
    1.75 +   /* outputs */ : "=g"(coreLoopStackPtrAddr),                 \
    1.76 +                   "=g"(coreLoopFramePtrAddr)                  \
    1.77 +   /* inputs  */ : "g" (jmpPt), "g" (stackPtr), "g" (framePtr) \
    1.78 +   /* clobber */ : "memory", "%eax", "%ebx", "%ecx", "%edx", "%edi", "%esi" \
    1.79 +                );
    1.80 +
    1.81 +   //========================================================================
    1.82 +
    1.83 +      //jmp to here when want to shut down the VMS system
    1.84 +   CoreLoopEndPt:
    1.85 +   return NULL;
    1.86 + }
     2.1 --- a/CoreLoop_Seq.c	Mon Jul 26 15:29:26 2010 -0700
     2.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     2.3 @@ -1,97 +0,0 @@
     2.4 -/*
     2.5 - * Copyright 2010  OpenSourceCodeStewardshipFoundation
     2.6 - *
     2.7 - * Licensed under BSD
     2.8 - */
     2.9 -
    2.10 -
    2.11 -#include "VMS.h"
    2.12 -#include "Queue_impl/BlockingQueue.h"
    2.13 -
    2.14 -#include <stdio.h>
    2.15 -#include <stdlib.h>
    2.16 -#include <time.h>
    2.17 -
    2.18 -
    2.19 -
    2.20 -/*This is the loop that runs in the PThread pinned to each core
    2.21 - * get work-unit struc from queue, 
    2.22 - * call function-ptr, passing it pointer to data
    2.23 - * transfer return value to slave's "requests" pointer
    2.24 - * write the slave's "Done" flag and repeat.
    2.25 - */
    2.26 -//pthread_create requires ptr to func that takes void * and returns void *
    2.27 -void *
    2.28 -coreLoop_Seq( void *paramsIn )
    2.29 - {   
    2.30 -   VirtProcr      *currPr;
    2.31 -   VMSQueueStruc  *workQ;
    2.32 -   
    2.33 -   
    2.34 -      //Save addr of "end core loop" label - jump to it to shut down coreloop
    2.35 -      //To get label addr in non-gcc compiler, can trick it by making a call
    2.36 -      // to a fn that does asm that pulls the "return"
    2.37 -      // addr off the stack and stores it in a pointed-to location.
    2.38 -   _VMSMasterEnv->coreLoopShutDownPt = &&CoreLoopEndPt;
    2.39 -   
    2.40 -      //Core loop has no values live upon CoreLoopStartPt except workQ
    2.41 -      // every value in the code is defined by a statement in core loop,
    2.42 -      // after the start point -- with the one exception of _VMSWorkQ
    2.43 - 
    2.44 -   
    2.45 -      // Get to work!  --  virt procr jumps back here when done or suspends
    2.46 -      //Note, have to restore the frame-pointer before jump to here, to get
    2.47 -      // this code to work right (workQ and so forth are frame-ptr relative)
    2.48 -CoreLoopStartPt:
    2.49 -   
    2.50 -      //Get virtual processor from queue
    2.51 -      //_VMSWorkQ must be a global, static volatile var, so not kept in reg,
    2.52 -      // which forces reloading the pointer after each jmp to this point
    2.53 -   workQ  = _VMSWorkQ;
    2.54 -   currPr = (VirtProcr *) readVMSQ( workQ );
    2.55 -
    2.56 -//   printf("core %d loop procr addr: %d\n", coreLoopThdParams->coreNum, \
    2.57 -//       (int)currPr ); fflush(stdin);
    2.58 -   currPr->coreLoopStartPt = &&CoreLoopStartPt;  //to be sure.(GCC specific)
    2.59 -   
    2.60 -   currPr->coreAnimatedBy  = coreLoopThdParams->coreNum;
    2.61 -
    2.62 -      //switch to virt procr's stack and frame ptr then jump to virt procr
    2.63 -   void *stackPtr, *framePtr, *jmpPt, *coreLoopFramePtrAddr, \
    2.64 -        *coreLoopStackPtrAddr;
    2.65 -   
    2.66 -   stackPtr = currPr->stackPtr;
    2.67 -   framePtr = currPr->framePtr;
    2.68 -   jmpPt    = currPr->nextInstrPt;
    2.69 -   coreLoopFramePtrAddr = &(currPr->coreLoopFramePtr);
    2.70 -   coreLoopStackPtrAddr = &(currPr->coreLoopStackPtr);
    2.71 -
    2.72 -      //Save the core loop's stack and frame pointers into virt procr struct
    2.73 -      // then switch to stack ptr and frame ptr of virt procr & jmp to it
    2.74 -      //This was a pain to get right because GCC converts the "(jmpPt)" to
    2.75 -      // frame-relative mem-op -- so generated machine code first changed the
    2.76 -      // frame pointer, then tried to jump to an addr stored on stack, which
    2.77 -      // it accessed as an offset from frame-ptr!  (wrong frame-ptr now)
    2.78 -      //Explicitly loading into eax before changing frame-ptr fixed it
    2.79 -      //Also, it turns "(currPr->coreLoopFramePtr)" into a temporary on the
    2.80 -      // stack, so "movl %%ebp, %0" saves to the temp, NOT the data-struc!
    2.81 -   asm volatile("movl %0, %%eax;      \
    2.82 -                 movl %%esp, (%%eax); \
    2.83 -                 movl %1, %%eax;      \
    2.84 -                 movl %%ebp, (%%eax); \
    2.85 -                 movl %2, %%eax;      \
    2.86 -                 movl %3, %%esp;      \
    2.87 -                 movl %4, %%ebp;      \
    2.88 -                 jmp  %%eax"          \
    2.89 -   /* outputs */ : "=g"(coreLoopStackPtrAddr),                 \
    2.90 -                   "=g"(coreLoopFramePtrAddr)                  \
    2.91 -   /* inputs  */ : "g" (jmpPt), "g" (stackPtr), "g" (framePtr) \
    2.92 -   /* clobber */ : "memory", "%eax", "%ebx", "%ecx", "%edx", "%edi", "%esi" \
    2.93 -                );
    2.94 -
    2.95 -   //========================================================================
    2.96 -
    2.97 -      //jmp to here when want to shut down the VMS system
    2.98 -   CoreLoopEndPt:
    2.99 -   return;
   2.100 - }
     3.1 --- a/VMS.c	Mon Jul 26 15:29:26 2010 -0700
     3.2 +++ b/VMS.c	Mon Jul 26 16:42:59 2010 -0700
     3.3 @@ -21,6 +21,12 @@
     3.4  void
     3.5  create_sched_slots( MasterEnv *masterEnv );
     3.6  
     3.7 +void
     3.8 +create_masterEnv();
     3.9 +
    3.10 +void
    3.11 +create_the_coreLoop_OS_threads();
    3.12 +
    3.13  pthread_mutex_t suspendLock = PTHREAD_MUTEX_INITIALIZER;
    3.14  pthread_cond_t  suspend_cond  = PTHREAD_COND_INITIALIZER;
    3.15  
    3.16 @@ -56,6 +62,21 @@
    3.17   */
    3.18  void
    3.19  VMS__init()
    3.20 + {
    3.21 +   create_masterEnv();
    3.22 +   create_the_coreLoop_OS_threads();
    3.23 + }
    3.24 +
    3.25 +/*To initialize the sequential version, just don't create the threads
    3.26 + */
    3.27 +void
    3.28 +VMS__init_Seq()
    3.29 + {
    3.30 +   create_masterEnv();
    3.31 + }
    3.32 +
    3.33 +void
    3.34 +create_masterEnv()
    3.35   { MasterEnv  *masterEnv;
    3.36     VMSQueueStruc *workQ;
    3.37  
    3.38 @@ -73,34 +94,12 @@
    3.39  
    3.40     masterEnv->stillRunning = FALSE;
    3.41     masterEnv->numToPrecede = NUM_CORES;
    3.42 -   
    3.43 +
    3.44        //First core loop to start up gets this, which will schedule seed Pr
    3.45        //TODO: debug: check address of masterVirtPr
    3.46     writeVMSQ( masterEnv->masterVirtPr, workQ );
    3.47  
    3.48 -   numProcrsCreated = 1;
    3.49 -
    3.50 -   //========================================================================
    3.51 -   //                      Create the Threads
    3.52 -   int coreIdx, retCode;
    3.53 -   
    3.54 -      //Need the threads to be created suspended, and wait for a signal
    3.55 -      // before proceeding -- gives time after creating to initialize other
    3.56 -      // stuff before the coreLoops set off.
    3.57 -   _VMSMasterEnv->setupComplete = 0;
    3.58 -
    3.59 -      //Make the threads that animate the core loops
    3.60 -   for( coreIdx=0; coreIdx < NUM_CORES; coreIdx++ )
    3.61 -    { coreLoopThdParams[coreIdx]          = malloc( sizeof(ThdParams) );
    3.62 -      coreLoopThdParams[coreIdx]->coreNum = coreIdx;
    3.63 -
    3.64 -      retCode =
    3.65 -      pthread_create( &(coreLoopThdHandles[coreIdx]), 
    3.66 -                        thdAttrs,
    3.67 -                       &coreLoop,
    3.68 -               (void *)(coreLoopThdParams[coreIdx]) );
    3.69 -      if(retCode){printf("ERROR creating thread: %d\n", retCode); exit(0);}
    3.70 -    }
    3.71 +   numProcrsCreated = 1;  //global counter for debugging
    3.72   }
    3.73  
    3.74  void
    3.75 @@ -124,6 +123,32 @@
    3.76   }
    3.77  
    3.78  
    3.79 +void
    3.80 +create_the_coreLoop_OS_threads()
    3.81 + {
    3.82 +   //========================================================================
    3.83 +   //                      Create the Threads
    3.84 +   int coreIdx, retCode;
    3.85 +
    3.86 +      //Need the threads to be created suspended, and wait for a signal
    3.87 +      // before proceeding -- gives time after creating to initialize other
    3.88 +      // stuff before the coreLoops set off.
    3.89 +   _VMSMasterEnv->setupComplete = 0;
    3.90 +
    3.91 +      //Make the threads that animate the core loops
    3.92 +   for( coreIdx=0; coreIdx < NUM_CORES; coreIdx++ )
    3.93 +    { coreLoopThdParams[coreIdx]          = malloc( sizeof(ThdParams) );
    3.94 +      coreLoopThdParams[coreIdx]->coreNum = coreIdx;
    3.95 +
    3.96 +      retCode =
    3.97 +      pthread_create( &(coreLoopThdHandles[coreIdx]),
    3.98 +                        thdAttrs,
    3.99 +                       &coreLoop,
   3.100 +               (void *)(coreLoopThdParams[coreIdx]) );
   3.101 +      if(retCode){printf("ERROR creating thread: %d\n", retCode); exit(0);}
   3.102 +    }
   3.103 + }
   3.104 +
   3.105  /*Semantic layer calls this when it want the system to start running..
   3.106   *
   3.107   *This starts the core loops running then waits for them to exit.
   3.108 @@ -168,6 +193,18 @@
   3.109        printf("\n Time startup to shutdown: %f\n", runTime); fflush( stdin );
   3.110   }
   3.111  
   3.112 +/*The only differences between the version with an OS thread pinned to each core
   3.113 + * and the sequential version of VMS are VMS__init_Seq, this, and coreLoop_Seq.
   3.114 + */
   3.115 +void
   3.116 +VMS__start_the_work_then_wait_until_done_Seq()
   3.117 + {
   3.118 +         //Instead of un-suspending threads, just call the one and only
   3.119 +         // core loop (sequential version), in the main thread.
   3.120 +      coreLoop_Seq( NULL );
   3.121 +
   3.122 + }
   3.123 +
   3.124  
   3.125  
   3.126  /*Create stack, then create __cdecl structure on it and put initialData and
     4.1 --- a/VMS.h	Mon Jul 26 15:29:26 2010 -0700
     4.2 +++ b/VMS.h	Mon Jul 26 16:42:59 2010 -0700
     4.3 @@ -127,6 +127,7 @@
     4.4  //==========================================================
     4.5  
     4.6  void * coreLoop( void *paramsIn );  //standard PThreads fn prototype
     4.7 +void * coreLoop_Seq( void *paramsIn );  //standard PThreads fn prototype
     4.8  void masterLoop( void *initData, VirtProcr *masterPr );
     4.9  
    4.10  
    4.11 @@ -150,8 +151,14 @@
    4.12  VMS__init();
    4.13  
    4.14  void
    4.15 +VMS__init_Seq();
    4.16 +
    4.17 +void
    4.18  VMS__start_the_work_then_wait_until_done();
    4.19  
    4.20 +void
    4.21 +VMS__start_the_work_then_wait_until_done_Seq();
    4.22 +
    4.23  VirtProcr *
    4.24  VMS__create_procr( VirtProcrFnPtr fnPtr, void *initialData );
    4.25  
     5.1 --- a/VMS_Seq.c	Mon Jul 26 15:29:26 2010 -0700
     5.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     5.3 @@ -1,409 +0,0 @@
     5.4 -/*
     5.5 - * Copyright 2010  OpenSourceCodeStewardshipFoundation
     5.6 - *
     5.7 - * Licensed under BSD
     5.8 - */
     5.9 -
    5.10 -#include <stdio.h>
    5.11 -#include <stdlib.h>
    5.12 -#include <malloc.h>
    5.13 -
    5.14 -#include "VMS.h"
    5.15 -#include "Queue_impl/BlockingQueue.h"
    5.16 -
    5.17 -
    5.18 -#define thdAttrs NULL
    5.19 -
    5.20 -//===========================================================================
    5.21 -void
    5.22 -shutdownFnSeq( void *dummy, VirtProcr *dummy2 );
    5.23 -
    5.24 -void
    5.25 -create_sched_slots( MasterEnv *masterEnv );
    5.26 -
    5.27 -//===========================================================================
    5.28 -
    5.29 -/*Setup has two phases:
    5.30 - * 1) Semantic layer first calls init_VMS, which creates masterEnv, and puts
    5.31 - *    the master virt procr into the work-queue, ready for first "call"
    5.32 - * 2) Semantic layer then does its own init, which creates the seed virt
    5.33 - *    procr inside the semantic layer, ready to schedule it when
    5.34 - *    asked by the first run of the masterLoop.
    5.35 - *
    5.36 - *This part is bit weird because VMS really wants to be "always there", and
    5.37 - * have applications attach and detach..  for now, this VMS is part of
    5.38 - * the app, so the VMS system starts up as part of running the app.
    5.39 - *
    5.40 - *The semantic layer is isolated from the VMS internals by making the
    5.41 - * semantic layer do setup to a state that it's ready with its
    5.42 - * initial virt procrs, ready to schedule them to slots when the masterLoop
    5.43 - * asks.  Without this pattern, the semantic layer's setup would
    5.44 - * have to modify slots directly to assign the initial virt-procrs, and put
    5.45 - * them into the workQ itself, breaking the isolation completely.
    5.46 - *
    5.47 - * 
    5.48 - *The semantic layer creates the initial virt procr(s), and adds its
    5.49 - * own environment to masterEnv, and fills in the pointers to
    5.50 - * the requestHandler and slaveScheduler plug-in functions
    5.51 - */
    5.52 -
    5.53 -/*This allocates VMS data structures, populates the master VMSProc,
    5.54 - * and master environment, and returns the master environment to the semantic
    5.55 - * layer.
    5.56 - */
    5.57 -void
    5.58 -VMS__init_Seq()
    5.59 - { MasterEnv  *masterEnv;
    5.60 -   VMSQueueStruc *workQ;
    5.61 -
    5.62 -      //Make the central work-queue
    5.63 -   _VMSWorkQ = makeVMSQ();
    5.64 -   workQ     = _VMSWorkQ;
    5.65 -
    5.66 -   _VMSMasterEnv = malloc( sizeof(MasterEnv) );
    5.67 -   masterEnv     = _VMSMasterEnv;
    5.68 -
    5.69 -      //create the master virtual processor
    5.70 -   masterEnv->masterVirtPr = VMS__create_procr( &masterLoop, masterEnv );
    5.71 -
    5.72 -   create_sched_slots( masterEnv );
    5.73 -
    5.74 -   masterEnv->stillRunning = FALSE;
    5.75 -   masterEnv->numToPrecede = NUM_CORES;
    5.76 -   
    5.77 -      //First core loop to start up gets this, which will schedule seed Pr
    5.78 -      //TODO: debug: check address of masterVirtPr
    5.79 -   writeVMSQ( masterEnv->masterVirtPr, workQ );
    5.80 -
    5.81 -   numProcrsCreated = 1;
    5.82 -
    5.83 -   //========================================================================
    5.84 -   //                      Create the Threads
    5.85 -   
    5.86 - }
    5.87 -
    5.88 -
    5.89 -/*Semantic layer calls this when it want the system to start running..
    5.90 - *
    5.91 - *This starts the core loops running then waits for them to exit.
    5.92 - */
    5.93 -void
    5.94 -VMS__start_the_work_then_wait_until_done_Seq()
    5.95 - { int coreIdx;
    5.96 -      //Start the core loops running
    5.97 -//===========================================================================
    5.98 -   TSCount  startCount, endCount;
    5.99 -   unsigned long long count = 0, freq = 0;
   5.100 -   double   runTime;
   5.101 -
   5.102 -      startCount = getTSCount();
   5.103 -      
   5.104 -         //Instead of un-suspending threads, just call the one and only
   5.105 -         // core loop, in the main thread.
   5.106 -      coreLoop_Seq( NULL );
   5.107 -
   5.108 -      //NOTE: do not clean up VMS env here -- semantic layer has to have
   5.109 -      // a chance to clean up its environment first, then do a call to free
   5.110 -      // the Master env and rest of VMS locations
   5.111 -
   5.112 -
   5.113 -      endCount = getTSCount();
   5.114 -      count = endCount - startCount;
   5.115 -
   5.116 -      runTime = (double)count / (double)TSCOUNT_FREQ;
   5.117 -
   5.118 -      printf("\n Time startup to shutdown: %f\n", runTime); fflush( stdin );
   5.119 - }
   5.120 -
   5.121 -
   5.122 -
   5.123 -
   5.124 -
   5.125 -/*This is equivalent to "jump back to core loop" -- it's mainly only used
   5.126 - * just after adding dissipate request to a processor -- so the semantic
   5.127 - * layer is the only place it will be seen and/or used.
   5.128 - *
   5.129 - *It does almost the same thing as suspend, except don't need to save the
   5.130 - * stack nor set the nextInstrPt
   5.131 - *
   5.132 - *As of June 30, 2010  just implementing as a call to suspend -- just sugar
   5.133 - */
   5.134 -void
   5.135 -VMS__return_from_fn( VirtProcr *animatingPr )
   5.136 - {
   5.137 -   VMS__suspend_procr( animatingPr );
   5.138 - }
   5.139 -
   5.140 -
   5.141 -/*Not sure yet the form going to put "dissipate" in, so this is the third
   5.142 - * possibility -- the semantic layer can just make a macro that looks like
   5.143 - * a call to its name, then expands to a call to this.
   5.144 - *
   5.145 - *As of June 30, 2010  this looks like the top choice..
   5.146 - *
   5.147 - *This adds a request to dissipate, then suspends the processor so that the
   5.148 - * request handler will receive the request.  The request handler is what
   5.149 - * does the work of freeing memory and removing the processor from the
   5.150 - * semantic environment's data structures.
   5.151 - *The request handler also is what figures out when to shutdown the VMS
   5.152 - * system -- which causes all the core loop threads to die, and returns from
   5.153 - * the call that started up VMS to perform the work.
   5.154 - *
   5.155 - *This form is a bit misleading to understand if one is trying to figure out
   5.156 - * how VMS works -- it looks like a normal function call, but inside it
   5.157 - * sends a request to the request handler and suspends the processor, which
   5.158 - * jumps out of the VMS__dissipate_procr function, and out of all nestings
   5.159 - * above it, transferring the work of dissipating to the request handler,
   5.160 - * which then does the actual work -- causing the processor that animated
   5.161 - * the call of this function to disappear and the "hanging" state of this
   5.162 - * function to just poof into thin air -- the virtual processor's trace
   5.163 - * never returns from this call, but instead the virtual processor's trace
   5.164 - * gets suspended in this call and all the virt processor's state disap-
   5.165 - * pears -- making that suspend the last thing in the virt procr's trace.
   5.166 - */
   5.167 -void
   5.168 -VMS__dissipate_procr( VirtProcr *procrToDissipate )
   5.169 - { VMSReqst *req;
   5.170 -
   5.171 -   req = malloc( sizeof(VMSReqst) );
   5.172 -//   req->virtProcrFrom      = callingPr;
   5.173 -   req->reqType               = dissipate;
   5.174 -   req->nextReqst             = procrToDissipate->requests;
   5.175 -   procrToDissipate->requests = req;
   5.176 -   
   5.177 -   VMS__suspend_procr( procrToDissipate );
   5.178 -}
   5.179 -
   5.180 -
   5.181 -/*This inserts the semantic-layer's request data into standard VMS carrier
   5.182 - */
   5.183 -inline void
   5.184 -VMS__add_sem_request( void *semReqData, VirtProcr *callingPr )
   5.185 - { VMSReqst *req;
   5.186 -
   5.187 -   req = malloc( sizeof(VMSReqst) );
   5.188 -//   req->virtProcrFrom      = callingPr;
   5.189 -   req->reqType        = semantic;
   5.190 -   req->semReqData     = semReqData;
   5.191 -   req->nextReqst      = callingPr->requests;
   5.192 -   callingPr->requests = req;
   5.193 - }
   5.194 -
   5.195 -
   5.196 -
   5.197 -//TODO: add a semantic-layer supplied "freer" for the semantic-data portion
   5.198 -// of a request -- IE call with both a virt procr and a fn-ptr to request
   5.199 -// freer (or maybe put request freer as a field in virt procr?)
   5.200 -void
   5.201 -VMS__remove_and_free_top_request( VirtProcr *procrWithReq )
   5.202 - { VMSReqst *req;
   5.203 -
   5.204 -   req = procrWithReq->requests;
   5.205 -   procrWithReq->requests = procrWithReq->requests->nextReqst;
   5.206 -   free( req );
   5.207 - }
   5.208 -
   5.209 -
   5.210 -//TODO: add a semantic-layer supplied "freer" for the semantic-data portion
   5.211 -// of a request -- IE call with both a virt procr and a fn-ptr to request
   5.212 -// freer (also maybe put sem request freer as a field in virt procr?)
   5.213 -//VMSHW relies right now on this only freeing VMS layer of request -- the
   5.214 -// semantic portion of request is alloc'd and freed by request handler
   5.215 -void
   5.216 -VMS__free_request( VMSReqst *req )
   5.217 - { 
   5.218 -   free( req );
   5.219 - }
   5.220 -
   5.221 -VMSReqst *
   5.222 -VMS__take_top_request_from( VirtProcr *procrWithReq )
   5.223 - { VMSReqst *req;
   5.224 -
   5.225 -   req = procrWithReq->requests;
   5.226 -   if( req == NULL ) return req;
   5.227 -   
   5.228 -   procrWithReq->requests = procrWithReq->requests->nextReqst;
   5.229 -   return req;
   5.230 - }
   5.231 -
   5.232 -inline int
   5.233 -VMS__isSemanticReqst( VMSReqst *req )
   5.234 - {
   5.235 -   return ( req->reqType == semantic );
   5.236 - }
   5.237 -
   5.238 -
   5.239 -inline void *
   5.240 -VMS__take_sem_reqst_from( VMSReqst *req )
   5.241 - {
   5.242 -   return req->semReqData;
   5.243 - }
   5.244 -
   5.245 -inline int
   5.246 -VMS__isDissipateReqst( VMSReqst *req )
   5.247 - {
   5.248 -   return ( req->reqType == dissipate );
   5.249 - }
   5.250 -
   5.251 -inline int
   5.252 -VMS__isCreateReqst( VMSReqst *req )
   5.253 - {
   5.254 -   return ( req->reqType == regCreated );
   5.255 - }
   5.256 -
   5.257 -void
   5.258 -VMS__send_register_new_procr_request(VirtProcr *newPr, VirtProcr *reqstingPr)
   5.259 - { VMSReqst *req;
   5.260 -
   5.261 -   req                  = malloc( sizeof(VMSReqst) );
   5.262 -   req->reqType         = regCreated;
   5.263 -   req->semReqData      = newPr;
   5.264 -   req->nextReqst       = reqstingPr->requests;
   5.265 -   reqstingPr->requests = req;
   5.266 -
   5.267 -   VMS__suspend_procr( reqstingPr );
   5.268 - }
   5.269 -
   5.270 -
   5.271 -/*The semantic layer figures out when the work is done ( perhaps by a call
   5.272 - * in the application to "work all done", or perhaps all the virtual
   5.273 - * processors have dissipated.. a.s.o. )
   5.274 - *
   5.275 - *The semantic layer is responsible for making sure all work has fully
   5.276 - * completed before using this to shutdown the VMS system.
   5.277 - *
   5.278 - *After the semantic layer has determined it wants to shut down, the
   5.279 - * next time the Master Loop calls the scheduler plug-in, the scheduler
   5.280 - * then calls this function and returns the virtual processor it gets back.
   5.281 - *
   5.282 - *When the shut-down processor runs, it first frees all locations malloc'd to
   5.283 - * the VMS system (that wasn't
   5.284 - * specified as return-locations).  Then it creates one core-loop shut-down
   5.285 - * processor for each core loop and puts them all into the workQ.  When a
   5.286 - * core loop animates a core loop shut-down processor, it causes exit-thread
   5.287 - * to run, and when all core loop threads have exited, then the "wait for
   5.288 - * work to finish" in the main thread is woken, and the function-call that
   5.289 - * started all the work returns.
   5.290 - *
   5.291 - *The function animated by this processor performs the shut-down work.
   5.292 - */
   5.293 -VirtProcr *
   5.294 -VMS__create_the_shutdown_procr()
   5.295 - {
   5.296 -   return VMS__create_procr( &shutdownFn, NULL );
   5.297 - }
   5.298 -
   5.299 -
   5.300 -/*This must be called by the request handler plugin -- it cannot be called
   5.301 - * from the semantic library "dissipate processor" function -- instead, the
   5.302 - * semantic layer has to generate a request for the plug-in to call this
   5.303 - * function.
   5.304 - *The reason is that this frees the virtual processor's stack -- which is
   5.305 - * still in use inside semantic library calls!
   5.306 - *
   5.307 - *This frees or recycles all the state owned by and comprising the VMS
   5.308 - * portion of the animating virtual procr.  The request handler must first
   5.309 - * free any semantic data created for the processor that didn't use the
   5.310 - * VMS_malloc mechanism.  Then it calls this, which first asks the malloc
   5.311 - * system to disown any state that did use VMS_malloc, and then frees the
   5.312 - * statck and the processor-struct itself.
   5.313 - *If the dissipated processor is the sole (remaining) owner of VMS__malloc'd
   5.314 - * state, then that state gets freed (or sent to recycling) as a side-effect
   5.315 - * of dis-owning it.
   5.316 - */
   5.317 -void
   5.318 -VMS__free_procr_locs( VirtProcr *animatingPr )
   5.319 - {
   5.320 -      //dis-own all locations owned by this processor, causing to be freed
   5.321 -      // any locations that it is (was) sole owner of
   5.322 -   //TODO: implement VMS__malloc system, including "give up ownership"
   5.323 -
   5.324 -      //The dissipate request might still be attached, so remove and free it
   5.325 -   VMS__remove_and_free_top_request( animatingPr );
   5.326 -   free( animatingPr->startOfStack );
   5.327 -
   5.328 -      //NOTE: initialData was given to the processor, so should either have
   5.329 -      // been alloc'd with VMS__malloc, or freed by the level above animPr.
   5.330 -      //So, all that's left to free here is the stack and the VirtProcr struc
   5.331 -      // itself
   5.332 -   free( animatingPr->startOfStack );
   5.333 -   free( animatingPr );
   5.334 - }
   5.335 -
   5.336 -
   5.337 -
   5.338 -/*This is the function run by the special "shut-down" processor
   5.339 - * 
   5.340 - *The _VMSMasterEnv is needed by this shut down function, so the "wait"
   5.341 - * function run in the main loop has to free it, and the thread-related
   5.342 - * locations (coreLoopThdParams a.s.o.).
   5.343 - *However, the semantic environment and all data malloc'd to VMS can be
   5.344 - * freed here.
   5.345 - *
   5.346 - *NOTE: the semantic plug-in is expected to use VMS__malloc to get all the
   5.347 - * locations it needs -- they will be automatically freed by the standard
   5.348 - * "free all owned locations"
   5.349 - *
   5.350 - *Free any locations malloc'd to the VMS system (that weren't
   5.351 - * specified as return-locations).
   5.352 - *Then create one core-loop shut-down processor for each core loop and puts
   5.353 - * them all into the workQ.
   5.354 - */
   5.355 -void
   5.356 -shutdownFn( void *dummy, VirtProcr *animatingPr )
   5.357 - { int coreIdx;
   5.358 -   VirtProcr *shutDownPr;
   5.359 -   VMSQueueStruc *workQ = _VMSWorkQ;
   5.360 -
   5.361 -      //free all the locations owned within the VMS system
   5.362 -   //TODO: write VMS__malloc and free.. -- take the DKU malloc as starting pt
   5.363 -
   5.364 -      //make the core loop shut-down processors and put them into the workQ
   5.365 -   for( coreIdx=0; coreIdx < NUM_CORES; coreIdx++ )
   5.366 -    {
   5.367 -      shutDownPr = VMS__create_procr( NULL, NULL );
   5.368 -      shutDownPr->nextInstrPt = _VMSMasterEnv->coreLoopShutDownPt;
   5.369 -      writeVMSQ( shutDownPr, workQ );
   5.370 -    }
   5.371 -
   5.372 -      //This is an issue: the animating processor of this function may not
   5.373 -      // get its request handled before all the cores have shutdown.
   5.374 -      //TODO: after all the threads stop, clean out the MasterEnv, the
   5.375 -      // SemanticEnv, and the workQ before returning.
   5.376 -   VMS__dissipate_procr( animatingPr );  //will never come back from this
   5.377 - }
   5.378 -
   5.379 -
   5.380 -/*This has to free anything allocated during VMS_init, and any other alloc'd
   5.381 - * locations that might be left over.
   5.382 - */
   5.383 -void
   5.384 -VMS__shutdown()
   5.385 - { int i;
   5.386 - 
   5.387 -   free( _VMSWorkQ );
   5.388 -   free( _VMSMasterEnv->filledSlots );
   5.389 -   for( i = 0; i < NUM_SCHED_SLOTS; i++ )
   5.390 -    {
   5.391 -      free( _VMSMasterEnv->schedSlots[i] );
   5.392 -    }
   5.393 -
   5.394 -   free( _VMSMasterEnv->schedSlots);
   5.395 -   VMS__free_procr_locs( _VMSMasterEnv->masterVirtPr );
   5.396 -   
   5.397 -   free( _VMSMasterEnv );
   5.398 - }
   5.399 -
   5.400 -
   5.401 -//===========================================================================
   5.402 -
   5.403 -inline TSCount getTSCount()
   5.404 - { unsigned int low, high;
   5.405 -   TSCount  out;
   5.406 -
   5.407 -   saveTimeStampCountInto( low, high );
   5.408 -   out = high;
   5.409 -   out = (out << 32) + low;
   5.410 -   return out;
   5.411 - }
   5.412 -