changeset 4:13af59ed7ea5

Works -- with send-receive plus normal dependencies
author Sean Halle <seanhalle@yahoo.com>
date Thu, 14 Jun 2012 18:44:47 -0700
parents 468b8638ff92
children 8188c5b4bfd7
files VSs.c VSs.h VSs_PluginFns.c VSs_Request_Handlers.c
diffstat 4 files changed, 550 insertions(+), 45 deletions(-)
     1.1 --- a/VSs.c	Wed Jun 06 17:55:36 2012 -0700
     1.2 +++ b/VSs.c	Thu Jun 14 18:44:47 2012 -0700
     1.3 @@ -226,6 +226,7 @@
     1.4     semanticEnv->numSlaveVP = 0;
     1.5     
     1.6     semanticEnv->argPtrHashTbl  = makeHashTable32( 16, &VMS_int__free );
     1.7 +   semanticEnv->commHashTbl    = makeHashTable32( 16, &VMS_int__free );
     1.8  
     1.9     //TODO: bug -- turn these arrays into dyn arrays to eliminate limit
    1.10     //semanticEnv->singletonHasBeenExecutedFlags = makeDynArrayInfo( );
    1.11 @@ -429,31 +430,50 @@
    1.12  //===========================================================================
    1.13  
    1.14  
    1.15 -//===========================================================================
    1.16 -/*Returns a taskID, which can be used to communicate between tasks with
    1.17 - * send-receive, or to use other kinds of constructs with tasks.
    1.18 +//======================= task submit and end ==============================
    1.19 +/*
    1.20   */
    1.21 -int32
    1.22 +void
    1.23  VSs__submit_task( VSsTaskType *taskType, void *args, SlaveVP *animSlv)
    1.24   { VSsSemReq  reqData;
    1.25  
    1.26     reqData.reqType    = submit_task;
    1.27 -   reqData.callingSlv = animSlv;
    1.28 +   
    1.29     reqData.taskType   = taskType;
    1.30     reqData.args       = args;
    1.31 +   reqData.callingSlv = animSlv;
    1.32    
    1.33 +   reqData.taskID     = NULL;
    1.34   
    1.35     VMS_WL__send_sem_request( &reqData, animSlv );
    1.36 -   return (int32)animSlv->dataRetFromReq;
    1.37   }
    1.38  
    1.39 -/*NOTE: if want, don't need to send the animating SlaveVP around.. 
    1.40 - * instead, can make a single slave per core, and coreCtrlr looks up the
    1.41 - * slave from having the core number.
    1.42 - * 
    1.43 - *But, to stay compatible with all the other VMS languages, leave it in..
    1.44 - *
    1.45 - *This call is the last to happen in every task.  It causes the slave to
    1.46 +inline int32 *
    1.47 +VSs__create_taskID_of_size( int32 numInts, SlaveVP *animSlv )
    1.48 + { int32 *taskID;
    1.49 +   
    1.50 +   taskID    = VMS_WL__malloc( sizeof(int32) + numInts * sizeof(int32) );
    1.51 +   taskID[0] = numInts;
    1.52 +   return taskID;
    1.53 + }
    1.54 +
    1.55 +void
    1.56 +VSs__submit_task_with_ID( VSsTaskType *taskType, void *args, int32 *taskID, 
    1.57 +                          SlaveVP     *animSlv)
    1.58 + { VSsSemReq  reqData;
    1.59 +
    1.60 +   reqData.reqType    = submit_task;
    1.61 +   
    1.62 +   reqData.taskType   = taskType;
    1.63 +   reqData.args       = args;
    1.64 +   reqData.taskID     = taskID;
    1.65 +   reqData.callingSlv = animSlv;
    1.66 + 
    1.67 +   VMS_WL__send_sem_request( &reqData, animSlv );
    1.68 + }
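A minimal usage sketch of the two calls above (the task-type, args, and ID values are hypothetical, not taken from this changeset): a task ID is a length-prefixed int32 array, with the count of components in taskID[0] and the caller-chosen components after it.

   int32 *collectorID;

   collectorID    = VSs__create_taskID_of_size( 2, animSlv ); //taskID[0] == 2
   collectorID[1] = COLLECTOR_GROUP;  //hypothetical application constants
   collectorID[2] = COLLECTOR_RANK;

   VSs__submit_task_with_ID( collectorTaskType, collectorArgs, collectorID,
                             animSlv );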
    1.69 +
    1.70 +
    1.71 +/*This call is the last to happen in every task.  It causes the slave to
    1.72   * suspend and get the next task out of the task-queue.  Notice there is no
    1.73   * assigner here.. only one slave, no slave ReadyQ, and so on..
    1.74   *Can either make the assigner take the next task out of the taskQ, or can
    1.75 @@ -463,6 +483,12 @@
    1.76   * 
    1.77   *The task-stub is saved in the animSlv, so the request handler will get it
    1.78   * from there, along with the task-type which has arg types, and so on..
    1.79 + * 
    1.80 + * NOTE: if want, don't need to send the animating SlaveVP around.. 
    1.81 + * instead, can make a single slave per core, and coreCtrlr looks up the
    1.82 + * slave from having the core number.
    1.83 + * 
    1.84 + *But, to stay compatible with all the other VMS languages, leave it in..
    1.85   */
    1.86  void
    1.87  VSs__end_task( SlaveVP *animSlv )
    1.88 @@ -474,6 +500,108 @@
    1.89     VMS_WL__send_sem_request( &reqData, animSlv );
    1.90   }
    1.91  
    1.92 +
    1.93 +//==========================  send and receive ============================
    1.94 +//
    1.95 +
    1.96 +inline int32 *
    1.97 +VSs__give_self_taskID( SlaveVP *animSlv )
    1.98 + {
    1.99 +   return ((VSsSemData*)animSlv->semanticData)->taskStub->taskID;
   1.100 + }
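Putting the pieces together, a task body can recover the ID it was submitted with via VSs__give_self_taskID and, per the comment above VSs__end_task, must make VSs__end_task its final call. A sketch only -- the task-function signature here is an assumption, not something this changeset defines:

   void
   collectorTask( void *args, SlaveVP *animSlv )
    { int32 *myID;

      myID = VSs__give_self_taskID( animSlv ); //NULL if submitted without ID

      /* ... do the work, using myID to receive messages sent to this task */

      VSs__end_task( animSlv ); //last call in every task
    }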
   1.101 +
   1.102 +//================================ send ===================================
   1.103 +
   1.104 +void
   1.105 +VSs__send_of_type_to( void *msg, const int32 type, int32 *receiverID,
   1.106 +                      SlaveVP *senderSlv )
   1.107 + { VSsSemReq  reqData;
   1.108 +
   1.109 +   reqData.reqType    = send_type_to;
   1.110 +   
   1.111 +   reqData.msg        = msg;
   1.112 +   reqData.msgType    = type;
   1.113 +   reqData.receiverID = receiverID;
   1.114 +   reqData.senderSlv  = senderSlv;
   1.115 +   
   1.116 +   reqData.nextReqInHashEntry = NULL;
   1.117 +
   1.118 +   VMS_WL__send_sem_request( &reqData, senderSlv );
   1.119 +
   1.120 +      //When come back from suspend, no longer own data reachable from msg
   1.121 + }
   1.122 +
   1.123 +void
   1.124 +VSs__send_from_to( void *msg, int32 *senderID, int32 *receiverID, SlaveVP *senderSlv )
   1.125 + { VSsSemReq  reqData;
   1.126 +
   1.127 +   reqData.reqType     = send_from_to;
   1.128 +   
   1.129 +   reqData.msg         = msg;
   1.130 +   reqData.senderID    = senderID;
   1.131 +   reqData.receiverID  = receiverID;
   1.132 +   reqData.senderSlv   = senderSlv;
   1.133 +
   1.134 +   reqData.nextReqInHashEntry = NULL;
   1.135 +
   1.136 +   VMS_WL__send_sem_request( &reqData, senderSlv );
   1.137 + }
   1.138 +
   1.139 +
   1.140 +//================================ receive ================================
   1.141 +
   1.142 +/*The "type" version of send and receive creates a many-to-one relationship.
   1.143 + * The sender is anonymous, and many sends can stack up, waiting to be
   1.144 + * received.  The same receiver can also have send from-to's
   1.145 + * waiting for it, and those will be kept separate from the "type"
   1.146 + * messages.
   1.147 + */
   1.148 +void *
   1.149 +VSs__receive_type_to( const int32 type, int32* receiverID, SlaveVP *receiverSlv )
   1.150 + {       DEBUG__printf1(dbgRqstHdlr,"WL: receive type to %d",receiverID[1] );
   1.151 +   VSsSemReq  reqData;
   1.152 +
   1.153 +   reqData.reqType     = receive_type_to;
   1.154 +   
   1.155 +   reqData.msgType     = type;
   1.156 +   reqData.receiverID  = receiverID;
   1.157 +   reqData.receiverSlv = receiverSlv;
   1.158 +   
   1.159 +   reqData.nextReqInHashEntry = NULL;
   1.160 +
   1.161 +   VMS_WL__send_sem_request( &reqData, receiverSlv );
   1.162 +   
   1.163 +   return receiverSlv->dataRetFromReq;
   1.164 + }
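A sketch of the many-to-one "type" pairing described above, with hypothetical names: any number of anonymous senders address the same receiver ID and message type, and the receiver drains them one send per receive.

   //in each worker task (sender side) -- sender stays anonymous
   VSs__send_of_type_to( resultMsg, RESULT_TYPE, collectorID, animSlv );

   //in the collector task (receiver side) -- each call pairs with one send
   void *msg;
   msg = VSs__receive_type_to( RESULT_TYPE, VSs__give_self_taskID( animSlv ),
                               animSlv );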
   1.165 +
   1.166 +
   1.167 +
   1.168 +/*Call this at the point a receiving task wants in-coming data.
   1.169 + * Use this from-to form when know senderID -- it makes a direct channel
   1.170 + * between sender and receiver.
   1.171 + */
   1.172 +void *
   1.173 +VSs__receive_from_to( int32 *senderID, int32 *receiverID, SlaveVP *receiverSlv )
   1.174 + { 
   1.175 +   VSsSemReq  reqData;
   1.176 +
   1.177 +   reqData.reqType     = receive_from_to;
   1.178 +
   1.179 +   reqData.senderID    = senderID;
   1.180 +   reqData.receiverID  = receiverID;
   1.181 +   reqData.receiverSlv = receiverSlv;
   1.182 +
   1.183 +   reqData.nextReqInHashEntry = NULL;
   1.184 +      DEBUG__printf2(dbgRqstHdlr,"WL: receive from %d to: %d", reqData.senderID[1], reqData.receiverID[1]);
   1.185 +      
   1.186 +   VMS_WL__send_sem_request( &reqData, receiverSlv );
   1.187 +
   1.188 +   return receiverSlv->dataRetFromReq;
   1.189 + }
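And the direct-channel form, again with hypothetical IDs: when the receiver knows the sender's task ID, the pair is matched on the (receiverID, senderID) combination, so at most one message waits on that channel at a time.

   //sender side
   VSs__send_from_to( msg, myID, consumerID, animSlv );

   //receiver side -- suspends until the matching send arrives
   void *data;
   data = VSs__receive_from_to( producerID, VSs__give_self_taskID( animSlv ),
                                animSlv );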
   1.190 +
   1.191 +
   1.192 +
   1.193 +
   1.194  //==========================================================================
   1.195  //
   1.196  /*A function singleton is a function whose body executes exactly once, on a
     2.1 --- a/VSs.h	Wed Jun 06 17:55:36 2012 -0700
     2.2 +++ b/VSs.h	Thu Jun 14 18:44:47 2012 -0700
     2.3 @@ -33,12 +33,13 @@
     2.4  typedef void  (*PtrToAtomicFn )  ( void * ); //executed atomically in master
     2.5  //===========================================================================
     2.6  
     2.7 -#define IN    1
     2.8 -#define OUT   2
     2.9 -#define INOUT 2
    2.10 +#define NONCTLD 0
    2.11 +#define IN      1  /*Trick -- READER same as IN*/
    2.12 +#define OUT     2  /*Trick -- WRITER same as OUT and INOUT*/
    2.13 +#define INOUT   2  /*Trick -- WRITER same as OUT and INOUT*/
    2.14  
    2.15 -#define READER  1
    2.16 -#define WRITER  2
    2.17 +#define READER  1  /*Trick -- READER same as IN*/
    2.18 +#define WRITER  2  /*Trick -- WRITER same as OUT and INOUT*/
    2.19  
    2.20  typedef struct
    2.21   {
    2.22 @@ -64,6 +65,7 @@
    2.23   {
    2.24     void       **args; //ctld args must come first, as ptrs
    2.25     VSsTaskType *taskType;
    2.26 +   int32       *taskID;
    2.27     int32        numBlockingProp;
    2.28     SlaveVP     *slaveAssignedTo;
    2.29     VSsPointerEntry  **ptrEntries;
    2.30 @@ -116,6 +118,11 @@
    2.31     create_slave_w_aff,
    2.32     dissipate_slave,
    2.33     //===============================
    2.34 +   send_type_to,
    2.35 +   receive_type_to,
    2.36 +   send_from_to,
    2.37 +   receive_from_to,
    2.38 +   //===============================
    2.39     malloc_req,
    2.40     free_req,
    2.41     singleton_fn_start,
    2.42 @@ -134,6 +141,15 @@
    2.43     void              *args;
    2.44     VSsTaskStub       *taskStub;
    2.45     
    2.46 +   SlaveVP           *senderSlv;
    2.47 +   SlaveVP           *receiverSlv;
    2.48 +   int32             *senderID;
    2.49 +   int32             *receiverID;
    2.50 +   int32              msgType;
    2.51 +   void              *msg;
    2.52 +   VSsSemReq         *nextReqInHashEntry;
    2.53 +   int32             *taskID;
    2.54 +   
    2.55     TopLevelFnPtr      fnPtr;
    2.56     void              *initData;
    2.57     int32              coreToAssignOnto;
    2.58 @@ -157,6 +173,7 @@
    2.59     PrivQueueStruc **readyVPQs;
    2.60     PrivQueueStruc  *taskReadyQ;  //Q: shared or local?
    2.61     HashTable       *argPtrHashTbl;
    2.62 +   HashTable       *commHashTbl;
    2.63     int32            numSlaveVP;
    2.64     int32            nextCoreToGetNewSlv;
    2.65     int32            primitiveStartTime;
    2.66 @@ -235,11 +252,11 @@
    2.67  
    2.68  //=======================
    2.69  
    2.70 -  SlaveVP *
    2.71 +SlaveVP *
    2.72  VSs__create_slave_with( TopLevelFnPtr fnPtr, void *initData,
    2.73                            SlaveVP *creatingSlv );
    2.74  
    2.75 -  SlaveVP *
    2.76 +SlaveVP *
    2.77  VSs__create_slave_with_affinity( TopLevelFnPtr fnPtr,    void *initData,
    2.78                              SlaveVP *creatingSlv, int32 coreToAssignOnto);
    2.79  
    2.80 @@ -254,13 +271,36 @@
    2.81  
    2.82  
    2.83  //=======================
    2.84 -int32
    2.85 +void
    2.86  VSs__submit_task( VSsTaskType *taskType, void *args, SlaveVP *animSlv);
    2.87  
    2.88 +inline int32 *
    2.89 +VSs__create_taskID_of_size( int32 numInts, SlaveVP *animSlv );
    2.90 +
    2.91 +void
    2.92 +VSs__submit_task_with_ID( VSsTaskType *taskType, void *args, int32 *taskID, 
    2.93 +                          SlaveVP     *animSlv);
    2.94  
    2.95  void
    2.96  VSs__end_task( SlaveVP *animSlv );
    2.97  
    2.98 +//=========================
    2.99 +
   2.100 +inline int32 *
   2.101 +VSs__give_self_taskID( SlaveVP *animSlv );
   2.102 +
   2.103 +void
   2.104 +VSs__send_of_type_to( void *msg, const int32 type, int32 *receiverID,
   2.105 +                      SlaveVP *senderSlv );
   2.106 +
   2.107 +void
   2.108 +VSs__send_from_to( void *msg, int32 *senderID, int32 *receiverID, SlaveVP *senderSlv );
   2.109 +
   2.110 +void *
   2.111 +VSs__receive_type_to( const int32 type, int32* receiverID, SlaveVP *receiverSlv );
   2.112 +
   2.113 +void *
   2.114 +VSs__receive_from_to( int32 *senderID, int32 *receiverID, SlaveVP *receiverSlv );
   2.115  
   2.116  //======================= Concurrency Stuff ======================
   2.117  void
     3.1 --- a/VSs_PluginFns.c	Wed Jun 06 17:55:36 2012 -0700
     3.2 +++ b/VSs_PluginFns.c	Thu Jun 14 18:44:47 2012 -0700
     3.3 @@ -217,6 +217,15 @@
     3.4           break;
     3.5        case end_task:        handleEndTask(      semReq,        semEnv);
     3.6           break;
     3.7 +      case send_type_to:    handleSendTypeTo(   semReq,        semEnv);
     3.8 +         break;
     3.9 +      case send_from_to:    handleSendFromTo(   semReq,        semEnv);
    3.10 +         break;
    3.11 +      case receive_type_to: handleReceiveTypeTo(semReq,        semEnv);
    3.12 +         break;
    3.13 +      case receive_from_to: handleReceiveFromTo(semReq,        semEnv);
    3.14 +         break;
    3.15 +         
    3.16        //====================================================================
    3.17        case malloc_req:      handleMalloc(       semReq, reqSlv, semEnv);
    3.18           break;
     4.1 --- a/VSs_Request_Handlers.c	Wed Jun 06 17:55:36 2012 -0700
     4.2 +++ b/VSs_Request_Handlers.c	Thu Jun 14 18:44:47 2012 -0700
     4.3 @@ -25,33 +25,34 @@
     4.4  //
     4.5  
     4.6  /*Only clone the elements of req used in these reqst handlers
     4.7 - *
     4.8 -  VSsSemReq *
     4.9 + */
    4.10 +VSsSemReq *
    4.11  cloneReq( VSsSemReq *semReq )
    4.12   { VSsSemReq *clonedReq;
    4.13  
    4.14     clonedReq             = VMS_PI__malloc( sizeof(VSsSemReq) );
    4.15     clonedReq->reqType    = semReq->reqType;
    4.16 -   clonedReq->callingSlv     = semReq->callingSlv;
    4.17 +   clonedReq->senderSlv  = semReq->senderSlv;
    4.18 +   clonedReq->receiverSlv= semReq->receiverSlv;
    4.19     clonedReq->msg        = semReq->msg;
    4.20     clonedReq->nextReqInHashEntry = NULL;
    4.21     
    4.22     return clonedReq;
    4.23   }
    4.24 -*/
    4.25  
    4.26 -/*
    4.27 +
    4.28 +
    4.29  HashEntry *
    4.30 -giveEntryElseInsertReqst( char *key, VSsSemReq *semReq,
    4.31 -    HashTable   *commHashTbl )
    4.32 +giveEntryElseInsertReqst32( int32 *key, VSsSemReq *semReq,
    4.33 +                            HashTable   *commHashTbl )
    4.34   { HashEntry    *entry;
    4.35     VSsSemReq    *waitingReq;
    4.36  
    4.37 -   entry = getEntryFromTable( (char *)key, commHashTbl );
    4.38 +   entry = getEntryFromTable32( key, commHashTbl );
    4.39     if( entry == NULL )
    4.40      {    //no waiting sends or receives, so add this request and exit
    4.41           // note: have to clone the request because it's on stack of sender
    4.42 -      addValueIntoTable( key, cloneReq( semReq ), commHashTbl );
    4.43 +      addValueIntoTable32( key, cloneReq( semReq ), commHashTbl );
    4.44        return NULL;
    4.45      }
    4.46     waitingReq = (VSsSemReq *)entry->content;
    4.47 @@ -62,19 +63,7 @@
    4.48      }
    4.49     return entry;
    4.50   }
    4.51 -*/
    4.52 -  
    4.53 -/*Various ideas for getting the 64b pointer into the two 32b key-array
    4.54 - * positions
    4.55 -   key[0] = 2; //two 32b values in key
    4.56 -  OR 
    4.57 -   (uint64) (key[1]) = argPtr;
    4.58 -  OR
    4.59 -   *( (uint64*)&key[1] ) = argPtr;
    4.60 -  OR
    4.61 -   key[2] = (uint32)argPtr;           //low bits
    4.62 -   key[1] = (uint32)(argPtr >> 32);   //high bits
    4.63 -*/
    4.64 +
    4.65        
    4.66  inline VSsPointerEntry *
    4.67  create_pointer_entry( )
    4.68 @@ -224,7 +213,7 @@
    4.69   */
    4.70  void
    4.71  handleSubmitTask( VSsSemReq *semReq, VSsSemEnv *semEnv )
    4.72 - { uint32            key[3];
    4.73 + { uint32           key[3];
    4.74     HashEntry       *rawHashEntry; //has char *, but use with uint32 *
    4.75     VSsPointerEntry *ptrEntry; //contents of hash table entry for an arg pointer
    4.76     void           **args;
    4.77 @@ -249,6 +238,7 @@
    4.78     taskType = semReq->taskType;
    4.79     taskStub = create_task_stub( taskType, args );//copies arg ptrs
    4.80     taskStub->numBlockingProp = taskType->numCtldArgs;
    4.81 +   taskStub->taskID = semReq->taskID; //may be NULL
    4.82     
    4.83     /*The controlled arguments are then processed one by one.
    4.84      *Processing an argument means getting the hash of the pointer.  Then,
    4.85 @@ -331,6 +321,10 @@
    4.86     return;
    4.87   }
    4.88  
    4.89 +inline void
    4.90 +handleSubmitTaskWID( VSsSemReq *semReq, VSsSemEnv *semEnv)
    4.91 + {
    4.92 + }
    4.93  
    4.94  
    4.95  /* ========================== end of task ===========================
    4.96 @@ -399,7 +393,7 @@
    4.97     int32 argNum;
    4.98     for( argNum = 0; argNum < endingTaskType->numCtldArgs; argNum++ )
    4.99      { 
   4.100 -      /*
   4.101 +      /* commented out 'cause saving entry ptr when create stub
   4.102        key[0] = 2; //says are 2 32b values in key
   4.103        *( (uint64*)&key[1] ) = args[argNum];  //write 64b ptr into two 32b
   4.104  
   4.105 @@ -509,6 +503,340 @@
   4.106   }
   4.107  
   4.108  
   4.109 +//========================== Task Comm handlers ===========================
   4.110 +
   4.111 +
   4.112 +
   4.113 +//============================  Send Handlers ==============================
   4.114 +/*Send of Type -- The semantic request has the receiving task ID and Type
   4.115 + *
   4.116 + *Messages of a given Type have to be kept separate..  so need a separate
   4.117 + * entry in the hash table for each pair: receiverID, Type
   4.118 + *
   4.119 + *Also, if same sender sends multiple before any get received, then need to
   4.120 + * stack the sends up -- even if a send waits until it's paired, several
   4.121 + * separate tasks can send to the same receiver, and doing hash on the
   4.122 + * receive task, so they will stack up.
   4.123 + */
   4.124 +void
   4.125 +handleSendTypeTo( VSsSemReq *semReq, VSsSemEnv *semEnv )
   4.126 + { SlaveVP    *senderSlv, *receiverSlv;
   4.127 +   int32      *senderID, *receiverID;
   4.128 +   int32      *key, keySz, receiverIDNumInt;
   4.129 +   VSsSemReq  *waitingReq;
   4.130 +   HashEntry  *entry;
   4.131 +   HashTable  *commHashTbl = semEnv->commHashTbl;
   4.132 +   
    4.133 +         DEBUG__printf1(dbgRqstHdlr,"SendType request from processor %d",semReq->senderSlv->slaveID)
   4.134 + 
    4.135 +   receiverID  = semReq->receiverID; //for send-of-type, sender is anonymous -- only recv ID matters
   4.136 +   senderSlv   = semReq->senderSlv;
   4.137 +
   4.138 +   receiverIDNumInt = receiverID[0] + 1; //pos 0 doesn't include itself
   4.139 +   keySz = receiverIDNumInt * sizeof(int32) + sizeof(int32);
   4.140 +   key = VMS_PI__malloc( keySz );
   4.141 +   memcpy( key, receiverID, receiverIDNumInt * sizeof(int32) );
   4.142 +   key[ receiverIDNumInt ] = semReq->msgType; //no +1 'cause starts at 0
   4.143 +   
   4.144 +   entry = giveEntryElseInsertReqst32( key, semReq, commHashTbl );
   4.145 +   if( entry == NULL ) return;  //was just inserted
   4.146 +
   4.147 +      //if here, found a waiting request with same key
   4.148 +   waitingReq = (VSsSemReq *)entry->content;
   4.149 +
   4.150 +      //At this point, know have waiting request(s) -- either sends or recv
   4.151 +      //Note, can only have max of one receive waiting, and cannot have both
   4.152 +      // sends and receives waiting (they would have paired off)
   4.153 +      // but can have multiple sends from diff sending VPs, all same msg-type
   4.154 +   if( waitingReq->reqType == send_type_to )
   4.155 +    {    //waiting request is another send, so stack this up on list
   4.156 +         // but first clone the sending request so it persists.
   4.157 +      VSsSemReq *clonedReq = cloneReq( semReq );
   4.158 +      clonedReq-> nextReqInHashEntry = waitingReq->nextReqInHashEntry;
   4.159 +      waitingReq->nextReqInHashEntry = clonedReq;
   4.160 +         DEBUG__printf2( dbgRqstHdlr, "linked requests: %p, %p ", clonedReq,\
   4.161 +                                                                 waitingReq )
   4.162 +      return;
   4.163 +    }
   4.164 +   else
   4.165 +    {    
   4.166 +       #ifdef HOLISTIC__TURN_ON_OBSERVE_UCC
   4.167 +        Dependency newd;
   4.168 +        newd.from_vp = senderID->slaveID;
   4.169 +        newd.from_task = senderID->assignCount;
   4.170 +        newd.to_vp = receiverID->slaveID;
   4.171 +        newd.to_task = receiverID->assignCount +1;
   4.172 +        //(newd,semEnv->commDependenciesList);  
   4.173 +        addToListOfArrays(Dependency,newd,semEnv->dynDependenciesList);  
   4.174 +                int32 groupId = semReq->msgType;
   4.175 +        if(semEnv->ntonGroupsInfo->numInArray <= groupId){
   4.176 +            makeHighestDynArrayIndexBeAtLeast(semEnv->ntonGroupsInfo, groupId);
   4.177 +        }
   4.178 +        if(semEnv->ntonGroups[groupId] == NULL){
   4.179 +            semEnv->ntonGroups[groupId] = new_NtoN(groupId);
   4.180 +        }
   4.181 +        Unit u;
   4.182 +        u.vp = senderID->slaveID;
   4.183 +        u.task = senderID->assignCount;
   4.184 +        addToListOfArrays(Unit,u,semEnv->ntonGroups[groupId]->senders);
   4.185 +        u.vp = receiverID->slaveID;
   4.186 +        u.task = receiverID->assignCount +1;
   4.187 +        addToListOfArrays(Unit,u,semEnv->ntonGroups[groupId]->receivers);
   4.188 +       #endif
   4.189 +
   4.190 +         //set receiver slave, from the waiting request
   4.191 +      receiverSlv = waitingReq->receiverSlv;
   4.192 +      
   4.193 +         //waiting request is a receive_type_to, so it pairs to this send
   4.194 +         //First, remove the waiting receive request from the entry
   4.195 +      entry->content = waitingReq->nextReqInHashEntry;
   4.196 +      VMS_PI__free( waitingReq ); //Don't use contents -- so free it
   4.197 +      
   4.198 +      if( entry->content == NULL )
   4.199 +       {    //TODO: mod hash table to double-link, so can delete entry from
   4.200 +            // table without hashing the key and looking it up again
   4.201 +         deleteEntryFromTable32( (uint32*)entry->key, commHashTbl );  //frees hashEntry
   4.202 +       }
   4.203 +      
   4.204 +         //attach msg that's in this send request to receiving task's Slv
   4.205 +         // when comes back from suspend will have msg in dataRetFromReq
   4.206 +      receiverSlv->dataRetFromReq = semReq->msg;
   4.207 +
   4.208 +         //bring both processors back from suspend
   4.209 +      resume_slaveVP( senderSlv,   semEnv );
   4.210 +      resume_slaveVP( receiverSlv, semEnv );
   4.211 +
   4.212 +      return;
   4.213 +    }
   4.214 + }
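As the comment above notes, several typed sends to the same (receiverID, msgType) key can arrive before any receive; the handler parks the first request in the hash entry and splices later clones in just behind it via nextReqInHashEntry. Conceptually the entry holds a singly linked list of waiting sends -- a sketch of walking it:

   //entry->content --> firstSend --> laterSend --> ... --> NULL
   VSsSemReq *req;
   for( req = (VSsSemReq *)entry->content; req != NULL;
        req = req->nextReqInHashEntry )
    { /* each req carries one waiting msg of the same msgType */ }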
   4.215 +
   4.216 +
   4.217 +/*Looks like can make single handler for both sends..
   4.218 + */
   4.219 +//TODO: combine both send handlers into single handler
   4.220 +void
   4.221 +handleSendFromTo( VSsSemReq *semReq, VSsSemEnv *semEnv)
   4.222 + { SlaveVP     *senderSlv, *receiverSlv;
   4.223 +   int32       *senderID, *receiverID;
   4.224 +   int32       *key, keySz, receiverIDNumInt, senderIDNumInt;
   4.225 +   VSsSemReq   *waitingReq;
   4.226 +   HashEntry   *entry;
   4.227 +   HashTable   *commHashTbl = semEnv->commHashTbl;
   4.228 +
    4.229 +         DEBUG__printf2(dbgRqstHdlr,"SendFromTo request from sender %d to receiver %d",semReq->senderID[1],semReq->receiverID[1])
   4.230 +   
   4.231 +   receiverID  = semReq->receiverID; //For "send", know both send & recv procrs
   4.232 +   senderID    = semReq->senderID;
   4.233 +   //receiverSlv = semReq->receiverSlv;
   4.234 +   senderSlv   = semReq->senderSlv;
   4.235 +
   4.236 +   receiverIDNumInt = receiverID[0] + 1; //pos 0 doesn't include itself
   4.237 +   senderIDNumInt   = senderID[0] + 1;
   4.238 +   keySz = (receiverIDNumInt + senderIDNumInt) * sizeof(int32);
   4.239 +   key   = VMS_PI__malloc( keySz );
   4.240 +   memcpy(  key, receiverID, receiverIDNumInt * sizeof(int32) );
   4.241 +   memcpy( &key[receiverIDNumInt], senderID, senderIDNumInt * sizeof(int32) );
   4.242 +
   4.243 +   entry = giveEntryElseInsertReqst32( key, semReq, commHashTbl );
   4.244 +   if( entry == NULL ) return;  //was just inserted
   4.245 +
   4.246 +   waitingReq = (VSsSemReq *)entry->content;
   4.247 +
   4.248 +      //At this point, know have waiting request(s) -- either sends or recv
   4.249 +   if( waitingReq->reqType == send_from_to )
   4.250 +    { printf("\n ERROR: shouldn't be two send-from-tos waiting \n");
   4.251 +    }
   4.252 +   else
   4.253 +    {    //waiting request is a receive, so it completes pair with this send
   4.254 +      #ifdef HOLISTIC__TURN_ON_OBSERVE_UCC
   4.255 +        Dependency newd;
   4.256 +        newd.from_vp = sendPr->slaveID;
   4.257 +        newd.from_task = sendPr->assignCount;
   4.258 +        newd.to_vp = receivePr->slaveID;
   4.259 +        newd.to_task = receivePr->assignCount +1;
   4.260 +        //addToListOfArraysDependency(newd,semEnv->commDependenciesList);  
   4.261 +        addToListOfArrays(Dependency,newd,semEnv->commDependenciesList);   
   4.262 +      #endif 
   4.263 +
   4.264 +         //set receiver slave, from the waiting request
   4.265 +      receiverSlv = waitingReq->receiverSlv;
   4.266 +       
   4.267 +         //First, remove the waiting receive request from the entry
   4.268 +      entry->content = waitingReq->nextReqInHashEntry;
   4.269 +      VMS_PI__free( waitingReq ); //Don't use contents -- so free it
   4.270 +      
   4.271 +         //can only be one waiting req for "from-to" semantics
   4.272 +      if( entry->content != NULL )
   4.273 +       {
   4.274 +         printf("\nERROR in handleSendFromTo\n");
   4.275 +       }
   4.276 +      deleteEntryFromTable32( (uint32*)entry->key, commHashTbl );  //frees HashEntry
   4.277 +
   4.278 +         //attach msg that's in this send request to receiving procr
   4.279 +         // when comes back from suspend, will have msg in dataRetFromReq
   4.280 +      receiverSlv->dataRetFromReq = semReq->msg;
   4.281 +
   4.282 +         //bring both processors back from suspend
   4.283 +      resume_slaveVP( senderSlv,   semEnv );
   4.284 +      resume_slaveVP( receiverSlv, semEnv );
   4.285 +            
   4.286 +      return;
   4.287 +    }
   4.288 + }
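A worked example of the from-to key built above, using hypothetical one-component IDs:

   int32 receiverID[] = { 1, 7 };  //one component, value 7
   int32 senderID[]   = { 1, 3 };  //one component, value 3
   //receiverIDNumInt == 2, senderIDNumInt == 2, keySz == 4 * sizeof(int32)
   //resulting key: { 1, 7, 1, 3 } -- receiver ints first, then sender ints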
   4.289 +
   4.290 +
   4.291 +
   4.292 +//==============================  Receives  ===========================
   4.293 +//
   4.294 +
   4.295 +
   4.296 +void
   4.297 +handleReceiveTypeTo( VSsSemReq *semReq, VSsSemEnv *semEnv)
   4.298 + { SlaveVP    *senderSlv, *receiverSlv;
   4.299 +   int32      *receiverID;
   4.300 +   int32      *key, keySz, receiverIDNumInt;
   4.301 +   VSsSemReq  *waitingReq;
   4.302 +   HashEntry  *entry;
   4.303 +   HashTable  *commHashTbl = semEnv->commHashTbl;
   4.304 +   
    4.305 +         DEBUG__printf1(dbgRqstHdlr,"ReceiveType request from processor %d",semReq->receiverSlv->slaveID)
   4.306 + 
    4.307 +   receiverID  = semReq->receiverID; //for receive-of-type, key is own ID plus the msg type
   4.308 +   receiverSlv = semReq->receiverSlv;
   4.309 +
   4.310 +      //key is the receiverID plus the type -- have to copy them into key
   4.311 +   receiverIDNumInt = receiverID[0] + 1; //pos 0 doesn't include itself
   4.312 +   keySz = receiverIDNumInt * sizeof(int32) + sizeof(int32);
   4.313 +   key = VMS_PI__malloc( keySz );
   4.314 +   memcpy( key, receiverID, receiverIDNumInt * sizeof(int32) );
   4.315 +   key[ receiverIDNumInt ] = semReq->msgType; //no +1 'cause starts at 0
   4.316 +
   4.317 +
   4.318 +   entry = giveEntryElseInsertReqst32( key, semReq, commHashTbl );//clones
   4.319 +   if( entry == NULL ) return;  //was just inserted
   4.320 +
   4.321 +   waitingReq = (VSsSemReq *)entry->content;  //previously cloned by insert
   4.322 +
   4.323 +      //At this point, know have waiting request(s) -- should be send(s)
   4.324 +   if( waitingReq->reqType == send_type_to )
   4.325 +    {    
   4.326 +         //set sending slave  from the request
   4.327 +      senderSlv = waitingReq->senderSlv;
   4.328 +      
   4.329 +         //waiting request is a send, so pair it with this receive
   4.330 +         //first, remove the waiting send request from the list in entry
   4.331 +      entry->content = waitingReq->nextReqInHashEntry;
   4.332 +      if( entry->content == NULL )
   4.333 +       { deleteEntryFromTable32( (uint32*)entry->key, commHashTbl );  //frees HashEntry
   4.334 +       }
   4.335 +      
   4.336 +         //attach msg that's in the send request to receiving procr
   4.337 +         // when comes back from suspend, will have msg in dataRetFromReq
   4.338 +      receiverSlv->dataRetFromReq = waitingReq->msg;
   4.339 +
   4.340 +         //bring both processors back from suspend
   4.341 +      VMS_PI__free( waitingReq );
   4.342 +
   4.343 +       #ifdef HOLISTIC__TURN_ON_OBSERVE_UCC
   4.344 +        Dependency newd;
   4.345 +        newd.from_vp = sendPr->slaveID;
   4.346 +        newd.from_task = sendPr->assignCount;
   4.347 +        newd.to_vp = receivePr->slaveID;
   4.348 +        newd.to_task = receivePr->assignCount +1;
   4.349 +        //addToListOfArraysDependency(newd,semEnv->commDependenciesList);  
   4.350 +        addToListOfArrays(Dependency,newd,semEnv->dynDependenciesList); 
   4.351 +        int32 groupId = semReq->msgType;
   4.352 +        if(semEnv->ntonGroupsInfo->numInArray <= groupId){
   4.353 +            makeHighestDynArrayIndexBeAtLeast(semEnv->ntonGroupsInfo, groupId);
   4.354 +        }
   4.355 +        if(semEnv->ntonGroups[groupId] == NULL){
   4.356 +            semEnv->ntonGroups[groupId] = new_NtoN(groupId);
   4.357 +        }
   4.358 +        Unit u;
   4.359 +        u.vp = sendPr->slaveID;
   4.360 +        u.task = sendPr->assignCount;
   4.361 +        addToListOfArrays(Unit,u,semEnv->ntonGroups[groupId]->senders);
   4.362 +        u.vp = receivePr->slaveID;
   4.363 +        u.task = receivePr->assignCount +1;
   4.364 +        addToListOfArrays(Unit,u,semEnv->ntonGroups[groupId]->receivers);
   4.365 +       #endif
   4.366 +      
   4.367 +      resume_slaveVP( senderSlv,   semEnv );
   4.368 +      resume_slaveVP( receiverSlv, semEnv );
   4.369 +
   4.370 +      return;
   4.371 +    }
   4.372 +   printf("\nLang Impl Error: Should never be two waiting receives!\n");
   4.373 + }
   4.374 +
   4.375 +
   4.376 +/*
   4.377 + */
   4.378 +void
   4.379 +handleReceiveFromTo( VSsSemReq *semReq, VSsSemEnv *semEnv)
   4.380 + { SlaveVP     *senderSlv, *receiverSlv;
   4.381 +   int32       *senderID,  *receiverID;
   4.382 +   int32       *key, keySz, receiverIDNumInt, senderIDNumInt;
   4.383 +   VSsSemReq   *waitingReq;
   4.384 +   HashEntry   *entry;
   4.385 +   HashTable   *commHashTbl = semEnv->commHashTbl;
   4.386 +
    4.387 +         DEBUG__printf2(dbgRqstHdlr,"ReceiveFromTo request from sender %d to receiver %d",semReq->senderID[1],semReq->receiverID[1])
   4.388 +   
    4.389 +   receiverID  = semReq->receiverID; //For "from-to", receiver names both sender & recv IDs
   4.390 +   senderID    = semReq->senderID;
   4.391 +   receiverSlv = semReq->receiverSlv;
   4.392 +
   4.393 +   receiverIDNumInt = receiverID[0] + 1; //pos 0 doesn't include itself
   4.394 +   senderIDNumInt   = senderID[0] + 1;
   4.395 +   keySz = (receiverIDNumInt + senderIDNumInt) * sizeof(int32);
   4.396 +   key = VMS_PI__malloc( keySz );
   4.397 +   memcpy(  key, receiverID, receiverIDNumInt * sizeof(int32) );
   4.398 +   memcpy( &key[receiverIDNumInt], senderID, senderIDNumInt * sizeof(int32) );
   4.399 +
   4.400 +   entry = giveEntryElseInsertReqst32( key, semReq, commHashTbl );
   4.401 +   if( entry == NULL ) return;  //was just inserted
   4.402 +
   4.403 +   waitingReq = (VSsSemReq *)entry->content;
   4.404 +
   4.405 +      //At this point, know have waiting request(s) -- should be send(s)
   4.406 +   if( waitingReq->reqType == send_from_to )
   4.407 +    {    //waiting request is a send, so pair it with this receive
   4.408 +      #ifdef HOLISTIC__TURN_ON_OBSERVE_UCC
   4.409 +        Dependency newd;
   4.410 +        newd.from_vp = sendPr->slaveID;
   4.411 +        newd.from_task = sendPr->assignCount;
   4.412 +        newd.to_vp = receivePr->slaveID;
   4.413 +        newd.to_task = receivePr->assignCount +1;
   4.414 +        //addToListOfArraysDependency(newd,semEnv->commDependenciesList);  
   4.415 +        addToListOfArrays(Dependency,newd,semEnv->commDependenciesList);    
   4.416 +      #endif
   4.417 +      
   4.418 +         //have receiver slave, now set sender slave
   4.419 +      senderSlv = waitingReq->senderSlv;
   4.420 +      
    4.421 +         //For from-to, should only ever be a single reqst waiting to be paired
   4.422 +      entry->content = waitingReq->nextReqInHashEntry;
   4.423 +      if( entry->content != NULL ) printf("\nERROR in handleRecvFromTo\n");
   4.424 +      deleteEntryFromTable32( (uint32*)entry->key, commHashTbl );  //frees entry too
   4.425 +
   4.426 +         //attach msg that's in the send request to receiving procr
   4.427 +         // when comes back from suspend, will have msg in dataRetFromReq
   4.428 +      receiverSlv->dataRetFromReq = waitingReq->msg;
   4.429 +
   4.430 +         //bring both processors back from suspend
   4.431 +      VMS_PI__free( waitingReq );
   4.432 +
   4.433 +      resume_slaveVP( senderSlv,   semEnv );
   4.434 +      resume_slaveVP( receiverSlv, semEnv );
   4.435 +
   4.436 +      return;
   4.437 +    }
   4.438 +   printf("\nLang Impl Error: Should never be two waiting receives!\n");
   4.439 + }
   4.440 +
   4.441 +
   4.442 +
   4.443  //==========================================================================
   4.444  /*
   4.445   */