changeset 6:98fd084badde Pin2Core

Works multi-core.. pinned VP to a coreloop, changed to SSR
author Me
date Wed, 01 Sep 2010 08:22:54 -0700
parents 833c981134dd
children 8a333ddb87c1
files SSR.h SSR_PluginFns.c SSR_Request_Handlers.c SSR_Request_Handlers.h SSR_lib.c VMSHW.h VMSHW_PluginFns.c VMSHW_Request_Handlers.c VMSHW_Request_Handlers.h VMSHW_lib.c
diffstat 10 files changed, 1031 insertions(+), 991 deletions(-) [+]
line diff
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/SSR.h	Wed Sep 01 08:22:54 2010 -0700
     1.3 @@ -0,0 +1,126 @@
     1.4 +/*
     1.5 + *  Copyright 2009 OpenSourceStewardshipFoundation.org
     1.6 + *  Licensed under GNU General Public License version 2
     1.7 + *
     1.8 + * Author: seanhalle@yahoo.com
     1.9 + *
    1.10 + */
    1.11 +
    1.12 +#ifndef _SSR_H
    1.13 +#define	_SSR_H
    1.14 +
    1.15 +#include "VMS/Queue_impl/PrivateQueue.h"
    1.16 +#include "VMS/Hash_impl/PrivateHash.h"
    1.17 +#include "VMS/VMS.h"
    1.18 +
    1.19 +/*This header defines everything specific to the SSR semantic plug-in
    1.20 + */
    1.21 +typedef struct _SSRSemReq   SSRSemReq;
    1.22 +
    1.23 +
    1.24 +/*Semantic layer-specific data sent inside a request from lib called in app
    1.25 + * to request handler called in MasterLoop
    1.26 + */
    1.27 +enum SSRReqType
    1.28 + {
    1.29 +   send_type = 1,
    1.30 +   send_from_to,
    1.31 +   receive_any,    //order and grouping matter -- send before receive
    1.32 +   receive_type,   // and receive_any first of the receives -- Handlers
    1.33 +   receive_from_to,// rely upon this ordering of enum
    1.34 +   transfer_to,
    1.35 +   transfer_out
    1.36 + };
    1.37 +
    1.38 +struct _SSRSemReq
    1.39 + { enum SSRReqType    reqType;
    1.40 +   VirtProcr           *sendPr;
    1.41 +   VirtProcr           *receivePr;
    1.42 +   int32                msgType;
    1.43 +   void                *msg;
    1.44 +   SSRSemReq         *nextReqInHashEntry;
    1.45 + }
    1.46 +/* SSRSemReq */;
    1.47 +
    1.48 +typedef struct
    1.49 + {
    1.50 +   PrivQueueStruc **readyVPQs;
    1.51 +   HashTable      *commHashTbl;
    1.52 +   int             numVirtPr;
    1.53 +   int             nextCoreToGetNewPr;
    1.54 + }
    1.55 +SSRSemEnv;
    1.56 +
    1.57 +
    1.58 +//===========================================================================
    1.59 +
    1.60 +void
    1.61 +SSR__create_seed_procr_and_do_work( VirtProcrFnPtr fn, void *initData );
    1.62 +
    1.63 +//=======================
    1.64 +
    1.65 +void
    1.66 +SSR__init();
    1.67 +
    1.68 +void
    1.69 +SSR__cleanup_after_shutdown();
    1.70 +
    1.71 +//=======================
    1.72 +
    1.73 +inline VirtProcr *
    1.74 +SSR__create_procr_with( VirtProcrFnPtr fnPtr, void *initData,
    1.75 +                          VirtProcr *creatingPr );
    1.76 +
    1.77 +void
    1.78 +SSR__dissipate_procr( VirtProcr *procrToDissipate );
    1.79 +
    1.80 +//=======================
    1.81 +void *
    1.82 +SSR__malloc_size_to( int numBytes, VirtProcr *ownerPr );
    1.83 +
    1.84 +void
    1.85 +SSR__transfer_ownership_of_from_to( void *data, VirtProcr *oldOwnerPr,
    1.86 +                                                    VirtProcr *newOwnerPr );
    1.87 +                                                    
    1.88 +void
    1.89 +SSR__add_ownership_by_to( VirtProcr *newOwnerPr, void *data );
    1.90 +
    1.91 +void
    1.92 +SSR__remove_ownership_by_from( VirtProcr *loserPr, void *dataLosing );
    1.93 +
    1.94 +void
    1.95 +SSR__transfer_ownership_to_outside( void *dataToTransferOwnershipOf );
    1.96 +
    1.97 +
    1.98 +
    1.99 +//=======================
   1.100 +void
   1.101 +SSR__send_of_type_to( VirtProcr *sendPr, void *msg, const int type,
   1.102 +                        VirtProcr *receivePr);
   1.103 +
   1.104 +void
   1.105 +SSR__send_from_to( void *msg, VirtProcr *sendPr, VirtProcr *receivePr);
   1.106 +
   1.107 +void *
   1.108 +SSR__receive_type_to( const int type, VirtProcr *receivePr );
   1.109 +
   1.110 +void *
   1.111 +SSR__receive_from_to( VirtProcr *sendPr, VirtProcr *receivePr );
   1.112 +
   1.113 +
   1.114 +//=======================
   1.115 +
   1.116 +void
   1.117 +SSR__free_semantic_request( SSRSemReq *semReq );
   1.118 +
   1.119 +
   1.120 +//=========================  Internal use only  =============================
   1.121 +void
   1.122 +SSR__Request_Handler( VirtProcr *requestingPr, void *_semEnv );
   1.123 +
   1.124 +VirtProcr *
   1.125 +SSR__schedule_virt_procr( void *_semEnv, int coreNum );
   1.126 +
   1.127 +
   1.128 +#endif	/* _SSR_H */
   1.129 +
     2.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     2.2 +++ b/SSR_PluginFns.c	Wed Sep 01 08:22:54 2010 -0700
     2.3 @@ -0,0 +1,125 @@
     2.4 +/*
     2.5 + * Copyright 2010  OpenSourceCodeStewardshipFoundation
     2.6 + *
     2.7 + * Licensed under BSD
     2.8 + */
     2.9 +
    2.10 +#include <stdio.h>
    2.11 +#include <stdlib.h>
    2.12 +#include <malloc.h>
    2.13 +
    2.14 +#include "VMS/Queue_impl/PrivateQueue.h"
    2.15 +#include "SSR.h"
    2.16 +#include "SSR_Request_Handlers.h"
    2.17 +
    2.18 +
    2.19 +/*Will get requests to send, to receive, and to create new processors.
    2.20 + * Upon send, check the hash to see if a receive is waiting.
    2.21 + * Upon receive, check hash to see if a send has already happened.
    2.22 + * When other is not there, put in.  When other is there, the comm.
    2.23 + *  completes, which means the receiver P gets scheduled and
    2.24 + *  picks up right after the receive request.  So make the work-unit
    2.25 + *  and put it into the queue of work-units ready to go.
    2.26 + * Other request is create a new Processor, with the function to run in the
    2.27 + *  Processor, and initial data.
    2.28 + */
    2.29 +void
    2.30 +SSR__Request_Handler( VirtProcr *requestingPr, void *_semEnv )
    2.31 + { SSRSemEnv *semEnv;
    2.32 +   VMSReqst    *req;
    2.33 +   SSRSemReq *semReq;
    2.34 + 
    2.35 +   semEnv = (SSRSemEnv *)_semEnv;
    2.36 +
    2.37 +   req = VMS__take_top_request_from( requestingPr );
    2.38 +   
    2.39 +   while( req != NULL )
    2.40 +    {
    2.41 +      if( VMS__isSemanticReqst( req ) )
    2.42 +       {
    2.43 +         semReq = VMS__take_sem_reqst_from( req );
    2.44 +         if( semReq == NULL ) goto DoneHandlingReqst;
    2.45 +         switch( semReq->reqType )
    2.46 +          {
    2.47 +            case send_type:       handleSendType(     semReq, semEnv);
    2.48 +               break;
    2.49 +            case send_from_to:    handleSendFromTo(   semReq, semEnv);
    2.50 +               break;
    2.51 +            case receive_type:    handleReceiveType(  semReq, semEnv);
    2.52 +               break;
    2.53 +            case receive_from_to: handleReceiveFromTo(semReq, semEnv);
    2.54 +               break;
    2.55 +            case transfer_to:     handleTransferTo(   semReq, semEnv);
    2.56 +               SSR__free_semantic_request( semReq );
    2.57 +               break;
    2.58 +            case transfer_out:    handleTransferOut(  semReq, semEnv);
    2.59 +               SSR__free_semantic_request( semReq );
    2.60 +               break;
    2.61 +          }
    2.62 +         //NOTE: freeing semantic request data strucs handled inside these
    2.63 +       }
    2.64 +      else if( VMS__isCreateReqst( req ) ) //only plugin can add to ready Q
    2.65 +       { VirtProcr *
    2.66 +         newPr = (VirtProcr *)req->semReqData;
    2.67 +         semEnv->numVirtPr += 1;
    2.68 +
     2.69 +            //Assign new processor to next core in line & queue it up -- NOTE(review): the writePrivQ below uses requestingPr->coreAnimatedBy, not newPr->coreAnimatedBy; confirm the new procr is meant to run on the requester's core
    2.70 +         #ifdef DEBUG
    2.71 +         newPr->coreAnimatedBy = 0;
    2.72 +         #else
    2.73 +         newPr->coreAnimatedBy = semEnv->nextCoreToGetNewPr;
    2.74 +         if( semEnv->nextCoreToGetNewPr >= NUM_CORES - 1 )
    2.75 +             semEnv->nextCoreToGetNewPr  = 0;
    2.76 +         else
    2.77 +             semEnv->nextCoreToGetNewPr += 1;
    2.78 +         #endif
    2.79 +         writePrivQ( newPr, semEnv->readyVPQs[requestingPr->coreAnimatedBy]);
    2.80 +
    2.81 +            //resume procr that asked for registration
    2.82 +         writePrivQ( requestingPr, 
    2.83 +                            semEnv->readyVPQs[requestingPr->coreAnimatedBy]);
    2.84 +       }
    2.85 +      else if( VMS__isDissipateReqst( req ) )
    2.86 +       {
    2.87 +            //free any semantic data allocated to the virt procr
    2.88 +
    2.89 +            //Now, call VMS to free_all AppVP state -- stack and so on
    2.90 +         VMS__handle_dissipate_reqst( requestingPr );
    2.91 +
    2.92 +         semEnv->numVirtPr -= 1;
    2.93 +         if( semEnv->numVirtPr == 0 )
    2.94 +          {    //no more work, so shutdown
    2.95 +            VMS__handle_shutdown_reqst( requestingPr );
    2.96 +          }
    2.97 +       }
    2.98 +
    2.99 +      DoneHandlingReqst:
   2.100 +         //Free VMS portion of request, no matter what -- sem request data
   2.101 +         // struc instances may still be around..  VMS__free_request doesn't
   2.102 +         // affect the semantic request that was carried by it
   2.103 +      req = VMS__free_top_and_give_next_request_from( requestingPr );
   2.104 +    } //while( req != NULL )
   2.105 + }
   2.106 +
   2.107 +//===========================================================================
   2.108 +
   2.109 +
   2.110 +/*For SSR, scheduling a slave simply takes the next work-unit off the
   2.111 + * ready-to-go work-unit queue and assigns it to the slaveToSched.
   2.112 + *If the ready-to-go work-unit queue is empty, then nothing to schedule
   2.113 + * to the slave -- return FALSE to let Master loop know scheduling that
   2.114 + * slave failed.
   2.115 + */
   2.116 +VirtProcr *
   2.117 +SSR__schedule_virt_procr( void *_semEnv, int coreNum )
   2.118 + { VirtProcr   *schedPr;
   2.119 +   SSRSemEnv *semEnv;
   2.120 +
   2.121 +   semEnv = (SSRSemEnv *)_semEnv;
   2.122 +
   2.123 +   schedPr = readPrivQ( semEnv->readyVPQs[coreNum] );
   2.124 +      //Note, using a non-blocking queue -- it returns NULL if queue empty
   2.125 +
   2.126 +   return( schedPr );
   2.127 + }
   2.128 +
     3.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     3.2 +++ b/SSR_Request_Handlers.c	Wed Sep 01 08:22:54 2010 -0700
     3.3 @@ -0,0 +1,330 @@
     3.4 +/*
     3.5 + * Copyright 2010  OpenSourceCodeStewardshipFoundation
     3.6 + *
     3.7 + * Licensed under BSD
     3.8 + */
     3.9 +
    3.10 +#include <stdio.h>
    3.11 +#include <stdlib.h>
    3.12 +#include <malloc.h>
    3.13 +
    3.14 +#include "VMS/VMS.h"
    3.15 +#include "VMS/Queue_impl/PrivateQueue.h"
    3.16 +#include "VMS/Hash_impl/PrivateHash.h"
    3.17 +#include "SSR.h"
    3.18 +
    3.19 +
    3.20 +
    3.21 +//===========================================================================
    3.22 +//                           Helpers
    3.23 +
    3.24 +HashEntry *
    3.25 +giveEntryElseInsertReqst( char *key, SSRSemReq *semReq,
    3.26 +    HashTable   *commHashTbl )
    3.27 + { HashEntry    *entry;
    3.28 +   SSRSemReq  *waitingReq;
    3.29 +
    3.30 +   entry = getEntryFromTable( (char *)key, commHashTbl );
    3.31 +   if( entry == NULL )
    3.32 +    {    //no waiting sends or receives, so add this request and exit
    3.33 +      addValueIntoTable( key, semReq, commHashTbl );
    3.34 +      return NULL;
    3.35 +    }
    3.36 +   waitingReq = (SSRSemReq *)entry->content;
    3.37 +   if( waitingReq == NULL )  //might happen when last waiting gets paired
    3.38 +    {    //no waiting sends or receives, so add this request and exit
    3.39 +      entry->content = semReq;
    3.40 +      return NULL;
    3.41 +    }
    3.42 +   return entry;
    3.43 + }
    3.44 +
    3.45 +
    3.46 +
    3.47 +
    3.48 +//===========================================================================
    3.49 +/*The semantic request has the receiving processor and the message type
    3.50 + *
    3.51 + *Note one value in this approach: without the extra VMS layer,
    3.52 + * the send and receive would happen in real time instead of virtual time,
    3.53 + * which would waste real time while one of them waited for other
    3.54 + *
    3.55 + *When successfully pair-up, transfer ownership of the sent data
    3.56 + * to the receiving processor
    3.57 + *
    3.58 + *Messages of a given Type have to be kept separate..  so need a separate
    3.59 + * entry in the hash table for each pair: receivePr, msgType
    3.60 + *
    3.61 + *Also, if same sender sends multiple before any get received, then need to
    3.62 + * stack the sends up -- even if a send waits until it's paired, several
    3.63 + * separate processors can send to the same receiver, and hashing on the
    3.64 + * receive processor, so they will stack up.
    3.65 + */
    3.66 +void
    3.67 +handleSendType( SSRSemReq *semReq, SSRSemEnv *semEnv )
    3.68 + { VirtProcr   *sendPr, *receivePr;
    3.69 +   int          key[] = {0,0,0};
    3.70 +   SSRSemReq *waitingReq;
    3.71 +   HashEntry   *entry;
    3.72 +   HashTable   *commHashTbl = semEnv->commHashTbl;
    3.73 + 
    3.74 +   receivePr = semReq->receivePr; //For "send", know both send & recv procrs
    3.75 +   sendPr    = semReq->sendPr;
    3.76 +
    3.77 +         //TODO: handle transfer of msg-locs ownership
    3.78 +         //TODO: hash table implemented such that using "addEntry" or
    3.79 +         //  "addValue" to table causes the *value* in old entry to be
    3.80 +         //  *freed* -- this is bad.  Want to stack up values in a linked
    3.81 +         //  list when multiple have the same key.
    3.82 +
    3.83 +      //TODO: use a faster hash function -- see notes in intelligence gather
    3.84 +   key[0] = (int)receivePr;
    3.85 +   key[1] = (int)(semReq->msgType);
    3.86 + //key[2] acts as the 0 that terminates the string
    3.87 +
    3.88 +   entry = giveEntryElseInsertReqst( key, semReq, commHashTbl);
    3.89 +   if( entry == NULL ) return;  //was just inserted
    3.90 +
    3.91 +   waitingReq = (SSRSemReq *)entry->content;
    3.92 +
    3.93 +      //At this point, know have waiting request(s) -- either sends or recv
    3.94 +      //Note, can only have max of one receive waiting, and cannot have both
    3.95 +      // sends and receives waiting (they would have paired off)
    3.96 +      // but can have multiple send_type requests waiting (from diff senders)
    3.97 +   if( waitingReq->reqType == send_type )
    3.98 +    {    //waiting request is another send, so stack this up on list
    3.99 +      semReq->    nextReqInHashEntry = waitingReq->nextReqInHashEntry;
   3.100 +      waitingReq->nextReqInHashEntry = semReq;
   3.101 +         //printf("linked requests: %d, %d  |  ", semReq, waitingReq );
   3.102 +         //printf("type: %d, %d\n", semReq->reqType, waitingReq->reqType );
   3.103 +      return;
   3.104 +    }
   3.105 +   else
   3.106 +    {    //waiting request is a receive, so pair it to this send
   3.107 +         //first, remove the waiting receive request from the list in entry
   3.108 +      entry->content = waitingReq->nextReqInHashEntry;
   3.109 +      if( entry->content == NULL )
   3.110 +       {    //TODO: mod hash table to double-link, so can delete entry from
   3.111 +            // table without hashing the key and looking it up again
   3.112 +         deleteEntryFromTable( entry->key, commHashTbl );  //frees entry too
   3.113 +       }
   3.114 +      
   3.115 +         //attach msg that's in this send request to receiving procr
   3.116 +         // when comes back from suspend, will have msg in semanticData
   3.117 +      receivePr->semanticData = semReq->msg;
   3.118 +
   3.119 +         //bring both processors back from suspend
   3.120 +      writePrivQ( sendPr,    semEnv->readyVPQs[sendPr->coreAnimatedBy] );
   3.121 +      writePrivQ( receivePr, semEnv->readyVPQs[receivePr->coreAnimatedBy] );
   3.122 +
   3.123 +         //don't need semReq anymore -- free it
   3.124 +      SSR__free_semantic_request( waitingReq );
   3.125 +      SSR__free_semantic_request( semReq );
   3.126 +      return;
   3.127 +    }
   3.128 + }
   3.129 +
   3.130 +
   3.131 +/*Looks like can make single handler for both sends..
   3.132 + */
   3.133 +//TODO: combine both send handlers into single handler
   3.134 +void
   3.135 +handleSendFromTo( SSRSemReq *semReq, SSRSemEnv *semEnv)
   3.136 + { VirtProcr   *sendPr, *receivePr;
   3.137 +   int          key[] = {0,0,0};
   3.138 +   SSRSemReq *waitingReq;
   3.139 +   HashEntry   *entry;
   3.140 +   HashTable   *commHashTbl = semEnv->commHashTbl;
   3.141 +
   3.142 +   receivePr = semReq->receivePr; //For "send", know both send & recv procrs
   3.143 +   sendPr    = semReq->sendPr;    
   3.144 +
   3.145 +   key[0] = (int)receivePr;
   3.146 +   key[1] = (int)sendPr;
     3.147 + //key[2] acts as the 0 that terminates the string
   3.148 +
   3.149 +   entry = giveEntryElseInsertReqst( key, semReq, commHashTbl);
   3.150 +   if( entry == NULL ) return;  //was just inserted
   3.151 +
   3.152 +   waitingReq = (SSRSemReq *)entry->content;
   3.153 +
   3.154 +      //At this point, know have waiting request(s) -- either sends or recv
   3.155 +   if( waitingReq->reqType == send_from_to )
   3.156 +    { printf("\n ERROR: shouldn't be two send from-tos waiting \n");
   3.157 +    }
   3.158 +   else
   3.159 +    {    //waiting request is a receive, so it completes pair with this send
   3.160 +
   3.161 +         //remove the waiting receive request from the entry
   3.162 +      entry->content = waitingReq->nextReqInHashEntry;
   3.163 +         //can only be one waiting req for "from-to" semantics
   3.164 +      if( entry->content != NULL )
   3.165 +       {
   3.166 +         printf("\nERROR in handleSendFromTo\n");
   3.167 +         printf("waitReq: %d | next req: %d\n", waitingReq, entry->content);
   3.168 +       }
   3.169 +      deleteEntryFromTable( entry->key, commHashTbl );  //frees entry too
   3.170 +
   3.171 +         //attach msg that's in this send request to receiving procr
   3.172 +         // when comes back from suspend, will have msg in semanticData
   3.173 +      receivePr->semanticData = semReq->msg;
   3.174 +
   3.175 +         //bring both processors back from suspend
   3.176 +      writePrivQ( sendPr,    semEnv->readyVPQs[sendPr->coreAnimatedBy] );
   3.177 +      writePrivQ( receivePr, semEnv->readyVPQs[receivePr->coreAnimatedBy] );
   3.178 +      
   3.179 +         //done with requests, so free them
   3.180 +      SSR__free_semantic_request( waitingReq );
   3.181 +      SSR__free_semantic_request( semReq );
   3.182 +      return;
   3.183 +    }
   3.184 + }
   3.185 +
   3.186 +
   3.187 +
   3.188 +//=======================================================
   3.189 +
   3.190 +/*Removed this one for now, because forces either a search or going to a
   3.191 + * two-level hash table, where one level the key is the receivePr, in the
   3.192 + * other level, the key is the type.
   3.193 + *So, each dest procr that either does a receive_type or that a send_type
   3.194 + * targets it, would have a hash table created just for it and placed
   3.195 + * into the first-level hash table entry for that receive procr.
   3.196 + *Then, doing a receive_type first looks up entry for receive procr in first
   3.197 + * table, gets the type-table out of that entry, and does a second lookup
   3.198 + * in the type-table.
   3.199 + *Doing a receive from-to looks up in the first table, gets the second table
   3.200 + * hashed on "from" procr.
   3.201 + *Doing a receive_any looks up in the first table, then looks to see if
   3.202 + * either of the hash tables have any entries -- would then have to do a
   3.203 + * linear search through the hash-table's array for the first non-empty
   3.204 + * spot
   3.205 + *Yuck.
   3.206 + *
   3.207 + *Alternatively, could keep two hash tables updated all the time -- one that
   3.208 + * does the receive_type and receive_from_to and a second that does
   3.209 + * receive_any -- would only hash the second table by the receive procr.
   3.210 + * When remove from one table, keep back-links to both tables, so can also
   3.211 + * quickly remove from other table.
   3.212 + *Cost is doing two hash-table lookups for every insert.
   3.213 + * If ever add receive_any, looking like this second option easier and even
   3.214 + * less costly.
   3.215 + */
   3.216 +void
   3.217 +handleReceiveAny( SSRSemReq *semReq, SSRSemEnv *semEnv)
   3.218 + {
   3.219 + 
   3.220 + }
   3.221 +
   3.222 +
   3.223 +void
   3.224 +handleReceiveType( SSRSemReq *semReq, SSRSemEnv *semEnv)
   3.225 + { VirtProcr   *sendPr, *receivePr;
   3.226 +   int          key[] = {0,0,0};
   3.227 +   SSRSemReq *waitingReq;
   3.228 +   HashEntry   *entry;
   3.229 +   HashTable   *commHashTbl = semEnv->commHashTbl;
   3.230 +
   3.231 +   receivePr = semReq->receivePr;
   3.232 +
   3.233 +   key[0] = (int)receivePr;
   3.234 +   key[1] = (int)(semReq->msgType);
     3.235 + //key[2] acts as the 0 that terminates the string
   3.236 +
   3.237 +
   3.238 +   entry = giveEntryElseInsertReqst( key, semReq, commHashTbl);
   3.239 +   if( entry == NULL ) return;  //was just inserted
   3.240 +
   3.241 +   waitingReq = (SSRSemReq *)entry->content;
   3.242 +
   3.243 +      //At this point, know have waiting request(s) -- should be send(s)
   3.244 +   if( waitingReq->reqType == send_type )
   3.245 +    {    //waiting request is a send, so pair it with this receive
   3.246 +         //first, remove the waiting send request from the list in entry
   3.247 +      entry->content = waitingReq->nextReqInHashEntry;
   3.248 +      if( entry->content == NULL )
   3.249 +       { deleteEntryFromTable( entry->key, commHashTbl );  //frees entry too
   3.250 +       }
   3.251 +      
   3.252 +         //attach msg that's in the send request to receiving procr
   3.253 +         // when comes back from suspend, will have msg in semanticData
   3.254 +      receivePr->semanticData = waitingReq->msg;
   3.255 +
   3.256 +         //bring both processors back from suspend
   3.257 +      sendPr = waitingReq->sendPr;
   3.258 +      writePrivQ( sendPr,    semEnv->readyVPQs[sendPr->coreAnimatedBy] );
   3.259 +      writePrivQ( receivePr, semEnv->readyVPQs[receivePr->coreAnimatedBy] );
   3.260 +
   3.261 +         //done with requests, so free them
   3.262 +      SSR__free_semantic_request( waitingReq );
   3.263 +      SSR__free_semantic_request( semReq );
   3.264 +      return;
   3.265 +    }
   3.266 +   printf("\nLang Impl Error: Should never be two waiting receives!\n");
   3.267 + }
   3.268 +
   3.269 +
   3.270 +/*
   3.271 + */
   3.272 +void
   3.273 +handleReceiveFromTo( SSRSemReq *semReq, SSRSemEnv *semEnv)
   3.274 + { VirtProcr   *sendPr, *receivePr;
   3.275 +   int          key[] = {0,0,0};
   3.276 +   SSRSemReq *waitingReq;
   3.277 +   HashEntry   *entry;
   3.278 +   HashTable   *commHashTbl = semEnv->commHashTbl;
   3.279 +
   3.280 +   receivePr = semReq->receivePr;
   3.281 +   sendPr    = semReq->sendPr;    //for receive from-to, know send procr
   3.282 +
   3.283 +   key[0] = (int)receivePr;
   3.284 +   key[1] = (int)sendPr;
     3.285 + //key[2] acts as the 0 that terminates the string
   3.286 +
   3.287 +   entry = giveEntryElseInsertReqst( key, semReq, commHashTbl);
   3.288 +   if( entry == NULL ) return;  //was just inserted
   3.289 +
   3.290 +   waitingReq = (SSRSemReq *)entry->content;
   3.291 +
   3.292 +      //At this point, know have waiting request(s) -- should be send(s)
   3.293 +   if( waitingReq->reqType == send_from_to )
   3.294 +    {    //waiting request is a send, so pair it with this receive
   3.295 +
     3.296 +         //For from-to, should only ever be a single reqst waiting to be paired
   3.297 +      entry->content = waitingReq->nextReqInHashEntry;
   3.298 +      if( entry->content != NULL ) printf("\nERROR in handleRecvFromTo\n");
   3.299 +      deleteEntryFromTable( entry->key, commHashTbl );  //frees entry too
   3.300 +
   3.301 +         //attach msg that's in the send request to receiving procr
   3.302 +         // when comes back from suspend, will have msg in semanticData
   3.303 +      receivePr->semanticData = waitingReq->msg;
   3.304 +
   3.305 +         //bring both processors back from suspend
   3.306 +      sendPr = waitingReq->sendPr;
   3.307 +      writePrivQ( sendPr,    semEnv->readyVPQs[sendPr->coreAnimatedBy] );
   3.308 +      writePrivQ( receivePr, semEnv->readyVPQs[receivePr->coreAnimatedBy] );
   3.309 +
   3.310 +         //done with requests, so free them
   3.311 +      SSR__free_semantic_request( waitingReq );
   3.312 +      SSR__free_semantic_request( semReq );
   3.313 +      return;
   3.314 +    }
   3.315 +   printf("\nLang Impl Error: Should never be two waiting receives!\n");
   3.316 + }
   3.317 +
   3.318 +
   3.319 +
   3.320 +//===============================================
   3.321 +void
   3.322 +handleTransferTo( SSRSemReq *semReq, SSRSemEnv *semEnv)
   3.323 + {
   3.324 +
   3.325 + }
   3.326 +
   3.327 +void
   3.328 +handleTransferOut( SSRSemReq *semReq, SSRSemEnv *semEnv)
   3.329 + {
   3.330 +
   3.331 + }
   3.332 +
   3.333 +
     4.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     4.2 +++ b/SSR_Request_Handlers.h	Wed Sep 01 08:22:54 2010 -0700
     4.3 @@ -0,0 +1,33 @@
     4.4 +/*
     4.5 + *  Copyright 2009 OpenSourceStewardshipFoundation.org
     4.6 + *  Licensed under GNU General Public License version 2
     4.7 + *
     4.8 + * Author: seanhalle@yahoo.com
     4.9 + *
    4.10 + */
    4.11 +
    4.12 +#ifndef _SSR_REQ_H
    4.13 +#define	_SSR_REQ_H
    4.14 +
    4.15 +#include "SSR.h"
    4.16 +
    4.17 +/*This header defines everything specific to the SSR semantic plug-in
    4.18 + */
    4.19 +
    4.20 +void
    4.21 +handleSendType( SSRSemReq *semReq, SSRSemEnv *semEnv);
    4.22 +void
    4.23 +handleSendFromTo( SSRSemReq *semReq, SSRSemEnv *semEnv);
    4.24 +void
    4.25 +handleReceiveAny( SSRSemReq *semReq, SSRSemEnv *semEnv);
    4.26 +void
    4.27 +handleReceiveType( SSRSemReq *semReq, SSRSemEnv *semEnv);
    4.28 +void
    4.29 +handleReceiveFromTo( SSRSemReq *semReq, SSRSemEnv *semEnv);
    4.30 +void
    4.31 +handleTransferTo( SSRSemReq *semReq, SSRSemEnv *semEnv);
    4.32 +void
    4.33 +handleTransferOut( SSRSemReq *semReq, SSRSemEnv *semEnv);
    4.34 +
    4.35 +#endif	/* _SSR_REQ_H */
    4.36 +
     5.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     5.2 +++ b/SSR_lib.c	Wed Sep 01 08:22:54 2010 -0700
     5.3 @@ -0,0 +1,417 @@
     5.4 +/*
     5.5 + * Copyright 2010  OpenSourceCodeStewardshipFoundation
     5.6 + *
     5.7 + * Licensed under BSD
     5.8 + */
     5.9 +
    5.10 +#include <stdio.h>
    5.11 +#include <stdlib.h>
    5.12 +#include <malloc.h>
    5.13 +
    5.14 +#include "VMS/VMS.h"
    5.15 +#include "SSR.h"
    5.16 +#include "VMS/Queue_impl/PrivateQueue.h"
    5.17 +#include "VMS/Hash_impl/PrivateHash.h"
    5.18 +
    5.19 +
    5.20 +//==========================================================================
    5.21 +
    5.22 +void
    5.23 +SSR__init();
    5.24 +
    5.25 +void
    5.26 +SSR__init_Seq();
    5.27 +
    5.28 +void
    5.29 +SSR__init_Helper();
    5.30 +//==========================================================================
    5.31 +
    5.32 +
    5.33 +/*TODO: Q: dealing with library f()s and DKU vs WT vs FoR
    5.34 + * (still want to do FoR, with time-lines as syntax, could be super cool)
    5.35 + * A: thinking pin the coreLoops for all of BLIS -- let Master arbitrate
    5.36 + * among library, DKU, WT, FoR -- all the patterns in terms of virtual
    5.37 + * processors (or equivalently work-units), so Master picks which virt procr
    5.38 + * from which portions of app (DKU, WT, FoR) onto which sched slots
    5.39 + *Might even do hierarchy of masters -- group of sched slots for each core
    5.40 + * has its own master, that keeps generated work local
    5.41 + * single-reader-single-writer sync everywhere -- no atomic primitives
    5.42 + * Might have the different schedulers talk to each other, to negotiate
    5.43 + * larger-grain sharing of resources, according to predicted critical
    5.44 + * path, and expansion of work
    5.45 + */
    5.46 +
    5.47 +
    5.48 +
    5.49 +//===========================================================================
    5.50 +
    5.51 +
    5.52 +/*These are the library functions *called in the application*
    5.53 + * 
    5.54 + *There's a pattern for the outside sequential code to interact with the
    5.55 + * VMS_HW code.
    5.56 + *The VMS_HW system is inside a boundary..  every SSR system is in its
    5.57 + * own directory that contains the functions for each of the processor types.
    5.58 + * One of the processor types is the "seed" processor that starts the
    5.59 + * cascade of creating all the processors that do the work.
    5.60 + *So, in the directory is a file called "EntryPoint.c" that contains the
    5.61 + * function, named appropriately to the work performed, that the outside
    5.62 + * sequential code calls.  This function follows a pattern:
    5.63 + *1) it calls SSR__init()
    5.64 + *2) it creates the initial data for the seed processor, which is passed
    5.65 + *    in to the function
    5.66 + *3) it creates the seed SSR processor, with the data to start it with.
    5.67 + *4) it calls startSSRThenWaitUntilWorkDone
    5.68 + *5) it gets the returnValue from the transfer struc and returns that
    5.69 + *    from the function
    5.70 + *
    5.71 + *For now, a new SSR system has to be created via SSR__init every
    5.72 + * time an entry point function is called -- later, might add letting the
    5.73 + * SSR system be created once, and let all the entry points just reuse
    5.74 + * it -- want to be as simple as possible now, and see by using what makes
    5.75 + * sense for later..
    5.76 + */
    5.77 +
    5.78 +
    5.79 +
    5.80 +//===========================================================================
    5.81 +
    5.82 +/*This is the "border crossing" function -- the thing that crosses from the
    5.83 + * outside world, into the VMS_HW world.  It initializes and starts up the
    5.84 + * VMS system, then creates one processor from the specified function and
    5.85 + * puts it into the readyQ.  From that point, that one function is resp.
    5.86 + * for creating all the other processors, that then create others, and so
    5.87 + * forth.
    5.88 + *When all the processors, including the seed, have dissipated, then this
    5.89 + * function returns.  The results will have been written by side-effect via
    5.90 + * pointers read from, or written into initData.
    5.91 + *
    5.92 + *NOTE: no Threads should exist in the outside program that might touch
    5.93 + * any of the data reachable from initData passed in to here
    5.94 + */
    5.95 +void
    5.96 +SSR__create_seed_procr_and_do_work( VirtProcrFnPtr fnPtr, void *initData )
    5.97 + { SSRSemEnv *semEnv;
    5.98 +   VirtProcr *seedPr;
    5.99 +
   5.100 +   #ifdef DEBUG
   5.101 +   SSR__init_Seq();  //debug sequential exe
   5.102 +   #else
   5.103 +   SSR__init();      //normal multi-thd
   5.104 +   #endif
   5.105 +   semEnv = _VMSMasterEnv->semanticEnv;
   5.106 +
   5.107 +      //SSR starts with one processor, which is put into initial environ,
   5.108 +      // and which then calls create() to create more, thereby expanding work
   5.109 +   seedPr = VMS__create_procr( fnPtr, initData );
   5.110 +
   5.111 +   seedPr->coreAnimatedBy = semEnv->nextCoreToGetNewPr++;
   5.112 +
   5.113 +   writePrivQ( seedPr, semEnv->readyVPQs[seedPr->coreAnimatedBy] );
   5.114 +   semEnv->numVirtPr = 1;
   5.115 +
   5.116 +   #ifdef DEBUG
   5.117 +   VMS__start_the_work_then_wait_until_done_Seq();  //debug sequential exe
   5.118 +   #else
   5.119 +   VMS__start_the_work_then_wait_until_done();      //normal multi-thd
   5.120 +   #endif
   5.121 +
   5.122 +   SSR__cleanup_after_shutdown();
   5.123 + }
   5.124 +
   5.125 +
   5.126 +//===========================================================================
   5.127 +
   5.128 +/*Initializes all the data-structures for a SSR system -- but doesn't
   5.129 + * start it running yet!
   5.130 + *
   5.131 + * 
   5.132 + *This sets up the semantic layer over the VMS system
   5.133 + *
   5.134 + *First, calls VMS_Setup, then creates own environment, making it ready
   5.135 + * for creating the seed processor and then starting the work.
   5.136 + */
   5.137 +void
   5.138 +SSR__init()
   5.139 + {
   5.140 +   VMS__init();
   5.141 +      //masterEnv, a global var, now is partially set up by init_VMS
   5.142 +
   5.143 +   SSR__init_Helper();
   5.144 + }
   5.145 +
   5.146 +void
   5.147 +SSR__init_Seq()
   5.148 + {
   5.149 +   VMS__init_Seq();
   5.150 +      //masterEnv, a global var, now is partially set up by init_VMS
   5.151 +
   5.152 +   SSR__init_Helper();
   5.153 + }
   5.154 +
   5.155 +void
   5.156 +SSR__init_Helper()
   5.157 + { SSRSemEnv       *semanticEnv;
   5.158 +   PrivQueueStruc **readyVPQs;
   5.159 +   int              coreIdx;
   5.160 + 
   5.161 +      //Hook up the semantic layer's plug-ins to the Master virt procr
   5.162 +   _VMSMasterEnv->requestHandler = &SSR__Request_Handler;
   5.163 +   _VMSMasterEnv->slaveScheduler = &SSR__schedule_virt_procr;
   5.164 +
   5.165 +      //create the semantic layer's environment (all its data) and add to
   5.166 +      // the master environment
   5.167 +   semanticEnv = malloc( sizeof( SSRSemEnv ) );
   5.168 +   _VMSMasterEnv->semanticEnv = semanticEnv;
   5.169 +
   5.170 +      //create the ready queue, hash tables used for pairing send to receive
   5.171 +      // and so forth
   5.172 +      //TODO: add hash tables for pairing sends with receives, and
   5.173 +      // initialize the data ownership system
   5.174 +   readyVPQs = malloc( NUM_CORES * sizeof(PrivQueueStruc *) );
   5.175 +
   5.176 +   for( coreIdx = 0; coreIdx < NUM_CORES; coreIdx++ )
   5.177 +    {
   5.178 +      readyVPQs[ coreIdx ] = makePrivQ();
   5.179 +    }
   5.180 +   
   5.181 +   semanticEnv->readyVPQs = readyVPQs;
   5.182 +   
   5.183 +   semanticEnv->nextCoreToGetNewPr = 0;
   5.184 +   
   5.185 +   semanticEnv->commHashTbl     = makeHashTable( 1<<16, NULL ); //start big
   5.186 + }
   5.187 +
   5.188 +
   5.189 +/*Frees any memory allocated by SSR__init() then calls VMS__shutdown
   5.190 + */
   5.191 +void
   5.192 +SSR__cleanup_after_shutdown()
   5.193 + { SSRSemEnv *semanticEnv;
   5.194 +   int coreIdx;
   5.195 + 
   5.196 +   semanticEnv = _VMSMasterEnv->semanticEnv;
   5.197 +
   5.198 +//TODO: double check all sem env locations freed
   5.199 +
   5.200 +   for( coreIdx = 0; coreIdx < NUM_CORES; coreIdx++ )
   5.201 +    {
   5.202 +      free( semanticEnv->readyVPQs[coreIdx]->startOfData );
   5.203 +      free( semanticEnv->readyVPQs[coreIdx] );
   5.204 +    }
   5.205 +   free( semanticEnv->readyVPQs );
   5.206 +   
   5.207 +   freeHashTable( semanticEnv->commHashTbl );
   5.208 +   free( _VMSMasterEnv->semanticEnv );
   5.209 +   VMS__cleanup_after_shutdown();
   5.210 + }
   5.211 +
   5.212 +
   5.213 +//===========================================================================
   5.214 +
   5.215 +/*
   5.216 + */
   5.217 +inline VirtProcr *
   5.218 +SSR__create_procr_with( VirtProcrFnPtr fnPtr, void *initData,
   5.219 +                          VirtProcr *creatingPr )
   5.220 + { VirtProcr *newPr;
   5.221 +
   5.222 +   newPr = VMS__create_procr( fnPtr, initData );
   5.223 +
   5.224 +      //After create, have to send request to plugin for any sem env
   5.225 +      // modifications -- such as putting the new procr into the ready Q
   5.226 +      //Need a processor to "animate" the creation -- it's one the register
   5.227 +      // request is attached to, and one suspended in order to send req
   5.228 +      // to plugin
   5.229 +   VMS__send_register_new_procr_request( newPr, creatingPr );
   5.230 +
   5.231 +   return newPr;
   5.232 + }
   5.233 +
   5.234 +
   5.235 +inline void
   5.236 +SSR__dissipate_procr( VirtProcr *procrToDissipate )
   5.237 + {
   5.238 +   VMS__dissipate_procr( procrToDissipate );
   5.239 + }
   5.240 +
   5.241 +
   5.242 +//===========================================================================
   5.243 +
   5.244 +void *
   5.245 +SSR__malloc_size_to( int numBytes, VirtProcr *ownerPr )
   5.246 + {
   5.247 +//TODO: Put in the ownership system from DKU -- have it working, just adapt
   5.248 +//  it to here
   5.249 +   return malloc( numBytes );
   5.250 + }
   5.251 +
   5.252 +
   5.253 +void
   5.254 +SSR__transfer_ownership_of_from_to( void *data, VirtProcr *oldOwnerPr,
   5.255 +                                                  VirtProcr *newOwnerPr )
   5.256 + {
   5.257 +
   5.258 + }
   5.259 +
   5.260 +
   5.261 +void
   5.262 +SSR__add_ownership_by_to( VirtProcr *newOwnerPr, void *data )
   5.263 + {
   5.264 +
   5.265 + }
   5.266 +
   5.267 +
   5.268 +void
   5.269 +SSR__remove_ownership_by_from( VirtProcr *loserPr, void *dataLosing )
   5.270 + {
   5.271 +
   5.272 + }
   5.273 +
   5.274 +
   5.275 +/*Causes the SSR system to remove internal ownership, so data won't be
   5.276 + * freed when SSR shuts down, and will persist in the external program.
   5.277 + *
   5.278 + *Must be called from the processor that currently owns the data.
   5.279 + *
   5.280 + *IMPL: Transferring ownership touches two different virtual processor's
   5.281 + * state -- which means it has to be done carefully -- the VMS rules for
   5.282 + * semantic layers say that a work-unit is only allowed to touch the
   5.283 + * virtual processor it is part of, and that only a single work-unit per
   5.284 + * virtual processor be scheduled to a slave at a time.  So, this has to
   5.285 + * modify the virtual processor that owns the work-unit that called this
   5.286 + * function, then create a request to have the other processor modified.
   5.287 + *However, in this case, the TO processor is the outside, and transfers
   5.288 + * are only allowed to be called by the giver-upper, so can mark caller of
   5.289 + * this function as no longer owner, and return -- done.
   5.290 + */
   5.291 +void
   5.292 +SSR__transfer_ownership_to_outside( void *data )
   5.293 + {
   5.294 +   //TODO: removeAllOwnersFrom( data );
   5.295 + }
   5.296 +
   5.297 +
   5.298 +//===========================================================================
   5.299 +
   5.300 +void
   5.301 +SSR__send_of_type_to( VirtProcr *sendPr, void *msg, const int type,
   5.302 +                        VirtProcr *receivePr)
   5.303 + { SSRSemReq *reqData;
   5.304 +
   5.305 +   reqData = malloc( sizeof(SSRSemReq) );
   5.306 +   reqData->receivePr = receivePr;
   5.307 +   reqData->sendPr    = sendPr;
   5.308 +   reqData->reqType   = send_type;
   5.309 +   reqData->msgType   = type;
   5.310 +   reqData->msg       = msg;
   5.311 +   reqData->nextReqInHashEntry = NULL;
   5.312 +
   5.313 +      //On ownership -- remove inside the send and let ownership sit in limbo
   5.314 +      // as a potential in an entry in the hash table, when this receive msg
   5.315 +      // gets paired to a send, the ownership gets added to the receivePr --
   5.316 +      // the next work-unit in the receivePr's trace will have ownership.
   5.317 +   VMS__add_sem_request( reqData, sendPr );
   5.318 +   VMS__suspend_procr( sendPr ); //will suspend then resume and continue
   5.319 +
   5.320 +      //When come back from suspend, no longer own data reachable from msg
   5.321 +      //TODO: release ownership here
   5.322 + }
   5.323 +
   5.324 +void
   5.325 +SSR__send_from_to( void *msg, VirtProcr *sendPr, VirtProcr *receivePr )
   5.326 + { SSRSemReq *reqData;
   5.327 +
   5.328 +      //hash on the receiver, 'cause always know it, but sometimes want to
   5.329 +      // receive from anonymous sender
   5.330 +
   5.331 +   reqData = malloc( sizeof(SSRSemReq) );
   5.332 +   reqData->receivePr = receivePr;
   5.333 +   reqData->sendPr    = sendPr;
   5.334 +   reqData->reqType   = send_from_to;
   5.335 +   reqData->msg       = msg;
   5.336 +   reqData->nextReqInHashEntry = NULL;
   5.337 +
   5.338 +      //On ownership -- remove inside the send and let ownership sit in limbo
   5.339 +      // as a potential in an entry in the hash table, when this receive msg
   5.340 +      // gets paired to a send, the ownership gets added to the receivePr --
   5.341 +      // the next work-unit in the receivePr's trace will have ownership.
   5.342 +   VMS__add_sem_request( reqData, sendPr );
   5.343 +   VMS__suspend_procr( sendPr ); //will suspend then resume and continue
   5.344 +
   5.345 +      //When come back from suspend, no longer own data reachable from msg
   5.346 +      //TODO: release ownership here
   5.347 + }
   5.348 +
   5.349 +
   5.350 +//===========================================================================
   5.351 +
   5.352 +void *
   5.353 +SSR__receive_any_to( VirtProcr *receivePr )
   5.354 + {
   5.355 +
   5.356 + }
   5.357 +
   5.358 +void *
   5.359 +SSR__receive_type_to( const int type, VirtProcr *receivePr )
   5.360 + { void *msg;
   5.361 +   SSRSemReq *reqData;
   5.362 +
   5.363 +   reqData = malloc( sizeof(SSRSemReq) );
   5.364 +   reqData->receivePr = receivePr;
   5.365 +   reqData->reqType   = receive_type;
   5.366 +   reqData->msgType   = type;
   5.367 +   reqData->nextReqInHashEntry = NULL;
   5.368 +
   5.369 +   VMS__add_sem_request( reqData, receivePr );
   5.370 +   VMS__suspend_procr( receivePr );
   5.371 +   msg = receivePr->semanticData;
   5.372 +   return msg;
   5.373 + }
   5.374 +
   5.375 +
   5.376 +
   5.377 +/*Call this at point receiving virt pr wants in-coming data.
   5.378 + * 
   5.379 + *The reason receivePr must call this is that it modifies the receivPr
   5.380 + * loc structure directly -- and the VMS rules state a virtual processor
   5.381 + * loc structure can only be modified by itself.
   5.382 + */
   5.383 +void *
   5.384 +SSR__receive_from_to( VirtProcr *sendPr, VirtProcr *receivePr )
   5.385 + { SSRSemReq *reqData;
   5.386 +
   5.387 +      //hash on the receiver, 'cause always know it, but sometimes want to
   5.388 +      // receive from anonymous sender
   5.389 +
   5.390 +   reqData = malloc( sizeof(SSRSemReq) );
   5.391 +   reqData->receivePr = receivePr;
   5.392 +   reqData->sendPr    = sendPr;
   5.393 +   reqData->reqType   = receive_from_to;
   5.394 +   reqData->nextReqInHashEntry = NULL;
   5.395 +
   5.396 +      //On ownership -- remove inside the send after receive successful.
   5.397 +      // Below, add ownership when come back from suspend
   5.398 +      //Reason: Thinking of impl ownership mech such that it automatically
   5.399 +      // frees any data has no owners -- so have to add receiver before
   5.400 +      // remove sender
   5.401 +   VMS__add_sem_request( reqData, receivePr );
   5.402 +      //TODO: add ownership of locs reachable from msg inside reqst handler
   5.403 +   VMS__suspend_procr( receivePr ); //will suspend then resume and continue
   5.404 +
   5.405 +      //When come back from suspend, the msg data is in receivePr->semData
   5.406 +   return receivePr->semanticData;
   5.407 + }
   5.408 +
   5.409 +
   5.410 +//===========================================================================
   5.411 +
   5.412 +/*Just thin wrapper for now -- semantic request is still a simple thing
   5.413 + * (July 3, 2010)
   5.414 + */
   5.415 +inline void
   5.416 +SSR__free_semantic_request( SSRSemReq *semReq )
   5.417 + {
   5.418 +   free( semReq );
   5.419 + }
   5.420 +
     6.1 --- a/VMSHW.h	Mon Aug 09 02:29:31 2010 -0700
     6.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     6.3 @@ -1,125 +0,0 @@
     6.4 -/*
     6.5 - *  Copyright 2009 OpenSourceStewardshipFoundation.org
     6.6 - *  Licensed under GNU General Public License version 2
     6.7 - *
     6.8 - * Author: seanhalle@yahoo.com
     6.9 - *
    6.10 - */
    6.11 -
    6.12 -#ifndef _VMSHW_H
    6.13 -#define	_VMSHW_H
    6.14 -
    6.15 -#include "VMS/Queue_impl/PrivateQueue.h"
    6.16 -#include "VMS/Hash_impl/PrivateHash.h"
    6.17 -#include "VMS/VMS.h"
    6.18 -
    6.19 -/*This header defines everything specific to the VMSHW semantic plug-in
    6.20 - */
    6.21 -typedef struct _VMSHWSemReq   VMSHWSemReq;
    6.22 -
    6.23 -
    6.24 -/*Semantic layer-specific data sent inside a request from lib called in app
    6.25 - * to request handler called in MasterLoop
    6.26 - */
/*Kinds of semantic request the VMSHW lib funcs hand to the request
 * handler.  Starts at 1 so 0 never looks like a valid request kind.
 */
enum VMSHWReqType
 {
   send_type = 1,
   send_from_to,
   receive_any,    //order and grouping matter -- send before receive
   receive_type,   // and receive_any first of the receives -- Handlers
   receive_from_to,// rely upon this ordering of enum
   transfer_to,
   transfer_out
 };
    6.37 -
/*One communication request.  Requests that hash to the same key are
 * chained through nextReqInHashEntry while waiting to be paired.
 */
struct _VMSHWSemReq
 { enum VMSHWReqType    reqType;            //which operation this is
   VirtProcr           *sendPr;             //sending virt procr
   VirtProcr           *receivePr;          //receiving virt procr
   int32                msgType;            //tag used by *_type requests
   void                *msg;                //payload handed sender->receiver
   VMSHWSemReq         *nextReqInHashEntry; //stack of same-key requests
 }
/* VMSHWSemReq */;
    6.47 -
/*The semantic layer's private environment, hung off
 * _VMSMasterEnv->semanticEnv by VMSHW__init.
 */
typedef struct
 {
   PrivQueueStruc *readyVirtProcrQ; //virt procrs ready to be scheduled
   HashTable      *commHashTbl;     //pairs waiting sends with receives
   int             numVirtPr;       //live procr count; 0 triggers shutdown
 }
VMSHWSemEnv;
    6.55 -
    6.56 -
    6.57 -//===========================================================================
    6.58 -
    6.59 -void
    6.60 -VMSHW__create_seed_procr_and_do_work( VirtProcrFnPtr fn, void *initData );
    6.61 -
    6.62 -//=======================
    6.63 -
    6.64 -void
    6.65 -VMSHW__init();
    6.66 -
    6.67 -void
    6.68 -VMSHW__cleanup_after_shutdown();
    6.69 -
    6.70 -//=======================
    6.71 -
    6.72 -inline VirtProcr *
    6.73 -VMSHW__create_procr_with( VirtProcrFnPtr fnPtr, void *initData,
    6.74 -                          VirtProcr *creatingPr );
    6.75 -
    6.76 -void
    6.77 -VMSHW__dissipate_procr( VirtProcr *procrToDissipate );
    6.78 -
    6.79 -//=======================
    6.80 -void *
    6.81 -VMSHW__malloc_size_to( int numBytes, VirtProcr *ownerPr );
    6.82 -
    6.83 -void
    6.84 -VMSHW__transfer_ownership_of_from_to( void *data, VirtProcr *oldOwnerPr,
    6.85 -                                                    VirtProcr *newOwnerPr );
    6.86 -                                                    
    6.87 -void
    6.88 -VMSHW__add_ownership_by_to( VirtProcr *newOwnerPr, void *data );
    6.89 -
    6.90 -void
    6.91 -VMSHW__remove_ownership_by_from( VirtProcr *loserPr, void *dataLosing );
    6.92 -
    6.93 -void
    6.94 -VMSHW__transfer_ownership_to_outside( void *dataToTransferOwnershipOf );
    6.95 -
    6.96 -
    6.97 -
    6.98 -//=======================
    6.99 -void
   6.100 -VMSHW__send_of_type_to( VirtProcr *sendPr, void *msg, const int type,
   6.101 -                        VirtProcr *receivePr);
   6.102 -
   6.103 -void
   6.104 -VMSHW__send_from_to( void *msg, VirtProcr *sendPr, VirtProcr *receivePr);
   6.105 -
   6.106 -void *
   6.107 -VMSHW__receive_type_to( const int type, VirtProcr *receivePr );
   6.108 -
   6.109 -void *
   6.110 -VMSHW__receive_from_to( VirtProcr *sendPr, VirtProcr *receivePr );
   6.111 -
   6.112 -
   6.113 -//=======================
   6.114 -
   6.115 -void
   6.116 -VMSHW__free_semantic_request( VMSHWSemReq *semReq );
   6.117 -
   6.118 -
   6.119 -//=========================  Internal use only  =============================
   6.120 -void
   6.121 -VMSHW__Request_Handler( VirtProcr *requestingPr, void *_semEnv );
   6.122 -
   6.123 -VirtProcr *
   6.124 -VMSHW__schedule_virt_procr( void *_semEnv );
   6.125 -
   6.126 -
   6.127 -#endif	/* _VMSHW_H */
   6.128 -
     7.1 --- a/VMSHW_PluginFns.c	Mon Aug 09 02:29:31 2010 -0700
     7.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     7.3 @@ -1,114 +0,0 @@
     7.4 -/*
     7.5 - * Copyright 2010  OpenSourceCodeStewardshipFoundation
     7.6 - *
     7.7 - * Licensed under BSD
     7.8 - */
     7.9 -
    7.10 -#include <stdio.h>
    7.11 -#include <stdlib.h>
    7.12 -#include <malloc.h>
    7.13 -
    7.14 -#include "VMS/Queue_impl/PrivateQueue.h"
    7.15 -#include "VMSHW.h"
    7.16 -#include "VMSHW_Request_Handlers.h"
    7.17 -
    7.18 -
    7.19 -/*Will get requests to send, to receive, and to create new processors.
    7.20 - * Upon send, check the hash to see if a receive is waiting.
    7.21 - * Upon receive, check hash to see if a send has already happened.
    7.22 - * When other is not there, put in.  When other is there, the comm.
    7.23 - *  completes, which means the receiver P gets scheduled and
    7.24 - *  picks up right after the receive request.  So make the work-unit
    7.25 - *  and put it into the queue of work-units ready to go.
    7.26 - * Other request is create a new Processor, with the function to run in the
    7.27 - *  Processor, and initial data.
    7.28 - */
/*Plugin request handler: drains every request the suspended requestingPr
 * queued up, dispatching each by kind -- semantic (send/receive/transfer),
 * create-new-procr, or dissipate.  Runs in the MasterLoop.
 */
void
VMSHW__Request_Handler( VirtProcr *requestingPr, void *_semEnv )
 { VMSHWSemEnv *semEnv;
   VMSReqst    *req;
   VMSHWSemReq *semReq;
 
   semEnv = (VMSHWSemEnv *)_semEnv;

   req = VMS__take_top_request_from( requestingPr );
   
   while( req != NULL )
    {
      if( VMS__isSemanticReqst( req ) )
       {
         semReq = VMS__take_sem_reqst_from( req );
         if( semReq == NULL ) goto DoneHandlingReqst; //nothing attached
            //NOTE(review): no default case -- receive_any (and any bad
            // reqType) silently falls through; confirm that's intended
         switch( semReq->reqType )
          {
            case send_type:       handleSendType(     semReq, semEnv);
               break;
            case send_from_to:    handleSendFromTo(   semReq, semEnv);
               break;
            case receive_type:    handleReceiveType(  semReq, semEnv);
               break;
            case receive_from_to: handleReceiveFromTo(semReq, semEnv);
               break;
            case transfer_to:     handleTransferTo(   semReq, semEnv);
               VMSHW__free_semantic_request( semReq );
               break;
            case transfer_out:    handleTransferOut(  semReq, semEnv);
               VMSHW__free_semantic_request( semReq );
               break;
          }
         //NOTE: freeing semantic request data strucs handled inside these
       }
      else if( VMS__isCreateReqst( req ) ) //only plugin can add to ready Q
       { VirtProcr *
         newPr = (VirtProcr *)req->semReqData;
         semEnv->numVirtPr += 1; //one more live procr to track for shutdown

            //resume procr asked for registration & start new pr
         writePrivQ( requestingPr, semEnv->readyVirtProcrQ );
         writePrivQ( newPr, semEnv->readyVirtProcrQ );
       }
      else if( VMS__isDissipateReqst( req ) )
       {
            //free any semantic data allocated to the virt procr

            //Now, call VMS to free_all AppVP state -- stack and so on
         VMS__handle_dissipate_reqst( requestingPr );

         semEnv->numVirtPr -= 1;
         if( semEnv->numVirtPr == 0 )
          {    //no more work, so shutdown
            VMS__handle_shutdown_reqst( requestingPr );
          }
       }

      DoneHandlingReqst:
         //Free VMS portion of request, no matter what -- sem request data
         // struc instances may still be around..  VMS__free_request doesn't
         // affect the semantic request that was carried by it
      VMS__free_request( req );      
      req = VMS__take_top_request_from( requestingPr );
    } //while( req != NULL )
 }
    7.95 -
    7.96 -//===========================================================================
    7.97 -
    7.98 -
    7.99 -/*For VMSHW, scheduling a slave simply takes the next work-unit off the
   7.100 - * ready-to-go work-unit queue and assigns it to the slaveToSched.
   7.101 - *If the ready-to-go work-unit queue is empty, then nothing to schedule
   7.102 - * to the slave -- return FALSE to let Master loop know scheduling that
   7.103 - * slave failed.
   7.104 - */
   7.105 -VirtProcr *
   7.106 -VMSHW__schedule_virt_procr( void *_semEnv )
   7.107 - { VirtProcr   *schedPr;
   7.108 -   VMSHWSemEnv *semEnv;
   7.109 -
   7.110 -   semEnv = (VMSHWSemEnv *)_semEnv;
   7.111 -
   7.112 -   schedPr = readPrivQ( semEnv->readyVirtProcrQ );
   7.113 -      //Note, using a non-blocking queue -- it returns NULL if queue empty
   7.114 -
   7.115 -   return( schedPr );
   7.116 - }
   7.117 -
     8.1 --- a/VMSHW_Request_Handlers.c	Mon Aug 09 02:29:31 2010 -0700
     8.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     8.3 @@ -1,328 +0,0 @@
     8.4 -/*
     8.5 - * Copyright 2010  OpenSourceCodeStewardshipFoundation
     8.6 - *
     8.7 - * Licensed under BSD
     8.8 - */
     8.9 -
    8.10 -#include <stdio.h>
    8.11 -#include <stdlib.h>
    8.12 -#include <malloc.h>
    8.13 -
    8.14 -#include "VMS/VMS.h"
    8.15 -#include "VMS/Queue_impl/PrivateQueue.h"
    8.16 -#include "VMS/Hash_impl/PrivateHash.h"
    8.17 -#include "VMSHW.h"
    8.18 -
    8.19 -
    8.20 -
    8.21 -//===========================================================================
    8.22 -//                           Helpers
    8.23 -
    8.24 -HashEntry *
    8.25 -giveEntryElseInsertReqst( char *key, VMSHWSemReq *semReq,
    8.26 -    HashTable   *commHashTbl )
    8.27 - { HashEntry    *entry;
    8.28 -   VMSHWSemReq  *waitingReq;
    8.29 -
    8.30 -   entry = getEntryFromTable( (char *)key, commHashTbl );
    8.31 -   if( entry == NULL )
    8.32 -    {    //no waiting sends or receives, so add this request and exit
    8.33 -      addValueIntoTable( key, semReq, commHashTbl );
    8.34 -      return NULL;
    8.35 -    }
    8.36 -   waitingReq = (VMSHWSemReq *)entry->content;
    8.37 -   if( waitingReq == NULL )  //might happen when last waiting gets paired
    8.38 -    {    //no waiting sends or receives, so add this request and exit
    8.39 -      entry->content = semReq;
    8.40 -      return NULL;
    8.41 -    }
    8.42 -   return entry;
    8.43 - }
    8.44 -
    8.45 -
    8.46 -
    8.47 -
    8.48 -//===========================================================================
    8.49 -/*The semantic request has the receiving processor and the message type
    8.50 - *
    8.51 - *Note one value in this approach: without the extra VMS layer,
    8.52 - * the send and receive would happen in real time instead of virtual time,
    8.53 - * which would waste real time while one of them waited for other
    8.54 - *
    8.55 - *When successfully pair-up, transfer ownership of the sent data
    8.56 - * to the receiving processor
    8.57 - *
    8.58 - *Messages of a given Type have to be kept separate..  so need a separate
    8.59 - * entry in the hash table for each pair: receivePr, msgType
    8.60 - *
    8.61 - *Also, if same sender sends multiple before any get received, then need to
    8.62 - * stack the sends up -- even if a send waits until it's paired, several
    8.63 - * separate processors can send to the same receiver, and hashing on the
    8.64 - * receive processor, so they will stack up.
    8.65 - */
/*Handles a send_type request: hashes on (receivePr, msgType); either
 * parks the send, stacks it behind other waiting sends, or pairs it with
 * the one waiting receive (then both procrs go back on the ready Q).
 */
void
handleSendType( VMSHWSemReq *semReq, VMSHWSemEnv *semEnv )
 { VirtProcr   *sendPr, *receivePr;
   int          key[] = {0,0,0};
   VMSHWSemReq *waitingReq;
   HashEntry   *entry;
   HashTable   *commHashTbl = semEnv->commHashTbl;
 
   receivePr = semReq->receivePr; //For "send", know both send & recv procrs
   sendPr    = semReq->sendPr;

         //TODO: handle transfer of msg-locs ownership
         //TODO: hash table implemented such that using "addEntry" or
         //  "addValue" to table causes the *value* in old entry to be
         //  *freed* -- this is bad.  Want to stack up values in a linked
         //  list when multiple have the same key.

      //TODO: use a faster hash function -- see notes in intelligence gather
      //NOTE(review): (int)pointer truncates on LP64 -- two procrs could
      // collide to one key on 64-bit; use intptr_t.  Also the int[] key is
      // passed where char* is expected, and pointer bytes equal to 0 would
      // terminate the "string" key early -- verify hash impl tolerates this
   key[0] = (int)receivePr;
   key[1] = (int)(semReq->msgType);
 //key[2] acts as the 0 that terminates the string

   entry = giveEntryElseInsertReqst( key, semReq, commHashTbl);
   if( entry == NULL ) return;  //was just inserted

   waitingReq = (VMSHWSemReq *)entry->content;

      //At this point, know have waiting request(s) -- either sends or recv
      //Note, can only have max of one receive waiting, and cannot have both
      // sends and receives waiting (they would have paired off)
      // but can have multiple send_type requests waiting (from diff senders)
   if( waitingReq->reqType == send_type )
    {    //waiting request is another send, so stack this up on list
      semReq->    nextReqInHashEntry = waitingReq->nextReqInHashEntry;
      waitingReq->nextReqInHashEntry = semReq;
         //printf("linked requests: %d, %d  |  ", semReq, waitingReq );
         //printf("type: %d, %d\n", semReq->reqType, waitingReq->reqType );
      return;
    }
   else
    {    //waiting request is a receive, so pair it to this send
         //first, remove the waiting receive request from the list in entry
      entry->content = waitingReq->nextReqInHashEntry;
      if( entry->content == NULL )
       {    //TODO: mod hash table to double-link, so can delete entry from
            // table without hashing the key and looking it up again
         deleteEntryFromTable( entry->key, commHashTbl );  //frees entry too
       }
      
         //attach msg that's in this send request to receiving procr
         // when comes back from suspend, will have msg in semanticData
      receivePr->semanticData = semReq->msg;

         //bring both processors back from suspend
      writePrivQ( sendPr,    semEnv->readyVirtProcrQ );
      writePrivQ( receivePr, semEnv->readyVirtProcrQ );

         //don't need semReq anymore -- free it
      VMSHW__free_semantic_request( waitingReq );
      VMSHW__free_semantic_request( semReq );
      return;
    }
 }
   8.129 -
   8.130 -
   8.131 -/*Looks like can make single handler for both sends..
   8.132 - */
   8.133 -//TODO: combine both send handlers into single handler
/*Handles a send_from_to request: hashes on (receivePr, sendPr).  From-to
 * semantics allow at most one waiting request per key, so the outcome is
 * either "park this send" or "pair with the one waiting receive".
 */
void
handleSendFromTo( VMSHWSemReq *semReq, VMSHWSemEnv *semEnv)
 { VirtProcr   *sendPr, *receivePr;
   int          key[] = {0,0,0};
   VMSHWSemReq *waitingReq;
   HashEntry   *entry;
   HashTable   *commHashTbl = semEnv->commHashTbl;

   receivePr = semReq->receivePr; //For "send", know both send & recv procrs
   sendPr    = semReq->sendPr;    

      //NOTE(review): same key caveats as handleSendType -- (int)pointer
      // truncates on LP64, and zero bytes inside the key may terminate the
      // string-style hash early
   key[0] = (int)receivePr;
   key[1] = (int)sendPr;
 //key[2] acts at the 0 that terminates the string

   entry = giveEntryElseInsertReqst( key, semReq, commHashTbl);
   if( entry == NULL ) return;  //was just inserted

   waitingReq = (VMSHWSemReq *)entry->content;

      //At this point, know have waiting request(s) -- either sends or recv
   if( waitingReq->reqType == send_from_to )
    { printf("\n ERROR: shouldn't be two send from-tos waiting \n");
    }
   else
    {    //waiting request is a receive, so it completes pair with this send

         //remove the waiting receive request from the entry
      entry->content = waitingReq->nextReqInHashEntry;
         //can only be one waiting req for "from-to" semantics
      if( entry->content != NULL )
       {
            //NOTE(review): %d with pointer args is UB -- should be %p
         printf("\nERROR in handleSendFromTo\n");
         printf("waitReq: %d | next req: %d\n", waitingReq, entry->content);
       }
      deleteEntryFromTable( entry->key, commHashTbl );  //frees entry too

         //attach msg that's in this send request to receiving procr
         // when comes back from suspend, will have msg in semanticData
      receivePr->semanticData = semReq->msg;

         //bring both processors back from suspend
      writePrivQ( sendPr,    semEnv->readyVirtProcrQ );
      writePrivQ( receivePr, semEnv->readyVirtProcrQ );
      
         //done with requests, so free them
      VMSHW__free_semantic_request( waitingReq );
      VMSHW__free_semantic_request( semReq );
      return;
    }
 }
   8.185 -
   8.186 -
   8.187 -
   8.188 -//=======================================================
   8.189 -
   8.190 -/*Removed this one for now, because forces either a search or going to a
   8.191 - * two-level hash table, where one level the key is the receivePr, in the
   8.192 - * other level, the key is the type.
   8.193 - *So, each dest procr that either does a receive_type or that a send_type
   8.194 - * targets it, would have a hash table created just for it and placed
   8.195 - * into the first-level hash table entry for that receive procr.
   8.196 - *Then, doing a receive_type first looks up entry for receive procr in first
   8.197 - * table, gets the type-table out of that entry, and does a second lookup
   8.198 - * in the type-table.
   8.199 - *Doing a receive from-to looks up in the first table, gets the second table
   8.200 - * hashed on "from" procr.
   8.201 - *Doing a receive_any looks up in the first table, then looks to see if
   8.202 - * either of the hash tables have any entries -- would then have to do a
   8.203 - * linear search through the hash-table's array for the first non-empty
   8.204 - * spot
   8.205 - *Yuck.
   8.206 - *
   8.207 - *Alternatively, could keep two hash tables updated all the time -- one that
   8.208 - * does the receive_type and receive_from_to and a second that does
   8.209 - * receive_any -- would only hash the second table by the receive procr.
   8.210 - * When remove from one table, keep back-links to both tables, so can also
   8.211 - * quickly remove from other table.
   8.212 - *Cost is doing two hash-table lookups for every insert.
   8.213 - * If ever add receive_any, looking like this second option easier and even
   8.214 - * less costly.
   8.215 - */
   8.216 -void
   8.217 -handleReceiveAny( VMSHWSemReq *semReq, VMSHWSemEnv *semEnv)
   8.218 - {
   8.219 - 
   8.220 - }
   8.221 -
   8.222 -
   8.223 -void
   8.224 -handleReceiveType( VMSHWSemReq *semReq, VMSHWSemEnv *semEnv)
   8.225 - { VirtProcr   *sendPr, *receivePr;
   8.226 -   int          key[] = {0,0,0};
   8.227 -   VMSHWSemReq *waitingReq;
   8.228 -   HashEntry   *entry;
   8.229 -   HashTable   *commHashTbl = semEnv->commHashTbl;
   8.230 -
   8.231 -   receivePr = semReq->receivePr;
   8.232 -
   8.233 -   key[0] = (int)receivePr;
   8.234 -   key[1] = (int)(semReq->msgType);
   8.235 - //key[2] acts at the 0 that terminates the string
   8.236 -
   8.237 -
   8.238 -   entry = giveEntryElseInsertReqst( key, semReq, commHashTbl);
   8.239 -   if( entry == NULL ) return;  //was just inserted
   8.240 -
   8.241 -   waitingReq = (VMSHWSemReq *)entry->content;
   8.242 -
   8.243 -      //At this point, know have waiting request(s) -- should be send(s)
   8.244 -   if( waitingReq->reqType == send_type )
   8.245 -    {    //waiting request is a send, so pair it with this receive
   8.246 -         //first, remove the waiting send request from the list in entry
   8.247 -      entry->content = waitingReq->nextReqInHashEntry;
   8.248 -      if( entry->content == NULL )
   8.249 -       { deleteEntryFromTable( entry->key, commHashTbl );  //frees entry too
   8.250 -       }
   8.251 -      
   8.252 -         //attach msg that's in the send request to receiving procr
   8.253 -         // when comes back from suspend, will have msg in semanticData
   8.254 -      receivePr->semanticData = waitingReq->msg;
   8.255 -
   8.256 -         //bring both processors back from suspend
   8.257 -      writePrivQ( waitingReq->sendPr, semEnv->readyVirtProcrQ );
   8.258 -      writePrivQ( receivePr,          semEnv->readyVirtProcrQ );
   8.259 -
   8.260 -         //done with requests, so free them
   8.261 -      VMSHW__free_semantic_request( waitingReq );
   8.262 -      VMSHW__free_semantic_request( semReq );
   8.263 -      return;
   8.264 -    }
   8.265 -   printf("\nLang Impl Error: Should never be two waiting receives!\n");
   8.266 - }
   8.267 -
   8.268 -
   8.269 -/*
   8.270 - */
   8.271 -void
   8.272 -handleReceiveFromTo( VMSHWSemReq *semReq, VMSHWSemEnv *semEnv)
   8.273 - { VirtProcr   *sendPr, *receivePr;
   8.274 -   int          key[] = {0,0,0};
   8.275 -   VMSHWSemReq *waitingReq;
   8.276 -   HashEntry   *entry;
   8.277 -   HashTable   *commHashTbl = semEnv->commHashTbl;
   8.278 -
   8.279 -   receivePr = semReq->receivePr;
   8.280 -   sendPr    = semReq->sendPr;    //for receive from-to, know send procr
   8.281 -
   8.282 -   key[0] = (int)receivePr;
   8.283 -   key[1] = (int)sendPr;
   8.284 - //key[2] acts at the 0 that terminates the string
   8.285 -
   8.286 -   entry = giveEntryElseInsertReqst( key, semReq, commHashTbl);
   8.287 -   if( entry == NULL ) return;  //was just inserted
   8.288 -
   8.289 -   waitingReq = (VMSHWSemReq *)entry->content;
   8.290 -
   8.291 -      //At this point, know have waiting request(s) -- should be send(s)
   8.292 -   if( waitingReq->reqType == send_from_to )
   8.293 -    {    //waiting request is a send, so pair it with this receive
   8.294 -
   8.295 -         //For from-to, should only ever be a single reqst waiting tobe paird
   8.296 -      entry->content = waitingReq->nextReqInHashEntry;
   8.297 -      if( entry->content != NULL ) printf("\nERROR in handleRecvFromTo\n");
   8.298 -      deleteEntryFromTable( entry->key, commHashTbl );  //frees entry too
   8.299 -
   8.300 -         //attach msg that's in the send request to receiving procr
   8.301 -         // when comes back from suspend, will have msg in semanticData
   8.302 -      receivePr->semanticData = waitingReq->msg;
   8.303 -
   8.304 -         //bring both processors back from suspend
   8.305 -      writePrivQ( waitingReq->sendPr, semEnv->readyVirtProcrQ );
   8.306 -      writePrivQ( receivePr,          semEnv->readyVirtProcrQ );
   8.307 -
   8.308 -         //done with requests, so free them
   8.309 -      VMSHW__free_semantic_request( waitingReq );
   8.310 -      VMSHW__free_semantic_request( semReq );
   8.311 -      return;
   8.312 -    }
   8.313 -   printf("\nLang Impl Error: Should never be two waiting receives!\n");
   8.314 - }
   8.315 -
   8.316 -
   8.317 -
   8.318 -//===============================================
   8.319 -void
   8.320 -handleTransferTo( VMSHWSemReq *semReq, VMSHWSemEnv *semEnv)
   8.321 - {
   8.322 -
   8.323 - }
   8.324 -
   8.325 -void
   8.326 -handleTransferOut( VMSHWSemReq *semReq, VMSHWSemEnv *semEnv)
   8.327 - {
   8.328 -
   8.329 - }
   8.330 -
   8.331 -
     9.1 --- a/VMSHW_Request_Handlers.h	Mon Aug 09 02:29:31 2010 -0700
     9.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     9.3 @@ -1,33 +0,0 @@
     9.4 -/*
     9.5 - *  Copyright 2009 OpenSourceStewardshipFoundation.org
     9.6 - *  Licensed under GNU General Public License version 2
     9.7 - *
     9.8 - * Author: seanhalle@yahoo.com
     9.9 - *
    9.10 - */
    9.11 -
    9.12 -#ifndef _VMSHW_REQ_H
    9.13 -#define	_VMSHW_REQ_H
    9.14 -
    9.15 -#include "VMSHW.h"
    9.16 -
    9.17 -/*This header defines everything specific to the VMSHW semantic plug-in
    9.18 - */
    9.19 -
    9.20 -void
    9.21 -handleSendType( VMSHWSemReq *semReq, VMSHWSemEnv *semEnv);
    9.22 -void
    9.23 -handleSendFromTo( VMSHWSemReq *semReq, VMSHWSemEnv *semEnv);
    9.24 -void
    9.25 -handleReceiveAny( VMSHWSemReq *semReq, VMSHWSemEnv *semEnv);
    9.26 -void
    9.27 -handleReceiveType( VMSHWSemReq *semReq, VMSHWSemEnv *semEnv);
    9.28 -void
    9.29 -handleReceiveFromTo( VMSHWSemReq *semReq, VMSHWSemEnv *semEnv);
    9.30 -void
    9.31 -handleTransferTo( VMSHWSemReq *semReq, VMSHWSemEnv *semEnv);
    9.32 -void
    9.33 -handleTransferOut( VMSHWSemReq *semReq, VMSHWSemEnv *semEnv);
    9.34 -
    9.35 -#endif	/* _VMSHW_REQ_H */
    9.36 -
    10.1 --- a/VMSHW_lib.c	Mon Aug 09 02:29:31 2010 -0700
    10.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    10.3 @@ -1,391 +0,0 @@
    10.4 -/*
    10.5 - * Copyright 2010  OpenSourceCodeStewardshipFoundation
    10.6 - *
    10.7 - * Licensed under BSD
    10.8 - */
    10.9 -
   10.10 -#include <stdio.h>
   10.11 -#include <stdlib.h>
   10.12 -#include <malloc.h>
   10.13 -
   10.14 -#include "VMS/VMS.h"
   10.15 -#include "VMSHW.h"
   10.16 -#include "VMS/Queue_impl/PrivateQueue.h"
   10.17 -#include "VMS/Hash_impl/PrivateHash.h"
   10.18 -
   10.19 -
   10.20 -//==========================================================================
   10.21 -
   10.22 -void
   10.23 -VMSHW__init();
   10.24 -
   10.25 -void
   10.26 -VMSHW__init_Seq();
   10.27 -
   10.28 -void
   10.29 -VMSHW__init_Helper();
   10.30 -//==========================================================================
   10.31 -
   10.32 -
   10.33 -/*TODO: Q: dealing with library f()s and DKU vs WT vs FoR
   10.34 - * (still want to do FoR, with time-lines as syntax, could be super cool)
   10.35 - * A: thinking pin the coreLoops for all of BLIS -- let Master arbitrate
   10.36 - * among library, DKU, WT, FoR -- all the patterns in terms of virtual
   10.37 - * processors (or equivalently work-units), so Master picks which virt procr
   10.38 - * from which portions of app (DKU, WT, FoR) onto which sched slots
   10.39 - *Might even do hierarchy of masters -- group of sched slots for each core
   10.40 - * has its own master, that keeps generated work local
   10.41 - * single-reader-single-writer sync everywhere -- no atomic primitives
   10.42 - * Might have the different schedulers talk to each other, to negotiate
   10.43 - * larger-grain sharing of resources, according to predicted critical
   10.44 - * path, and expansion of work
   10.45 - */
   10.46 -
   10.47 -
   10.48 -
   10.49 -//===========================================================================
   10.50 -
   10.51 -
   10.52 -/*These are the library functions *called in the application*
   10.53 - * 
   10.54 - *There's a pattern for the outside sequential code to interact with the
   10.55 - * VMS_HW code.
   10.56 - *The VMS_HW system is inside a boundary..  every VMSHW system is in its
   10.57 - * own directory that contains the functions for each of the processor types.
   10.58 - * One of the processor types is the "seed" processor that starts the
   10.59 - * cascade of creating all the processors that do the work.
   10.60 - *So, in the directory is a file called "EntryPoint.c" that contains the
   10.61 - * function, named appropriately to the work performed, that the outside
   10.62 - * sequential code calls.  This function follows a pattern:
   10.63 - *1) it calls VMSHW__init()
   10.64 - *2) it creates the initial data for the seed processor, which is passed
   10.65 - *    in to the function
   10.66 - *3) it creates the seed VMSHW processor, with the data to start it with.
   10.67 - *4) it calls startVMSHWThenWaitUntilWorkDone
   10.68 - *5) it gets the returnValue from the transfer struc and returns that
   10.69 - *    from the function
   10.70 - *
   10.71 - *For now, a new VMSHW system has to be created via VMSHW__init every
   10.72 - * time an entry point function is called -- later, might add letting the
   10.73 - * VMSHW system be created once, and let all the entry points just reuse
   10.74 - * it -- want to be as simple as possible now, and see by using what makes
   10.75 - * sense for later..
   10.76 - */
   10.77 -
   10.78 -
   10.79 -
   10.80 -//===========================================================================
   10.81 -
   10.82 -/*This is the "border crossing" function -- the thing that crosses from the
   10.83 - * outside world, into the VMS_HW world.  It initializes and starts up the
   10.84 - * VMS system, then creates one processor from the specified function and
   10.85 - * puts it into the readyQ.  From that point, that one function is resp.
   10.86 - * for creating all the other processors, that then create others, and so
   10.87 - * forth.
   10.88 - *When all the processors, including the seed, have dissipated, then this
   10.89 - * function returns.  The results will have been written by side-effect via
   10.90 - * pointers read from, or written into initData.
   10.91 - *
   10.92 - *NOTE: no Threads should exist in the outside program that might touch
   10.93 - * any of the data reachable from initData passed in to here
   10.94 - */
   10.95 -void
   10.96 -VMSHW__create_seed_procr_and_do_work( VirtProcrFnPtr fnPtr, void *initData )
   10.97 - { VMSHWSemEnv *semEnv;
   10.98 -   VirtProcr *seedProcr;
   10.99 -
  10.100 -   VMSHW__init(); //uncomment to do multi-thd
  10.101 -//   VMSHW__init_Seq();  //uncomment to debug
  10.102 -
  10.103 -   semEnv = _VMSMasterEnv->semanticEnv;
  10.104 -
  10.105 -      //VMSHW starts with one processor, which is put into initial environ,
  10.106 -      // and which then calls create() to create more, thereby expanding work
  10.107 -   seedProcr = VMS__create_procr( fnPtr, initData );
  10.108 -
  10.109 -   writePrivQ( seedProcr, semEnv->readyVirtProcrQ );
  10.110 -   semEnv->numVirtPr = 1;
  10.111 -
  10.112 -   VMS__start_the_work_then_wait_until_done();   //uncomment to do multi-thd
  10.113 -//   VMS__start_the_work_then_wait_until_done_Seq();  //uncomment to debug
  10.114 -
  10.115 -   VMSHW__cleanup_after_shutdown();
  10.116 - }
  10.117 -
  10.118 -
  10.119 -//===========================================================================
  10.120 -
  10.121 -/*Initializes all the data-structures for a VMSHW system -- but doesn't
  10.122 - * start it running yet!
  10.123 - *
  10.124 - * 
  10.125 - *This sets up the semantic layer over the VMS system
  10.126 - *
  10.127 - *First, calls VMS_Setup, then creates own environment, making it ready
  10.128 - * for creating the seed processor and then starting the work.
  10.129 - */
  10.130 -void
  10.131 -VMSHW__init()
  10.132 - {
  10.133 -   VMS__init();
  10.134 -      //masterEnv, a global var, now is partially set up by init_VMS
  10.135 -
  10.136 -   VMSHW__init_Helper();
  10.137 - }
  10.138 -
  10.139 -void
  10.140 -VMSHW__init_Seq()
  10.141 - {
  10.142 -   VMS__init_Seq();
  10.143 -      //masterEnv, a global var, now is partially set up by init_VMS
  10.144 -
  10.145 -   VMSHW__init_Helper();
  10.146 - }
  10.147 -
  10.148 -void
  10.149 -VMSHW__init_Helper()
  10.150 - { VMSHWSemEnv *semanticEnv;
  10.151 - 
  10.152 -      //Hook up the semantic layer's plug-ins to the Master virt procr
  10.153 -   _VMSMasterEnv->requestHandler = &VMSHW__Request_Handler;
  10.154 -   _VMSMasterEnv->slaveScheduler = &VMSHW__schedule_virt_procr;
  10.155 -
  10.156 -      //create the semantic layer's environment (all its data) and add to
  10.157 -      // the master environment
  10.158 -   semanticEnv = malloc( sizeof( VMSHWSemEnv ) );
  10.159 -   _VMSMasterEnv->semanticEnv = semanticEnv;
  10.160 -
  10.161 -      //create the ready queue, hash tables used for pairing send to receive
  10.162 -      // and so forth
  10.163 -      //TODO: add hash tables for pairing sends with receives, and
  10.164 -      // initialize the data ownership system
  10.165 -   semanticEnv->readyVirtProcrQ = makePrivQ();
  10.166 -   semanticEnv->commHashTbl     = makeHashTable( 1<<16, NULL ); //start big
  10.167 - }
  10.168 -
  10.169 -
  10.170 -/*Frees any memory allocated by VMSHW__init() then calls VMS__shutdown
  10.171 - */
  10.172 -void
  10.173 -VMSHW__cleanup_after_shutdown()
  10.174 - { VMSHWSemEnv *semanticEnv;
  10.175 - 
  10.176 -   semanticEnv = _VMSMasterEnv->semanticEnv;
  10.177 -
  10.178 -//TODO: double check all sem env locations freed
  10.179 -   free( semanticEnv->readyVirtProcrQ->startOfData );
  10.180 -   free( semanticEnv->readyVirtProcrQ );
  10.181 -   freeHashTable( semanticEnv->commHashTbl );
  10.182 -   free( _VMSMasterEnv->semanticEnv );
  10.183 -   VMS__cleanup_after_shutdown();
  10.184 - }
  10.185 -
  10.186 -
  10.187 -//===========================================================================
  10.188 -
  10.189 -/*
  10.190 - */
  10.191 -inline VirtProcr *
  10.192 -VMSHW__create_procr_with( VirtProcrFnPtr fnPtr, void *initData,
  10.193 -                          VirtProcr *creatingPr )
  10.194 - { VirtProcr *newPr;
  10.195 -
  10.196 -   newPr = VMS__create_procr( fnPtr, initData );
  10.197 -
  10.198 -      //After create, have to send request to plugin for any sem env
  10.199 -      // modifications -- such as putting the new procr into the ready Q
  10.200 -      //Need a processor to "animate" the creation -- it's one the register
  10.201 -      // request is attached to, and one suspended in order to send req
  10.202 -      // to plugin
  10.203 -   VMS__send_register_new_procr_request( newPr, creatingPr );
  10.204 -
  10.205 -   return newPr;
  10.206 - }
  10.207 -
  10.208 -
  10.209 -inline void
  10.210 -VMSHW__dissipate_procr( VirtProcr *procrToDissipate )
  10.211 - {
  10.212 -   VMS__dissipate_procr( procrToDissipate );
  10.213 - }
  10.214 -
  10.215 -
  10.216 -//===========================================================================
  10.217 -
  10.218 -void *
  10.219 -VMSHW__malloc_size_to( int numBytes, VirtProcr *ownerPr )
  10.220 - {
  10.221 -//TODO: Put in the ownership system from DKU -- have it working, just adapt
  10.222 -//  it to here
  10.223 -   return malloc( numBytes );
  10.224 - }
  10.225 -
  10.226 -
  10.227 -void
  10.228 -VMSHW__transfer_ownership_of_from_to( void *data, VirtProcr *oldOwnerPr,
  10.229 -                                                  VirtProcr *newOwnerPr )
  10.230 - {
  10.231 -
  10.232 - }
  10.233 -
  10.234 -
  10.235 -void
  10.236 -VMSHW__add_ownership_by_to( VirtProcr *newOwnerPr, void *data )
  10.237 - {
  10.238 -
  10.239 - }
  10.240 -
  10.241 -
  10.242 -void
  10.243 -VMSHW__remove_ownership_by_from( VirtProcr *loserPr, void *dataLosing )
  10.244 - {
  10.245 -
  10.246 - }
  10.247 -
  10.248 -
  10.249 -/*Causes the VMSHW system to remove internal ownership, so data won't be
  10.250 - * freed when VMSHW shuts down, and will persist in the external program.
  10.251 - *
  10.252 - *Must be called from the processor that currently owns the data.
  10.253 - *
  10.254 - *IMPL: Transferring ownership touches two different virtual processor's
  10.255 - * state -- which means it has to be done carefully -- the VMS rules for
  10.256 - * semantic layers say that a work-unit is only allowed to touch the
  10.257 - * virtual processor it is part of, and that only a single work-unit per
  10.258 - * virtual processor be scheduled to a slave at a time.  So, this has to
  10.259 - * modify the virtual processor that owns the work-unit that called this
  10.260 - * function, then create a request to have the other processor modified.
  10.261 - *However, in this case, the TO processor is the outside, and transfers
  10.262 - * are only allowed to be called by the giver-upper, so can mark caller of
  10.263 - * this function as no longer owner, and return -- done.
  10.264 - */
  10.265 -void
  10.266 -VMSHW__transfer_ownership_to_outside( void *data )
  10.267 - {
  10.268 -   //TODO: removeAllOwnersFrom( data );
  10.269 - }
  10.270 -
  10.271 -
  10.272 -//===========================================================================
  10.273 -
  10.274 -void
  10.275 -VMSHW__send_of_type_to( VirtProcr *sendPr, void *msg, const int type,
  10.276 -                        VirtProcr *receivePr)
  10.277 - { VMSHWSemReq *reqData;
  10.278 -
  10.279 -   reqData = malloc( sizeof(VMSHWSemReq) );
  10.280 -   reqData->receivePr = receivePr;
  10.281 -   reqData->sendPr    = sendPr;
  10.282 -   reqData->reqType   = send_type;
  10.283 -   reqData->msgType   = type;
  10.284 -   reqData->msg       = msg;
  10.285 -   reqData->nextReqInHashEntry = NULL;
  10.286 -
  10.287 -      //On ownership -- remove inside the send and let ownership sit in limbo
  10.288 -      // as a potential in an entry in the hash table, when this receive msg
  10.289 -      // gets paired to a send, the ownership gets added to the receivePr --
  10.290 -      // the next work-unit in the receivePr's trace will have ownership.
  10.291 -   VMS__add_sem_request( reqData, sendPr );
  10.292 -   VMS__suspend_procr( sendPr ); //will suspend then resume and continue
  10.293 -
  10.294 -      //When come back from suspend, no longer own data reachable from msg
  10.295 -      //TODO: release ownership here
  10.296 - }
  10.297 -
  10.298 -void
  10.299 -VMSHW__send_from_to( void *msg, VirtProcr *sendPr, VirtProcr *receivePr )
  10.300 - { VMSHWSemReq *reqData;
  10.301 -
  10.302 -      //hash on the receiver, 'cause always know it, but sometimes want to
  10.303 -      // receive from anonymous sender
  10.304 -
  10.305 -   reqData = malloc( sizeof(VMSHWSemReq) );
  10.306 -   reqData->receivePr = receivePr;
  10.307 -   reqData->sendPr    = sendPr;
  10.308 -   reqData->reqType   = send_from_to;
  10.309 -   reqData->msg       = msg;
  10.310 -   reqData->nextReqInHashEntry = NULL;
  10.311 -
  10.312 -      //On ownership -- remove inside the send and let ownership sit in limbo
  10.313 -      // as a potential in an entry in the hash table, when this receive msg
  10.314 -      // gets paired to a send, the ownership gets added to the receivePr --
  10.315 -      // the next work-unit in the receivePr's trace will have ownership.
  10.316 -   VMS__add_sem_request( reqData, sendPr );
  10.317 -   VMS__suspend_procr( sendPr ); //will suspend then resume and continue
  10.318 -
  10.319 -      //When come back from suspend, no longer own data reachable from msg
  10.320 -      //TODO: release ownership here
  10.321 - }
  10.322 -
  10.323 -
  10.324 -//===========================================================================
  10.325 -
  10.326 -void *
  10.327 -VMSHW__receive_any_to( VirtProcr *receivePr )
  10.328 - {
  10.329 -
  10.330 - }
  10.331 -
  10.332 -void *
  10.333 -VMSHW__receive_type_to( const int type, VirtProcr *receivePr )
  10.334 - { void *msg;
  10.335 -   VMSHWSemReq *reqData;
  10.336 -
  10.337 -   reqData = malloc( sizeof(VMSHWSemReq) );
  10.338 -   reqData->receivePr = receivePr;
  10.339 -   reqData->reqType   = receive_type;
  10.340 -   reqData->msgType   = type;
  10.341 -   reqData->nextReqInHashEntry = NULL;
  10.342 -
  10.343 -   VMS__add_sem_request( reqData, receivePr );
  10.344 -   VMS__suspend_procr( receivePr );
  10.345 -   msg = receivePr->semanticData;
  10.346 -   return msg;
  10.347 - }
  10.348 -
  10.349 -
  10.350 -
  10.351 -/*Call this at point receiving virt pr wants in-coming data.
  10.352 - * 
  10.353 - *The reason receivePr must call this is that it modifies the receivPr
  10.354 - * loc structure directly -- and the VMS rules state a virtual processor
  10.355 - * loc structure can only be modified by itself.
  10.356 - */
  10.357 -void *
  10.358 -VMSHW__receive_from_to( VirtProcr *sendPr, VirtProcr *receivePr )
  10.359 - { VMSHWSemReq *reqData;
  10.360 -
  10.361 -      //hash on the receiver, 'cause always know it, but sometimes want to
  10.362 -      // receive from anonymous sender
  10.363 -
  10.364 -   reqData = malloc( sizeof(VMSHWSemReq) );
  10.365 -   reqData->receivePr = receivePr;
  10.366 -   reqData->sendPr    = sendPr;
  10.367 -   reqData->reqType   = receive_from_to;
  10.368 -   reqData->nextReqInHashEntry = NULL;
  10.369 -
  10.370 -      //On ownership -- remove inside the send after receive successful.
  10.371 -      // Below, add ownership when come back from suspend
  10.372 -      //Reason: Thinking of impl ownership mech such that it automatically
  10.373 -      // frees any data has no owners -- so have to add receiver before
  10.374 -      // remove sender
  10.375 -   VMS__add_sem_request( reqData, receivePr );
  10.376 -      //TODO: add ownership of locs reachable from msg inside reqst handler
  10.377 -   VMS__suspend_procr( receivePr ); //will suspend then resume and continue
  10.378 -
  10.379 -      //When come back from suspend, the msg data is in receivePr->semData
  10.380 -   return receivePr->semanticData;
  10.381 - }
  10.382 -
  10.383 -
  10.384 -//===========================================================================
  10.385 -
  10.386 -/*Just thin wrapper for now -- semantic request is still a simple thing
  10.387 - * (July 3, 2010)
  10.388 - */
  10.389 -inline void
  10.390 -VMSHW__free_semantic_request( VMSHWSemReq *semReq )
  10.391 - {
  10.392 -   free( semReq );
  10.393 - }
  10.394 -