# HG changeset patch # User Merten Sach # Date 1307018870 -7200 # Node ID cfd2f7e54129e0b405ca5929ca0328a7537ec8ff # Parent 505d3c674ce884c84bcd01d0210bc0481912cb33 new sequential version diff -r 505d3c674ce8 -r cfd2f7e54129 DESIGN_NOTES__VPThread_lib.txt --- a/DESIGN_NOTES__VPThread_lib.txt Sat Nov 20 08:39:58 2010 +0100 +++ b/DESIGN_NOTES__VPThread_lib.txt Thu Jun 02 14:47:50 2011 +0200 @@ -1,82 +1,82 @@ - -Implement VPThread this way: - -We implemented a subset of PThreads functionality, called VMSPThd, that -includes: mutex_lock, mutex_unlock, cond_wait, and cond_notify, which we name -as VMSPThd__mutix_lock and so forth. \ All VMSPThd functions take a reference -to the AppVP that is animating the function call, in addition to any other -parameters. - -A mutex variable is an integer, returned by VMSPThd__mutex_create(), which is -used inside the request handler as a key to lookup an entry in a hash table, -that lives in the SemanticEnv. \ Such an entry has a field holding a -reference to the AppVP that currently owns the lock, and a queue of AppVPs -waiting to acquire the lock. \ - -Acquiring a lock is done with VMSPThd__mutex_lock(), which generates a -request. \ Recall that all request sends cause the suspention of the AppVP -that is animating the library call that generates the request, in this case -the AppVP animating VMSPThd__mutex_lock() is suspended. \ The request -includes a reference to that animating AppVP, and the mutex integer value. -\ When the request reaches the request handler, the mutex integer is used as -key to look up the hash entry, then if the owner field is null (or the same -as the AppVP in the request), the AppVP in the request is placed into the -owner field, and that AppVP is queued to be scheduled for re-animation. -\ However, if a different AppVP is listed in the owner field, then the AppVP -in the request is added to the queue of those trying to acquire. 
\ Notice -that this is a purely sequential algorithm that systematic reasoning can be -used on. - -VMSPThd__mutex_unlock(), meanwhile, generates a request that causes the -request handler to queue for re-animation the AppVP that animated the call. -\ It also pops the queue of AppVPs waiting to acquire the lock, and writes -the AppVP that comes out as the current owner of the lock and queues that -AppVP for re-animation (unless the popped value is null, in which case the -current owner is just set to null). - -Implementing condition variables takes a similar approach, in that -VMSPThd__init_cond() returns an integer that is then used to look up an entry -in a hash table, where the entry contains a queue of AppVPs waiting on the -condition variable. \ VMSPThd__cond_wait() generates a request that pushes -the AppVP into the queue, while VMSPThd__cond_signal() takes a wait request -from the queue. - -Notice that this is again a purely sequential algorithm, and sidesteps issues -such as ``simultaneous'' wait and signal requests -- the wait and signal get -serialized automatically, even though they take place at the same instant of -program virtual time. \ - -It is the fact of having a program virtual time that allows ``virtual -simultaneous'' actions to be handled of the virtual time. \ That -ability to escape outside of the virtual time is what enables a - algorithm to handle the simultaneity that is at the heart of -making implementing locks in physical time so intricately tricky -> > ->.\ - -What's nice about this approach is that the design and implementation are -simple and straight forward. \ It took just X days to design, implement, and -debug, and is in a form that should be amenable to proof of freedom from race -conditions, given a correct implementation of VMS. \ The hash-table based -approach also makes it reasonably high performance, with (essentially) no -slowdown when the number of locks or number of AppVPs grows large. 
-
-===========================
-Behavior:
-Cond variables are half of a two-piece mechanism. The other half is a mutex.
- Every cond var owns a mutex -- the two intrinsically work
- together, as a pair. The mutex must only be used with the condition var
- and not used on its own in other ways.
-
-cond_wait is called with a cond-var and its mutex.
-The animating processor must have acquired the mutex before calling cond_wait
-The call adds the animating processor to the queue associated with the cond
-variable and then calls mutex_unlock on the mutex.
-
-cond_signal can only be called after acquiring the cond var's mutex. It is
-called with the cond-var.
- The call takes the next processor from the condition-var's wait queue and
- transfers it to the waiting-for-lock queue of the cond-var's mutex.
-The processor that called the cond_signal next has to perform a mutex_unlock
- on the cond-var's mutex -- that, finally, lets the waiting processor acquire
- the mutex and proceed.
+
+Implement VPThread this way:
+
+We implemented a subset of PThreads functionality, called VMSPThd, that
+includes: mutex_lock, mutex_unlock, cond_wait, and cond_notify, which we name
+as VMSPThd__mutex_lock and so forth. \ All VMSPThd functions take a reference
+to the AppVP that is animating the function call, in addition to any other
+parameters.
+
+A mutex variable is an integer, returned by VMSPThd__mutex_create(), which is
+used inside the request handler as a key to lookup an entry in a hash table,
+that lives in the SemanticEnv. \ Such an entry has a field holding a
+reference to the AppVP that currently owns the lock, and a queue of AppVPs
+waiting to acquire the lock. \
+
+Acquiring a lock is done with VMSPThd__mutex_lock(), which generates a
+request. \ Recall that all request sends cause the suspension of the AppVP
+that is animating the library call that generates the request, in this case
+the AppVP animating VMSPThd__mutex_lock() is suspended. 
\ The request
+includes a reference to that animating AppVP, and the mutex integer value.
+\ When the request reaches the request handler, the mutex integer is used as
+key to look up the hash entry, then if the owner field is null (or the same
+as the AppVP in the request), the AppVP in the request is placed into the
+owner field, and that AppVP is queued to be scheduled for re-animation.
+\ However, if a different AppVP is listed in the owner field, then the AppVP
+in the request is added to the queue of those trying to acquire. \ Notice
+that this is a purely sequential algorithm that systematic reasoning can be
+used on.
+
+VMSPThd__mutex_unlock(), meanwhile, generates a request that causes the
+request handler to queue for re-animation the AppVP that animated the call.
+\ It also pops the queue of AppVPs waiting to acquire the lock, and writes
+the AppVP that comes out as the current owner of the lock and queues that
+AppVP for re-animation (unless the popped value is null, in which case the
+current owner is just set to null).
+
+Implementing condition variables takes a similar approach, in that
+VMSPThd__init_cond() returns an integer that is then used to look up an entry
+in a hash table, where the entry contains a queue of AppVPs waiting on the
+condition variable. \ VMSPThd__cond_wait() generates a request that pushes
+the AppVP into the queue, while VMSPThd__cond_signal() takes a wait request
+from the queue.
+
+Notice that this is again a purely sequential algorithm, and sidesteps issues
+such as ``simultaneous'' wait and signal requests -- the wait and signal get
+serialized automatically, even though they take place at the same instant of
+program virtual time. \
+
+It is the fact of having a program virtual time that allows ``virtual
+simultaneous'' actions to be handled outside of the virtual time. 
\ That
+ability to escape outside of the virtual time is what enables a
+sequential algorithm to handle the simultaneity that is at the heart of
+making implementing locks in physical time so intricately tricky
+> >
+>.\
+
+What's nice about this approach is that the design and implementation are
+simple and straightforward. \ It took just X days to design, implement, and
+debug, and is in a form that should be amenable to proof of freedom from race
+conditions, given a correct implementation of VMS. \ The hash-table based
+approach also makes it reasonably high performance, with (essentially) no
+slowdown when the number of locks or number of AppVPs grows large.
+
+===========================
+Behavior:
+Cond variables are half of a two-piece mechanism. The other half is a mutex.
+ Every cond var owns a mutex -- the two intrinsically work
+ together, as a pair. The mutex must only be used with the condition var
+ and not used on its own in other ways.
+
+cond_wait is called with a cond-var and its mutex.
+The animating processor must have acquired the mutex before calling cond_wait
+The call adds the animating processor to the queue associated with the cond
+variable and then calls mutex_unlock on the mutex.
+
+cond_signal can only be called after acquiring the cond var's mutex. It is
+called with the cond-var.
+ The call takes the next processor from the condition-var's wait queue and
+ transfers it to the waiting-for-lock queue of the cond-var's mutex.
+The processor that called the cond_signal next has to perform a mutex_unlock
+ on the cond-var's mutex -- that, finally, lets the waiting processor acquire
+ the mutex and proceed. 
diff -r 505d3c674ce8 -r cfd2f7e54129 VPThread_lib.c --- a/VPThread_lib.c Sat Nov 20 08:39:58 2010 +0100 +++ b/VPThread_lib.c Thu Jun 02 14:47:50 2011 +0200 @@ -156,19 +156,25 @@ VPThread__init() { VMS__init(); + //masterEnv, a global var, now is partially set up by init_VMS + + //Moved here from VMS.c because this is not parallel construct independent + MakeTheMeasHists + + VPThread__init_Helper(); + } + +#ifdef SEQUENTIAL +void +VPThread__init_Seq() + { + VMS__init_Seq(); + flushRegisters(); //masterEnv, a global var, now is partially set up by init_VMS VPThread__init_Helper(); } - -void -VPThread__init_Seq() - { - VMS__init_Seq(); - //masterEnv, a global var, now is partially set up by init_VMS - - VPThread__init_Helper(); - } +#endif void VPThread__init_Helper()