view BlockingQueue.c @ 1:81f6687d52d1

Correct SRSW queue and correct CAS queue
author Me
date Fri, 18 Jun 2010 17:49:38 -0700
parents 85af604dee9b
children 8abcca1590b8
line source
1 /*
2 * Copyright 2009 OpenSourceStewardshipFoundation.org
3 * Licensed under GNU General Public License version 2
4 *
5 * Author: seanhalle@yahoo.com
6 */
9 #include <stdio.h>
10 #include <errno.h>
11 #include <pthread.h>
12 #include <stdlib.h>
13 #include <sched.h>
14 #include <windows.h>
16 #include "BlockingQueue.h"
18 #define INC(x) (++x == 1024) ? (x) = 0 : (x)
20 #define SPINLOCK_TRIES 100000
22 //===========================================================================
23 //Normal pthread Q
25 QueueStruc* makeQ()
26 {
27 QueueStruc* retQ;
28 int status;
29 retQ = (QueueStruc *) malloc( sizeof( QueueStruc ) );
32 status = pthread_mutex_init( &retQ->mutex_t, NULL);
33 if (status < 0)
34 {
35 perror("Error in creating mutex:");
36 exit(1);
37 return NULL;
38 }
40 status = pthread_cond_init ( &retQ->cond_w_t, NULL);
41 if (status < 0)
42 {
43 perror("Error in creating cond_var:");
44 exit(1);
45 return NULL;
46 }
48 status = pthread_cond_init ( &retQ->cond_r_t, NULL);
49 if (status < 0)
50 {
51 perror("Error in creating cond_var:");
52 exit(1);
53 return NULL;
54 }
56 retQ->count = 0;
57 retQ->readPos = 0;
58 retQ->writePos = 0;
59 retQ -> w_empty = retQ -> w_full = 0;
61 return retQ;
62 }
64 void * readQ( QueueStruc *Q )
65 { void *ret;
66 int status, wt;
67 pthread_mutex_lock( &Q->mutex_t );
68 {
69 while( Q -> count == 0 )
70 { Q -> w_empty = 1;
71 // pthread_cond_broadcast( &Q->cond_w_t );
72 status = pthread_cond_wait( &Q->cond_r_t, &Q->mutex_t );
73 if (status != 0)
74 { perror("Thread wait error: ");
75 exit(1);
76 }
77 }
78 Q -> w_empty = 0;
79 Q -> count -= 1;
80 ret = Q->data[ Q->readPos ];
81 INC( Q->readPos );
82 wt = Q -> w_full;
83 Q -> w_full = 0;
84 //pthread_cond_broadcast( &Q->cond_w_t );
85 }
86 pthread_mutex_unlock( &Q->mutex_t );
87 if (wt) pthread_cond_signal( &Q->cond_w_t );
89 return( ret );
90 }
92 void writeQ( void * in, QueueStruc* Q )
93 {
94 int status, wt;
95 pthread_mutex_lock( &Q->mutex_t );
96 {
97 while( Q->count >= 1024 )
98 {
99 Q -> w_full = 1;
100 // pthread_cond_broadcast( &Q->cond_r_t );
101 status = pthread_cond_wait( &Q->cond_w_t, &Q->mutex_t );
102 if (status != 0)
103 { perror("Thread wait error: ");
104 exit(1);
105 }
106 }
107 Q -> w_full = 0;
108 Q->count += 1;
109 Q->data[ Q->writePos ] = in;
110 INC( Q->writePos );
111 wt = Q -> w_empty;
112 Q -> w_empty = 0;
113 // pthread_cond_broadcast( &Q->cond_r_t );
114 }
115 pthread_mutex_unlock( &Q->mutex_t );
116 if( wt ) pthread_cond_signal( &Q->cond_r_t );
117 }
120 //===========================================================================
121 // multi reader multi writer fast Q via CAS
122 #ifndef _GNU_SOURCE
123 #define _GNU_SOURCE
125 /*This is a blocking queue, but it uses CAS instr plus yield() when empty
126 * or full
127 *It uses CAS because it's meant to have more than one reader and more than
128 * one writer.
129 */
131 CASQueueStruc* makeCASQ()
132 {
133 CASQueueStruc* retQ;
134 retQ = (CASQueueStruc *) malloc( sizeof( CASQueueStruc ) );
136 retQ->insertLock = UNLOCKED;
137 retQ->extractLock= UNLOCKED;
138 //TODO: check got pointer syntax right
139 retQ->extractPos = &(retQ->startOfData[0]); //side by side == empty
140 retQ->insertPos = &(retQ->startOfData[1]); // so start pos's have to be
141 retQ->endOfData = &(retQ->startOfData[1023]);
143 return retQ;
144 }
/*Blocking read from the multi-reader CAS queue.
 *Readers are serialized by spinning on a CAS of extractLock; once the
 * lock is held, the extract pointer is advanced (wrapped exactly once,
 * written exactly once so other threads never see an out-of-range
 * value) and the slot is read.  On an empty queue the lock is released
 * and the spin continues; after SPINLOCK_TRIES iterations every further
 * failed iteration yields the CPU.
 *Returns the extracted pointer; never returns 0 -- blocks until a value
 * is available.
 *NOTE(review): the unlock is a plain store (Q->extractLock = UNLOCKED)
 * with no memory barrier -- on weakly-ordered hardware the pointer
 * update could become visible after the unlock; confirm the target is
 * x86 or add a release barrier.
 *NOTE(review): control can only leave via the return inside the loop,
 * so the missing return at function end is unreachable (but will draw a
 * compiler warning).
 */
void* readCASQ( CASQueueStruc* Q )
 { void *out = 0;
   int tries = 0;
   void **startOfData = Q->startOfData;
   void **endOfData   = Q->endOfData;

   int success = FALSE;

   while( !success )
    { success =
         __sync_bool_compare_and_swap( &(Q->extractLock), UNLOCKED, LOCKED );
      if( success )
       {
         void **insertPos  = Q->insertPos;
         void **extractPos = Q->extractPos;

            //if not empty -- extract just below insert when empty
         if( insertPos - extractPos != 1 &&
             !(extractPos == endOfData && insertPos == startOfData))
          {    //move before read
            if( extractPos == endOfData ) //write new pos exactly once, correctly
             { Q->extractPos = startOfData; //can't overrun then fix it 'cause
             }                              // other thread might read bad pos
            else
             { Q->extractPos++;
             }
            out = *(Q->extractPos);
            Q->extractLock = UNLOCKED;
            return out;
          }
         else //Q is empty
          { success = FALSE;
            Q->extractLock = UNLOCKED;//have to try again, release for others
          }
       }
         //Q is busy or empty
      tries++;
      if( tries > SPINLOCK_TRIES ) SwitchToThread(); //WinAPI yield()
    }
 }
/*Blocking write to the multi-writer CAS queue.
 *Writers are serialized by spinning on a CAS of insertLock; once the
 * lock is held, the value is stored into the current insert slot and
 * only then is the insert pointer advanced (wrapped exactly once,
 * written exactly once), so a concurrent reader never sees a slot that
 * has not yet been filled.  On a full queue the lock is released and
 * the spin continues; after SPINLOCK_TRIES iterations every further
 * failed iteration yields the CPU.  Blocks until the value is inserted.
 *Fullness is detected purely from the two pointers: extractPos exactly
 * one below insertPos (with wrap) means full.
 *NOTE(review): the unlock is a plain store (Q->insertLock = UNLOCKED)
 * with no memory barrier -- on weakly-ordered hardware the data/pointer
 * stores could become visible after the unlock; confirm the target is
 * x86 or add a release barrier.
 */
void writeCASQ( void * in, CASQueueStruc* Q )
 {
   int tries = 0;
      //TODO: need to make Q volatile? Want to do this Q in assembly!
      //Have no idea what GCC's going to do to this code
   void **startOfData = Q->startOfData;
   void **endOfData   = Q->endOfData;

   int success = FALSE;

   while( !success )
    { success =
         __sync_bool_compare_and_swap( &(Q->insertLock), UNLOCKED, LOCKED );
      if( success )
       {
         void **insertPos  = Q->insertPos;
         void **extractPos = Q->extractPos;

            //check if room to insert.. can't use a count variable
            // 'cause both insertor Thd and extractor Thd would write it
         if( extractPos - insertPos != 1 &&
             !(insertPos == endOfData && extractPos == startOfData))
          { *(Q->insertPos) = in; //insert before move
            if( insertPos == endOfData ) //write new pos exactly once, correctly
             { Q->insertPos = startOfData;
             }
            else
             { Q->insertPos++;
             }
            Q->insertLock = UNLOCKED;
            return;
          }
         else //Q is full
          { success = FALSE;
            Q->insertLock = UNLOCKED;//have to try again, release for others
          }
       }
      tries++;
      if( tries > SPINLOCK_TRIES ) SwitchToThread(); //Win yield()
    }
 }
230 #endif //_GNU_SOURCE
233 //===========================================================================
234 //Single reader single writer super fast Q.. no atomic instrs..
237 /*This is a blocking queue, but it uses no atomic instructions, just does
238 * yield() when empty or full
239 *
240 *It doesn't need any atomic instructions because only a single thread
241 * extracts and only a single thread inserts, and it has no locations that
242 * are written by both. It writes before moving and moves before reading,
243 * and never lets write position and read position be the same, so dis-
244 * synchrony can only ever cause an unnecessary call to yield(), never a
245 * wrong value (by monotonicity of movement of pointers, plus single writer
246 * to pointers, plus sequence of write before change pointer, plus
247 * assumptions that if thread A semantically writes X before Y, then thread
248 * B will see the writes in that order.)
249 */
251 SRSWQueueStruc* makeSRSWQ()
252 {
253 SRSWQueueStruc* retQ;
254 retQ = (SRSWQueueStruc *) malloc( sizeof( SRSWQueueStruc ) );
256 retQ->extractPos = &(retQ->startOfData[0]); //side by side == empty
257 retQ->insertPos = &(retQ->startOfData[1]); // so start pos's have to be
258 retQ->endOfData = &(retQ->startOfData[1023]);
260 return retQ;
261 }
/*Blocking read for the single-reader single-writer queue.
 *No atomics needed: only this (single) reader ever writes extractPos,
 * and the empty test ("insert exactly one above extract", with wrap)
 * can at worst be stale, causing a harmless extra spin -- never a wrong
 * value (see the correctness argument in the block comment above).
 *The wrapped extract pointer is computed and stored exactly once, and
 * the move happens before the read, so the writer never observes an
 * out-of-range or mid-update pointer.
 *Blocks until a value is available; yields the CPU on every iteration
 * after SPINLOCK_TRIES empty checks.
 */
void* readSRSWQ( SRSWQueueStruc* Q )
 { void *out = 0;
   int tries = 0;

   while( TRUE )
    {
      if( Q->insertPos - Q->extractPos != 1 &&
          !(Q->extractPos == Q->endOfData && Q->insertPos == Q->startOfData))
       { if( Q->extractPos >= Q->endOfData ) Q->extractPos = Q->startOfData;
         else Q->extractPos++; //move before read
         out = *(Q->extractPos);
         return out;
       }
         //Q is empty
      tries++;
      if( tries > SPINLOCK_TRIES ) SwitchToThread(); //Win yield()
    }
 }
284 void* readSRSWQ_NonBlocking( SRSWQueueStruc* Q )
285 { void *out = 0;
286 int tries = 0;
288 while( TRUE )
289 {
290 if( Q->insertPos - Q->extractPos != 1 &&
291 !(Q->extractPos == Q->endOfData && Q->insertPos == Q->startOfData))
292 { Q->extractPos++; //move before read
293 if( Q->extractPos > Q->endOfData ) Q->extractPos = Q->startOfData;
294 out = *(Q->extractPos);
295 return out;
296 }
297 //Q is empty
298 tries++;
299 if( tries > 2 ) return 0; //long enough for writer to finish
300 }
301 }
/*Blocking write for the single-reader single-writer queue.
 *No atomics needed: only this (single) writer ever writes insertPos,
 * and the full test ("extract exactly one above insert", with wrap)
 * can at worst be stale, causing a harmless extra spin.
 *The value is stored into the slot BEFORE the insert pointer is
 * advanced, and the wrapped pointer is computed and stored exactly
 * once, so the reader never sees a slot that has not been filled nor an
 * out-of-range pointer.
 *Blocks until the value is inserted; yields the CPU on every iteration
 * after SPINLOCK_TRIES full checks.
 */
void writeSRSWQ( void * in, SRSWQueueStruc* Q )
 {
   int tries = 0;

   while( TRUE )
    {
      if( Q->extractPos - Q->insertPos != 1 &&
          !(Q->insertPos == Q->endOfData && Q->extractPos == Q->startOfData))
       { *(Q->insertPos) = in; //insert before move
         if( Q->insertPos >= Q->endOfData ) Q->insertPos = Q->startOfData;
         else Q->insertPos++;
         return;
       }
         //Q is full
      tries++;
      if( tries > SPINLOCK_TRIES ) SwitchToThread(); //Win yield()
    }
 }
325 //===========================================================================
326 //Single reader Multiple writer super fast Q.. no atomic instrs..
329 /*This is a blocking queue, but it uses no atomic instructions, just does
330 * yield() when empty or full
331 *
332 *It doesn't need any atomic instructions because only a single thread
333 * extracts and only a single thread inserts, and it has no locations that
334 * are written by both. It writes before moving and moves before reading,
335 * and never lets write position and read position be the same, so dis-
336 * synchrony can only ever cause an unnecessary call to yield(), never a
337 * wrong value (by monotonicity of movement of pointers, plus single writer
338 * to pointers, plus sequence of write before change pointer, plus
339 * assumptions that if thread A semantically writes X before Y, then thread
340 * B will see the writes in that order.)
341 *
342 *The multi-writer version is implemented as a hierarchy. Each writer has
343 * its own single-reader single-writer queue. The reader simply does a
344 * round-robin harvesting from them.
345 *
346 *A writer must first register itself with the queue, and receives an ID back
347 * It then uses that ID on each write operation.
348 *
349 *The implementation is:
350 *Physically:
351 * -] the SRMWQueueStruc holds an array of SRSWQueueStruc s
352 * -] it also has read-pointer to the last queue a write was taken from.
353 *
354 *Action-Patterns:
355 * -] To add a writer
356 * --]] writer-thread calls addWriterToQ(), remember the ID it returns
357 * --]] internally addWriterToQ does:
358 * ---]]] if needs more room, makes a larger writer-array
359 * ---]]] copies the old writer-array into the new
360 * ---]]] makes a new SRSW queue an puts it into the array
361 * ---]]] returns the index to the new SRSW queue as the ID
362 * -] To write
363 * --]] writer thread calls writeSRMWQ, passing the Q struc and its writer-ID
364 * --]] this call may block, via repeated yield() calls
365 * --]] internally, writeSRMWQ does:
366 * ---]]] uses the writerID as index to get the SRSW queue for that writer
367 * ---]]] performs writeQ on that queue (may block via repeated yield calls)
368 * -] To Read
369 * --]] reader calls readSRMWQ, passing the Q struc
370 * --]] this call may block, via repeated yield() calls
371 * --]] internally, readSRMWQ does:
372 * ---]]] gets saved index of last SRSW queue read from
373 * ---]]] increments index and gets indexed queue
374 * ---]]] does a non-blocking read of that queue
375 * ---]]] if gets something, saves index and returns that value
376 * ---]]] if gets null, then goes to next queue
377 * ---]]] if got null from all the queues then does yield() then tries again
378 *
379 *Note: "0" is used as the value null, so SRSW queues must only contain
380 * pointers, and cannot use 0 as a valid pointer value.
381 *
382 */
384 SRMWQueueStruc* makeSRMWQ()
385 { SRMWQueueStruc* retQ;
387 retQ = (SRMWQueueStruc *) malloc( sizeof( SRMWQueueStruc ) );
389 retQ->numInternalQs = 0;
390 retQ->internalQsSz = 10;
391 retQ->internalQs = malloc( retQ->internalQsSz * sizeof(SRSWQueueStruc *));
393 retQ->lastQReadFrom = 0;
395 return retQ;
396 }
398 /* ---]]] if needs more room, makes a larger writer-array
399 * ---]]] copies the old writer-array into the new
400 * ---]]] makes a new SRSW queue an puts it into the array
401 * ---]]] returns the index to the new SRSW queue as the ID
402 *
403 *NOTE: assuming all adds are completed before any writes or reads are
404 * performed.. otherwise, this needs to be re-done carefully, probably with
405 * a lock.
406 */
407 int addWriterToSRMWQ( SRMWQueueStruc* Q )
408 { int oldSz, i;
409 SRSWQueueStruc * *oldArray;
411 (Q->numInternalQs)++;
412 if( Q->numInternalQs >= Q->internalQsSz )
413 { //full, so make bigger
414 oldSz = Q->internalQsSz;
415 oldArray = Q->internalQs;
416 Q->internalQsSz *= 2;
417 Q->internalQs = malloc( Q->internalQsSz * sizeof(SRSWQueueStruc *));
418 for( i = 0; i < oldSz; i++ )
419 { Q->internalQs[i] = oldArray[i];
420 }
421 free( oldArray );
422 }
423 Q->internalQs[ Q->numInternalQs - 1 ] = makeSRSWQ();
424 return Q->numInternalQs - 1;
425 }
428 /* ---]]] gets saved index of last SRSW queue read-from
429 * ---]]] increments index and gets indexed queue
430 * ---]]] does a non-blocking read of that queue
431 * ---]]] if gets something, saves index and returns that value
432 * ---]]] if gets null, then goes to next queue
433 * ---]]] if got null from all the queues then does yield() then tries again
434 */
/*Blocking read from the single-reader multi-writer queue.
 *Round-robin harvest: starts just after the queue the last value came
 * from, does a non-blocking read of each per-writer SRSW queue in turn,
 * and returns the first non-0 value found (saving its index as the new
 * cursor).  When the scan wraps all the way back to the cursor with
 * nothing found, all queues were empty; after SPINLOCK_TRIES such empty
 * rounds the reader yields the CPU on each further empty round.
 *Relies on 0 being the "empty" sentinel of readSRSWQ_NonBlocking, so 0
 * must never be stored as a value (enforced in writeSRMWQ).
 *NOTE(review): assumes at least one writer has registered -- with
 * numInternalQs == 0 this reads internalQs[0] uninitialized; confirm
 * callers register writers before the first read.
 *NOTE(review): the local variable readQ shadows the file's readQ()
 * function -- legal, but worth renaming someday.
 */
void* readSRMWQ( SRMWQueueStruc* Q )
 { SRSWQueueStruc *readQ;
   void *readValue = 0;
   int tries = 0;
   int QToReadFrom = 0;

   QToReadFrom = Q->lastQReadFrom;

   while( TRUE )
    { QToReadFrom++; //advance cursor, wrapping over the registered queues
      if( QToReadFrom >= Q->numInternalQs ) QToReadFrom = 0;
      readQ = Q->internalQs[ QToReadFrom ];
      readValue = readSRSWQ_NonBlocking( readQ );

      if( readValue != 0 ) //got a value, return it
       { Q->lastQReadFrom = QToReadFrom;
         return readValue;
       }
      else //SRSW Q just read is empty
       { //check if all queues have been tried
         if( QToReadFrom == Q->lastQReadFrom ) //all the queues tried & empty
          { tries++; //give a writer a chance to finish before yield
            if( tries > SPINLOCK_TRIES ) SwitchToThread(); //Win yield()
          }
       }
    }
 }
464 /*
465 * ---]]] uses the writerID as index to get the SRSW queue for that writer
466 * ---]]] performs writeQ on that queue (may block via repeated yield calls)
467 */
468 void writeSRMWQ( void * in, SRMWQueueStruc* Q, int writerID )
469 {
470 if( in == 0 ) printf( "error, wrote 0 to SRMW Q" );//TODO: throw an error
472 writeSRSWQ( in, Q->internalQs[ writerID ] );
473 }