ring_t* r;
uint16_t max;
- if( size <= 0 || (r = (ring_t *) malloc( sizeof( *r ) )) == NULL ) {
+ if( size <= 0 ) {
+ return NULL;
+ }
+ if( (r = (ring_t *) malloc( sizeof( *r ) )) == NULL ) {
return NULL;
}
+ r->flags = 0;
r->rgate = NULL;
r->wgate = NULL;
r->head = r->tail = 0;
}
r->nelements = size; // because we always have an empty element when full
- if( (r->data = (void **) malloc( sizeof( void** ) * (r->nelements + 1) )) == NULL ) {
+ if( (r->data = (void **) malloc( sizeof( void* ) * (r->nelements + 1) )) == NULL ) {
free( r );
return NULL;
}
- memset( r->data, 0, sizeof( void** ) * r->nelements );
+ memset( r->data, 0, sizeof( void* ) * r->nelements );
r->pfd = eventfd( 0, EFD_SEMAPHORE | EFD_NONBLOCK ); // in semaphore mode counter is maintained with each insert/extract
return (void *) r;
}
return 0;
}
- if( options & RING_WLOCK ) {
- if( r->wgate == NULL ) { // don't realloc
- r->wgate = (pthread_mutex_t *) malloc( sizeof( *r->wgate ) );
- if( r->wgate == NULL ) {
- return 0;
- }
-
- pthread_mutex_init( r->wgate, NULL );
+ if( options & RING_WLOCK && r->wgate == NULL ) { // don't realloc if we have one
+ r->wgate = (pthread_mutex_t *) malloc( sizeof( *r->wgate ) );
+ if( r->wgate == NULL ) {
+ return 0;
}
+
+ pthread_mutex_init( r->wgate, NULL );
}
- if( options & RING_RLOCK ) {
+ if( options & (RING_RLOCK | RING_FRLOCK) ) { // read locking
if( r->rgate == NULL ) { // don't realloc
r->rgate = (pthread_mutex_t *) malloc( sizeof( *r->rgate ) );
if( r->rgate == NULL ) {
pthread_mutex_init( r->rgate, NULL );
}
+ if( options & RING_FRLOCK ) {
+ r->flags |= RING_FL_FLOCK;
+ }
}
return 1;
if( (r = (ring_t*) vr) == NULL ) {
return;
}
-
+ if( r->data ){
+ free( r->data );
+ }
+ if( r->rgate ){
+ free( r->rgate );
+ }
+ if( r->wgate ){
+ free( r->wgate );
+ }
free( r );
}
return NULL;
}
- if( r->rgate != NULL ) { // if lock exists we must honour it
- pthread_mutex_lock( r->rgate );
+ if( r->rgate != NULL ) { // if lock exists we must honour it
+ if( r->flags & RING_FL_FLOCK ) { // fast read locking: try once and return nil if we can't lock
+ if( pthread_mutex_trylock( r->rgate ) != 0 ) { // quick fail if not able to get a lock
+ return NULL;
+ }
+ } else {
+ if( pthread_mutex_lock( r->rgate ) != 0 ) {
+ return NULL;
+ }
+ }
if( r->tail == r->head ) { // ensure ring didn't go empty while waiting
+ pthread_mutex_unlock( r->rgate );
return NULL;
}
}
return data;
}
+
/*
Insert the pointer at the next open space in the ring.
- Returns 1 if the inert was ok, and 0 if the ring is full.
+ Returns 1 if the insert was ok, and 0 if there is an error;
+ errno will be set to EXFULL if the ring is full; any other
+ error indicates the inability to obtain a lock on the
+ ring.
*/
static inline int uta_ring_insert( void* vr, void* new_data ) {
ring_t* r;
}
if( r->wgate != NULL ) { // if lock exists we must honour it
- pthread_mutex_lock( r->wgate );
+ if( pthread_mutex_lock( r->wgate ) != 0 ) {
+ return 0; // leave mutex reason in place
+ }
}
if( r->head+1 == r->tail || (r->head+1 >= r->nelements && !r->tail) ) { // ring is full
- if( r->wgate != NULL ) { // ensure released if needed
+ if( r->wgate != NULL ) { // ensure released if needed
pthread_mutex_unlock( r->wgate );
}
+ errno = EXFULL;
return 0;
}