3 ==================================================================================
4 Copyright (c) 2019 Nokia
5 Copyright (c) 2018-2019 AT&T Intellectual Property.
7 Licensed under the Apache License, Version 2.0 (the "License");
8 you may not use this file except in compliance with the License.
9 You may obtain a copy of the License at
11 http://www.apache.org/licenses/LICENSE-2.0
13 Unless required by applicable law or agreed to in writing, software
14 distributed under the License is distributed on an "AS IS" BASIS,
15 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 See the License for the specific language governing permissions and
17 limitations under the License.
18 ==================================================================================
21 Mnemonic: ring_static.c
22 Abstract: Implements a ring of information (probably to act as a
24 Author: E. Scott Daniels
28 #ifndef _ring_static_c
29 #define _ring_static_c
38 #include <sys/eventfd.h>
40 #define RING_FAST 1 // when set we skip nil pointer checks on the ring pointer
43 This returns the ring's pollable file descriptor. If one does not exist, then
46 static int uta_ring_getpfd( void* vr ) {
// NOTE(review): this extraction is sampled -- original line numbers are embedded as
// prefixes and interior lines (the ring_t* declaration, the error return for a nil
// ring, closing braces, and the final return) are not visible. Comments describe
// only what the visible lines show.
//
// Returns the ring's pollable file descriptor, (re)creating it via eventfd().
// When RING_FAST is set the nil-pointer guard is compiled out.
49 	if( !RING_FAST ) { // compiler should drop the conditional when always false
50 		if( (r = (ring_t*) vr) == NULL ) {
// (missing lines presumably return an error indicator here -- TODO confirm)
58 	r->pfd = eventfd( 0, EFD_SEMAPHORE | EFD_NONBLOCK );	// semaphore mode: counter tracks inserts/extracts; non-blocking so reads never stall
65 Make a new ring. The default is to NOT create a lock; if the user
66 wants read locking then uta_config_ring() can be used to setup the
67 mutex. (We use several rings internally and the assumption is that
68 there is no locking for these.)
70 static void* uta_mk_ring( int size ) {
// Allocate and initialise a new ring able to hold 'size' elements.
// Visible behaviour: allocates the ring_t, zeros head/tail, allocates the data
// array, and creates a semaphore-mode eventfd for polling. Failure paths
// (returns on malloc failure) are not visible in this sampled extraction.
77 	if( (r = (ring_t *) malloc( sizeof( *r ) )) == NULL ) {
84 	r->head = r->tail = 0;	// head == tail denotes an empty ring
91 	r->nelements = size; // because we always have an empty element when full
92 	if( (r->data = (void **) malloc( sizeof( void* ) * (r->nelements + 1) )) == NULL ) {
// NOTE(review): data is allocated with nelements+1 slots, but the memset below
// clears only nelements of them -- the final slot is left uninitialised. Likely
// benign (the +1 slot is the always-empty sentinel), but confirm intent.
97 	memset( r->data, 0, sizeof( void* ) * r->nelements );
98 	r->pfd = eventfd( 0, EFD_SEMAPHORE | EFD_NONBLOCK ); // in semaphore mode counter is maintained with each insert/extract
103 Allows for configuration of a ring after it has been allocated.
104 Options are RING_* options that allow for things like setting/clearing
105 read locking. Returns 0 for failure 1 on success.
107 Options can be ORd together and all made effective at the same time, but
108 it will be impossible to determine a specific failure if invoked this
109 way. Control is returned on the first error, and no provision is made
110 to "undo" previously set options if an error occurs.
112 static int uta_ring_config( void* vr, int options ) {
// Apply RING_* options to an existing ring: RING_WLOCK lazily allocates and
// initialises the write mutex (wgate); RING_RLOCK/RING_FRLOCK do the same for
// the read mutex (rgate); RING_FRLOCK additionally sets RING_FL_FLOCK so reads
// use trylock ("fast" locking). Per the header comment, control returns on the
// first error with no rollback of options already applied. Failure returns and
// closing braces are missing from this sampled extraction.
115 	if( (r = (ring_t*) vr) == NULL ) {
// (missing lines presumably set errno and return 0 for a nil ring -- TODO confirm)
120 	if( options & RING_WLOCK && r->wgate == NULL ) { // don't realloc if we have one
121 		r->wgate = (pthread_mutex_t *) malloc( sizeof( *r->wgate ) );
122 		if( r->wgate == NULL ) {
126 		pthread_mutex_init( r->wgate, NULL );
129 	if( options & (RING_RLOCK | RING_FRLOCK) ) { // read locking
130 		if( r->rgate == NULL ) { // don't realloc
131 			r->rgate = (pthread_mutex_t *) malloc( sizeof( *r->rgate ) );
132 			if( r->rgate == NULL ) {
136 			pthread_mutex_init( r->rgate, NULL );
138 		if( options & RING_FRLOCK ) {
139 			r->flags |= RING_FL_FLOCK;	// mark reads to use pthread_mutex_trylock rather than blocking
147 Ditch the ring. The caller is responsible for extracting any remaining
148 pointers and freeing them as needed.
150 static void uta_ring_free( void* vr ) {
// Destroy the ring. Per the header comment the caller owns any data pointers
// still queued and must extract/free them first. A nil ring is tolerated via
// the guard below; the actual free/teardown lines are not visible in this
// sampled extraction.
153 	if( (r = (ring_t*) vr) == NULL ) {
170 Pull the next data pointer from the ring; null if there isn't
171 anything to be pulled.
173 If the read lock exists for the ring, then this will BLOCK until
174 it gets the lock. There is always a chance that once the lock
175 is obtained that the ring is empty, so the caller MUST handle
176 a nil pointer as the return.
178 static inline void* uta_ring_extract( void* vr ) {
// Pull the next data pointer from the ring; nil when the ring is empty.
// Locking: if rgate exists, either trylock (RING_FL_FLOCK set -- returns nil on
// contention) or a blocking lock is taken; the empty check is repeated after the
// lock is won since the ring may have drained while waiting. The eventfd counter
// is decremented via read(). Several interior lines (error returns, the tail
// advance/wrap assignment, the data-slot clear, final return) are missing from
// this sampled extraction -- comments describe only what is visible.
180 uint16_t ti; // real index in data
181 int64_t ctr; // pfd counter
184 if( !RING_FAST ) { // compiler should drop the conditional when always false
185 if( (r = (ring_t*) vr) == NULL ) {
192 if( r->tail == r->head ) { // empty ring we can bail out quickly
196 if( r->rgate != NULL ) { // if lock exists we must honour it
197 if( r->flags & RING_FL_FLOCK ) { // fast read locking try once and return nil if we cant lock
198 if( pthread_mutex_trylock( r->rgate ) != 0 ) { // quick fail if not able to get a lock
202 if( pthread_mutex_lock( r->rgate ) != 0 ) {
206 if( r->tail == r->head ) { // ensure ring didn't go empty while waiting
207 pthread_mutex_unlock( r->rgate );
// (missing lines presumably capture tail into ti and advance/wrap tail -- TODO confirm)
214 if( r->tail >= r->nelements ) {
// NOTE(review): read() return value is ignored; with EFD_NONBLOCK an empty
// counter yields EAGAIN, which appears to be deliberately tolerated here.
218 read( r->pfd, &ctr, sizeof( ctr ) ); // when not in semaphore, this zeros the counter and value is meaningless
220 future -- investigate if it's possible only to set/clear when empty or going to empty
221 if( r->tail == r->head ) { // if this emptied the ring, turn off ready
225 data = r->data[ti]; // secure data and clear before letting go of the lock
228 if( r->rgate != NULL ) { // if locked above...
229 pthread_mutex_unlock( r->rgate );
237 Insert the pointer at the next open space in the ring.
238 Returns 1 if the insert was ok, and 0 if there is an error;
239 errno will be set to EXFULL if the ring is full, if the attempt
240 fails with any other error that indicates the inability to obtain
243 static inline int uta_ring_insert( void* vr, void* new_data ) {
// Insert new_data at the next open slot. Returns 1 on success, 0 on failure
// (full ring or inability to take the write lock -- errno conveys the reason per
// the header comment). If wgate exists it is held across the full check and the
// slot write, and released on both the full-ring path and the success path. The
// eventfd counter is bumped via write(). Interior lines (the EXFULL errno set,
// head advance/wrap, ready-flag handling, final return) are missing from this
// sampled extraction -- comments describe only what is visible.
245 int64_t inc = 1; // used to set the counter in the pfd
247 if( !RING_FAST ) { // compiler should drop the conditional when always false
248 if( (r = (ring_t*) vr) == NULL ) {
255 if( r->wgate != NULL ) { // if lock exists we must honour it
256 if( pthread_mutex_lock( r->wgate ) != 0 ) {
257 return 0; // leave mutex reason in place
// full test: next head would collide with tail, including the wrap case where
// head+1 runs past the end while tail sits at slot 0
261 if( r->head+1 == r->tail || (r->head+1 >= r->nelements && !r->tail) ) { // ring is full
262 if( r->wgate != NULL ) { // ensure released if needed
263 pthread_mutex_unlock( r->wgate );
269 r->data[r->head] = new_data;
271 if( r->head >= r->nelements ) {
// NOTE(review): write() return value is ignored; on a non-blocking eventfd a
// counter at max would fail with EAGAIN -- presumably acceptable here, confirm.
275 write( r->pfd, &inc, sizeof( inc ) );
277 future -- investigate if it's possible only to set/clear when empty or going to empty
278 if( r->tail == r->head ) { // turn on ready if ring was empty
282 if( r->wgate != NULL ) { // if lock exists we must unlock before going
283 pthread_mutex_unlock( r->wgate );