/*
==================================================================================
    Copyright (c) 2019 Nokia
    Copyright (c) 2018-2019 AT&T Intellectual Property.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
==================================================================================
*/
/*
    Mnemonic:   ring_static.c
    Abstract:   Implements a ring of information (probably to act as a
                message queue).
    Author:     E. Scott Daniels
*/
#ifndef _ring_static_c
#define _ring_static_c

#include <sys/eventfd.h>

#define RING_FAST 1         // when set we skip nil pointer checks on the ring pointer
/*
    This returns the ring's pollable file descriptor. If one does not exist, then
    it is created.
*/
static int uta_ring_getpfd( void* vr ) {
    if( !RING_FAST ) {                              // compiler should drop the conditional when always false
        if( (r = (ring_t*) vr) == NULL ) {

    r->pfd = eventfd( 0, EFD_SEMAPHORE | EFD_NONBLOCK );
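
/*
    Editor's example (not part of the original source): a sketch of waiting on
    the pollable descriptor so a caller blocks until something is inserted
    instead of spinning on extract calls. The helper name and the choice of
    epoll are illustrative assumptions; a real caller would likely create the
    epoll instance once and reuse it. close() assumes unistd.h is included,
    which the read()/write() calls later in this file already require.
*/
#include <sys/epoll.h>

static int example_wait_for_data( void* ring, int timeout_ms ) {
    struct epoll_event  ev;
    struct epoll_event  events[1];
    int                 efd;
    int                 ready;

    if( (efd = epoll_create1( 0 )) < 0 ) {
        return 0;
    }

    ev.events = EPOLLIN;
    ev.data.fd = uta_ring_getpfd( ring );
    if( epoll_ctl( efd, EPOLL_CTL_ADD, ev.data.fd, &ev ) != 0 ) {
        close( efd );
        return 0;
    }

    ready = epoll_wait( efd, events, 1, timeout_ms ) > 0;   // >0 means at least one entry is queued
    close( efd );

    return ready;       // caller should now call uta_ring_extract() and still handle a nil return
}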
/*
    Make a new ring. The default is to NOT create a lock; if the user
    wants read locking then uta_ring_config() can be used to set up the
    mutex. (We use several rings internally and the assumption is that
    there is no locking for these.)
*/
static void* uta_mk_ring( int size ) {
    if( size <= 0 || (r = (ring_t *) malloc( sizeof( *r ) )) == NULL ) {

    r->head = r->tail = 0;

    r->nelements = size;        // because we always have an empty element when full
    if( (r->data = (void **) malloc( sizeof( void* ) * (r->nelements + 1) )) == NULL ) {

    memset( r->data, 0, sizeof( void* ) * r->nelements );
    r->pfd = eventfd( 0, EFD_SEMAPHORE | EFD_NONBLOCK );        // in semaphore mode counter is maintained with each insert/extract
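
/*
    Editor's note: ring_t itself is not defined in this file. Based only on the
    fields the code above touches, it looks roughly like the sketch below; it is
    kept inside a comment because the real declaration (in an RMR internal
    header) may use different types, names, or field order.

        typedef struct ring {
            uint16_t            head;       // next insert position
            uint16_t            tail;       // next extract position
            uint16_t            nelements;  // capacity; one slot is always left empty
            void**              data;       // caller-owned pointers held by the ring
            int                 pfd;        // pollable eventfd; counts queued entries in semaphore mode
            pthread_mutex_t*    rgate;      // read gate; nil when reads are unlocked
            pthread_mutex_t*    wgate;      // write gate; nil when writes are unlocked
            int                 flags;      // RING_FL_* behaviour flags (e.g. fast read locking)
        } ring_t;
*/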
/*
    Allows for configuration of a ring after it has been allocated.
    Options are RING_* options that allow for things like setting/clearing
    read locking. Returns 0 for failure, 1 on success.

    Options can be OR'd together and all made effective at the same time, but
    it will be impossible to determine a specific failure if invoked this
    way. Control is returned on the first error, and no provision is made
    to "undo" previously set options if an error occurs.
*/
static int uta_ring_config( void* vr, int options ) {
    if( (r = (ring_t*) vr) == NULL ) {

    if( options & RING_WLOCK ) {
        if( r->wgate == NULL ) {        // don't realloc
            r->wgate = (pthread_mutex_t *) malloc( sizeof( *r->wgate ) );
            if( r->wgate == NULL ) {

            pthread_mutex_init( r->wgate, NULL );

    if( options & (RING_RLOCK | RING_FRLOCK) ) {    // read locking
        if( r->rgate == NULL ) {        // don't realloc
            r->rgate = (pthread_mutex_t *) malloc( sizeof( *r->rgate ) );
            if( r->rgate == NULL ) {

            pthread_mutex_init( r->rgate, NULL );

        if( options & RING_FRLOCK ) {
            r->flags |= RING_FL_FLOCK;
/*
    Ditch the ring. The caller is responsible for extracting any remaining
    pointers and freeing them as needed.
*/
static void uta_ring_free( void* vr ) {
    if( (r = (ring_t*) vr) == NULL ) {
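
/*
    Editor's example (not part of the original source): a sketch of creating a
    ring and then switching locking on with uta_ring_config(). Applying the
    options one call at a time sidesteps the caveat above about not being able
    to tell which of several OR'd options failed. The size and the helper name
    are arbitrary.
*/
static void* example_make_locked_ring( void ) {
    void*   r;

    if( (r = uta_mk_ring( 256 )) == NULL ) {
        return NULL;
    }

    if( ! uta_ring_config( r, RING_WLOCK ) ||       // serialise writers
        ! uta_ring_config( r, RING_FRLOCK ) ) {     // fast (try-lock) reads
        uta_ring_free( r );                         // nothing queued yet, so a straight free is safe
        return NULL;
    }

    return r;
}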
/*
    Pull the next data pointer from the ring; null if there isn't
    anything to be pulled.

    If the read lock exists for the ring, then this will BLOCK until
    it gets the lock. There is always a chance that once the lock
    is obtained that the ring is empty, so the caller MUST handle
    a nil pointer as the return.
*/
static inline void* uta_ring_extract( void* vr ) {
    uint16_t    ti;         // real index in data
    int64_t     ctr;        // pfd counter

    if( !RING_FAST ) {                              // compiler should drop the conditional when always false
        if( (r = (ring_t*) vr) == NULL ) {

    if( r->tail == r->head ) {                      // empty ring we can bail out quickly

    if( r->rgate != NULL ) {                        // if lock exists we must honour it
        if( r->flags & RING_FL_FLOCK ) {            // fast read locking: try once and return nil if we can't lock
            if( pthread_mutex_trylock( r->rgate ) != 0 ) {  // quick fail if not able to get a lock

        if( pthread_mutex_lock( r->rgate ) != 0 ) {

        if( r->tail == r->head ) {                  // ensure ring didn't go empty while waiting
            pthread_mutex_unlock( r->rgate );

    if( r->tail >= r->nelements ) {

    read( r->pfd, &ctr, sizeof( ctr ) );            // when not in semaphore mode, this zeros the counter and the value is meaningless

    /*
        future -- investigate if it's possible only to set/clear when empty or going to empty
        if( r->tail == r->head ) {                  // if this emptied the ring, turn off ready
    */

    data = r->data[ti];                             // secure data and clear before letting go of the lock

    if( r->rgate != NULL ) {                        // if locked above...
        pthread_mutex_unlock( r->rgate );
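
/*
    Editor's example (not part of the original source): because extract can
    return nil for a momentarily empty ring or a lost try-lock race, a consumer
    simply loops until nil. The same loop is how a ring should be drained
    before uta_ring_free(), since the ring never owns the queued pointers. The
    free() on each entry assumes the caller queued malloc'd blocks; adjust to
    whatever ownership model is really in use.
*/
static void example_drain_and_free( void* ring ) {
    void*   data;

    while( (data = uta_ring_extract( ring )) != NULL ) {
        free( data );           // release (or otherwise hand off) each remaining entry
    }

    uta_ring_free( ring );
}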
/*
    Insert the pointer at the next open space in the ring.
    Returns 1 if the insert was ok, and 0 if there is an error;
    errno will be set to EXFULL if the ring is full. If the attempt
    fails with any other error, that indicates the inability to obtain
    a lock on the ring.
*/
static inline int uta_ring_insert( void* vr, void* new_data ) {
    int64_t inc = 1;            // used to set the counter in the pfd

    if( !RING_FAST ) {                              // compiler should drop the conditional when always false
        if( (r = (ring_t*) vr) == NULL ) {

    if( r->wgate != NULL ) {                        // if lock exists we must honour it
        if( pthread_mutex_lock( r->wgate ) != 0 ) {
            return 0;                               // leave mutex reason in place

    if( r->head+1 == r->tail || (r->head+1 >= r->nelements && !r->tail) ) {     // ring is full
        if( r->wgate != NULL ) {                    // ensure released if needed
            pthread_mutex_unlock( r->wgate );

    r->data[r->head] = new_data;

    if( r->head >= r->nelements ) {

    write( r->pfd, &inc, sizeof( inc ) );

    /*
        future -- investigate if it's possible only to set/clear when empty or going to empty
        if( r->tail == r->head ) {                  // turn on ready if ring was empty
    */

    if( r->wgate != NULL ) {                        // if lock exists we must unlock before going
        pthread_mutex_unlock( r->wgate );
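
/*
    Editor's example (not part of the original source): a producer can use
    errno to tell a full ring (EXFULL) apart from a lock failure and decide
    whether to retry or give up. The retry count and pause are arbitrary;
    usleep() assumes unistd.h is included, which the read()/write() calls
    above already require.
*/
static int example_offer( void* ring, void* payload ) {
    int tries;

    for( tries = 0; tries < 3; tries++ ) {
        if( uta_ring_insert( ring, payload ) ) {
            return 1;                           // accepted; the ring now holds the pointer
        }

        if( errno != EXFULL ) {                 // lock trouble rather than a full ring; don't bother retrying
            return 0;
        }

        usleep( 100 );                          // brief pause to let a consumer catch up
    }

    return 0;                                   // still full; caller keeps ownership of payload
}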