1 // vim: ts=4 sw=4 noet :
3 ==================================================================================
4 Copyright (c) 2019-2020 Nokia
5 Copyright (c) 2018-2020 AT&T Intellectual Property.
7 Licensed under the Apache License, Version 2.0 (the "License");
8 you may not use this file except in compliance with the License.
9 You may obtain a copy of the License at
11 http://www.apache.org/licenses/LICENSE-2.0
13 Unless required by applicable law or agreed to in writing, software
14 distributed under the License is distributed on an "AS IS" BASIS,
15 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 See the License for the specific language governing permissions and
17 limitations under the License.
18 ==================================================================================
22 Mnemonic: rtable_si_static.c
23 Abstract: Route table management functions which depend on the underlying
24 transport mechanism and thus cannot be included with the generic
25 route table functions.
27 This module is designed to be included by any module (main) needing
28 the static/private stuff.
30 Author: E. Scott Daniels
31 Date: 29 November 2018
34 #ifndef rtable_static_c
35 #define rtable_static_c
44 #include <sys/types.h>
49 // -----------------------------------------------------------------------------------------------------
52 Mark an endpoint closed because it's in a failing state.
54 static void uta_ep_failed( endpoint_t* ep ) {
	// NOTE(review): interior lines are elided in this view; presumably they mark the endpoint
	// closed (e.g. ep->open = FALSE) so the next send reconnects — confirm against full source.
56 if( DEBUG ) rmr_vlog( RMR_VL_DEBUG, "connection to %s was closed\n", ep->name );
62 Establish a TCP connection to the indicated target (IP address).
63 Target assumed to be address:port. The new socket is returned via the
64 user supplied pointer so that a success/fail code is returned directly.
65 Return value is 0 (false) on failure, 1 (true) on success.
67 In order to support multi-threaded user applications we must hold a lock before
68 we attempt to create a dialer and connect. NNG is thread safe, but we can
69 get things into a bad state if we allow a collision here. The lock grab
70 only happens on the initial session setup.
72 //static int uta_link2( si_ctx_t* si_ctx, endpoint_t* ep ) {
73 static int uta_link2( uta_ctx_t *ctx, endpoint_t* ep ) {
76 char conn_info[SI_MAX_ADDR_LEN]; // string to give to nano to make the connection
	// guard: a nil endpoint cannot be connected (elided lines presumably return FALSE here)
82 if( DEBUG ) rmr_vlog( RMR_VL_DEBUG, "link2 ep was nil!\n" );
86 target = ep->name; // always give name to transport so changing dest IP does not break reconnect
87 if( target == NULL || (addr = strchr( target, ':' )) == NULL ) { // bad address:port
89 rmr_vlog( RMR_VL_WARN, "rmr: link2: unable to create link: bad target: %s\n", target == NULL ? "<nil>" : target );
	// serialise session setup: multiple app threads may try to connect the same ep concurrently
95 pthread_mutex_lock( &ep->gate ); // grab the lock
	// elided: presumably an "already open" re-check under the lock before the early unlock below — confirm
97 pthread_mutex_unlock( &ep->gate );
101 snprintf( conn_info, sizeof( conn_info ), "%s", target );
103 if( DEBUG > 1 ) rmr_vlog( RMR_VL_DEBUG, "link2 attempting connection with: %s\n", conn_info );
	// SIconnect establishes the transport session; returns the fd (< 0 on failure)
104 if( (ep->nn_sock = SIconnect( ctx->si_ctx, conn_info )) < 0 ) {
105 pthread_mutex_unlock( &ep->gate );
107 if( ep->notify ) { // need to notify if set
108 rmr_vlog( RMR_VL_WARN, "rmr: link2: unable to connect to target: %s: %d %s\n", target, errno, strerror( errno ) );
114 if( DEBUG ) rmr_vlog( RMR_VL_INFO, "rmr_si_link2: connection was successful to: %s\n", target );
116 ep->open = TRUE; // set open/notify before giving up lock
117 fd2ep_add( ctx, ep->nn_sock, ep ); // map fd to ep for disc cleanup (while we have the lock)
119 if( ! ep->notify ) { // if we yammered about a failure, indicate finally good
	// NOTE(review): "establisehd" typo in the log message below — fix in a code change, not here
120 rmr_vlog( RMR_VL_INFO, "rmr: link2: connection finally establisehd with target: %s\n", target );
124 pthread_mutex_unlock( &ep->gate );
129 This provides a protocol independent mechanism for establishing the connection to an endpoint.
130 Return is true (1) if the link was opened; false on error.
132 static int rt_link2_ep( void* vctx, endpoint_t* ep ) {
	// short-circuit: an endpoint that is already open needs no work
139 if( ep->open ) { // already open, do nothing
	// recover the typed context from the opaque pointer; bail if caller passed nil
143 if( (ctx = (uta_ctx_t *) vctx) == NULL ) {
	// delegate the actual session creation to the transport-specific helper
147 uta_link2( ctx, ep );
153 Add an endpoint to a route table entry for the group given. If the endpoint isn't in the
154 hash we add it and create the endpoint struct.
156 The caller must supply the specific route table (we assume it will be pending, but they
157 could live on the edge and update the active one, though that's not at all a good idea).
159 extern endpoint_t* uta_add_ep( route_table_t* rt, rtable_ent_t* rte, char* ep_name, int group ) {
161 rrgroup_t* rrg; // pointer at group to update
163 if( ! rte || ! rt ) {
	// NOTE(review): log message says "uda_add_ep" (typo for uta_add_ep) — string literal, fix in a code change
164 rmr_vlog( RMR_VL_WARN, "uda_add_ep didn't get a valid rt and/or rte pointer\n" );
	// group index must be within the range allocated for this route entry
168 if( rte->nrrgroups <= group || group < 0 ) {
169 rmr_vlog( RMR_VL_WARN, "uda_add_ep group out of range: %d (max == %d)\n", group, rte->nrrgroups );
173 //fprintf( stderr, ">>>> add ep grp=%d to rte @ 0x%p rrg=%p\n", group, rte, rte->rrgroups[group] );
	// lazily create the round-robin group on first endpoint added to it
174 if( (rrg = rte->rrgroups[group]) == NULL ) {
175 if( (rrg = (rrgroup_t *) malloc( sizeof( *rrg ) )) == NULL ) {
176 rmr_vlog( RMR_VL_WARN, "rmr_add_ep: malloc failed for round robin group: group=%d\n", group );
179 memset( rrg, 0, sizeof( *rrg ) );
	// NOTE(review): epts is an array of pointers, so sizeof( endpoint_t * ) would suffice;
	// sizeof( endpoint_t ) over-allocates (harmless but wasteful) — consider fixing in a code change
181 if( (rrg->epts = (endpoint_t **) malloc( sizeof( endpoint_t ) * MAX_EP_GROUP )) == NULL ) {
182 rmr_vlog( RMR_VL_WARN, "rmr_add_ep: malloc failed for group endpoint array: group=%d\n", group );
185 memset( rrg->epts, 0, sizeof( endpoint_t ) * MAX_EP_GROUP );
187 rte->rrgroups[group] = rrg;
188 //fprintf( stderr, ">>>> added new rrg grp=%d to rte @ 0x%p rrg=%p\n", group, rte, rte->rrgroups[group] );
190 rrg->ep_idx = 0; // next endpoint to send to
191 rrg->nused = 0; // number populated
192 rrg->nendpts = MAX_EP_GROUP; // number allocated
194 if( DEBUG > 1 ) rmr_vlog( RMR_VL_DEBUG, "rrg added to rte: mtype=%d group=%d\n", rte->mtype, group );
197 ep = rt_ensure_ep( rt, ep_name ); // get the ep and create one if not known
	// fixed-size group: refuse the add when full rather than reallocating (future work per comment)
200 if( rrg->nused >= rrg->nendpts ) {
201 // future: reallocate
202 rmr_vlog( RMR_VL_WARN, "endpoint array for mtype/group %d/%d is full!\n", rte->mtype, group );
206 rrg->epts[rrg->nused] = ep;
210 if( DEBUG > 1 ) rmr_vlog( RMR_VL_DEBUG, "endpoint added to mtype/group: %d/%d %s nused=%d\n", rte->mtype, group, ep_name, rrg->nused );
216 Given a name, find the nano socket needed to send to it. Returns the socket via
217 the user pointer passed in and sets the return value to true (1). If the
218 endpoint cannot be found false (0) is returned.
220 static int uta_epsock_byname( uta_ctx_t* ctx, char* ep_name, int* nn_sock, endpoint_t** uepp ) {
221 route_table_t* rt = NULL;
	// optional defensive validation of ctx, route table, and transport context
226 if( PARANOID_CHECKS ) {
227 if( ctx == NULL || (rt = ctx->rtable) == NULL || (si_ctx = ctx->si_ctx) == NULL ) {
	// NOTE(review): "parinoia" typo in the log string below — string literal, fix in a code change
228 if( DEBUG ) rmr_vlog( RMR_VL_DEBUG, "epsock_byname: parinoia check pop ctx=%p rt=%p\n", ctx, rt );
232 rt = ctx->rtable; // faster but more risky
233 si_ctx = ctx->si_ctx;
236 ep = rmr_sym_get( rt->hash, ep_name, 1 );
	// NOTE(review): elided lines around here presumably gate this log on ep == NULL — confirm against full source
237 if( DEBUG ) rmr_vlog( RMR_VL_DEBUG, "epsock_byname: ep not found: %s\n", ep_name );
238 if( uepp != NULL ) { // caller needs endpoint too, give it back
242 if( DEBUG ) rmr_vlog( RMR_VL_DEBUG, "get ep by name for %s not in hash!\n", ep_name );
243 if( ! ep_name || (ep = rt_ensure_ep( rt, ep_name)) == NULL ) { // create one if not in rt (support rts without entry in our table)
248 if( ! ep->open ) { // not open -- connect now
249 if( DEBUG ) rmr_vlog( RMR_VL_DEBUG, "get ep by name for %s session not started... starting\n", ep_name );
250 if( ep->addr == NULL ) { // name didn't resolve before, try again
251 ep->addr = strdup( ep->name ); // use the name directly; if not IP then transport will do dns lookup
253 if( uta_link2( ctx, ep ) ) { // find entry in table and create link
256 *nn_sock = ep->nn_sock; // pass socket back to caller
257 fd2ep_add( ctx, ep->nn_sock, ep ); // map fd to this ep for disc cleanup
259 if( DEBUG ) rmr_vlog( RMR_VL_DEBUG, "epsock_bn: connection state: %s %s\n", state ? "[OK]" : "[FAIL]", ep->name );
	// endpoint already open: just hand back its socket
261 *nn_sock = ep->nn_sock;
269 Make a round robin selection within a round robin group for a route table
270 entry. Returns the nanomsg socket if there is a rte for the message
271 key, and group is defined. Socket is returned via pointer in the parm
274 The group is the group number to select from.
276 The user supplied (via pointer to) integer 'more' will be set if there are
277 additional groups beyond the one selected. This allows the caller to
278 to easily iterate over the group list -- more is set when the group should
279 be incremented and the function invoked again. Groups start at 0.
281 The return value is true (>0) if the socket was found and *nn_sock was updated
282 and false (0) if there is no associated socket for the msg type, group combination.
283 We return the index+1 from the round robin table on success so that we can verify
284 during test that different entries are being selected; we cannot depend on the nng
285 socket being different as we could with nano.
287 NOTE: The round robin selection index increment might collide with other
288 threads if multiple threads are attempting to send to the same round
289 robin group; the consequences are small and avoid locking. The only side
290 effect is either sending two messages in a row to, or skipping, an endpoint.
291 Both of these, in the grand scheme of things, is minor compared to the
292 overhead of grabbing a lock on each call.
294 static int uta_epsock_rr( uta_ctx_t* ctx, rtable_ent_t* rte, int group, int* more, int* nn_sock, endpoint_t** uepp ) {
296 endpoint_t* ep; // selected end point
297 int state = FALSE; // processing state
	// optional defensive validation of ctx and transport context
302 if( PARANOID_CHECKS ) {
303 if( ctx == NULL || (si_ctx = ctx->si_ctx) == NULL ) {
307 si_ctx = ctx->si_ctx;
310 //fprintf( stderr, ">>>> epsock_rr selecting: grp=%d mtype=%d ngrps=%d\n", group, rte->mtype, rte->nrrgroups );
312 if( ! more ) { // eliminate checks each time we need to use
316 if( ! nn_sock ) { // user didn't supply a pointer
317 if( DEBUG ) rmr_vlog( RMR_VL_DEBUG, "epsock_rr invalid nnsock pointer\n" );
324 if( DEBUG ) rmr_vlog( RMR_VL_DEBUG, "epsock_rr rte was nil; nothing selected\n" );
	// group index must be in range for this route entry
329 if( group < 0 || group >= rte->nrrgroups ) {
330 if( DEBUG > 1 ) rmr_vlog( RMR_VL_DEBUG, "group out of range: group=%d max=%d\n", group, rte->nrrgroups );
335 if( (rrg = rte->rrgroups[group]) == NULL ) {
336 if( DEBUG > 1 ) rmr_vlog( RMR_VL_DEBUG, "rrg not found for group %d (ptr rrgroups[g] == nil)\n", group );
337 *more = 0; // groups are inserted contig, so nothing should be after a nil pointer
341 *more = group < rte->nrrgroups-1 ? (rte->rrgroups[group+1] != NULL): 0; // more if something in next group slot
	// choose an endpoint based on how many are populated in the group
343 switch( rrg->nused ) {
344 case 0: // nothing allocated, just punt
345 if( DEBUG > 1 ) rmr_vlog( RMR_VL_DEBUG, "nothing allocated for the rrg\n" );
348 case 1: // exactly one, no rr to deal with
350 if( DEBUG > 1 ) rmr_vlog( RMR_VL_DEBUG, "_rr returning socket with one choice in group \n" );
354 default: // need to pick one and adjust rr counts
	// unlocked increment: a collision with another thread at worst skips/repeats an endpoint (see fn header)
355 idx = rrg->ep_idx++ % rrg->nused; // see note above
356 ep = rrg->epts[idx]; // select next endpoint
357 if( DEBUG > 1 ) rmr_vlog( RMR_VL_DEBUG, "_rr returning socket with multiple choices in group idx=%d \n", rrg->ep_idx );
358 state = idx + 1; // unit test checks to see that we're cycling through, so must not just be TRUE
362 if( uepp != NULL ) { // caller may need reference to endpoint too; give it if pointer supplied
365 if( state ) { // end point selected, open if not, get socket either way
366 if( ! ep->open ) { // not connected
367 if( DEBUG ) rmr_vlog( RMR_VL_DEBUG, "epsock_rr selected endpoint not yet open; opening %s\n", ep->name );
368 if( ep->addr == NULL ) { // name didn't resolve before, try again
369 ep->addr = strdup( ep->name ); // use the name directly; if not IP then transport will do dns lookup
372 if( uta_link2( ctx, ep ) ) { // find entry in table and create link
374 *nn_sock = ep->nn_sock; // pass socket back to caller
375 fd2ep_add( ctx, ep->nn_sock, ep ); // map fd to ep for disc cleanup
379 if( DEBUG ) rmr_vlog( RMR_VL_DEBUG, "epsock_rr: connection attempted with %s: %s\n", ep->name, state ? "[OK]" : "[FAIL]" );
	// endpoint already open: just hand back its socket
381 *nn_sock = ep->nn_sock;
385 if( DEBUG > 1 ) rmr_vlog( RMR_VL_DEBUG, "epsock_rr returns state=%d\n", state );
390 Given a message, use the meid field to find the owner endpoint for the meid.
391 The owner ep is then used to extract the socket through which the message
392 is sent. This returns TRUE if we found a socket and it was written to the
393 nn_sock pointer; false if we didn't.
395 We've been told that the meid is a string, thus we count on it being a nil
396 terminated set of bytes.
398 If we return false the caller's ep reference may NOT be valid or even nil.
400 static int epsock_meid( uta_ctx_t* ctx, route_table_t *rtable, rmr_mbuf_t* msg, int* nn_sock, endpoint_t** uepp ) {
401 endpoint_t* ep; // selected end point
402 int state = FALSE; // processing state
	// optional defensive validation of ctx and transport context
406 if( PARANOID_CHECKS ) {
407 if( ctx == NULL || (si_ctx = ctx->si_ctx) == NULL ) {
411 si_ctx = ctx->si_ctx;
415 if( ! nn_sock || msg == NULL || rtable == NULL ) { // missing stuff; bail fast
	// meid is carried in the transport header; assumed nil-terminated (see fn header note)
420 meid = ((uta_mhdr_t *) msg->header)->meid;
422 ep = get_meid_owner( rtable, meid );
423 if( uepp != NULL ) { // caller needs reference to endpoint too
428 if( DEBUG ) rmr_vlog( RMR_VL_DEBUG, "epsock_meid: no ep in hash for (%s)\n", meid );
433 if( ! ep->open ) { // not connected
434 if( ep->addr == NULL ) { // name didn't resolve before, try again
435 ep->addr = strdup( ep->name ); // use the name directly; if not IP then transport will do dns lookup
438 if( uta_link2( ctx, ep ) ) { // find entry in table and create link
440 *nn_sock = ep->nn_sock; // pass socket back to caller
444 if( DEBUG ) rmr_vlog( RMR_VL_DEBUG, "epsock_meid: connection attempted with %s: %s\n", ep->name, state ? "[OK]" : "[FAIL]" );
	// endpoint already open: just hand back its socket
446 *nn_sock = ep->nn_sock;
453 Finds the rtable entry which matches the key. Returns a nil pointer if
454 no entry is found. If try_alternate is set, then we will attempt
455 to find the entry with a key based only on the message type.
457 static inline rtable_ent_t* uta_get_rte( route_table_t *rt, int sid, int mtype, int try_alt ) {
458 uint64_t key; // key is sub id and mtype banged together
459 rtable_ent_t* rte; // the entry we found
	// nil table or missing hash: nothing to search (elided lines presumably return NULL)
461 if( rt == NULL || rt->hash == NULL ) {
465 key = build_rt_key( sid, mtype ); // first try with a 'full' key
466 if( ((rte = rmr_sym_pull( rt->hash, key )) != NULL) || ! try_alt ) { // found or not allowed to try the alternate, return what we have
	// fall back to a message-type-only key when the sid-qualified lookup misses
470 if( sid != UNSET_SUBID ) { // not found, and allowed to try alternate; and the sub_id was set
471 key = build_rt_key( UNSET_SUBID, mtype ); // rebuild key
472 rte = rmr_sym_pull( rt->hash, key ); // see what we get with this
479 Return a string of count information. E.g.:
480 <ep-name>:<port> <good> <hard-fail> <soft-fail>
482 Caller must free the string allocated if a buffer was not provided.
484 Pointer returned is to a freshly allocated string, or the user buffer
487 If the endpoint passed is a nil pointer, then we return a nil -- caller
490 static inline char* get_ep_counts( endpoint_t* ep, char* ubuf, int ubuf_len ) {
491 char* rs; // result string
	// allocate a result buffer when the caller did not supply one (caller must free it)
	// NOTE(review): no visible malloc-failure check before the snprintf below; it may be in
	// elided lines — confirm against full source
501 rs = malloc( sizeof( char ) * ubuf_len );
	// NOTE(review): %lld assumes scounts elements are long long — confirm against the endpoint_t declaration
504 snprintf( rs, ubuf_len, "%s %lld %lld %lld", ep->name, ep->scounts[EPSC_GOOD], ep->scounts[EPSC_FAIL], ep->scounts[EPSC_TRANS] );
510 // ---- fd to ep functions --------------------------------------------------------------------------
513 Create the hash which maps file descriptors to endpoints. We need this
514 to easily mark an endpoint as disconnected when we are notified. Thus we
515 expect these to be driven very seldom; locking should not be an issue.
516 Locking is needed to prevent problems when the user application is multi-
517 threaded and attempting to (re)connect from concurrent threads.
519 static void fd2ep_init( uta_ctx_t* ctx ) {
	// allocate the fd->endpoint hash only once per context
521 if( ctx && ! ctx->fd2ep ) {
522 ctx->fd2ep = rmr_sym_alloc( 129 );
	// lazily create the mutex guarding the hash (used by fd2ep_add/del/get)
524 if( ctx->fd2ep_gate == NULL ) {
525 ctx->fd2ep_gate = (pthread_mutex_t *) malloc( sizeof( *ctx->fd2ep_gate ) );
526 if( ctx->fd2ep_gate != NULL ) {
527 pthread_mutex_init( ctx->fd2ep_gate, NULL );
534 Add an entry into the fd2ep hash to map the FD to the endpoint.
536 static void fd2ep_add( uta_ctx_t* ctx, int fd, endpoint_t* ep ) {
	// silently no-op when the context or hash is missing (init may not have run)
537 if( ctx && ctx->fd2ep ) {
538 pthread_mutex_lock( ctx->fd2ep_gate );
540 rmr_sym_map( ctx->fd2ep, (uint64_t) fd, (void *) ep );
542 pthread_mutex_unlock( ctx->fd2ep_gate );
547 Given a file descriptor this fetches the related endpoint from the hash and
548 deletes the entry from the hash (when we detect a disconnect).
550 This will also set the state on the ep open to false, and revoke the
553 static endpoint_t* fd2ep_del( uta_ctx_t* ctx, int fd ) {
554 endpoint_t* ep = NULL;
556 if( ctx && ctx->fd2ep ) {
	// NOTE(review): as shown, the pull happens before the lock is taken while ndel happens under
	// it; lines are elided here, so the real ordering may differ — confirm against full source
557 ep = rmr_sym_pull( ctx->fd2ep, (uint64_t) fd );
559 pthread_mutex_lock( ctx->fd2ep_gate );
561 rmr_sym_ndel( ctx->fd2ep, (uint64_t) fd );
563 pthread_mutex_unlock( ctx->fd2ep_gate );
571 Given a file descriptor fetches the related endpoint from the hash.
572 Returns nil if there is no reference in the hash.
574 static endpoint_t* fd2ep_get( uta_ctx_t* ctx, int fd ) {
575 endpoint_t* ep = NULL;
577 if( ctx && ctx->fd2ep ) {
578 pthread_mutex_lock( ctx->fd2ep_gate );
580 ep = rmr_sym_pull( ctx->fd2ep, (uint64_t) fd );
582 pthread_mutex_unlock( ctx->fd2ep_gate );