X-Git-Url: https://gerrit.o-ran-sc.org/r/gitweb?a=blobdiff_plain;f=src%2Frmr%2Fcommon%2Fsrc%2Frt_generic_static.c;h=9aa58f1b562d2cc46fcfc857cff4c9595b53a9fd;hb=d07cc97b4b5493a5fc67231ee09d1c931c993161;hp=ea7f01a713603c0b725eb17fc1e0a18329d09f23;hpb=fcea3951d44de0cc55d33c5e114487abe79d3406;p=ric-plt%2Flib%2Frmr.git

diff --git a/src/rmr/common/src/rt_generic_static.c b/src/rmr/common/src/rt_generic_static.c
index ea7f01a..9aa58f1 100644
--- a/src/rmr/common/src/rt_generic_static.c
+++ b/src/rmr/common/src/rt_generic_static.c
@@ -45,15 +45,19 @@
 #include 
 #include 
 #include 
+#include 
 #include 		// needed for route manager messages

+#define ALL 1
+#define SOME 0

 /*
	Passed to a symtab foreach callback to construct a list of pointers from
	a current symtab.
 */
 typedef struct thing_list {
+	int error;				// if an allocation fails, this will be set
	int nalloc;
	int nused;
	void** things;
@@ -82,6 +86,7 @@ static void ep_stats( void* st, void* entry, char const* name, void* thing, void
		(*counter)++;
	} else {
		rmr_vlog( RMR_VL_DEBUG, "ep_stas: nil counter %p %p %p", st, entry, name );	// dummy refs
+		return;
	}

	rmr_vlog_force( RMR_VL_DEBUG, "rt endpoint: target=%s open=%d\n", ep->name, ep->open );
@@ -179,7 +184,7 @@ static void rt_stats( route_table_t* rt ) {
	*counter = 0;
	rmr_vlog_force( RMR_VL_DEBUG, "route table stats:\n" );
	rmr_vlog_force( RMR_VL_DEBUG, "route table endpoints:\n" );
-	rmr_sym_foreach_class( rt->hash, RT_NAME_SPACE, ep_stats, counter );		// run endpoints (names) in the active table
+	rmr_sym_foreach_class( rt->ephash, RT_NAME_SPACE, ep_stats, counter );		// run endpoints (names) in the active table
	rmr_vlog_force( RMR_VL_DEBUG, "rtable: %d known endpoints\n", *counter );

	rmr_vlog_force( RMR_VL_DEBUG, "route table entries:\n" );
@@ -205,7 +210,19 @@ static void rt_epcounts( route_table_t* rt, char* id ) {
		return;
	}

-	rmr_sym_foreach_class( rt->hash, 1, ep_counts, id );		// run endpoints in the active table
+	rmr_sym_foreach_class( rt->ephash, 1, ep_counts, id );		// run endpoints in the active table
+}
+
+
+static void dump_tables( uta_ctx_t *ctx ) {
+	if( ctx->old_rtable != NULL ) {
+		rmr_vlog_force( RMR_VL_DEBUG, "old route table: (ref_count=%d)\n", ctx->old_rtable->ref_count );
+		rt_stats( ctx->old_rtable );
+	} else {
+		rmr_vlog_force( RMR_VL_DEBUG, "old route table was empty\n" );
+	}
+	rmr_vlog_force( RMR_VL_DEBUG, "new route table:\n" );
+	rt_stats( ctx->rtable );
 }

 // ------------ route manager communication -------------------------------------------------
@@ -288,7 +305,7 @@ static void send_rt_ack( uta_ctx_t* ctx, rmr_mbuf_t* smsg, char* table_id, int s
		smsg->len = strlen( smsg->payload ) + 1;

-		rmr_vlog( RMR_VL_INFO, "rmr_rtc: sending table state: (%s) state=%d whid=%d\n", smsg->payload, state, ctx->rtg_whid );
+		rmr_vlog( RMR_VL_INFO, "rmr_rtc: sending table state: (%s) state=%d whid=%d table=%s\n", smsg->payload, state, ctx->rtg_whid, table_id );
		if( use_rts ) {
			smsg = rmr_rts_msg( ctx, smsg );
		} else {
@@ -306,7 +323,51 @@ static void send_rt_ack( uta_ctx_t* ctx, rmr_mbuf_t* smsg, char* table_id, int s
	}
 }

-// ------------------------------------------------------------------------------------------------
+// ---- alarm generation --------------------------------------------------------------------------
+
+/*
+	Given the user's context (not the thread private context), check whether the application
+	is keeping up. If the drop counter has changed since we last peeked, messages are being
+	dropped, and if we have not yet raised an alarm, we raise one.
+	If the counter hasn't changed, then we set a timer, and if the counter still hasn't
+	changed when it expires, we clear the alarm.
+
+	The private context is what we use to send so as not to interfere with the user flow.
+*/
+static void alarm_if_drops( uta_ctx_t* uctx, uta_ctx_t* pctx ) {
+	static int alarm_raised = 0;
+	static int ok2clear = 0;				// time at which we can clear
+	static int lastd = 0;					// the last counter value so we can compute the delta
+	static int prob_id = 0;					// problem ID; we assume the alarm manager handles dups between processes
+
+	rmr_vlog( RMR_VL_DEBUG, "checking for drops... raised=%d ok2clear=%d lastd=%d probid=%d\n", alarm_raised, ok2clear, lastd, prob_id );
+	if( ! alarm_raised ) {
+		if( uctx->dcount - lastd == 0 ) {			// not actively dropping, ok to do nothing
+			return;
+		}
+
+		alarm_raised = 1;
+		uta_alarm( pctx, ALARM_DROPS | ALARM_RAISE, prob_id, "application running slow; RMR is dropping messages" );
+		rmr_vlog( RMR_VL_INFO, "drop alarm raised\n" );
+	} else {
+		if( uctx->dcount - lastd != 0 ) {		// still dropping or dropping again; we've alarmed so nothing to do
+			lastd = uctx->dcount;
+			ok2clear = 0;						// reset the timer
+			return;
+		}
+
+		if( ok2clear == 0 ) {					// first round where we are not dropping
+			ok2clear = time( NULL ) + 60;		// we'll clear the alarm in 60s
+		} else {
+			if( time( NULL ) > ok2clear ) {					// things still stable after expiry
+				rmr_vlog( RMR_VL_INFO, "drop alarm cleared\n" );
+				alarm_raised = 0;
+				uta_alarm( pctx, ALARM_DROPS | ALARM_CLEAR, prob_id++, "RMR message dropping has stopped" );
+			}
+		}
+	}
+}
+
+// ---- utility -----------------------------------------------------------------------------------

 /*
	Little diddy to trim whitespace and trailing comments. Like shell, trailing comments
	must be at the start of a word (i.e. must be immediatly preceeded by whitespace).
@@ -380,6 +441,70 @@ static char* ensure_nlterm( char* buf ) {
	return nb;
 }

+/*
+	Roll the new table into the active slot and the active table into the old slot. We
+	must hold the lock on the active table to do this. It's possible that there
+	is no active table (first load), so we have to account for that (no locking).
+*/
+static void roll_tables( uta_ctx_t* ctx ) {
+
+	if( ctx->new_rtable == NULL || ctx->new_rtable->error ) {
+		rmr_vlog( RMR_VL_WARN, "new route table NOT rolled in: nil pointer or error indicated\n" );
+		ctx->old_rtable = ctx->new_rtable;
+		ctx->new_rtable = NULL;
+		return;
+	}
+
+	if( ctx->rtable != NULL ) {						// initially there isn't one, so must check!
+		pthread_mutex_lock( ctx->rtgate );			// must hold lock to move to active
+		ctx->old_rtable = ctx->rtable;				// currently active becomes old and allowed to 'drain'
+		ctx->rtable = ctx->new_rtable;				// one we've been adding to becomes active
+		pthread_mutex_unlock( ctx->rtgate );
+	} else {
+		ctx->old_rtable = NULL;						// ensure there isn't an old reference
+		ctx->rtable = ctx->new_rtable;				// make new the active one
+	}
+
+	ctx->new_rtable = NULL;
+}
+
+/*
+	Given a thing list, extend the array of pointers by 1/2 of the current
+	number allocated. If we cannot allocate larger arrays, then we set the error
+	flag. Unlikely, but this will prevent a crash, AND will prevent us from
+	trying to use the table since we couldn't capture everything.
+*/ +static void extend_things( thing_list_t* tl ) { + int old_alloc; + void* old_things; + void* old_names; + + if( tl == NULL ) { + return; + } + + old_alloc = tl->nalloc; // capture current things + old_things = tl->things; + old_names = tl->names; + + tl->nalloc += tl->nalloc/2; // new allocation size + + tl->things = (void **) malloc( sizeof( void * ) * tl->nalloc ); // allocate larger arrays + tl->names = (const char **) malloc( sizeof( char * ) * tl->nalloc ); + + if( tl->things == NULL || tl->names == NULL ){ // if either failed, then set error + tl->error = 1; + return; + } + + memcpy( tl->things, old_things, sizeof( void * ) * old_alloc ); + memcpy( tl->names, old_names, sizeof( void * ) * old_alloc ); + + free( old_things ); + free( old_names ); +} + +// ------------ entry update functions --------------------------------------------------------------- /* Given a message type create a route table entry and add to the hash keyed on the message type. Once in the hash, endpoints can be added with uta_add_ep. Size @@ -452,6 +577,8 @@ static void build_entry( uta_ctx_t* ctx, char* ts_field, uint32_t subid, char* r int i; int ngtoks; // number of tokens in the group list int grp; // index into group list + int cgidx; // contiguous group index (prevents the addition of a contiguous group without ep) + int has_ep = FALSE; // indicates if an endpoint was added in a given round robin group ts_field = clip( ts_field ); // ditch extra whitespace and trailing comments rr_field = clip( rr_field ); @@ -471,14 +598,19 @@ static void build_entry( uta_ctx_t* ctx, char* ts_field, uint32_t subid, char* r rte = uta_add_rte( ctx->new_rtable, key, ngtoks ); // get/create entry for this key rte->mtype = atoi( ts_field ); // capture mtype for debugging - for( grp = 0; grp < ngtoks; grp++ ) { - if( (ntoks = uta_rmip_tokenise( gtokens[grp], ctx->ip_list, tokens, 64, ',' )) > 0 ) { // remove any referneces to our ip addrs + for( grp = 0, cgidx = 0; grp < ngtoks; grp++ ) { + if( (ntoks = uta_rmip_tokenise( gtokens[grp], ctx->ip_list, tokens, 64, ',' )) > 0 ) { // remove any references to our ip addrs for( i = 0; i < ntoks; i++ ) { if( strcmp( tokens[i], ctx->my_name ) != 0 ) { // don't add if it is us -- cannot send to ourself if( DEBUG > 1 || (vlevel > 1)) rmr_vlog_force( RMR_VL_DEBUG, "add endpoint ts=%s %s\n", ts_field, tokens[i] ); - uta_add_ep( ctx->new_rtable, rte, tokens[i], grp ); + uta_add_ep( ctx->new_rtable, rte, tokens[i], cgidx ); + has_ep = TRUE; } } + if( has_ep ) { + cgidx++; // only increment to the next contiguous group if the current one has at least one endpoint + has_ep = FALSE; + } } } } @@ -531,6 +663,8 @@ static void trash_entry( uta_ctx_t* ctx, char* ts_field, uint32_t subid, int vle } } +// -------------------------- parse functions -------------------------------------------------- + /* Given the tokens from an mme_ar (meid add/replace) entry, add the entries. the 'owner' which should be the dns name or IP address of an enpoint @@ -617,25 +751,29 @@ static void meid_parser( uta_ctx_t* ctx, uta_ctx_t* pctx, rmr_mbuf_t* mbuf, char if( ctx->new_rtable != NULL ) { // one in progress? 
this forces it out
			if( DEBUG > 1 || (vlevel > 1) ) rmr_vlog_force( RMR_VL_DEBUG, "meid map start: dropping incomplete table\n" );
			uta_rt_drop( ctx->new_rtable );
-			send_rt_ack( pctx, mbuf, ctx->table_id, !RMR_OK, "table not complete" );	// nack the one that was pending as end never made it
+			ctx->new_rtable = NULL;
+			send_rt_ack( pctx, mbuf, ctx->table_id, !RMR_OK, "table not complete" );	// nack the one that was pending as end never made it
		}

		if( ctx->table_id != NULL ) {
			free( ctx->table_id );
		}
-		if( ntoks >2 ) {
+		if( ntoks > 2 ) {
			ctx->table_id = strdup( clip( tokens[2] ) );
		} else {
			ctx->table_id = NULL;
		}
-		ctx->new_rtable = uta_rt_clone_all( ctx->rtable );		// start with a clone of everything (mtype, endpoint refs and meid)
+
+		ctx->new_rtable = prep_new_rt( ctx, ALL );				// start with a clone of everything (mtype, endpoint refs and meid)
		ctx->new_rtable->mupdates = 0;
+
		if( DEBUG || (vlevel > 1) ) rmr_vlog_force( RMR_VL_DEBUG, "meid_parse: meid map start found\n" );
	} else {
		if( strcmp( tokens[1], "end" ) == 0 ) {						// wrap up the table we were building
			if( ntoks > 2 ) {										// meid_map | end | |??? given
				if( ctx->new_rtable->mupdates != atoi( tokens[2] ) ) {	// count they added didn't match what we received
-					rmr_vlog( RMR_VL_ERR, "meid_parse: meid map update had wrong number of records: received %d expected %s\n", ctx->new_rtable->mupdates, tokens[2] );
+					rmr_vlog( RMR_VL_ERR, "meid_parse: meid map update had wrong number of records: received %d expected %s\n",
+						ctx->new_rtable->mupdates, tokens[2] );
					snprintf( wbuf, sizeof( wbuf ), "missing table records: expected %s got %d\n", tokens[2], ctx->new_rtable->updates );
					send_rt_ack( pctx, mbuf, ctx->table_id, !RMR_OK, wbuf );
					uta_rt_drop( ctx->new_rtable );
@@ -647,16 +785,17 @@ static void meid_parser( uta_ctx_t* ctx, uta_ctx_t* pctx, rmr_mbuf_t* mbuf, char
			}

			if( ctx->new_rtable ) {
-				uta_rt_drop( ctx->old_rtable );				// time to drop one that was previously replaced
-				ctx->old_rtable = ctx->rtable;				// currently active becomes old and allowed to 'drain'
-				ctx->rtable = ctx->new_rtable;				// one we've been adding to becomes active
-				ctx->new_rtable = NULL;
+				roll_tables( ctx );							// roll active to old, and new to active with proper locking
				if( DEBUG > 1 || (vlevel > 1) ) rmr_vlog_force( RMR_VL_DEBUG, "end of meid map noticed\n" );
				send_rt_ack( pctx, mbuf, ctx->table_id, RMR_OK, NULL );

				if( vlevel > 0 ) {
-					rmr_vlog_force( RMR_VL_DEBUG, "old route table:\n" );
-					rt_stats( ctx->old_rtable );
+					if( ctx->old_rtable != NULL ) {
+						rmr_vlog_force( RMR_VL_DEBUG, "old route table: (ref_count=%d)\n", ctx->old_rtable->ref_count );
+						rt_stats( ctx->old_rtable );
+					} else {
+						rmr_vlog_force( RMR_VL_DEBUG, "old route table was empty\n" );
+					}
					rmr_vlog_force( RMR_VL_DEBUG, "new route table:\n" );
					rt_stats( ctx->rtable );
				}
@@ -682,15 +821,77 @@ static void meid_parser( uta_ctx_t* ctx, uta_ctx_t* pctx, rmr_mbuf_t* mbuf, char
		}
		parse_meid_ar( ctx->new_rtable, tokens[1], tokens[2], vlevel );
		ctx->new_rtable->mupdates++;
+		return;
	}

-	if( strcmp( tokens[0], "mme_del" ) == 0 ) {
-		if( ntoks < 2 ) {
-			rmr_vlog( RMR_VL_ERR, "meid_parse: mme_del record didn't have enough tokens\n" );
-			return;
-		}
+	if( strcmp( tokens[0], "mme_del" ) == 0 ) {		// ntoks < 2 already validated
		parse_meid_del( ctx->new_rtable, tokens[1], vlevel );
		ctx->new_rtable->mupdates++;
+		return;
+	}
+}
+
+/*
+	This will close the current table snarf file (in *.inc) and open a new one.
+	The current one is renamed.
+	The final file name is determined by the stash environment variable (ENV_STASH_RT);
+	if that is not set, then the RMR_SEED_RT variable is used and given an additional
+	extension of .stash. If neither the stash nor the seed environment variable is set,
+	then this function does nothing.
+
+	If this is called before the tmp snarf file is opened, then this just opens the file.
+*/
+static void cycle_snarfed_rt( uta_ctx_t* ctx ) {
+	static int ok2warn = 0;					// some warnings squelched on first call
+
+	char*	seed_fname;						// the filename from env
+	char	tfname[512];					// temp fname
+	char	wfname[512];					// working buffer for filename
+	char*	snarf_fname = NULL;				// prevent overlay of the static table if snarf_rt not given
+
+	if( ctx == NULL ) {
+		return;
+	}
+
+	if( (snarf_fname = getenv( ENV_STASH_RT )) == NULL ) {			// specific place to stash the rt not given
+		if( (seed_fname = getenv( ENV_SEED_RT )) != NULL ) {		// seed given; stash the table alongside it
+			memset( wfname, 0, sizeof( wfname ) );
+			snprintf( wfname, sizeof( wfname ) - 1, "%s.stash", seed_fname );
+			snarf_fname = wfname;
+		}
+	}
+
+	if( snarf_fname == NULL ) {
+		return;
+	}
+
+	memset( tfname, 0, sizeof( tfname ) );
+	snprintf( tfname, sizeof( tfname ) - 1, "%s.inc", snarf_fname );	// must ensure tmp file is movable
+
+	if( ctx->snarf_rt_fd >= 0 ) {
+		char* msg = "### captured from route manager\n";
+		write( ctx->snarf_rt_fd, msg, strlen( msg ) );
+		if( close( ctx->snarf_rt_fd ) < 0 ) {
+			rmr_vlog( RMR_VL_WARN, "rmr_rtc: unable to close working rt snarf file: %s\n", strerror( errno ) );
+			return;
+		}
+
+		if( unlink( snarf_fname ) < 0 && ok2warn ) {		// first time through this can fail and we ignore it
+			rmr_vlog( RMR_VL_WARN, "rmr_rtc: unable to unlink old static table: %s: %s\n", snarf_fname, strerror( errno ) );
+		}
+
+		if( rename( tfname, snarf_fname ) ) {
+			rmr_vlog( RMR_VL_WARN, "rmr_rtc: unable to move new route table to stash file name: %s -> %s: %s\n", tfname, snarf_fname, strerror( errno ) );
+		} else {
+			rmr_vlog( RMR_VL_INFO, "latest route table info saved in: %s\n", snarf_fname );
+		}
+	}
+	ok2warn = 1;
+
+	ctx->snarf_rt_fd = open( tfname, O_WRONLY | O_CREAT | O_TRUNC, 0660 );
+	if( ctx->snarf_rt_fd < 0 ) {
+		rmr_vlog( RMR_VL_WARN, "rmr_rtc: unable to open rt snarf file: %s: %s\n", tfname, strerror( errno ) );
+	} else {
+		if( DEBUG ) rmr_vlog( RMR_VL_DEBUG, "rmr_rtc: rt snarf file opened: %s\n", tfname );
	}
 }

@@ -755,6 +956,11 @@ static void parse_rt_rec( uta_ctx_t* ctx, uta_ctx_t* pctx, char* buf, int vleve
		return;
	}

+	if( ctx && ctx->snarf_rt_fd >= 0 ) {				// if snarfing table as it arrives, write this puppy
+		write( ctx->snarf_rt_fd, buf, strlen( buf ) );
+		write( ctx->snarf_rt_fd, "\n", 1 );
+	}
+
	while( *buf && isspace( *buf ) ) {					// skip leading whitespace
		buf++;
	}
@@ -787,6 +993,10 @@ static void parse_rt_rec( uta_ctx_t* ctx, uta_ctx_t* pctx, char* buf, int vleve
		case 'n':											// newrt|{start|end}
			tokens[1] = clip( tokens[1] );
			if( strcmp( tokens[1], "end" ) == 0 ) {				// wrap up the table we were building
+				if( ctx && ctx->snarf_rt_fd >= 0 ) {
+					cycle_snarfed_rt( ctx );					// make it available and open a new one
+				}
+
				if( ntoks >2 ) {
					if( ctx->new_rtable->updates != atoi( tokens[2] ) ) {		// count they added didn't match what we received
						rmr_vlog( RMR_VL_ERR, "rmr_rtc: RT update had wrong number of records: received %d expected %s\n",
@@ -800,17 +1010,10 @@ static void parse_rt_rec( uta_ctx_t* ctx, uta_ctx_t* pctx, char* buf, int vleve
				}

				if( ctx->new_rtable ) {
-					uta_rt_drop( ctx->old_rtable );				// time to drop one that was previously replaced
-					ctx->old_rtable = ctx->rtable;				// currently active becomes old and allowed to 'drain'
-					ctx->rtable = ctx->new_rtable;				// one we've been adding to becomes active
-					ctx->new_rtable = NULL;
-					if( DEBUG > 1 || (vlevel > 1) ) rmr_vlog( RMR_VL_DEBUG, "end of route table noticed\n" );
-
-					if( vlevel > 0 ) {
-						rmr_vlog_force( RMR_VL_DEBUG, "old route table:\n" );
-						rt_stats( ctx->old_rtable );
-						rmr_vlog_force( RMR_VL_DEBUG, "new route table:\n" );
-						rt_stats( ctx->rtable );
+					roll_tables( ctx );							// roll active to old, and new to active with proper locking
+					if( DEBUG > 1 || (vlevel > 1) ) {
+						rmr_vlog( RMR_VL_DEBUG, "end of route table noticed\n" );
+						dump_tables( ctx );
					}

					send_rt_ack( pctx, mbuf, ctx->table_id, RMR_OK, NULL );
@@ -825,6 +1028,7 @@ static void parse_rt_rec( uta_ctx_t* ctx, uta_ctx_t* pctx, char* buf, int vleve
					if( DEBUG > 1 || (vlevel > 1) ) rmr_vlog_force( RMR_VL_DEBUG, "new table; dropping incomplete table\n" );
					uta_rt_drop( ctx->new_rtable );
+					ctx->new_rtable = NULL;
				}

				if( ctx->table_id != NULL ) {
@@ -836,9 +1040,9 @@ static void parse_rt_rec( uta_ctx_t* ctx, uta_ctx_t* pctx, char* buf, int vleve
					ctx->table_id = NULL;
				}

-				ctx->new_rtable = NULL;
-				ctx->new_rtable = uta_rt_clone( ctx->rtable );		// create by cloning endpoint and meidtentries from active table
+				ctx->new_rtable = prep_new_rt( ctx, SOME );			// wait for old table to drain and shift it back to new
				ctx->new_rtable->updates = 0;						// init count of entries received
+
				if( DEBUG > 1 || (vlevel > 1) ) rmr_vlog_force( RMR_VL_DEBUG, "start of route table noticed\n" );
			}
			break;
@@ -880,11 +1084,15 @@ static void parse_rt_rec( uta_ctx_t* ctx, uta_ctx_t* pctx, char* buf, int vleve
				if( ctx->new_rtable == NULL ) {					// update table not in progress
					break;
				}
+				if( ctx && ctx->snarf_rt_fd >= 0 ) {
+					cycle_snarfed_rt( ctx );					// make it available and open a new one
+				}

				if( ntoks >2 ) {
					if( ctx->new_rtable->updates != atoi( tokens[2] ) ) {		// count they added didn't match what we received
						rmr_vlog( RMR_VL_ERR, "rmr_rtc: RT update had wrong number of records: received %d expected %s\n",
							ctx->new_rtable->updates, tokens[2] );
+						snprintf( wbuf, sizeof( wbuf ), "missing table records: expected %s got %d\n", tokens[2], ctx->new_rtable->updates );
+						send_rt_ack( pctx, mbuf, ctx->table_id, !RMR_OK, wbuf );
						uta_rt_drop( ctx->new_rtable );
						ctx->new_rtable = NULL;
						break;
@@ -892,18 +1100,14 @@ static void parse_rt_rec( uta_ctx_t* ctx, uta_ctx_t* pctx, char* buf, int vleve
				}

				if( ctx->new_rtable ) {
-					uta_rt_drop( ctx->old_rtable );				// time to drop one that was previously replaced
-					ctx->old_rtable = ctx->rtable;				// currently active becomes old and allowed to 'drain'
-					ctx->rtable = ctx->new_rtable;				// one we've been adding to becomes active
-					ctx->new_rtable = NULL;
-					if( DEBUG > 1 || (vlevel > 1) ) rmr_vlog_force( RMR_VL_DEBUG, "end of rt update noticed\n" );
-
-					if( vlevel > 0 ) {
-						rmr_vlog_force( RMR_VL_DEBUG, "old route table:\n" );
-						rt_stats( ctx->old_rtable );
-						rmr_vlog_force( RMR_VL_DEBUG, "updated route table:\n" );
-						rt_stats( ctx->rtable );
+					roll_tables( ctx );							// roll active to old, and new to active with proper locking
+					if( DEBUG > 1 || (vlevel > 1) ) {
+						rmr_vlog_force( RMR_VL_DEBUG, "end of rt update noticed\n" );
+						dump_tables( ctx );
					}
+
+					send_rt_ack( pctx, mbuf, ctx->table_id, RMR_OK, NULL );
+					ctx->rtable_ready = 1;						// route based sends can now happen
				} else {
					if( DEBUG > 1 ) rmr_vlog_force( RMR_VL_DEBUG, "end of rt update noticed, but one was not started!\n" );
					ctx->new_rtable = NULL;
@@ -911,18 +1115,21 @@ static void parse_rt_rec( uta_ctx_t* ctx, uta_ctx_t* pctx, char* buf, int vleve
			} else {											// start a new table.
				if( ctx->new_rtable != NULL ) {					// one in progress? this forces it out
					if( DEBUG > 1 || (vlevel > 1) ) rmr_vlog_force( RMR_VL_DEBUG, "new table; dropping incomplete table\n" );
+					send_rt_ack( pctx, mbuf, ctx->table_id, !RMR_OK, "table not complete" );	// nack the one that was pending as end never made it
					uta_rt_drop( ctx->new_rtable );
+					ctx->new_rtable = NULL;
				}

-				if( ntoks >2 ) {
+				if( ntoks > 2 ) {
					if( ctx->table_id != NULL ) {
						free( ctx->table_id );
					}
					ctx->table_id = strdup( clip( tokens[2] ) );
				}

-				ctx->new_rtable = uta_rt_clone_all( ctx->rtable );	// start with a clone of everything (endpts and entries)
-				ctx->new_rtable->updates = 0;						// init count of updates received
+				ctx->new_rtable = prep_new_rt( ctx, ALL );			// start with a copy of everything in the live table
+				ctx->new_rtable->updates = 0;						// init count of updates received
+
				if( DEBUG > 1 || (vlevel > 1) ) rmr_vlog_force( RMR_VL_DEBUG, "start of rt update noticed\n" );
			}
			break;
@@ -989,7 +1196,7 @@ static void read_static_rt( uta_ctx_t* ctx, int vlevel ) {
 }

 /*
-	Callback driven for each named thing in a symtab. We collect the pointers to those
+	Callback driven for each thing in a symtab. We collect the pointers to those
	things for later use (cloning).
 */
 static void collect_things( void* st, void* entry, char const* name, void* thing, void* vthing_list ) {
@@ -1004,8 +1211,12 @@ static void collect_things( void* st, void* entry, char const* name, void* thing
		return;
	}

-	tl->names[tl->nused] = name;			// the name/key
+	tl->names[tl->nused] = name;			// the name/key (space 0 uses int keys, so name can be nil and that is OK)
	tl->things[tl->nused++] = thing;		// save a reference to the thing
+
+	if( tl->nused >= tl->nalloc ) {
+		extend_things( tl );				// not enough allocated
+	}
 }

 /*
@@ -1043,7 +1254,9 @@ static void del_rte( void* st, void* entry, char const* name, void* thing, void*
	for( i = 0; i < rte->nrrgroups; i++ ) {
		if( rte->rrgroups[i] ) {
			free( rte->rrgroups[i]->epts );			// ditch list of endpoint pointers (end points are reused; don't trash them)
+			free( rte->rrgroups[i] );				// but must free the rrg itself too
		}
+
	}

	free( rte->rrgroups );
@@ -1113,15 +1326,20 @@ static char* uta_fib( char const* fname ) {
	return buf;
 }

+// --------------------- initialisation/creation ---------------------------------------------
 /*
	Create and initialise a route table; Returns a pointer to the table struct.
 */
-static route_table_t* uta_rt_init( ) {
+static route_table_t* uta_rt_init( uta_ctx_t* ctx ) {
	route_table_t*	rt;

+	if( ctx == NULL ) {
+		return NULL;
+	}
	if( (rt = (route_table_t *) malloc( sizeof( route_table_t ) )) == NULL ) {
		return NULL;
	}
+
	memset( rt, 0, sizeof( *rt ) );

	if( (rt->hash = rmr_sym_alloc( RT_SIZE )) == NULL ) {
@@ -1129,6 +1347,10 @@ static route_table_t* uta_rt_init( ) {
		return NULL;
	}

+	rt->gate = ctx->rtgate;						// single mutex shared by ALL route tables; initialised once by the owning context
+	rt->ephash = ctx->ephash;					// all route tables share a common endpoint hash
+
	return rt;
 }

 /*
	Clones one of the spaces in the given table.
	Space is the space in the old table to copy. Space 0 uses an integer key and
	references rte structs. All other spaces use a string key and reference endpoints.
 */
-static route_table_t* rt_clone_space( route_table_t* srt, route_table_t* nrt, int space ) {
+static route_table_t* rt_clone_space( uta_ctx_t* ctx, route_table_t* srt, route_table_t* nrt, int space ) {
	endpoint_t*	ep;				// an endpoint (ignore sonar complaint about const*)
	rtable_ent_t*	rte;		// a route table entry (ignore sonar complaint about const*)
	void*	sst;				// source symtab
	void*	nst;				// new symtab
	int i;
	int free_on_err = 0;

+	if( ctx == NULL ) {
+		return NULL;
+	}
	if( nrt == NULL ) {				// make a new table if needed
		free_on_err = 1;
-		nrt = uta_rt_init();
+		nrt = uta_rt_init( ctx );
		if( nrt == NULL ) {
			return NULL;
		}
@@ -1161,24 +1386,47 @@ static route_table_t* rt_clone_space( route_table_t* srt, route_table_t* nrt, in

	things.nalloc = 2048;
	things.nused = 0;
+	things.error = 0;
	things.things = (void **) malloc( sizeof( void * ) * things.nalloc );
-	memset( things.things, 0, sizeof( sizeof( void * ) * things.nalloc ) );
	things.names = (const char **) malloc( sizeof( char * ) * things.nalloc );
-	memset( things.names, 0, sizeof( char * ) * things.nalloc );

-	if( things.things == NULL ) {
+	if( things.things == NULL || things.names == NULL ) {
+		if( things.things != NULL ) {
+			free( things.things );
+		}
+		if( things.names != NULL ) {
+			free( things.names );
+		}
+
		if( free_on_err ) {
			rmr_sym_free( nrt->hash );
			free( nrt );
			nrt = NULL;
+		} else {
+			nrt->error = 1;
		}

		return nrt;
	}
+	memset( things.things, 0, sizeof( void * ) * things.nalloc );
+	memset( things.names, 0, sizeof( char * ) * things.nalloc );

	sst = srt->hash;							// convenience pointers (src symtab)
	nst = nrt->hash;

	rmr_sym_foreach_class( sst, space, collect_things, &things );		// collect things from this space
+	if( things.error ) {						// capture failed (couldn't grow the list); cannot trust the contents
+		rmr_vlog( RMR_VL_ERR, "unable to clone route table: unable to capture old contents\n" );
+		free( things.things );
+		free( things.names );
+		if( free_on_err ) {
+			rmr_sym_free( nrt->hash );
+			free( nrt );
+			nrt = NULL;
+		} else {
+			nrt->error = 1;
+		}
+		return nrt;
+	}

	if( DEBUG ) rmr_vlog_force( RMR_VL_DEBUG, "clone space cloned %d things in space %d\n", things.nused, space );
	for( i = 0; i < things.nused; i++ ) {
@@ -1198,70 +1446,103 @@ static route_table_t* rt_clone_space( route_table_t* srt, route_table_t* nrt, in
 }

 /*
-	Creates a new route table and then clones the parts of the table which we must keep with each newrt|start.
-	The endpoint and meid entries in the hash must be preserved.
-
-	NOTE: The first call to rt_clone_space() will create the new table and subsequent
-	calls operate on the new table. The return of subsequent calls can be safely
-	ignored. There are some code analysers which will claim that there are memory
-	leaks here; not true as they aren't understanding the logic, just looking at
-	an ignored return value and assuming it's different than what was passed in.
+	Given a destination route table (drt), clone from the source (srt) into it.
+	If drt is nil, alloc a new one. If srt is nil, then nothing is done (except to
+	allocate the drt if that was nil too). If all is true (1), then we will clone both
+	the MT and the ME spaces; otherwise only the ME space is cloned.
 */
-static route_table_t* uta_rt_clone( route_table_t* srt ) {
+static route_table_t* uta_rt_clone( uta_ctx_t* ctx, route_table_t* srt, route_table_t* drt, int all ) {
	endpoint_t*		ep;			// an endpoint
	rtable_ent_t*	rte;		// a route table entry
-	route_table_t*	nrt = NULL;	// new route table
	int i;

+	if( ctx == NULL ) {
+		return NULL;
+	}
+	if( drt == NULL ) {
+		drt = uta_rt_init( ctx );
+	}
	if( srt == NULL ) {
-		return uta_rt_init();	// no source to clone, just return an empty table
+		return drt;
	}

-	nrt = rt_clone_space( srt, NULL, RT_NAME_SPACE );		// allocate a new one, add endpoint refs
-	rt_clone_space( srt, nrt, RT_ME_SPACE );				// add meid refs to new
+	drt->ephash = ctx->ephash;						// all rts reference the same EP symtab
+	rt_clone_space( ctx, srt, drt, RT_ME_SPACE );
+	if( all ) {
+		rt_clone_space( ctx, srt, drt, RT_MT_SPACE );
+	}

-	return nrt;
+	return drt;
 }

 /*
-	Creates a new route table and then clones _all_ of the given route table (references
-	both endpoints AND the route table entries. Needed to support a partial update where
-	some route table entries will not be deleted if not explicitly in the update and when
-	we are adding/replacing meid references.
+	Prepares the "new" route table for populating. If the old_rtable is not nil, then
+	we wait for its use count to reach 0. The table is then cleared and moved on the
+	context to be referenced by the new pointer; the old pointer is set to nil.

-	NOTE see note in uta_rt_clone() as it applies here too.
+	If the old table doesn't exist, then a new table is created and the new pointer is
+	set to reference it.
+
+	The ME namespace references endpoints which do not need to be released, therefore we
+	do not need to run that portion of the table to deref like we do for the RTEs.
 */
-static route_table_t* uta_rt_clone_all( route_table_t* srt ) {
-	endpoint_t const*	ep;			// an endpoint
-	rtable_ent_t const*	rte;		// a route table entry
-	route_table_t*	nrt = NULL;		// new route table
-	int i;
+static route_table_t* prep_new_rt( uta_ctx_t* ctx, int all ) {
+	int counter = 0;
+	route_table_t*	rt;

-	if( srt == NULL ) {
-		return uta_rt_init();		// no source to clone, just return an empty table
+	if( ctx == NULL ) {
+		return NULL;
	}

-	nrt = rt_clone_space( srt, NULL, RT_MT_SPACE );		// create new, clone all spaces to it
-	rt_clone_space( srt, nrt, RT_NAME_SPACE );
-	rt_clone_space( srt, nrt, RT_ME_SPACE );
+	if( (rt = ctx->old_rtable) != NULL ) {
+		ctx->old_rtable = NULL;
+		while( rt->ref_count > 0 ) {			// wait for all who are using to stop
+			if( counter++ > 1000 ) {
+				rmr_vlog( RMR_VL_WARN, "prep_new_rt: internal mishap, ref count on table seems wedged\n" );
+				break;
+			}

-	return nrt;
+			usleep( 1000 );						// small sleep to yield the processor if that is needed
+		}
+
+		if( rt->hash != NULL ) {
+			rmr_sym_foreach_class( rt->hash, 0, del_rte, NULL );		// deref and drop if needed
+			rmr_sym_clear( rt->hash );			// clear all entries from the old table
+		}
+
+		rt->error = 0;							// a table with errors can land here, so ensure the flag is clear before loading
+	} else {
+		rt = NULL;
+	}
+
+	rt = uta_rt_clone( ctx, ctx->rtable, rt, all );		// also sets the ephash pointer
+	if( rt != NULL ) {							// very small chance for nil, but not zero, so test
+		rt->ref_count = 0;						// take no chances; ensure it's 0!
+	} else {
+		rmr_vlog( RMR_VL_ERR, "route table clone returned nil; marking dummy table as error\n" );
+		rt = uta_rt_init( ctx );				// must have something, but mark it in error state
+		rt->error = 1;
+	}
+
+	return rt;
 }

+
 /*
	Given a name, find the endpoint struct in the provided route table.
 */
 static endpoint_t* uta_get_ep( route_table_t* rt, char const* ep_name ) {

-	if( rt == NULL || rt->hash == NULL || ep_name == NULL || *ep_name == 0 ) {
+	if( rt == NULL || rt->ephash == NULL || ep_name == NULL || *ep_name == 0 ) {
		return NULL;
	}

-	return rmr_sym_get( rt->hash, ep_name, 1 );
+	return rmr_sym_get( rt->ephash, ep_name, 1 );
 }

 /*
	Drop the given route table. Purge all type 0 entries, then drop the symtab itself.
+	Does NOT destroy the gate as it's a common gate for ALL route tables.
 */
 static void uta_rt_drop( route_table_t* rt ) {
	if( rt == NULL ) {
@@ -1301,7 +1582,7 @@ static endpoint_t* rt_ensure_ep( route_table_t* rt, char const* ep_name ) {
		pthread_mutex_init( &ep->gate, NULL );			// init with default attrs
		memset( &ep->scounts[0], 0, sizeof( ep->scounts ) );

-		rmr_sym_put( rt->hash, ep_name, 1, ep );
+		rmr_sym_put( rt->ephash, ep_name, 1, ep );
	}

	return ep;
@@ -1338,4 +1619,50 @@ static inline endpoint_t* get_meid_owner( route_table_t *rt, char const* meid ) 
	return (endpoint_t *) rmr_sym_get( rt->hash, meid, RT_ME_SPACE );
 }

+/*
+	This returns a pointer to the currently active route table and ups
+	the reference count so that the route table is not freed while it
+	is being used. The caller MUST call release_rt() when finished
+	with the pointer.
+
+	Care must be taken: the ctx->rtable pointer _could_ change during the time
+	between the release of the lock and the return. Therefore we MUST grab
+	the current pointer when we have the lock so that if it does change we don't
+	return a pointer to the wrong table.
+
+	This will return NULL if there is no active table.
+*/
+static inline route_table_t* get_rt( uta_ctx_t* ctx ) {
+	route_table_t*	rrt;			// return value
+
+	if( ctx == NULL || ctx->rtable == NULL ) {
+		return NULL;
+	}
+
+	pthread_mutex_lock( ctx->rtgate );				// must hold lock to bump use
+	rrt = ctx->rtable;								// must stash the pointer while we hold lock
+	rrt->ref_count++;
+	pthread_mutex_unlock( ctx->rtgate );
+
+	return rrt;										// pointer we upped the count with
+}
+
+/*
+	This will "release" the route table by reducing the use counter
+	in the table. The table may not be freed until the counter reaches
+	0, so it's imperative that every pointer fetched with get_rt() be
+	released. Once the caller has released the table it
+	may not safely use the pointer that it had.
+*/
+static inline void release_rt( uta_ctx_t* ctx, route_table_t* rt ) {
+	if( ctx == NULL || rt == NULL ) {
+		return;
+	}
+
+	pthread_mutex_lock( ctx->rtgate );				// must hold lock
+	if( rt->ref_count > 0 ) {						// something smells if it's already 0; don't do anything if it is
+		rt->ref_count--;
+	}
+	pthread_mutex_unlock( ctx->rtgate );
+}
 #endif
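
The heart of this change is the pairing of get_rt()/release_rt() with roll_tables(): readers pin the active table under a single shared mutex, the collector swaps a freshly built table in under that same mutex, and the demoted table drains (its reference count falls to zero) before prep_new_rt() recycles it. The following is a minimal standalone sketch of that pattern, compiled outside of RMR; table_t, get(), release(), swap_in() and the driver in main() are illustrative names invented here, not RMR API.

    #include <pthread.h>
    #include <stdio.h>
    #include <stddef.h>

    typedef struct table {
        int ref_count;          // readers currently holding this table
        int generation;         // stand-in for the real routing content
    } table_t;

    static pthread_mutex_t gate = PTHREAD_MUTEX_INITIALIZER;    // one gate for ALL tables
    static table_t* active = NULL;      // models ctx->rtable
    static table_t* old = NULL;         // models ctx->old_rtable

    /* Pin the active table; caller MUST call release() when done. */
    static table_t* get( void ) {
        table_t* t;

        pthread_mutex_lock( &gate );
        t = active;                     // stash while locked so a swap can't slip in
        if( t != NULL ) {
            t->ref_count++;
        }
        pthread_mutex_unlock( &gate );
        return t;
    }

    static void release( table_t* t ) {
        if( t == NULL ) {
            return;
        }
        pthread_mutex_lock( &gate );
        if( t->ref_count > 0 ) {
            t->ref_count--;
        }
        pthread_mutex_unlock( &gate );
    }

    /* Swap a completed table in; the previous active drains as 'old'. */
    static void swap_in( table_t* fresh ) {
        pthread_mutex_lock( &gate );
        old = active;
        active = fresh;
        pthread_mutex_unlock( &gate );
    }

    int main( void ) {
        table_t t1 = { 0, 1 };
        table_t t2 = { 0, 2 };
        table_t* mine;

        swap_in( &t1 );
        mine = get();               // reader pins generation 1
        swap_in( &t2 );             // collector rolls in generation 2 meanwhile
        printf( "reader still sees gen %d; old has %d refs\n", mine->generation, old->ref_count );
        release( mine );            // old drains to 0 refs; safe to clear and reuse
        printf( "old refs now %d\n", old->ref_count );
        return 0;
    }

The key design point mirrored from the diff: the pointer is copied while the lock is held, so a reader can never increment the count on a table that has already been swapped away.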
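alarm_if_drops() is a small hysteresis state machine: raise as soon as the drop counter moves, but clear only after the counter has been flat for a full 60 second window. A compact sketch of just that logic, assuming the same shape of counter; check_drops() and the printf stand-ins are illustrative, not the RMR uta_alarm() calls.

    #include <stdio.h>
    #include <time.h>

    static void check_drops( long dcount ) {    // dcount models ctx->dcount
        static int raised = 0;
        static long lastd = 0;
        static time_t ok2clear = 0;             // earliest time a clear is allowed

        if( ! raised ) {
            if( dcount != lastd ) {             // counter moved: raise immediately
                lastd = dcount;
                raised = 1;
                printf( "RAISE: dropping messages\n" );     // uta_alarm( ... ALARM_RAISE ... ) in RMR terms
            }
            return;
        }

        if( dcount != lastd ) {                 // still dropping; restart the quiet window
            lastd = dcount;
            ok2clear = 0;
            return;
        }

        if( ok2clear == 0 ) {
            ok2clear = time( NULL ) + 60;       // first quiet round: arm the window
        } else if( time( NULL ) > ok2clear ) {
            raised = 0;
            ok2clear = 0;
            printf( "CLEAR: dropping stopped\n" );          // uta_alarm( ... ALARM_CLEAR ... )
        }
    }

    int main( void ) {
        check_drops( 0 );       // flat: stays quiet
        check_drops( 5 );       // first drops seen: raises
        check_drops( 5 );       // flat again: arms the 60s clear window
        return 0;
    }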
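cycle_snarfed_rt() relies on the usual tmp-file-plus-rename idiom: records are written to a .inc file next to the final name, and the finished file is renamed over the target, so a consumer of the stash never observes a half-written table (on POSIX, rename() within one filesystem replaces the target atomically). A minimal sketch under those assumptions; stash_table() is an invented helper, not the RMR function.

    #include <errno.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    static int stash_table( const char* final_name, const char* contents ) {
        char tmp[512];
        int fd;

        snprintf( tmp, sizeof( tmp ), "%s.inc", final_name );   // tmp lives beside the target so rename can't cross filesystems
        if( (fd = open( tmp, O_WRONLY | O_CREAT | O_TRUNC, 0660 )) < 0 ) {
            fprintf( stderr, "open %s: %s\n", tmp, strerror( errno ) );
            return -1;
        }
        if( write( fd, contents, strlen( contents ) ) < 0 ) {
            fprintf( stderr, "write: %s\n", strerror( errno ) );
            close( fd );
            return -1;
        }
        close( fd );

        if( rename( tmp, final_name ) < 0 ) {                   // atomically replaces any earlier stash
            fprintf( stderr, "rename: %s\n", strerror( errno ) );
            return -1;
        }
        return 0;
    }

    int main( void ) {
        return stash_table( "rt.stash", "newrt|start\nnewrt|end|0\n" ) == 0 ? 0 : 1;
    }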
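Finally, collect_things() grows its capture arrays through extend_things() by half of the current allocation and, on allocation failure, flags the list instead of crashing so that the clone is abandoned cleanly. The same grow-after-insert shape in isolation; plist_t, extend() and add_thing() are illustrative names for this sketch only.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    typedef struct plist {
        int error;          // set when growth fails; contents can no longer be trusted
        int nalloc;
        int nused;
        void** things;
    } plist_t;

    static void extend( plist_t* pl ) {
        int nsize = pl->nalloc + pl->nalloc / 2;        // grow by half, as extend_things() does
        void** nt = (void **) malloc( sizeof( void * ) * nsize );

        if( nt == NULL ) {
            pl->error = 1;          // flag rather than crash; the caller abandons the capture
            return;
        }
        memcpy( nt, pl->things, sizeof( void * ) * pl->nalloc );
        free( pl->things );
        pl->things = nt;
        pl->nalloc = nsize;
    }

    static void add_thing( plist_t* pl, void* thing ) {
        pl->things[pl->nused++] = thing;
        if( pl->nused >= pl->nalloc ) {     // grow after insert, matching collect_things()
            extend( pl );
        }
    }

    int main( void ) {
        static int v[10];
        plist_t pl = { 0, 2, 0, NULL };
        int i;

        pl.things = (void **) malloc( sizeof( void * ) * pl.nalloc );
        for( i = 0; i < 10 && ! pl.error; i++ ) {
            add_thing( &pl, &v[i] );
        }
        printf( "captured %d things (nalloc=%d error=%d)\n", pl.nused, pl.nalloc, pl.error );
        return 0;
    }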