-// : vi ts=4 sw=4 noet :
+// vim: ts=4 sw=4 noet :
/*
==================================================================================
Copyright (c) 2019 Nokia
into the message, and sets errno to something that might be useful.
If we don't have a specific RMr state, then we return the default (e.g.
receive failed).
+
+ The addition of the connection shut error code to the switch requires
+ that the NNG version at commit e618abf8f3db2a94269a (or after) be
+ used for compiling RMR.
*/
static inline int xlate_nng_state( int state, int def_state ) {
state = def_state;
break;
+ case NNG_ECONNSHUT: // new error with nng commit e618abf8f3db2a94269a79c8901a51148d48fcc2 (Sept 2019)
case NNG_ECLOSED:
errno = EBADFD; // file des not in a good state for the operation
state = def_state;
reuse. They have their reasons I guess. Thus, we will free
the old transport buffer if user passes the message in; at least
our mbuf will be reused.
+
+	When msg->state is not ok, this function must set tp_state in the message as some API
+	functions return the message directly and do not propagate errno into the message.
*/
static rmr_mbuf_t* rcv_msg( uta_ctx_t* ctx, rmr_mbuf_t* old_msg ) {
int state;
msg->state = nng_recvmsg( ctx->nn_sock, (nng_msg **) &msg->tp_buf, NO_FLAGS ); // blocks hard until received
if( (msg->state = xlate_nng_state( msg->state, RMR_ERR_RCVFAILED )) != RMR_OK ) {
+ msg->tp_state = errno;
return msg;
}
+ msg->tp_state = 0;
if( msg->tp_buf == NULL ) { // if state is good this _should_ not be nil, but parninoia says check anyway
msg->state = RMR_ERR_EMPTY;
+ msg->tp_state = 0;
return msg;
}
msg->mtype, msg->state, msg->len, msg->payload - (unsigned char *) msg->header );
} else {
msg->state = RMR_ERR_EMPTY;
+ msg->tp_state = 0;
msg->len = 0;
msg->alloc_len = rsize;
msg->payload = NULL;
Called by rmr_send_msg() and rmr_rts_msg(), etc. and thus we assume that all pointer
validation has been done prior.
+
+	When msg->state is not ok, this function must set tp_state in the message as some API
+	functions return the message directly and do not propagate errno into the message.
*/
static rmr_mbuf_t* send_msg( uta_ctx_t* ctx, rmr_mbuf_t* msg, nng_socket nn_sock, int retries ) {
int state;
uta_mhdr_t* hdr;
int nng_flags = NNG_FLAG_NONBLOCK; // if we need to set any nng flags (zc buffer) add it to this
int spin_retries = 1000; // if eagain/timeout we'll spin, at max, this many times before giving up the CPU
- int tr_len; // trace len in sending message so we alloc new message with same trace size
+ int tr_len; // trace len in sending message so we alloc new message with same trace sizes
// future: ensure that application did not overrun the XID buffer; last byte must be 0
strncpy( (char *) ((uta_mhdr_t *)msg->header)->srcip, ctx->my_ip, RMR_MAX_SRC );
}
+ if( retries == 0 ) {
+ spin_retries = 100;
+ retries++;
+ }
+
errno = 0;
msg->state = RMR_OK;
if( msg->flags & MFL_ZEROCOPY ) { // faster sending with zcopy buffer
// future: this should not happen as all buffers we deal with are zc buffers; might make sense to remove the test and else
msg->state = RMR_ERR_SENDFAILED;
errno = ENOTSUP;
+ msg->tp_state = errno;
return msg;
/*
NOT SUPPORTED
message type is used. If the initial lookup, with a subid, fails, then a
second lookup using just the mtype is tried.
+	When msg->state is not OK, this function must set tp_state in the message as
+	some API functions return the message directly and do not propagate errno into
+	the message.
+
CAUTION: this is a non-blocking send. If the message cannot be sent, then
it will return with an error and errno set to eagain. If the send is
a limited fanout, then the returned status is the status of the last
*/
static rmr_mbuf_t* mtosend_msg( void* vctx, rmr_mbuf_t* msg, int max_to ) {
+ endpoint_t* ep; // end point that we're attempting to send to
+ rtable_ent_t* rte; // the route table entry which matches the message key
nng_socket nn_sock; // endpoint socket for send
uta_ctx_t* ctx;
int group; // selected group to get socket for
int send_again; // true if the message must be sent again
rmr_mbuf_t* clone_m; // cloned message for an nth send
int sock_ok; // got a valid socket from round robin select
- uint64_t key; // mtype or sub-id/mtype sym table key
- int altk_ok = 0; // set true if we can lookup on alternate key if mt/sid lookup fails
char* d1;
+ int ok_sends = 0; // track number of ok sends
if( (ctx = (uta_ctx_t *) vctx) == NULL || msg == NULL ) { // bad stuff, bail fast
errno = EINVAL; // if msg is null, this is their clue
if( msg != NULL ) {
msg->state = RMR_ERR_BADARG;
errno = EINVAL; // must ensure it's not eagain
+ msg->tp_state = errno;
}
return msg;
}
errno = 0; // clear; nano might set, but ensure it's not left over if it doesn't
if( msg->header == NULL ) {
- fprintf( stderr, "rmr_send_msg: ERROR: message had no header\n" );
+ fprintf( stderr, "rmr_mtosend_msg: ERROR: message had no header\n" );
msg->state = RMR_ERR_NOHDR;
errno = EBADMSG; // must ensure it's not eagain
+ msg->tp_state = errno;
return msg;
}
max_to = ctx->send_retries; // convert to retries
}
+ if( (rte = uta_get_rte( ctx->rtable, msg->sub_id, msg->mtype, TRUE )) == NULL ) { // find the entry which matches subid/type allow fallback to type only key
+ if( ctx->flags & CTXFL_WARN ) {
+ fprintf( stderr, "[WARN] no endpoint for mtype=%d sub_id=%d\n", msg->mtype, msg->sub_id );
+ }
+ msg->state = RMR_ERR_NOENDPT;
+ errno = ENXIO; // must ensure it's not eagain
+ msg->tp_state = errno;
+ return msg; // caller can resend (maybe) or free
+ }
+
send_again = 1; // force loop entry
group = 0; // always start with group 0
-
- key = build_rt_key( msg->sub_id, msg->mtype ); // route table key to find the entry
- if( msg->sub_id != UNSET_SUBID ) {
- altk_ok = 1; // if caller's sub-id doesn't hit with mtype, allow mtype only key for retry
- }
while( send_again ) {
- sock_ok = uta_epsock_rr( ctx->rtable, key, group, &send_again, &nn_sock ); // round robin sel epoint; again set if mult groups
- if( DEBUG ) fprintf( stderr, "[DBUG] send msg: type=%d again=%d group=%d len=%d sock_ok=%d ak_ok=%d\n",
- msg->mtype, send_again, group, msg->len, sock_ok, altk_ok );
-
- if( ! sock_ok ) {
- if( altk_ok ) { // we can try with the alternate (no sub-id) key
- altk_ok = 0;
- key = build_rt_key( UNSET_SUBID, msg->mtype ); // build with just the mtype and try again
- send_again = 1; // ensure we don't exit the while
- continue;
- }
+ sock_ok = uta_epsock_rr( rte, group, &send_again, &nn_sock, &ep ); // select endpt from rr group and set again if more groups
- msg->state = RMR_ERR_NOENDPT;
- errno = ENXIO; // must ensure it's not eagain
- return msg; // caller can resend (maybe) or free
- }
+ if( DEBUG ) fprintf( stderr, "[DBUG] mtosend_msg: flgs=0x%04x type=%d again=%d group=%d len=%d sock_ok=%d\n",
+ msg->flags, msg->mtype, send_again, group, msg->len, sock_ok );
group++;
- if( send_again ) {
- clone_m = clone_msg( msg ); // must make a copy as once we send this message is not available
- if( DEBUG ) fprintf( stderr, "[DBUG] msg cloned: type=%d len=%d\n", msg->mtype, msg->len );
- msg->flags |= MFL_NOALLOC; // send should not allocate a new buffer
- msg = send_msg( ctx, msg, nn_sock, max_to ); // do the hard work, msg should be nil on success
- /*
- if( msg ) {
- // error do we need to count successes/errors, how to report some success, esp if last fails?
+ if( sock_ok ) { // with an rte we _should_ always have a socket, but don't bet on it
+ if( send_again ) {
+ clone_m = clone_msg( msg ); // must make a copy as once we send this message is not available
+ if( clone_m == NULL ) {
+ msg->state = RMR_ERR_SENDFAILED;
+ errno = ENOMEM;
+ msg->tp_state = errno;
+ if( ctx->flags & CTXFL_WARN ) {
+ fprintf( stderr, "[WARN] unable to clone message for multiple rr-group send\n" );
+ }
+ return msg;
+ }
+
+ if( DEBUG ) fprintf( stderr, "[DBUG] msg cloned: type=%d len=%d\n", msg->mtype, msg->len );
+ msg->flags |= MFL_NOALLOC; // keep send from allocating a new message; we have a clone to use
+ msg = send_msg( ctx, msg, nn_sock, max_to ); // do the hard work, msg should be nil on success
+
+ if( msg != NULL ) { // returned message indicates send error of some sort
+ rmr_free_msg( msg ); // must ditchone; pick msg so we don't have to unfiddle flags
+ msg = clone_m;
+ } else {
+ ok_sends++;
+ msg = clone_m; // clone will be the next to send
+ }
+ } else {
+ msg = send_msg( ctx, msg, nn_sock, max_to ); // send the last, and allocate a new buffer; drops the clone if it was
+ if( DEBUG ) {
+ if( msg == NULL ) {
+ fprintf( stderr, "[DBUG] mtosend_msg: send returned nil message!\n" );
+ }
+ }
}
- */
- msg = clone_m; // clone will be the next to send
+ if( ep != NULL && msg != NULL ) {
+ switch( msg->state ) {
+ case RMR_OK:
+ ep->scounts[EPSC_GOOD]++;
+ break;
+
+ case RMR_ERR_RETRY:
+ ep->scounts[EPSC_TRANS]++;
+ break;
+
+ default:
+ ep->scounts[EPSC_FAIL]++;
+ break;
+ }
+ }
} else {
- msg = send_msg( ctx, msg, nn_sock, max_to ); // send the last, and allocate a new buffer; drops the clone if it was
+ if( ctx->flags & CTXFL_WARN ) {
+ fprintf( stderr, "[WARN] invalid socket for rte, setting no endpoint err: mtype=%d sub_id=%d\n", msg->mtype, msg->sub_id );
+ }
+ msg->state = RMR_ERR_NOENDPT;
+ errno = ENXIO;
+ }
+ }
+
+ if( msg ) { // call functions don't get a buffer back, so a nil check is required
+ msg->flags &= ~MFL_NOALLOC; // must return with this flag off
+ if( ok_sends ) { // multiple rr-groups and one was successful; report ok
+ msg->state = RMR_OK;
}
+
+ if( DEBUG ) fprintf( stderr, "[DBUG] final send stats: ok=%d group=%d state=%d\n\n", ok_sends, group, msg->state );
+
+ msg->tp_state = errno;
}
return msg; // last message caries the status of last/only send attempt