all:
unit_test.ksh
+# a generic rule driven from the unit_test script to run all stand alone
+# vetters after the build
+vet: logging_vet
+
+# double-colon rule so additional vetters can add their own logging_vet
+# recipes later; depends on 'always' so the vetter runs on every invocation
+logging_vet:: logging_test always
+	bash vet_logging.sh
+
+# Make required hack to always force something to build
+always ::
+
# remove intermediates
clean:
setenv( "RMR_HR_LOG", "1", 1 ); // drive for coverage in init
setenv( "RMR_LOG_VLEVEL", "90", 1 ); // force test for out of range during init
- rmr_vlog( RMR_VL_CRIT, "debug message should not be written\n" ); // force coverage with init call
+ rmr_vlog( RMR_VL_CRIT, "crit message should be written\n" ); // force coverage; should drive init
llevel = rmr_vlog_init( );
errors += fail_if_equal( llevel, 99, "llevel was not reset by vlog init" );
errors += fail_if_equal( llevel, -10, "vlog init did not catch out of range (neg) vlog" );
rmr_set_vlevel( 2 );
-
+
+ /*
+ The remainder of these tests can be validated only by looking at the stderr
+ for the process. If any "should not be written" messages appear, then the
+ test should be marked as a failure. In a similar vein, the number of
+ expected "should be written" messages should be found.
+ */
rmr_vlog( RMR_VL_DEBUG, "debug message should not be written\n" );
rmr_vlog( RMR_VL_INFO, "info message should not be written\n" );
rmr_vlog( RMR_VL_WARN, "warn message should not be written\n" );
rmr_vlog( RMR_VL_ERR, "error message should be written\n" );
rmr_vlog( RMR_VL_CRIT, "crit message should be written\n" );
-
+
rmr_set_vlevel( 5 );
rmr_vlog( RMR_VL_DEBUG, "debug message should be written\n" );
rmr_vlog( RMR_VL_INFO, "info message should be written\n" );
rmr_vlog( RMR_VL_WARN, "warn message should be written\n" );
rmr_vlog( RMR_VL_ERR, "error message should be written\n" );
rmr_vlog( RMR_VL_CRIT, "crit message should be written\n" );
-
+
rmr_set_vlevel( 0 );
rmr_vlog( RMR_VL_DEBUG, "debug message should not be written\n" );
rmr_vlog( RMR_VL_INFO, "info message should not be written\n" );
rmr_vlog_force( RMR_VL_ERR, "error forced message should be written\n" );
rmr_vlog_force( RMR_VL_CRIT, "crit forced message should be written\n" );
+ // CAUTION -- this needs to be manually updated when 'should be' messages are added!!
+	fprintf( stderr, "<INFO> expected 'should be' messages count is 13\n" );
rmr_vlog( -1, "out of range message might be written\n" ); // drive range checks
rmr_vlog( 10, "out of range message should not be written\n" );
- rmr_vlog_force( -1, "out of range message might be written\n" ); // drive range checks
- rmr_vlog_force( 10, "out of range message should not be written\n" );
+ rmr_vlog_force( -1, "out of range message might be written\n" ); // drive range checks
+ rmr_vlog_force( 10, "out of range message should be written (as debug)\n" ); // force with high log level should set to debug
+ test_summary( errors, "logging tests" );
return errors > 0;
}
errors += mbuf_api_test( );
+ test_summary( errors, "mbuf API tests" );
if( errors ) {
fprintf( stderr, "<FAIL> mbuf_api tests failed\n" );
} else {
#include <pthread.h>
#include <semaphore.h>
-#include "rmr.h"
-#include "rmr_agnostic.h"
-
-
/*
Conduct a series of interleaved tests inserting i-factor
values before beginning to pull values (i-factor must be
int errors = 0;
r = uta_mk_ring( 0 ); // should return nil
- if( r != NULL ) {
- fprintf( stderr, "<FAIL> attempt to make a ring with size 0 returned a pointer\n" );
- return 1;
- }
+ errors += fail_not_nil( r, "attempt to make a ring with size 0 returned a pointer" );
+
r = uta_mk_ring( -1 ); // should also return nil
- if( r != NULL ) {
- fprintf( stderr, "<FAIL> attempt to make a ring with size <0 returned a pointer\n" );
- return 1;
- }
+ errors += fail_not_nil( r, "attempt to make a ring with negative size returned a pointer" );
r = uta_mk_ring( 18 );
- if( r == NULL ) {
- fprintf( stderr, "<FAIL> unable to make ring with 17 entries\n" );
- return 1;
- }
+ errors += fail_if_nil( r, "attempt to make a ring with valid size returned a nil pointer" );
pfd = uta_ring_getpfd( r ); // get pollable file descriptor
- if( pfd < 0 ) {
- fprintf( stderr, "<FAIL> expected a pollable file descriptor >= 0, but got: %d\n", pfd );
- errors++;
- }
+ errors += fail_if_true( pfd < 0, "pollable file descriptor returned was bad" );
pfd = uta_ring_config( r, 0x03 ); // turn on locking for reads and writes
- if( pfd != 1 ) {
- fprintf( stderr, "<FAIL> config attempt to enable locking failed\n" );
- errors++;
- }
-
+ errors += fail_if_true( pfd != 1, "attempt to enable locking failed" );
for( i = 0; i < 20; i++ ) { // test to ensure it reports full when head/tail start at 0
data[i] = i;
}
}
- if( i > size ) {
- fprintf( stderr, "<FAIL> didn not report table full: i=%d\n", i );
- return 1;
- }
-
- fprintf( stderr, "<OK> reported table full at i=%d as expected\n", i );
-
+ errors += fail_if_true( i > size, "ring insert did not report full table" );
for( i = 0; i < size + 3; i++ ) { // ensure they all come back in order, and we don't get 'extras'
if( (dp = uta_ring_extract( r )) == NULL ) {
- if( i < size-1 ) {
- fprintf( stderr, "<FAIL> nil pointer at i=%d\n", i );
- return 1;
- } else {
- break;
- }
+ errors += fail_if_true( i < size-1, "nil pointer on extract from full table" );
+ break;
}
- if( *dp != i ) {
+ if( fail_if_true( *dp != i, "extracted data is incorrect; see details below" )) {
fprintf( stderr, "<FAIL> data at i=% isnt right; expected %d got %d\n", i, i, *dp );
+ errors++;
}
}
- if( i > size ) {
- fprintf( stderr, "<FAIL> got too many values on extract: %d\n", i );
- return 1;
- }
- fprintf( stderr, "<OK> extracted values were sane, got: %d\n", i-1 );
+ fail_if_true( i > size, "got too many values from extract loop" );
uta_ring_free( NULL ); // ensure this doesn't blow up
uta_ring_free( r );
for( i = 2; i < 15; i++ ) {
r = uta_mk_ring( 16 );
- if( ie_test( r, i, 101 ) != 0 ) { // modest number of inserts
- fprintf( stderr, "<FAIL> ie test for 101 inserts didn't return 0\n" );
- return 1;
- }
+ errors += fail_not_equal( ie_test( r, i, 101 ), 0, "ie test for 101 inserts didn't return 0" );
uta_ring_free( r );
}
- fprintf( stderr, "<OK> all modest insert/exctract tests pass\n" );
size = 5;
for( j = 0; j < 20; j++ ) {
for( i = 2; i < size - 2; i++ ) {
r = uta_mk_ring( size );
- if( ie_test( r, i, 66000 ) != 0 ) { // should force the 16bit head/tail indexes to roll over
- fprintf( stderr, "<FAIL> ie test for 66K inserts didn't return 0\n" );
- return 1;
- }
+ errors += fail_not_equal( ie_test( r, i, 66000 ), 0, "ie test for 66K inserts didn't return 0" );
uta_ring_free( r );
}
- fprintf( stderr, "<OK> all large insert/exctract tests pass ring size=%d\n", size );
size++;
}
- fprintf( stderr, "<INFO> all ring tests pass\n" );
return errors;
}
#include <stdint.h>
#include <pthread.h>
#include <semaphore.h>
+#include <netdb.h>
#include "rmr.h"
#include "rmr_agnostic.h"
errors += ring_test( );
+ test_summary( errors, "ring tests" );
if( errors ) {
fprintf( stderr, "<FAIL> ring tests failed\n" );
} else {
fprintf( stderr, "<OK> ring tests pass\n" );
}
- return errors;
+ return !! errors;
}
errors += rt_test(); // route table things specific to nano
fprintf( stderr, "<INFO> error count: %d\n", errors );
+ test_summary( errors, "nanomsg API tests" );
if( errors == 0 ) {
fprintf( stderr, "<PASS> all tests were OK\n" );
} else {
errors += mbuf_api_test( );
fprintf( stderr, "<INFO> error count: %d\n", errors );
+ test_summary( errors, "tool tests" );
if( errors == 0 ) {
fprintf( stderr, "<PASS> all tests were OK\n\n" );
} else {
errors += rmr_rcv_test();
fprintf( stderr, "<INFO> error count: %d\n", errors );
+ test_summary( errors, "receive tests" );
if( errors == 0 ) {
fprintf( stderr, "<PASS> all tests were OK\n\n" );
} else {
fprintf( stderr, "<INFO> error count: %d\n", errors );
*/
+ test_summary( errors, "rmr_si tests" );
if( errors == 0 ) {
- fprintf( stderr, "<PASS> all tests were OK\n\n" );
+ fprintf( stderr, "<PASS> all rmr si tests were OK\n\n" );
} else {
- fprintf( stderr, "<FAIL> %d modules reported errors\n\n", errors );
+ fprintf( stderr, "<FAIL> %d rmr si test modules reported errors\n\n", errors );
}
return !!errors;
errors += fail_if_true( state >= 0, "forced dup connect did not return error" );
tpem_set_addr_dup_state( 0 ); // back to normal
- tpem_set_conn_state( 1 );
+ tpem_set_conn_state( -1 );
state = SIconnect( si_ctx, "localhost:4567" ); // driver regular connect
errors += fail_if_true( state >= 0, "connect to low port successful when failure expected" );
+ tpem_set_conn_state( 3 );
tpem_set_sock_state( 1 ); // make scoket calls fail
state = SIconnect( si_ctx, "localhost:4567" ); // driver regular connect
errors += cleanup();
- fprintf( stderr, "<INFO> si95 tests finished (%d)\n", errors );
+ test_summary( errors, "SI95 tests" );
if( errors == 0 ) {
fprintf( stderr, "<PASS> all tests were OK\n\n" );
} else {
-static void fetch( void* st, char* key, int class, int expected ) {
+/*
+	Fetch a string-keyed value from the symbol table st (key selected by
+	key/class) and vet the result: expected is true (1) when the key should
+	be found.  Returns 0 when the observed result matches the expectation
+	and 1 on a mismatch; the global state is also set to BAD on a mismatch
+	so that existing state-based checks continue to work.
+*/
+static int fetch( void* st, char* key, int class, int expected ) {
	char* val;
+	int error = 0;		// local result so the caller can accumulate errors
	val = rmr_sym_get( st, key, class );
	if( val ) {
		fprintf( stderr, "[%s] get returns key=%s val=%s\n", !expected ? "FAIL" : "OK", key, val );
		if( !expected ) {
			state = BAD;
+			error = 1;
		}
	} else {
		fprintf( stderr, "[%s] string key fetch return nil\n", expected ? "FAIL" : "OK" );
		if( expected ) {
			state = BAD;
+			error = 1;
		}
	}
+
+	return error;
}
-static void nfetch( void* st, int key, int expected ) {
+/*
+	Numeric-key companion to fetch(): pull the value mapped to the unsigned
+	integer key from the symbol table st and vet the result.  expected is
+	true (1) when the key should be found.  Returns 0 on a match and 1 on a
+	mismatch; the global state is also set to BAD on a mismatch (legacy
+	behaviour preserved).
+*/
+static int nfetch( void* st, int key, int expected ) {
	char* val;
+	int error = 0;		// local result so the caller can accumulate errors
	val = rmr_sym_pull( st, key );
	if( val ) {
		fprintf( stderr, "[%s] get returns key=%d val=%s\n", !expected ? "FAIL" : "OK", key, val );
		if( !expected ) {
			state = BAD;
+			error = 1;
		}
	} else {
		fprintf( stderr, "[%s] get return nil for key=%d\n", expected ? "FAIL" : "OK", key );
		if( expected ) {
			state = BAD;
+			error = 1;
		}
	}
+
+	return error;
}
int class = 1;
int s;
void* p;
+ int errors = 0;
st = rmr_sym_alloc( 10 ); // alloc with small value to force adjustment inside
- fail_if_nil( st, "symtab pointer" );
+ errors += fail_if_nil( st, "symtab pointer" );
s = rmr_sym_put( st, foo, class, bar ); // add entry with string key; returns 1 if it was inserted
- fail_if_false( s, "insert foo existed" );
+ errors += fail_if_false( s, "insert foo existed" );
s = rmr_sym_put( st, foo, class+1, bar ); // add to table with a different class
- fail_if_false( s, "insert foo existed" );
+ errors += fail_if_false( s, "insert foo existed" );
s = rmr_sym_put( st, foo, class, bar ); // inserted above, should return not inserted (0)
- fail_if_true( s, "insert foo existed" );
+ errors += fail_if_true( s, "insert foo existed" );
- fetch( st, foo, class, 1 );
- fetch( st, goo, class, 0 ); // fetch non existant
+ errors += fetch( st, foo, class, 1 );
+ errors += fetch( st, goo, class, 0 ); // fetch non existant
rmr_sym_stats( st, 4 ); // early stats at verbose level 4 so chatter is minimised
rmr_sym_dump( st );
- for( i = 2000; i < 3000; i++ ) { // bunch of dummy things to force chains in the table
+ for( i = 2000; i < 3000; i++ ) { // bunch of dummy things to force chains in the table
rmr_sym_map( st, i, foo ); // add entry with unsigned integer key
}
rmr_sym_stats( st, 0 ); // just the small facts to verify the 1000 we stuffed in
rmr_sym_ndel( st, 12001 ); // delete numeric key not there
s = rmr_sym_map( st, 1234, foo ); // add known entries with unsigned integer key
- fail_if_false( s, "numeric add of key 1234 should not have existed" );
+ errors += fail_if_false( s, "numeric add of key 1234 should not have existed" );
s = rmr_sym_map( st, 2345, bar );
fail_if_true( s, "numeric add of key 2345 should have existed" );
counter = 0;
rmr_sym_foreach_class( st, 0, each_counter, NULL );
- fail_if_false( counter, "expected counter after foreach to be non-zero" );
-
- nfetch( st, 1234, 1 );
- nfetch( st, 2345, 1 );
+ errors += fail_if_false( counter, "expected counter after foreach to be non-zero" );
+ errors += nfetch( st, 1234, 1 );
+ errors += nfetch( st, 2345, 1 );
rmr_sym_del( st, foo, 0 );
rmr_sym_free( NULL ); // ensure it doesn't barf when given a nil pointer
rmr_sym_free( st );
- return state;
+ test_summary( errors, "symtab tests" );
+ if( state + errors == 0 ) {
+ fprintf( stderr, "<PASS> all symtab tests were OK\n\n" );
+ } else {
+ fprintf( stderr, "<FAIL> %d errors in symtab code\n\n", errors );
+ }
+
+ return !!(state + errors);
}
#define GOOD 0
#endif
+// ----------- a couple of globals make it easier ---------------------------------------
+static int ts_tests_driven = 0;	// number of fail_if calls made == number of tests driven
+
+// ---------------------------------------------------------------------------------------
+
+/*
+ Support test counting, reset and summary.
+*/
+// Return the number of fail_* checks driven (tests attempted) so far.
+static int test_get_attempted() {
+	return ts_tests_driven;
+}
+
+// Reset the attempted-test counter (intended for use between test modules).
+static void test_reset_attempted() {
+	ts_tests_driven = 0;
+}
+
+// Write a summary line to stderr for the module named by tag: total checks
+// driven, passed and failed counts.  ecount is the caller's accumulated
+// error count.
+// NOTE(review): ts_tests_driven is cumulative unless test_reset_attempted()
+// is called between modules, so the per-module totals (and the derived
+// passed count) may include earlier modules' checks -- confirm call sites.
+static void test_summary( int ecount, char* tag ) {
+	fprintf( stderr, "<SUMMARY> %s completed; %d total tests, %d passed, %d failed\n",
+		tag, ts_tests_driven, ts_tests_driven - ecount, ecount );
+}
+
/*
Snag the optional positional parameter at pp, return defval if not there.
*/
}
+/*
+ Assert like logic except these just record the test and return state so that we
+ can attempt all tests and not abort on the first failure as an assert would do.
+*/
static int fail_if_nil( void* p, char* what ) {
+ ts_tests_driven++;
+
if( !p ) {
fprintf( stderr, "<FAIL> %s: pointer was nil\n", what );
}
}
static int fail_not_nil( void* p, char* what ) {
+ ts_tests_driven++;
+
if( p ) {
fprintf( stderr, "<FAIL> %s: pointer was not nil\n", what );
}
}
static int fail_if_false( int bv, char* what ) {
+ ts_tests_driven++;
+
if( !bv ) {
fprintf( stderr, "<FAIL> %s: expected true, boolean test was false (%d)\n", what, bv );
}
}
static int fail_if_true( int bv, char* what ) {
+ ts_tests_driven++;
+
if( bv ) {
fprintf( stderr, "<FAIL> %s: expected false, boolean test was true (%d)\n", what, bv );
}
Same as fail_if_true(), but reads easier in the test code.
*/
static int fail_if( int bv, char* what ) {
+ ts_tests_driven++;
+
if( bv ) {
fprintf( stderr, "<FAIL> %s: expected false, boolean test was true (%d)\n", what, bv );
}
static int fail_not_equal( int a, int b, char* what ) {
+ ts_tests_driven++;
+
if( a != b ) {
fprintf( stderr, "<FAIL> %s: values were not equal a=%d b=%d\n", what, a, b );
}
}
static int fail_if_equal( int a, int b, char* what ) {
+ ts_tests_driven++;
+
fprintf( stderr, "<TESTING> %s %d\n", what, a==b );
if( a == b ) {
fprintf( stderr, "<FAIL> %s values were equal a=%d b=%d\n", what, a, b );
}
static int fail_not_equalp( void* a, void* b, char* what ) {
+ ts_tests_driven++;
+
if( a != b ) {
fprintf( stderr, "<FAIL> %s: pointers were not equal a=%p b=%p\n", what, a, b );
}
}
static int fail_if_equalp( void* a, void* b, char* what ) {
+ ts_tests_driven++;
+
if( a == b ) {
fprintf( stderr, "<FAIL> %s pointers were equal a=%p b=%p\n", what, a, b );
}
/*
Build a message and populate both the msg buffer and the tranport header
- with mid, sid, and payload len. Tr_len causes that much space in the
+ with mid, sid, and payload len. Tr_len causes that much space in the
header for trace info to be reserved.
*/
static rmr_mbuf_t* mk_populated_msg( int alloc_len, int tr_len, int mtype, int sid, int plen ) {
}
+// end no dummy rmr
#endif
#endif
int tpem_addr_dup = 0; // getsockname duplicates last addr if true
int tpem_conn_state = 0; // states returned by emulated functions allowing failures to be driven
-int tpem_sock_state = 0;
+int tpem_sock_state = 0; // if sock state 0, then socket call returns good fd
int tpem_listen_state = 0;
int tpem_bind_state = 0;
int tpem_accept_fd = 5; // file desc returned by accept
if( tpem_sock_state == 0 ) {
if( ++fd > 10 ) {
- fd = 1;
+ fd = 3; // ensure we don't stomp on std* descriptors
}
return fd;
#include "tools_static_test.c"
int main( ) {
+	int errors = 0;
+
	fprintf( stderr, ">>>> starting tools_test\n" );
-	return tools_test() > 0;
+	errors += tools_test() > 0;		// collapse the module's error count to 0/1
+
+	test_summary( errors, "tool tests" );
+	if( errors == 0 ) {
+		fprintf( stderr, "<PASS> all tool tests were OK\n\n" );
+	} else {
+		fprintf( stderr, "<FAIL> %d errors in tool code\n\n", errors );
+	}
+
+	return !!errors;		// shell convention: 0 == success, 1 == failure
}
echo " -c allows user to set the target coverage for a module to pass; default is 80"
echo " -f forces a discount check (normally done only if coverage < target)"
echo " -F show only failures at the function level"
+ echo " -Q turns off quiet mode. Quiet mode (default) is less chatty about intermediate"
+ echo " coverage results and test programme output when failures detected"
+ echo " -q enable quiet mode (default, so this is no longer needed)"
echo " -s strict mode; code coverage must also pass to result in a good exit code"
echo " -v will write additional information to the tty and save the disccounted file if discount run or -f given"
echo " -x generates the coverage XML files for Sonar (implies -f)"
show_all=1 # show all things -F sets to show failures only
strict=0 # -s (strict) will set; when off, coverage state ignored in final pass/fail
show_output=0 # show output from each test execution (-S)
-quiet=0
+quiet=1 # less chatty with result output (only overall coverage, no intermediate coverage) -Q turns off
gen_xml=0
replace_flags=1 # replace ##### in gcov for discounted lines
run_nano_tests=0 # can nolonger be turned on
-S) show_output=1;; # test output shown even on success
-v) (( verbose++ ));;
-q) quiet=1;; # less chatty when spilling error log files
+ -Q) quiet=0;; # disable quiet mode
-x) gen_xml=1
force_discounting=1
trigger_discount_str="WARN|FAIL|PASS" # check all outcomes for each module
continue
fi
- echo "<INFO> add test: $tfile" >&2
+ if (( ! quiet ))
+ then
+ echo "<INFO> add test: $tfile" >&2
+ fi
flist="${flist}$tfile "
fi
done
printf "\n============= test programme output =======================\n"
cat /tmp/PID$$.log
printf "===========================================================\n"
+ else
+ grep "SUMMARY" /tmp/PID$$.log
fi
fi
if (( rc || force_discounting )) # didn't pass, or forcing, see if discounting helps
then
- if (( ! verbose ))
+ if (( ! verbose )) # if verbose is on we don't need this (! is not a mistake!!)
then
echo "[INFO] checking to see if discounting improves coverage for failures listed above"
fi
if (( ! quiet ))
then
cat /tmp/PID$$.noise
+ else
+ grep "SUMMARY" /tmp/PID$$.noise
fi
done
fi
fi
+# finally run any "vetters" which run programmes and analyse the output
+echo "[INFO] running vetters...."
+if ! make vet
+then
+ echo "[FAIL] one or more vetters failed"
+ state=1
+else
+ echo "[INFO] vetters all passed"
+fi
+
echo""
if (( state ))
then
--- /dev/null
+#!/usr/bin/env bash
+
+#==================================================================================
+# Copyright (c) 2019 Nokia
+# Copyright (c) 2018-2019 AT&T Intellectual Property.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#==================================================================================
+
+# Mnemonic: vet_logging.sh
+# Abstract: Drive the logging test and count the messages we see, and look for
+#			messages we don't expect. Exit good (0) if all looks good.
+# Date:		16 September 2020
+# Author:	E. Scott Daniels
+#-----------------------------------------------------------------------------------
+
+echo "<INFO> log message vetting starts..."
+
+# CAUTION: this must track the number of 'should be written' messages that
+# logging_test actually emits (see the CAUTION comment in the test source).
+expect=14			# number of good "should be written" messages expected
+
+./logging_test >/tmp/PID$$.out 2>&1
+g=$( grep -c "should be written" /tmp/PID$$.out )		# good (expected) messages seen
+b=$( grep -c "should not be written" /tmp/PID$$.out )	# bad (unexpected) messages seen
+
+errors=0
+if (( b != 0 ))
+then
+	echo "<FAIL> logging test produced $b unexpected message(s) to stderr"
+	errors=1
+else
+	echo "<INFO> no unexpected messages were found"
+fi
+
+if (( g != expect ))
+then
+	echo "<FAIL> logging test did not produce the expected number of messages to stderr"
+	echo "<INFO> expected $expect, saw $g"
+	errors=1
+else
+	echo "<INFO> logging test produced good message count ($g)"
+fi
+
+if (( errors ))			# spill the captured output only when something went wrong
+then
+	ls -al /tmp/PID$$.out
+	echo "<INFO> --- test output ----"
+	cat /tmp/PID$$.out
+	echo "<INFO> ---------------------"
+else
+	echo "<PASS> logging tests pass message output validation"
+fi
+
+rm -f /tmp/PID$$.*
+exit $(( !! errors ))		# normalise to 0/1 for the caller