X-Git-Url: http://git.rot13.org/?a=blobdiff_plain;f=userapps%2Fopensource%2Fnet-snmp%2Ftesting%2Feval_suite.sh;fp=userapps%2Fopensource%2Fnet-snmp%2Ftesting%2Feval_suite.sh;h=0000000000000000000000000000000000000000;hb=6adeba4d92a546ebbadde2562283ee6b984b22c1;hp=b8038205cf846ee8cca1ad7f72602d05b4ca2ced;hpb=dacd86d83a9fb430cca42cb78a67f9d46e289f5c;p=bcm963xx.git

diff --git a/userapps/opensource/net-snmp/testing/eval_suite.sh b/userapps/opensource/net-snmp/testing/eval_suite.sh
deleted file mode 100755
index b8038205..00000000
--- a/userapps/opensource/net-snmp/testing/eval_suite.sh
+++ /dev/null
@@ -1,212 +0,0 @@
-#!/bin/sh
-#
-# eval_suite.sh [-h][<testlistfile>]
-#
-# CALLS:	eval_oneprogram.sh [-h][-lk] <program>
-#
-# RETURNS:	Number of failed tests, regardless of how that failure occurred
-#		or how many failures there were in a given test.
-#
-
-USAGE_LONG='
-#
-# HOW TO ENTER A NEW TEST
-#
-# To add a test to the testlist, add a line to the TESTLISTFILE (eval_testlist)
-# using the following format:
-#
-#	<#_of_expected_successes>	[#]<program_with_arguments>
-#
-# Any white space may be used as a separator.  If <program> is immediately
-# preceded by a pound sign (#), that test will be skipped.  (No white space
-# is allowed after the pound.  Eg, "#<program>".)
-#
-#
-# HOW TESTS ARE RUN AND EVALUATED
-#
-# The harness for individual tests is the script "eval_oneprogram.sh".
-# It expects the test to print FAILED when something fails and SUCCESS
-# when something succeeds.  If a test executes properly, there should be
-# some SUCCESS strings and NO FAILED strings.  The reason for the
-# success or failure of the test should be printed on the SAME line as the
-# SUCCESS/FAILED string, so that the diagnostic can easily be grepped
-# from the output.
-#
-# The long form of the output (-l flag) will capture that output, which may
-# help to diagnose the problem.  For more information:
-#
-#	% eval_oneprogram.sh -h
-#
-#
-# MISSING TESTS ARE NOTED
-#
-# If an executable is found MISSING, a note is printed to that effect
-# and TESTFAILURE is incremented by 1.
-#
-'
-
-#
-# Suggested improvement(s):
-#	Have two (or more?) arbitrary scripts that may be associated
-#	with a given test.  One could prepare the environment, the other
-#	could clean up the environment after running the test.  This could
-#	help when testing large subsystems that legitimately require
-#	building or changing things before the testable item can be
-#	accessed in the first place.  ...
-#
-
-
-#------------------------------------ -o-
-# Usage mess.  (No, it works.)
-#
-USAGE="Usage: `basename $0` [-h][<testlistfile>]"
-
-usage() { echo; echo $USAGE; cat <<BLIK; exit 0; }
-$USAGE_LONG
-BLIK
-
-[ "x$1" = "x-h" ] && usage
-
-
-#------------------------------------ -o-
-# Globals.  The testlist file may be given as the first argument;
-# it defaults to "eval_testlist".
-#
-TESTLISTFILE=${1:-eval_testlist}
-
-TMPFILE=/tmp/eval_suite.$$
-TESTFAILURE=0
-ARGUMENTS=		# Flags passed through to eval_oneprogram.sh (eg, -l).
-
-
-#------------------------------------ -o-
-# Build the testlist.  Append a "===" sentinel to each line so the
-# parser below can tell where one testlist entry ends:
-#
-#	<#_of_expected_successes> <program_with_arguments> ===
-#	...
-#
-TESTLIST="`cat $TESTLISTFILE | sed 's/$/ ===/'`"
-
-
-#------------------------------------ -o-
-# Run all tests in the testlist.  For each test do the following:
-#
-#	1) Note whether the test is SKIPPED or MISSING.
-#
-#	2) Run the test; collect the number of FAILED strings from the
-#	   return value of eval_oneprogram.sh.
-#
-#	3) Count the number of SUCCESSes from the test output.
-#
-#	4) Print the results.  If there were no FAILED strings *and* the
-#	   number of SUCCESS strings is what we expect, simply note that
-#	   the test passed.  Otherwise, cat the output generated by
-#	   eval_oneprogram.sh and (possibly) print other details.
-#
-set x $TESTLIST
-shift
-
-while [ -n "$1" ] ; do
-	#
-	# Parse the argument stream...
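-	# The sed above gave every testlist line a "===" sentinel, so the
-	# positional parameters now look like:
-	#	<count> <program> [arguments] === <count> <program> ... ===
-	# Pull off the expected-success count, then collect words up to
-	# the next "===".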
-	#
-	EXPECTEDSUCCESSES=$1
-	shift
-
-	PROGRAM=
-	while [ "$1" != "===" ] ; do PROGRAM="$PROGRAM $1" ; shift ; done
-	shift
-
-	testname="`echo $PROGRAM | grep '^#' | sed 's/^#//'`"
-
-	echo '+==================================-o-===+'
-	echo
-
-	#
-	# Decide whether the test should be skipped or is missing;
-	# otherwise, run it.
-	#
-	[ -n "$testname" ] && {				# Skip the test?
-		echo "SKIPPING test for \"$testname\"."
-		echo
-		continue
-	}
-	[ ! -e "`echo $PROGRAM | awk '{ print $1 }'`" ] && {	# Missing test?
-		TESTFAILURE=`expr $TESTFAILURE + 1`
-
-		echo "MISSING test for \"$PROGRAM\"."
-		echo
-		continue
-	}
-
-	echo "TESTING \"$PROGRAM\"..."			# Announce the test!
-
-	#
-	# Run the test and collect the failed_count and success_count.
-	#
-	eval_oneprogram.sh $ARGUMENTS $PROGRAM >$TMPFILE
-	failed_count=$?
-
-	success_count=`awk '$(NF-1) == "SUCCESS:" { print $NF; exit }' $TMPFILE`
-	[ -z "$success_count" ] && success_count=0
-
-	#
-	# Note a fully successful run -OR- print best-effort results
-	# of the test.
-	#
-	[ "$failed_count" -eq 0 -a \
-		"$success_count" -eq "$EXPECTEDSUCCESSES" ] &&
-	{
-		echo
-		echo "$PROGRAM PASSED"		# Successful, fully completed.
-		echo
-
-		true
-	} || {
-		TESTFAILURE=`expr $TESTFAILURE + 1`
-
-		echo
-		cat $TMPFILE
-		echo
-
-		[ "$success_count" -ne "$EXPECTEDSUCCESSES" ] && {
-			echo "Got $success_count SUCCESSes"\
-				"out of $EXPECTEDSUCCESSES."
-			echo
-		}
-		true
-	}	# end -- evaluation of, and output based upon, test success.
done	# endwhile
-
-
-#------------------------------------ -o-
-# Cleanup, exit.
-#
-rm -f $TMPFILE
-
-exit $TESTFAILURE
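
A test that conforms to the harness described in USAGE_LONG only has to
print one SUCCESS or FAILED line per check, with the diagnostic on the
same line.  A minimal sketch of such a test follows; the script name
check_localhost.sh and the check it performs are hypothetical, not part
of the net-snmp suite:

	#!/bin/sh
	# check_localhost.sh -- hypothetical test for eval_suite.sh.
	# Each check prints SUCCESS or FAILED plus a same-line diagnostic,
	# which is what eval_oneprogram.sh greps for.

	if ping -c 1 localhost >/dev/null 2>&1 ; then
		echo "SUCCESS: localhost answers ping"
	else
		echo "FAILED: localhost does not answer ping"
	fi

Its corresponding eval_testlist entry would declare the one expected
SUCCESS (prefix the program with a pound sign to skip it):

	1	./check_localhost.sh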