3 # eval_suite.sh [-h][<args_passed_to_getresults>]
5 # CALLS: eval_oneprogram.sh [-h][-lk] <program>
7 # RETURNS: Number of failed tests, regardless of how that failure occurred
8 # or how many failures there were in a given test.
15 # HOW TO ENTER A NEW TEST
17 # To add a test to the testlist, add a line to the TESTLISTFILE (eval_testlist)
18 # using the following format:
20 # <#_of_expected_successes> [#]<program> <args>
22 # Any white space may be used as separator. If <program> is immediately
23 # preceded by a pound sign (#) that test will be skipped. (No white space
24 # allowed after the pound. Eg, "#<program>".)
27 # HOW TESTS ARE RUN AND EVALUATED
29 # The harness for individual tests is the script "eval_oneprogram.sh".
30 # It expects that the test print FAILED when something fails, and SUCCESS
31 # when something succeeds. If a test executes properly there should be
32 # some SUCCESS strings and NO FAILED strings. If the reason for the
33 # success or failure of the test should be printed on the SAME line as the
34 # SUCCESS/FAILED string to allow the diagnostic to be easily grepped from
37 # The long form of the output (-l flag) will capture that output which may
38 # help to diagnose the problem. For more information:
40 # % eval_oneprogram.sh -h
43 # MISSING TESTS ARE NOTED
45 # If an executable is found MISSING, a note is printed to that effect
46 # and TESTFAILURE is incremented by 1.
51 # Suggested improvement(s):
52 # Have two (or more?) arbitrary script(s) that may be associated
53 # with a given test. One could prepare the environment, the other
54 # could clean up the environment after running the test. This could
55 # help when testing large subsystems that might require legitimately
56 # building or changing things such that the testable item may be
57 # accessed in the first place (eg). ...
61 #------------------------------------ -o-
62 # Usage mess. (No, it works.)
# Usage string shown by usage() and on bad invocation.
64 USAGE="Usage: `basename $0` [-h][<args_for_getresults>]"
# usage(): print the usage line, then page this script's own comment
# header (leading '#' stripped, first line dropped) through $PAGER.
# NOTE(review): the here-doc body and its closing BLIK delimiter, plus
# the function's closing brace, fall outside this view -- presumably
# followed by an exit; confirm in the full file.
66 usage() { echo; echo $USAGE; cat <<BLIK | sed 's/^#//' | sed '1d' | $PAGER
72 [ "x$1" = "x-h" ] && usage
76 #------------------------------------ -o-
# Scratch file for per-test harness output; $$ makes the name unique
# per run.  NOTE(review): predictable /tmp name -- mktemp would be
# safer; confirm whether a cleanup trap exists elsewhere in the file.
82 TMPFILE=/tmp/eval_suite.sh$$
# File listing the tests to run (entry format described in the header).
84 TESTLISTFILE=eval_testlist
95 # TESTLISTFILE format:
96 # <expected_successes> <program> <arg(s)> ...
97 # <expected_successes> <program> <arg(s)> ...
# Read the whole test list, appending " ===" to every line so that the
# literal word "===" can serve as an end-of-entry sentinel when the
# list is re-parsed word-by-word in the main loop below.
100 TESTLIST="`cat $TESTLISTFILE | sed 's/$/ ===/'`"
106 #------------------------------------ -o-
107 # Run all tests in the testlist. For each test do the following:
109 # 1) Note whether the test is SKIPPED or MISSING.
111 # 2) Run the test; collect the number of FAILED strings from the
112 # return value of eval_oneprogram.sh.
114 # 3) Count the number of SUCCESSes from the test output.
116 # 4) Print the results. If there were no FAILED strings *and* the
117 # number of SUCCESS strings is what we expect, simply
118 # note that the test passed. Otherwise, cat the output
119 # generated by eval_oneprogram.sh and (possibly)
120 # print other details.
125 while [ -n "$1" ] ; do
127 # Parse argument stream...
# Accumulate words into PROGRAM until the "===" sentinel (appended to
# each test-list line when TESTLIST was built) ends this entry.
# NOTE(review): the lines that load "$@" from $TESTLIST and peel off
# EXPECTEDSUCCESSES are not visible in this view -- presumably a
# "set -- $TESTLIST" plus a leading-field shift; confirm.
133 while [ "$1" != "===" ] ; do { PROGRAM="$PROGRAM $1" ; shift ; } done
# A leading '#' on the program name marks the test as skipped; capture
# the name with the '#' stripped (testname stays empty for live tests).
136 testname="`echo $PROGRAM | grep '^#' | sed 's/^#//'`"
138 echo '+==================================-o-===+'
144 # Decide whether to skip the test, if it's missing, else run it.
146 [ -n "$testname" ] && { # Skip the test?
147 echo "SKIPPING test for \"$testname\"."
# A missing executable is announced and counted as one test failure.
151 [ ! -e "`echo $PROGRAM | awk '{ print $1 }'`" ] && { # Missing test?
152 TESTFAILURE=`expr $TESTFAILURE + 1`
154 echo "MISSING test for \"$PROGRAM\"."
159 echo "TESTING \"$PROGRAM\"..." # Announce test!
164 # Run the test and collect the failed_count and success_count.
# Per the header, eval_oneprogram.sh's exit status is the number of
# FAILED strings.  NOTE(review): the failed_count assignment
# (presumably from $?) is on a line outside this view -- confirm.
166 eval_oneprogram.sh $ARGUMENTS $PROGRAM >$TMPFILE
# The harness prints a summary line ending in "SUCCESS: <count>"; grab
# that count, defaulting to 0 when no such line exists.
169 success_count=`awk '$(NF-1) == "SUCCESS:" { print $NF; exit }' $TMPFILE`
170 [ -z "$success_count" ] && success_count=0
175 # Output best-effort results of the test -OR- a fully successful run.
# Pass only when nothing FAILED *and* the SUCCESS count matches the
# expectation recorded in the test list.
177 [ "$failed_count" -eq 0 -a \
178 "$success_count" -eq "$EXPECTEDSUCCESSES" ] &&
181 echo $PROGRAM PASSED # Successful, fully, completed
186 TESTFAILURE=`expr $TESTFAILURE + 1`
# On failure, additionally report a SUCCESS-count mismatch explicitly.
192 [ "$success_count" -ne $EXPECTEDSUCCESSES ] && {
193 echo "Got $success_count SUCCESSes"\
194 "out of $EXPECTEDSUCCESSES."
198 } # end -- evaluation of and output based upon test success.
204 #------------------------------------ -o-