Cleaned up script generation and got rid of the atrocious write.sh

devs/tasn/jenkins
Tom Hacohen 10 years ago
parent 7b1df1a40f
commit 802baf489f
  1. 12
      src/scripts/Makefile.am
  2. 347
      src/scripts/exactness.in

@ -1,8 +1,10 @@
MAINTAINERCLEANFILES = Makefile.in
uninstall-local:
rm $(bindir)/exactness
exactness: exactness.in
$(AM_V_GEN) $(SED) -e "s|\@libdir\@|$(libdir)|" $(srcdir)/exactness.in > $(builddir)/exactness
install-exec-local:
$(srcdir)/write.sh $(libdir)/exactness/
$(INSTALL) exactness $(bindir)
bin_SCRIPTS = exactness
CLEAN_FILES = exactness
EXTRA_DIST = exactness.in

@ -1,10 +1,4 @@
#!/bin/sh
# This script creates the exactness script that is later copied to $(bindir)
# by Makefile.
# The reason to do it this way is that we would like to get $OUR_LIBPATH as
# a parameter from Makefile.
cat <<-ENDOFMESSAGE>exactness
#!/usr/bin/env bash
#!/bin/bash
# tsuite_script.sh -i this makes new 'orig' folder
# tsuite_script.sh -i -b [BaseDir] TestName1 [TestName2 ...] rewrite files for selected tests in 'orig' folder
# tsuite_script.sh -r -b [BaseDir] [TestName1 TestName2 ...] ; this means record [all tests or TestName additional arg]
@ -15,11 +9,11 @@ cat <<-ENDOFMESSAGE>exactness
#_DEBUG="on"
function DEBUG()
{
[ "\$_DEBUG" == "on" ] && \$@
[ "$_DEBUG" == "on" ] && $@
}
do_help () {
echo "Use \$0 to test application screen-layout."
echo "Use $0 to test application screen-layout."
echo "First, you need to compose a tests file as follows:"
echo "Each line begins with test name"
echo "second field is test-command and [optional] params."
@ -28,25 +22,25 @@ echo
echo "# This is a comment line"
echo "TestName TestCmd [param1] [param2]"
echo
echo "Later, you run \$0 with the tests file as parameter."
echo "Later, you run $0 with the tests file as parameter."
echo
echo "By default, exactness runs through test file running all tests specified."
echo "You may run selected tests by adding test name as param to exactness."
echo "Usage:"
echo "\$0 -s TestsFile TestName1 [TestName2] [...]"
echo "$0 -s TestsFile TestName1 [TestName2] [...]"
echo "Use this option to run selected tests without modifying your test file."
echo "TestName param has to match test name given in tests file (1st field)"
echo
echo
echo "Two additional parameters that \$0 accepts:"
echo "Two additional parameters that $0 accepts:"
echo "BaseDir - This is where '.rec' files reside (gets pwd by default)"
echo "DestDir - Where \$0 creates test screen shots."
echo "DestDir - Where $0 creates test screen shots."
echo " Gets 'current' under 'pwd' by default ('orig' on init)"
echo
echo
echo "Use the following options:"
echo "To record tests:"
echo "\$0 -r [-b BaseDir] TestsFile"
echo "$0 -r [-b BaseDir] TestsFile"
echo "Use BaseDir arg to create record files in specific folder."
echo "Otherwise pwd is used."
echo
@ -56,19 +50,19 @@ echo "'.rec' file is produced for each test in your TestsFile."
echo "File name is defined as 'TestName.rec' for each test."
echo
echo "You may test your record files with simulate option:"
echo "\$0 -s [-b BaseDir] TestsFile"
echo "$0 -s [-b BaseDir] TestsFile"
echo
echo "You need to run \$0 with init option prior"
echo "You need to run $0 with init option prior"
echo "to using play option."
echo "Later, when doing play, PNG files are compared with"
echo "PNG files residing in 'orig' folder created during init."
echo
echo "To use init option:"
echo "\$0 -i [-b BaseDir] TestsFile"
echo "$0 -i [-b BaseDir] TestsFile"
echo "Do not use DestDir param with init, target always 'orig'."
echo
echo "Use Play tests option to produce PNG files of screen shot:"
echo "\$0 -p [-b BaseDir] [-d DestDir] TestsFile"
echo "$0 -p [-b BaseDir] [-d DestDir] TestsFile"
echo "Play option produces PNG files in DestDir."
echo "These are compared with PNGs in 'orig'."
echo "(created in 'init' phase)"
@ -77,7 +71,7 @@ echo "Use -v option for detailed flow-report."
echo "Thus, when running many tests, the output format makes"
echo "it easy to match output to a running test."
echo "Example:"
echo "\$0 -v -p [-b BaseDir] [-d DestDir] TestsFile"
echo "$0 -v -p [-b BaseDir] [-d DestDir] TestsFile"
}
get_test_params () {
@ -85,40 +79,40 @@ get_test_params () {
# reset globals
_test_name=
_test_cmd=
local line="\$1"
local c=\${line:0:1}
if [ "\$c" = "#" ]
local line="$1"
local c=${line:0:1}
if [ "$c" = "#" ]
then
# This line is a comment
return 1
fi
local p=\`expr index "\$line" \\ \`
if [ \$p -ne 0 ]
local p=\`expr index "$line" \\ \`
if [ $p -ne 0 ]
then
(( p-- ))
fi
_test_name=\${line:0:p}
_test_name=${line:0:p}
(( p++ ))
_test_cmd=\${line:p}
_test_cmd=${line:p}
# Test that input is valid
if [ -z "\$_test_name" ]
if [ -z "$_test_name" ]
then
_test_name=
_test_cmd=
return 1
fi
if [ -z "\$_test_cmd" ]
if [ -z "$_test_cmd" ]
then
_test_name=
_test_cmd=
return 1
fi
DEBUG echo test name=\""\$_test_name"\"
DEBUG echo test cmd=\""\$_test_cmd"\"
DEBUG echo test name=\""$_test_name"\"
DEBUG echo test cmd=\""$_test_cmd"\"
return 0
}
@ -127,152 +121,152 @@ DEBUG printf "do_record()\n"
# This will run record for all test if no specific test specified
# or run recording of specified tests (names passed as parameter).
# run ALL tests to record
DEBUG echo do_record "\$*"
get_test_params "\$1"
if [ \$? -ne 0 ]
DEBUG echo do_record "$*"
get_test_params "$1"
if [ $? -ne 0 ]
then
return 0
fi
if [ "\$_verbose" -ne 0 ]
if [ "$_verbose" -ne 0 ]
then
echo "do_record: \$_test_name"
echo "do_record: $_test_name"
fi
TSUITE_RECORDING='rec' TSUITE_BASE_DIR=\${_base_dir} TSUITE_DEST_DIR=\${_dest_dir} TSUITE_FILE_NAME=\${_base_dir}/\${_test_name}.rec TSUITE_TEST_NAME=\${_test_name} LD_PRELOAD=\${OUR_LIBPATH}/libexactness.so eval \${_test_cmd}
TSUITE_RECORDING='rec' TSUITE_BASE_DIR=${_base_dir} TSUITE_DEST_DIR=${_dest_dir} TSUITE_FILE_NAME=${_base_dir}/${_test_name}.rec TSUITE_TEST_NAME=${_test_name} LD_PRELOAD=${OUR_LIBPATH}/libexactness.so eval ${_test_cmd}
}
do_simulation () {
# This will play simulation
# this will NOT produce screenshots
DEBUG echo do_simulation "\$*"
get_test_params "\$1"
if [ \$? -ne 0 ]
DEBUG echo do_simulation "$*"
get_test_params "$1"
if [ $? -ne 0 ]
then
return 0
fi
local file_name=\${_base_dir}/\${_test_name}.rec
local file_name=${_base_dir}/${_test_name}.rec
if [ ! -e "\$file_name" ]
if [ ! -e "$file_name" ]
then
echo Rec file "\$file_name" not found.
echo Rec file "$file_name" not found.
return 1
fi
if [ "\$_verbose" -ne 0 ]
if [ "$_verbose" -ne 0 ]
then
echo "do_simulation: \$_test_name"
echo "do_simulation: $_test_name"
fi
TSUITE_BASE_DIR=\${_base_dir} TSUITE_DEST_DIR=\${_dest_dir} TSUITE_FILE_NAME=\${file_name} TSUITE_TEST_NAME=\${_test_name} LD_PRELOAD=\${OUR_LIBPATH}/libexactness.so eval \${_test_cmd}
TSUITE_BASE_DIR=${_base_dir} TSUITE_DEST_DIR=${_dest_dir} TSUITE_FILE_NAME=${file_name} TSUITE_TEST_NAME=${_test_name} LD_PRELOAD=${OUR_LIBPATH}/libexactness.so eval ${_test_cmd}
}
do_play () {
# This will play record for all test if '-a' specified.
# or play record of tests specified as parameter.
# run ALL tests to record
DEBUG echo base dir: "\$_base_dir"
DEBUG echo dest dir: "\$_dest_dir"
DEBUG echo do_play "\$_dest_dir" "\$*"
DEBUG echo base dir: "$_base_dir"
DEBUG echo dest dir: "$_dest_dir"
DEBUG echo do_play "$_dest_dir" "$*"
# Play recorded tests and produce PNG files.
# this will produce screenshots in "_dest_dir" folder
get_test_params "\$1"
if [ \$? -ne 0 ]
get_test_params "$1"
if [ $? -ne 0 ]
then
return 0
fi
local file_name=\${_base_dir}/\${_test_name}.rec
local file_name=${_base_dir}/${_test_name}.rec
if [ ! -e "\$file_name" ]
if [ ! -e "$file_name" ]
then
echo Rec file "\$file_name" not found.
echo Rec file "$file_name" not found.
return 1
fi
if [ -e "\$_dest_dir" ]
if [ -e "$_dest_dir" ]
then
# Remove PNG files according to tests played
rm "\$_dest_dir"/\${_test_name}_[0-9]*.png &> /dev/null
rm "$_dest_dir"/${_test_name}_[0-9]*.png &> /dev/null
else
# Create dest dir
mkdir -p "\$_dest_dir" &> /dev/null
mkdir -p "$_dest_dir" &> /dev/null
fi
if [ "\$_verbose" -ne 0 ]
if [ "$_verbose" -ne 0 ]
then
echo "do_play: \$_test_name"
echo "do_play: $_test_name"
fi
ELM_ENGINE="buffer" TSUITE_BASE_DIR=\${_base_dir} TSUITE_DEST_DIR=\${_dest_dir} TSUITE_FILE_NAME=\${file_name} TSUITE_TEST_NAME=\${_test_name} LD_PRELOAD=\${OUR_LIBPATH}/libexactness.so eval \${_test_cmd}
ELM_ENGINE="buffer" TSUITE_BASE_DIR=${_base_dir} TSUITE_DEST_DIR=${_dest_dir} TSUITE_FILE_NAME=${file_name} TSUITE_TEST_NAME=${_test_name} LD_PRELOAD=${OUR_LIBPATH}/libexactness.so eval ${_test_cmd}
}
compare_files () {
if [ "\$_verbose" -ne 0 ]
if [ "$_verbose" -ne 0 ]
then
echo "compare_files: <\$1> and <\$2>"
echo "compare_files: <$1> and <$2>"
fi
if [ -e "\$1" ]
if [ -e "$1" ]
# First file exists
then
local md_file1=\`md5sum \$1\`
if [ -e "\$2" ]
local md_file1=\`md5sum $1\`
if [ -e "$2" ]
then
# Second file exists
local md_file2=\`md5sum \$2\`
local md_file2=\`md5sum $2\`
# Get md5 of both files
local md1=\`echo "\$md_file1" | cut -d ' ' -f1\`
local md2=\`echo "\$md_file2" | cut -d ' ' -f1\`
local md1=\`echo "$md_file1" | cut -d ' ' -f1\`
local md2=\`echo "$md_file2" | cut -d ' ' -f1\`
# Increase counter of comparisons
(( ncomp++ ))
# Compare md5 of both files
if [ "x\$md1" != "x\$md2" ]
if [ "x$md1" != "x$md2" ]
then
if [ \$comp_unavail -eq 0 ]
if [ $comp_unavail -eq 0 ]
then
# Create diff-file with 'comp_' prefix.
local name=\`basename "\$1"\`
compare "\$1" "\$2" "\$_dest_dir"/comp_"\$name"
local name=\`basename "$1"\`
compare "$1" "$2" "$_dest_dir"/comp_"$name"
else
echo "\$name does not match."
echo "$name does not match."
fi
# Increment counter of files not identical.
(( nfail++ ))
fi
else
# Failed to find second file
echo "Test file was not found \$2"
echo "Test file was not found $2"
(( nerr++ ))
fi
else
# Failed to find first file
echo "Test file was not found \$1"
echo "Test file was not found $1"
(( nerr++ ))
fi
}
process_compare () {
# Process all files listed in array (param)
local files_list=( "\$@" )
for line in "\${files_list[@]}"
local files_list=( "$@" )
for line in "${files_list[@]}"
do
local name=\`basename "\$line"\`
DEBUG echo "comparing \$name"
compare_files "\$_orig_dir"/"\$name" "\$_dest_dir"/"\$name"
local name=\`basename "$line"\`
DEBUG echo "comparing $name"
compare_files "$_orig_dir"/"$name" "$_dest_dir"/"$name"
done
}
do_compare () {
DEBUG printf "do_compare()\n"
DEBUG echo orig dir: "\$_orig_dir"
DEBUG echo dest dir: "\$_dest_dir"
DEBUG echo orig dir: "$_orig_dir"
DEBUG echo dest dir: "$_dest_dir"
# This will compare files in 'orig' folder with files in _dest_dir
if [ \$comp_unavail -ne 0 ]
if [ $comp_unavail -ne 0 ]
then
if [ \$# -eq 1 ]
if [ $# -eq 1 ]
then
echo "Compare software missing."
echo "Install \"ImageMagick\" if you would like to produce \"comp\" files."
@ -280,38 +274,38 @@ then
fi
fi
if [ -z "\$_dest_dir" ]
if [ -z "$_dest_dir" ]
then
printf "For comparing, Usage: %s -c -a -d DirName\nor\n%s -c -d DirName TestName1, TestName2,...\n" \$(basename \$0) \$(basename \$0) >&2
printf "For comparing, Usage: %s -c -a -d DirName\nor\n%s -c -d DirName TestName1, TestName2,...\n" $(basename $0) $(basename $0) >&2
fi
if [ "\$_dest_dir" = "\$_orig_dir" ]
if [ "$_dest_dir" = "$_orig_dir" ]
then
printf "Dest-Dir is \$_dest_dir, exiting.\n"
printf "Dest-Dir is $_dest_dir, exiting.\n"
return 0
fi
local files_list=
for test_name in \$*
for test_name in $*
do
rm "\$_dest_dir"/comp_"\$test_name"_[0-9]*.png &> /dev/null
files_list=( \`ls "\$_dest_dir"/"\$test_name"_[0-9]*.png\` )
process_compare "\${files_list[@]}"
rm "$_dest_dir"/comp_"$test_name"_[0-9]*.png &> /dev/null
files_list=( \`ls "$_dest_dir"/"$test_name"_[0-9]*.png\` )
process_compare "${files_list[@]}"
done
if [ "\$ncomp" -ne 0 ]
if [ "$ncomp" -ne 0 ]
then
echo "Processed \$ncomp comparisons"
echo "Processed $ncomp comparisons"
fi
if [ "\$nfail" -ne 0 ]
if [ "$nfail" -ne 0 ]
then
echo "with \$nfail diff errors"
echo "with $nfail diff errors"
fi
if [ "\$nerr" -ne 0 ]
if [ "$nerr" -ne 0 ]
then
echo "\$nerr PNG-files were not found"
echo "$nerr PNG-files were not found"
fi
return 0
@ -320,21 +314,21 @@ return 0
name_in_args () {
# This function gets curline as first arg
# Then list of args to find if test name is first field of curline
get_test_params "\$1"
if [ \$? -ne 0 ]
get_test_params "$1"
if [ $? -ne 0 ]
then
return 0
fi
if [ -z "\$_test_name" ]
if [ -z "$_test_name" ]
then
return 0
fi
shift
while (( "\$#" ));
while (( "$#" ));
do
if [ "\$_test_name" = "\$1" ]
if [ "$_test_name" = "$1" ]
# found test name in list of args
then
return 1
@ -348,7 +342,7 @@ return 0
}
# Script Entry Point
OUR_LIBPATH="$1"
OUR_LIBPATH="@libdir@/exactness"
_verbose=0
_record=
@ -372,16 +366,16 @@ _n_exe_err=0
# Test that compare is installed
which compare &> /dev/null
comp_unavail=\$?
comp_unavail=$?
while getopts 'ab:cd:hprisv?' OPTION
do
case \$OPTION in
b) _base_dir="\$OPTARG"
case $OPTION in
b) _base_dir="$OPTARG"
;;
c) _compare=1
;;
d) _dest_dir="\$OPTARG"
d) _dest_dir="$OPTARG"
;;
p) _play=1
_compare=1
@ -390,12 +384,12 @@ do
r) _record=1
_remove_fail=1
;;
i) _dest_dir="\$_orig_dir"
i) _dest_dir="$_orig_dir"
_init=1
_play=1
_remove_fail=1
;;
s) _dest_dir="\$_orig_dir"
s) _dest_dir="$_orig_dir"
_simulation=1
;;
h) do_help
@ -408,13 +402,13 @@ do
;;
esac
done
shift \$((\$OPTIND - 1))
shift $(($OPTIND - 1))
_test_file_name="\$1"
_test_file_name="$1"
shift
# Test if user added test-names as arguments
if [ ! -z "\$*" ]
if [ ! -z "$*" ]
then
_test_all=0
fi
@ -422,12 +416,12 @@ fi
# when using -o option, we can loop now on tests names
# given as arguments to this script
DEBUG echo _test_file_name="\$_test_file_name"
DEBUG echo _base_dir="\$_base_dir"
DEBUG echo _dest_dir="\$_dest_dir"
DEBUG echo _test_file_name="$_test_file_name"
DEBUG echo _base_dir="$_base_dir"
DEBUG echo _dest_dir="$_dest_dir"
if [ "\$_simulation" ]
if [ "$_simulation" ]
then
# When in simulation mode, we will just commit play (ignore other options)
_init=
@ -437,57 +431,57 @@ then
_play=
while read curline;
do
name_in_args "\$curline" \$*
_run_test=\$(( \$? + \$_test_all ))
if [ \$_run_test -ne 0 ]
name_in_args "$curline" $*
_run_test=$(( $? + $_test_all ))
if [ $_run_test -ne 0 ]
then
do_simulation "\$curline"
if [ \$? -ne 0 ]
do_simulation "$curline"
if [ $? -ne 0 ]
then
(( _n_exe_err++ ))
fi
fi
done < "\$_test_file_name"
done < "$_test_file_name"
# This will cause render simulation
fi
if [ ! -e "\$_base_dir" ]
if [ ! -e "$_base_dir" ]
then
echo "Base dir <\$_base_dir> - not found."
echo "Base dir <$_base_dir> - not found."
exit 1
fi
# printf "Remaining arguments are: %s\n" "\$*"
if [ -z "\$_dest_dir" ]
# printf "Remaining arguments are: %s\n" "$*"
if [ -z "$_dest_dir" ]
then
if [ ! -z "\$_play" ]
if [ ! -z "$_play" ]
then
_dest_dir="current"
fi
if [ ! -z "\$_compare" ]
if [ ! -z "$_compare" ]
then
_dest_dir="current"
fi
else
if [ ! -z "\$_init" ]
if [ ! -z "$_init" ]
then
if [ "\$_dest_dir" != "\$_orig_dir" ]
if [ "$_dest_dir" != "$_orig_dir" ]
then
echo "Cannot use '-i' option with a DestDir that is not 'orig'"
echo "No need to specify DestDir when using '-i'"
echo "For help: \$0 -h"
echo "For help: $0 -h"
exit 2
fi
fi
fi
if [ ! -z "\$_compare" ]
if [ ! -z "$_compare" ]
then
if [ "\$_dest_dir" = "\$_orig_dir" ]
if [ "$_dest_dir" = "$_orig_dir" ]
then
echo "Cannot use 'orig' dir with compare '-c' option"
echo "Please select different DestDir"
echo "For help: \$0 -h"
echo "For help: $0 -h"
exit 3
fi
fi
@ -495,113 +489,110 @@ fi
# Here we clear all fail_*.txt files before running test
# because those files are opened with append ("a") mode.
# We don't want to accumulate old tests fail text.
if [ "\$_remove_fail" ]
if [ "$_remove_fail" ]
then
if [ -z "\$*" ]
if [ -z "$*" ]
then
# Remove all file_*.txt files before init/play/record
DEBUG echo "removing fail_*.txt"
rm fail_*.txt &> /dev/null
else
# Remove file_*.txt files before init/play/record
for i in \$*
for i in $*
do
DEBUG echo "removing fail_\$i.txt"
rm fail_"\$i".txt &> /dev/null
DEBUG echo "removing fail_$i.txt"
rm fail_"$i".txt &> /dev/null
done
fi
fi
if [ "\$_record" ]
if [ "$_record" ]
then
while read curline;
do
name_in_args "\$curline" \$*
_run_test=\$(( \$? + \$_test_all ))
if [ \$_run_test -ne 0 ]
name_in_args "$curline" $*
_run_test=$(( $? + $_test_all ))
if [ $_run_test -ne 0 ]
then
do_record "\$curline"
if [ \$? -ne 0 ]
do_record "$curline"
if [ $? -ne 0 ]
then
(( _n_exe_err++ ))
fi
fi
done < "\$_test_file_name"
done < "$_test_file_name"
fi
if [ "\$_play" ]
if [ "$_play" ]
then
while read curline;
do
name_in_args "\$curline" \$*
_run_test=\$(( \$? + \$_test_all ))
if [ \$_run_test -ne 0 ]
name_in_args "$curline" $*
_run_test=$(( $? + $_test_all ))
if [ $_run_test -ne 0 ]
then
do_play "\$curline"
if [ \$? -ne 0 ]
do_play "$curline"
if [ $? -ne 0 ]
then
(( _n_exe_err++ ))
fi
fi
done < "\$_test_file_name"
done < "$_test_file_name"
fi
if [ "\$_compare" ]
if [ "$_compare" ]
then
while read curline;
do
name_in_args "\$curline" \$*
_run_test=\$(( \$? + \$_test_all ))
if [ \$_run_test -ne 0 ]
name_in_args "$curline" $*
_run_test=$(( $? + $_test_all ))
if [ $_run_test -ne 0 ]
then
get_test_params "\$curline"
if [ \$? -eq 0 ]
get_test_params "$curline"
if [ $? -eq 0 ]
then
do_compare "\$_test_name"
do_compare "$_test_name"
fi
fi
done < "\$_test_file_name"
done < "$_test_file_name"
fi
_n_tests_failed=0
if [ "\$_remove_fail" ]
if [ "$_remove_fail" ]
then
ls fail_*.txt &> /dev/null
if [ "\$?" -eq 0 ]
if [ "$?" -eq 0 ]
then
_n_tests_failed=\`ls fail_*.txt -1 | wc -l\`
if [ "\$_n_tests_failed" ]
if [ "$_n_tests_failed" ]
then
echo "\$_n_tests_failed tests had errors while running."
echo "$_n_tests_failed tests had errors while running."
echo "Please examine fail_*.txt files in CWD"
fi
fi
fi
# Add up total-error and emit user message.
total_errors=\$(( \$nfail + \$nerr + \$_n_tests_failed + \$_n_exe_err ))
echo "\$_n_exe_err tests returned exit-code <> 0"
echo "\$0 finished with \$total_errors errors."
total_errors=$(( $nfail + $nerr + $_n_tests_failed + $_n_exe_err ))
echo "$_n_exe_err tests returned exit-code <> 0"
echo "$0 finished with $total_errors errors."
status=0
# Compute exit code
if [ "\$nfail" -ne 0 ]
if [ "$nfail" -ne 0 ]
then
status=\$(( \$status | 1 ))
status=$(( $status | 1 ))
fi
if [ "\$nerr" -ne 0 ]
if [ "$nerr" -ne 0 ]
then
status=\$(( \$status | 2 ))
status=$(( $status | 2 ))
fi
if [ "\$_n_tests_failed" -ne 0 ]
if [ "$_n_tests_failed" -ne 0 ]
then
status=\$(( \$status | 4 ))
status=$(( $status | 4 ))
fi
exit \$status
ENDOFMESSAGE
chmod +x exactness
exit $status
Loading…
Cancel
Save