diff --git a/legacy/eina/configure.ac b/legacy/eina/configure.ac
index c5bb0cdf46..1011bece00 100644
--- a/legacy/eina/configure.ac
+++ b/legacy/eina/configure.ac
@@ -588,6 +588,7 @@ src/modules/mp/buddy/Makefile
 src/modules/mp/one_big/Makefile
 src/tests/Makefile
 src/examples/Makefile
+src/scripts/Makefile
 ])
 
 AC_OUTPUT
diff --git a/legacy/eina/src/Makefile.am b/legacy/eina/src/Makefile.am
index 7ae5ce91ef..7cee1c5767 100644
--- a/legacy/eina/src/Makefile.am
+++ b/legacy/eina/src/Makefile.am
@@ -1,4 +1,4 @@
-SUBDIRS = lib include modules tests examples
+SUBDIRS = lib include modules tests examples scripts
 
 MAINTAINERCLEANFILES = Makefile.in
 
diff --git a/legacy/eina/src/scripts/Makefile.am b/legacy/eina/src/scripts/Makefile.am
new file mode 100644
index 0000000000..d9adbfa48b
--- /dev/null
+++ b/legacy/eina/src/scripts/Makefile.am
@@ -0,0 +1,3 @@
+bin_SCRIPTS = eina-bench-cmp
+
+EXTRA_DIST = $(bin_SCRIPTS)
diff --git a/legacy/eina/src/scripts/eina-bench-cmp b/legacy/eina/src/scripts/eina-bench-cmp
new file mode 100755
index 0000000000..6ff2b508ac
--- /dev/null
+++ b/legacy/eina/src/scripts/eina-bench-cmp
@@ -0,0 +1,250 @@
+#!/usr/bin/env python
+
+import sys
+import os
+import os.path
+import csv
+from optparse import OptionParser
+
+fmttext = '%(value)7.2f (%(percentual)+6.1f%%)'
+fmthtml = '%(value)7.2f (%(percentual)+0.1f%%)'
+
+
+parser = OptionParser(usage="%prog [options] <file1> .. <fileN>",
+                      description="""\
+Generate reports comparing two or more outputs of expedite.
+
+Just run expedite and save output to a file and then feed them to this
+program. The first file is used as base for comparison and other files
+will print relative improvements.
+""")
+parser.add_option("-e", "--accepted-error",
+                  help=("maximum error to accept as percentage 0.0-1.0. "
+                        "[default=%default]"),
+                  action="store", type="float", default=0.05)
+parser.add_option("-r", "--report",
+                  help=("kind of report to use. One of text or html. "
+                        "[default=%default]"),
+                  action="store", type="choice", default="text",
+                  choices=["text", "html"])
+parser.add_option("-F", "--format",
+                  help=("format to use as python format string, "
+                        "valid keys are: value and percentual. "
+                        "[defaults: html=\"%s\", text=\"%s\"]" %
+                        (fmthtml, fmttext)),
+                  action="store", type="str", default=None)
+parser.add_option("-C", "--no-color", dest="color",
+                  help="do not use color in reports.",
+                  action="store_false", default=True)
+
+options, files = parser.parse_args()
+if len(files) < 2:
+    raise SystemExit("need at least 2 files to compare")
+
+if options.format is None:
+    if options.report == "html":
+        options.format = fmthtml
+    else:
+        options.format = fmttext
+
+ref_f = files[0]
+others_f = files[1:]
+
+max_test_name = 0
+data = {}
+tests = []
+for f in files:
+    d = data[f] = {}
+    for row in csv.reader(open(f), delimiter='\t'):
+        if row[0].startswith("#"):
+            continue
+        t = row[0].strip()
+        if f == ref_f:
+            tests.append(t)
+        d[t] = float(row[1])
+        max_test_name = max(len(t), max_test_name)
+
+def report_text():
+    test_name_fmt = "%%%ds:" % max_test_name
+
+    fmtsize = len(options.format % {"value": 12345.67, "percentual": 1234.56})
+    hdrfmt = "%%%d.%ds" % (fmtsize, fmtsize)
+
+    print test_name_fmt % "\\",
+    print "%7.7s" % (files[0][-7:],),
+    for f in files[1:]:
+        n, e = os.path.splitext(f)
+        print hdrfmt % n[-fmtsize:],
+    print
+
+    if options.color and os.environ.get("TERM", "") in (
+            "xterm", "xterm-color", "rxvt", "rxvt-unicode", "screen",
+            "Eterm", "aterm", "gnome", "interix"):
+        color_good = "\033[1;32m"
+        color_bad = "\033[1;31m"
+        color_equal = "\033[1;30m"
+        color_reset = "\033[0m"
+    else:
+        color_good = ""
+        color_bad = ""
+        color_equal = ""
+        color_reset = ""
+
+
+    def print_row(test):
+        print test_name_fmt % test,
+        ref_val = data[ref_f][test]
+        print "%7.2f" % ref_val,
+        for f in others_f:
+            try:
+                val = data[f][test]
+            except KeyError:
+                print "-?????-",
+                continue
+
+            percent = (val - ref_val) / ref_val
+            if percent < -options.accepted_error:
+                c = color_good
+            elif percent > options.accepted_error:
+                c = color_bad
+            else:
+                c = color_equal
+
+            fmt = options.format % {"value": val, "percentual": percent * 100}
+            if len(fmt) < fmtsize:
+                fmt = hdrfmt % fmt
+            print "%s%s%s" % (c, fmt, color_reset),
+
+        print
+
+    for t in tests:
+        print_row(t)
+
+
+def report_html():
+    import time
+
+    fnames = [os.path.basename(f) for f in files]
+    print """\
+<html>
+<head>
+<title>expedite comparison sheet: %(files)s</title>
+</head>
+<body>
+<p>
+Comparison sheet for %(files)s, created at %(date)s.
+</p>
+<table>
+<tr>
+ <th>\\</th>\
+""" % {"files": ", ".join(fnames),
+       "date": time.asctime(),
+       }
+
+    for f in fnames:
+        print """\
+ <th>%s</th>\
+""" % f
+    print """\
+</tr>\
+"""
+
+    def print_row(test):
+        ref_val = data[ref_f][test]
+        if "EVAS SPEED" in test.upper():
+            extra_cls = ' class="overall-results"'
+        else:
+            extra_cls = ""
+
+        print """\
+<tr%s>
+ <td>%s</td>
+ <td>%7.2f</td>\
+""" % (extra_cls, test, ref_val)
+
+        for f in others_f:
+            try:
+                val = data[f][test]
+            except KeyError:
+                print """\
+ <td>-?????-</td>\
+"""
+                continue
+
+            percent = (val - ref_val) / ref_val
+            if percent < -options.accepted_error:
+                c = 'good'
+            elif percent > options.accepted_error:
+                c = 'bad'
+            else:
+                c = 'equal'
+
+            v = options.format % {"value": val, "percentual": percent * 100}
+
+            print """\
+ <td class="%s">%s</td>\
+""" % (c, v)
+
+        print """\
+</tr>\
+"""
+
+    for t in tests:
+        print_row(t)
+
+    print """\
+</table>
+</body>
+</html>
+"""
+
+if options.report == "text":
+    report_text()
+elif options.report == "html":
+    report_html()
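
Usage sketch (illustration only, not part of the patch): eina-bench-cmp reads tab-separated benchmark logs -- test name in the first column, its value in the second, lines starting with "#" skipped -- and needs at least two of them, the first one being the baseline. The snippet below fabricates two such logs and runs the installed script on them; the file names, test names and numbers are invented, and it assumes eina-bench-cmp is already installed on the PATH.

    #!/usr/bin/env python
    # Illustration only: write two fake benchmark logs in the tab-separated
    # format eina-bench-cmp parses (test name, value, "#" lines ignored),
    # then compare them. File names and values here are made up.
    import subprocess

    runs = [
        ("old.log", [("string_hash", 112.40), ("rbtree_insert", 98.10)]),
        ("new.log", [("string_hash", 104.75), ("rbtree_insert", 99.02)]),
    ]
    for path, rows in runs:
        f = open(path, "w")
        f.write("# test\tvalue\n")
        for name, value in rows:
            f.write("%s\t%f\n" % (name, value))
        f.close()

    # The first file is the baseline; each later file is reported as a
    # relative change per test, flagged good/bad/equal per --accepted-error.
    subprocess.call(["eina-bench-cmp", "-r", "text", "old.log", "new.log"])

Passing -r html instead emits the same comparison as an HTML table whose comparison cells are classed good, bad or equal according to --accepted-error.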