Commit e2c3ff8f authored by Kevin Modzelewski's avatar Kevin Modzelewski

Add -q to the tester

To silence results for passing tests.  We have a lot of
tests and most of them are passing and are crowding out the
failing ones.

Also, turn it on when testing on travis-ci
parent 13a3e9e5
...@@ -324,7 +324,7 @@ add_test(NAME check-format COMMAND ${CMAKE_SOURCE_DIR}/tools/check_format.sh ${L ...@@ -324,7 +324,7 @@ add_test(NAME check-format COMMAND ${CMAKE_SOURCE_DIR}/tools/check_format.sh ${L
add_test(NAME analysis_unittest COMMAND analysis_unittest) add_test(NAME analysis_unittest COMMAND analysis_unittest)
macro(add_pyston_test testname directory) macro(add_pyston_test testname directory)
add_test(NAME pyston_${testname}_${directory} COMMAND ${PYTHON_EXE} ${CMAKE_SOURCE_DIR}/tools/tester.py -R ./pyston -j${TEST_THREADS} -k -a=-S ${ARGV2} ${ARGV3} ${ARGV4} ${CMAKE_SOURCE_DIR}/test/${directory}) add_test(NAME pyston_${testname}_${directory} COMMAND ${PYTHON_EXE} ${CMAKE_SOURCE_DIR}/tools/tester.py -R ./pyston -j${TEST_THREADS} -q -k -a=-S ${ARGV2} ${ARGV3} ${ARGV4} ${CMAKE_SOURCE_DIR}/test/${directory})
endmacro() endmacro()
# tests testname directory arguments # tests testname directory arguments
......
...@@ -42,6 +42,14 @@ EXIT_CODE_ONLY = False ...@@ -42,6 +42,14 @@ EXIT_CODE_ONLY = False
SKIP_FAILING_TESTS = False SKIP_FAILING_TESTS = False
VERBOSE = 1 VERBOSE = 1
# Whether to print a line for skipped tests.
DISPLAY_SKIPS = False
# Whether to print a line for passing tests; cleared by the -q/--quiet flag
# so that failing tests are not crowded out of the output.
DISPLAY_SUCCESSES = True


def success_message(msg):
    """Return *msg* unchanged when successes are displayed, else "".

    Wraps per-test result messages for passing tests so quiet mode
    (DISPLAY_SUCCESSES == False) suppresses them.
    """
    return msg if DISPLAY_SUCCESSES else ""
PYTHONIOENCODING = 'utf-8' PYTHONIOENCODING = 'utf-8'
# For fun, can test pypy. # For fun, can test pypy.
...@@ -145,7 +153,7 @@ def run_test(fn, check_stats, run_memcheck): ...@@ -145,7 +153,7 @@ def run_test(fn, check_stats, run_memcheck):
del check_stats, run_memcheck del check_stats, run_memcheck
if opts.skip: if opts.skip:
return "(skipped: %s)" % opts.skip return ("(skipped: %s)" % opts.skip) if DISPLAY_SKIPS else ""
env = dict(os.environ) env = dict(os.environ)
env["PYTHONPATH"] = EXTMODULE_DIR_PYSTON env["PYTHONPATH"] = EXTMODULE_DIR_PYSTON
...@@ -266,7 +274,7 @@ def determine_test_result(fn, opts, code, out, stderr, elapsed): ...@@ -266,7 +274,7 @@ def determine_test_result(fn, opts, code, out, stderr, elapsed):
color = 31 color = 31
msg = "no stats available" msg = "no stats available"
if opts.expected == "fail": if opts.expected == "fail":
return "Expected failure (no stats found)" return success_message("Expected failure (no stats found)")
elif KEEP_GOING: elif KEEP_GOING:
failed.append(fn) failed.append(fn)
if VERBOSE >= 1: if VERBOSE >= 1:
...@@ -316,7 +324,7 @@ def determine_test_result(fn, opts, code, out, stderr, elapsed): ...@@ -316,7 +324,7 @@ def determine_test_result(fn, opts, code, out, stderr, elapsed):
msg = "Exited with code %d (expected code %d)" % (code, expected_code) msg = "Exited with code %d (expected code %d)" % (code, expected_code)
if opts.expected == "fail": if opts.expected == "fail":
return "Expected failure (got code %d, should be %d)" % (code, expected_code) return success_message("Expected failure (got code %d, should be %d)" % (code, expected_code))
elif KEEP_GOING: elif KEEP_GOING:
failed.append(fn) failed.append(fn)
if VERBOSE >= 1: if VERBOSE >= 1:
...@@ -342,7 +350,7 @@ def determine_test_result(fn, opts, code, out, stderr, elapsed): ...@@ -342,7 +350,7 @@ def determine_test_result(fn, opts, code, out, stderr, elapsed):
elif out != expected_out: elif out != expected_out:
if opts.expected == "fail": if opts.expected == "fail":
return "Expected failure (bad output)" return success_message("Expected failure (bad output)")
else: else:
diff = diff_output(expected_out, out, "expected_", "received_") diff = diff_output(expected_out, out, "expected_", "received_")
if KEEP_GOING: if KEEP_GOING:
...@@ -356,7 +364,7 @@ def determine_test_result(fn, opts, code, out, stderr, elapsed): ...@@ -356,7 +364,7 @@ def determine_test_result(fn, opts, code, out, stderr, elapsed):
raise Exception("Failed on %s:\n%s" % (fn, diff)) raise Exception("Failed on %s:\n%s" % (fn, diff))
elif not TEST_PYPY and canonicalize_stderr(stderr) != canonicalize_stderr(expected_err): elif not TEST_PYPY and canonicalize_stderr(stderr) != canonicalize_stderr(expected_err):
if opts.expected == "fail": if opts.expected == "fail":
return "Expected failure (bad stderr)" return success_message("Expected failure (bad stderr)")
else: else:
diff = diff_output(expected_err, stderr, "expectederr_", "receivederr_") diff = diff_output(expected_err, stderr, "expectederr_", "receivederr_")
if KEEP_GOING: if KEEP_GOING:
...@@ -437,7 +445,7 @@ def determine_test_result(fn, opts, code, out, stderr, elapsed): ...@@ -437,7 +445,7 @@ def determine_test_result(fn, opts, code, out, stderr, elapsed):
else: else:
r += ("(Skipping memchecks)",) r += ("(Skipping memchecks)",)
return r return success_message(r)
q = Queue.Queue() q = Queue.Queue()
cv = threading.Condition() cv = threading.Condition()
...@@ -485,6 +493,8 @@ parser.add_argument('-s', '--skip-tests', type=str, default='', ...@@ -485,6 +493,8 @@ parser.add_argument('-s', '--skip-tests', type=str, default='',
help='tests to skip (comma-separated)') help='tests to skip (comma-separated)')
parser.add_argument('-e', '--exit-code-only', action='store_true', parser.add_argument('-e', '--exit-code-only', action='store_true',
help="only check exit code; don't run CPython to get expected output to compare against") help="only check exit code; don't run CPython to get expected output to compare against")
parser.add_argument('-q', '--quiet', action='store_true',
help="Only display failing tests")
parser.add_argument('--skip-failing', action='store_true', parser.add_argument('--skip-failing', action='store_true',
help="skip tests expected to fail") help="skip tests expected to fail")
parser.add_argument('--order-by-mtime', action='store_true', parser.add_argument('--order-by-mtime', action='store_true',
...@@ -506,6 +516,7 @@ def main(orig_dir): ...@@ -506,6 +516,7 @@ def main(orig_dir):
global VERBOSE global VERBOSE
global EXTMODULE_DIR_PYSTON global EXTMODULE_DIR_PYSTON
global EXTMODULE_DIR global EXTMODULE_DIR
global DISPLAY_SUCCESSES
run_memcheck = False run_memcheck = False
...@@ -521,6 +532,9 @@ def main(orig_dir): ...@@ -521,6 +532,9 @@ def main(orig_dir):
EXIT_CODE_ONLY = opts.exit_code_only EXIT_CODE_ONLY = opts.exit_code_only
SKIP_FAILING_TESTS = opts.skip_failing SKIP_FAILING_TESTS = opts.skip_failing
if opts.quiet:
DISPLAY_SUCCESSES = False
TEST_DIR = os.path.join(orig_dir, opts.test_dir) TEST_DIR = os.path.join(orig_dir, opts.test_dir)
EXTMODULE_DIR_PYSTON = os.path.abspath(os.path.dirname(os.path.realpath(IMAGE)) + "/test/test_extension/") EXTMODULE_DIR_PYSTON = os.path.abspath(os.path.dirname(os.path.realpath(IMAGE)) + "/test/test_extension/")
EXTMODULE_DIR = os.path.abspath(os.path.dirname(os.path.realpath(IMAGE)) + "/test/test_extension/build/lib.linux-x86_64-2.7/") EXTMODULE_DIR = os.path.abspath(os.path.dirname(os.path.realpath(IMAGE)) + "/test/test_extension/build/lib.linux-x86_64-2.7/")
...@@ -607,11 +621,13 @@ def main(orig_dir): ...@@ -607,11 +621,13 @@ def main(orig_dir):
print "(%s also failed)" % fn print "(%s also failed)" % fn
sys.exit(1) sys.exit(1)
break break
name = os.path.basename(fn).rjust(FN_JUST_SIZE)
msgs = results[fn] if results[fn]:
if isinstance(msgs,str): name = os.path.basename(fn).rjust(FN_JUST_SIZE)
msgs = [msgs] msgs = results[fn]
print ' '.join([name] + list(msgs)) if isinstance(msgs,str):
msgs = [msgs]
print ' '.join([name] + list(msgs))
for t in threads: for t in threads:
t.join() t.join()
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment