Commit eb3edfd6 authored by Kevin Modzelewski, committed by Kevin Modzelewski

Add "log hashing" to see new failures

A common issue is that a test goes from N failures to N+1, but it's
not at all clear which of the failures is new.

This adds a simple hashing mechanism to help in these cases: we hash
each line of the log and construct a small Bloom filter of which lines
were seen.  We can then put this filter into the test file, and when
the test fails we can check the received output against the expected
Bloom filter.

I tried testing it and it seems ok, but we'll have to see how well it
works in practice.
parent 0df5d7b5
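For readers skimming the diff: the mechanism is a tiny single-hash-function Bloom filter over canonicalized log lines. Below is a minimal sketch of the idea with hypothetical helper names (make_log_filter / probably_new_lines); the actual implementation is the log_hash/check_hash pair added to test_helper.py further down, which additionally canonicalizes lines and prints context around new ones, and which relies on Python 2's deterministic str hash().

def make_log_filter(log, nbits=1024):
    # One bit per bucket; hash each log line into a bucket.
    bits = [0] * nbits
    for line in log.split('\n'):
        bits[hash(line) % nbits] = 1
    # Pack the bits MSB-first into bytes and base64 them, so the filter can be
    # pasted into a test file as a short string.
    chars = []
    for i in xrange(0, nbits, 8):
        chars.append(chr(sum(bits[i + j] << (7 - j) for j in xrange(8))))
    return ''.join(chars).encode('base64').strip()

def probably_new_lines(log, expected_b64):
    s = expected_b64.decode('base64')
    nbits = len(s) * 8
    def seen(line):
        idx = hash(line) % nbits
        return (ord(s[idx // 8]) >> (7 - idx % 8)) & 1
    # Bloom-filter semantics: "seen" may be a false positive, but a line that
    # maps to an unset bit was definitely not in the expected log.
    return [l for l in log.split('\n') if not seen(l)]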
......@@ -26,7 +26,7 @@ def install_and_test_lxml():
"skip_sslv2_tests.patch",
"fix_testsuite_ftbfs.patch",
"fix_testsuite_tls1.2.patch",
"fix_testsuite_sha256.patch")
"fix_testsuite_sha256.patch")
for patch in debian_patches:
PATCH_FILE = os.path.abspath(os.path.join(M2CRYPTO_DIR, "debian", "patches", patch))
......@@ -39,11 +39,16 @@ def install_and_test_lxml():
# M2Crypto can't find the opensslconf without this
env["DEB_HOST_MULTIARCH"] = "/usr/include/x86_64-linux-gnu"
# SWIG does not work with pyston if this define is not set
env["CFLAGS"] = "-DSWIG_PYTHON_SLOW_GETSET_THIS"
env["CFLAGS"] = "-DSWIG_PYTHON_SLOW_GETSET_THIS"
subprocess.check_call([PYTHON_EXE, "setup.py", "install"], cwd=M2CRYPTO_DIR, env=env)
expected = [{'ran': 235, 'errors': 5, 'skipped': 2}]
run_test([PYTHON_EXE, "setup.py", "test"], cwd=M2CRYPTO_DIR, expected=expected)
expected_log_hash = '''
rAAABwigI04NBogROC1ATTYBiAUIAMhCMKBLQAC1SMALgBCRQIIAgKOpBBGgIaMKAAUAkVgCUJAA
ABCMAIwJAQAAwONQATYSBmEYSACDAEUoRABkJKEAhBBEMgYMwQoFABBwCETByQggaAkAqAgUgAAG
QBWgAamiIaSEIIIiGjE=
'''
run_test([PYTHON_EXE, "setup.py", "test"], cwd=M2CRYPTO_DIR, expected=expected, expected_log_hash=expected_log_hash)
create_virtenv(ENV_NAME, None, force_create = True)
install_and_test_lxml()
......@@ -8,7 +8,7 @@ PYTHON_EXE = os.path.abspath(os.path.join(ENV_NAME, "bin", "python"))
PYTEST_EXE = os.path.abspath(os.path.join(ENV_NAME, "bin", "py.test"))
AVRO_DIR = os.path.abspath(os.path.join(ENV_NAME, "avro-1.7.7"))
packages = ["pytest==2.8.7", "py==1.4.29", "avro==1.7.7"]
packages = ["pytest==2.8.7", "py==1.4.29", "avro==1.7.7"]
create_virtenv(ENV_NAME, packages, force_create = True)
url = "https://pypi.python.org/packages/source/a/avro/avro-1.7.7.tar.gz"
......@@ -20,4 +20,9 @@ env["PYTHONPATH"] = os.path.abspath(os.path.join(ENV_NAME, "lib/python2.7/site-p
# cpython has the same number of failures
expected = [{'failed': 2, 'passed': 47}]
run_test([PYTEST_EXE], env=env, cwd=AVRO_DIR, expected=expected)
expected_log_hash = '''
gBAAAACAAAABBAgAAAACAAgAIAAABAQAAAAAAACAAAgDAIAAABAAIAMBAQgACBAAFBAQAACBAAAA
EAEAAAAQAABAAAAAAIAAAAAAAAQAAAgICCgAEBAAAAAQAAAAAACAAAAAAAEAgAAAAAIAAAAAgBAA
AZQAAAAAAAAAIAAEAAA=
'''
run_test([PYTEST_EXE], env=env, cwd=AVRO_DIR, expected=expected, expected_log_hash=expected_log_hash)
......@@ -20,4 +20,9 @@ subprocess.check_call([PYTHON_EXE, "setup.py", "build"], cwd=BABEL_DIR)
subprocess.check_call([PYTHON_EXE, "setup.py", "install"], cwd=BABEL_DIR)
expected = [{"ran": 227, "failures": 3, "errors": 3}]
run_test([NOSETESTS_EXE], cwd=BABEL_DIR, expected=expected)
expected_log_hash = '''
gAIAAAAACQAAAABAAAAABAAAAIAAEAAAAAAAAAAAAEAEBAAAAAAAkAAAAAAAAAAAQAAEgAAAAAAA
AAAAAAAAAQAACAgAAAAAAAAAIAAJAAAAAAAAAAAAAAAAAAAAAEAAAAAAAhAAAAAAAAAAEACAAAAA
EIgAAAAQAAAAAIAAAAA=
'''
run_test([NOSETESTS_EXE], cwd=BABEL_DIR, expected=expected, expected_log_hash=expected_log_hash)
......@@ -30,7 +30,12 @@ def install_and_test_cffi():
# dir_to_test = "."
# I just picked a subdirectory; I don't really know what it's testing.
dir_to_test = os.path.join(CFFI_DIR, "testing", "cffi1")
run_test([PYTEST_EXE, dir_to_test], cwd=CFFI_DIR, expected=expected)
expected_log_hash = '''
gBEACAAQEAIAwICAAAAAAABAAAAAAAACAAAAEAAAEAAAAEQEQAAAAKAAARFEEACKAAABAACAAAAA
QgAAAEAQBAACgAEAAABAAAAAAAFAAAoAAAAAAACAAAACAAIAAUiAIAAAAAODgAAgEIQBABAACgAC
GBACAAAAICEAABAAgQA=
'''
run_test([PYTEST_EXE, dir_to_test], cwd=CFFI_DIR, expected=expected, expected_log_hash=expected_log_hash)
create_virtenv(ENV_NAME, ["pytest==2.8.7", "py==1.4.31", "pycparser==2.14"], force_create = True)
install_and_test_cffi()
......@@ -26,9 +26,19 @@ def install_and_test_cffi():
# looks like clang 3.5 causes more errors like: 214 != -42 doing casts
if os.environ.has_key("CC") and "clang" in os.environ["CC"]:
expected = [{ "failed": 20, "passed": 1659, "skipped": 73, "xfailed": 4}]
expected_log_hash = '''
oRkAgDIgEgAAwoKiAIQAIABAQAAAAAIKBOAIUABAEAAAIMQFgQCKhKEgERFEMAgAAAIBAAiCCBAC
CAIASESQBAQDpAEAAAogAAMBAoVQqkCKABBAAIDgAKECABJAAQiEIAAgAgOigAIwcoQBIAAACoAG
2FIHAAQAJIELIVABgwA=
'''
else:
expected = [{ "failed": 11, "passed": 1668, "skipped": 73, "xfailed": 4}]
run_test([PYTEST_EXE], cwd=CFFI_DIR, expected=expected)
expected_log_hash = '''
oRkAwBAg0gAEwoCiQIQgIQBAQAABQEKKBGAZVAhKcAAAAMQFAQAogKggFRFGEIgAAAKABgiGCBCC
CAIASEAQHAQSpAEADEugCJEBAoFgIECDBBBEAACgACECAAJKgQicIAAgAAOChBIyUoQBIAAACoAG
2FInAAQQpIEHARAJowE=
'''
run_test([PYTEST_EXE], cwd=CFFI_DIR, expected=expected, expected_log_hash=expected_log_hash)
create_virtenv(ENV_NAME, ["pytest==2.8.7", "py==1.4.31", "pycparser==2.14"], force_create = True)
install_and_test_cffi()
......@@ -10,4 +10,9 @@ cheetah_exe = os.path.join(ENV_NAME, "bin", "cheetah")
env = os.environ
env["PATH"] = os.path.join(ENV_NAME, "bin")
expected = [{'ran': 2138, 'errors': 4}, {'ran': 2138, 'errors': 232, 'failures': 2}]
run_test([cheetah_exe, "test"], cwd=ENV_NAME, expected=expected, env=env)
expected_log_hash = '''
jcoDAKUIQTpEDIDiMwAuQFEAKABjEbNAAAACgqABAAGgGsGQaQQLg/l0gIQXbEA4IKQisBIAAlOQ
IG4lA5AAASAqqGdMCPAAALKbAEQAYAcCEgRHAQCAAhAVJIghShwAUpAAKaEwgk0GaEUkgQIIADgb
pKTQYrIACAshhJ6Bwh0=
'''
run_test([cheetah_exe, "test"], cwd=ENV_NAME, expected=expected, env=env, expected_log_hash=expected_log_hash)
......@@ -16,4 +16,9 @@ create_virtenv(ENV_NAME, packages, force_create = True)
subprocess.check_call(["patch", "-p1"], stdin=open(os.path.join(os.path.dirname(__file__), "formencode.patch")), cwd=SRC_DIR)
expected = [{'ran': 201}]
run_test([NOSETESTS_EXE], cwd=FORMENCODE_DIR, expected=expected)
expected_log_hash = '''
gAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAgAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAgAAACAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAA=
'''
run_test([NOSETESTS_EXE], cwd=FORMENCODE_DIR, expected=expected, expected_log_hash=expected_log_hash)
......@@ -11,4 +11,12 @@ pkg = ["nose==1.3.7", "-e", "git+http://github.com/maxmind/geoip-api-python.git@
create_virtenv(ENV_NAME, pkg, force_create = True)
GEOIP_DIR = os.path.abspath(os.path.join(SRC_DIR, "geoip"))
expected = [{'ran': 10}]
run_test([PYTHON_EXE, "setup.py", "test"], cwd=GEOIP_DIR, expected=expected)
expected_log_hash = '''
ggAAAAAAQAQAAAAACAAAAAAAAAAAAAIABIAAAAAAgAACAAAAAAAIAAAAAAAAAAIIBAAAgABABAgA
AAAAAAAAAAAAAAAAAAAAAAQAAIgAAAAAAAAAAQBAABAAEAEAAAAAAAAAAAgAAAAIAIAAAAEAAAAA
AIAAAAgAAAAAAAAAAAA=
'''
run_test([PYTHON_EXE, "setup.py", "test"], cwd=GEOIP_DIR, expected=expected, expected_log_hash=expected_log_hash)
......@@ -32,9 +32,14 @@ def install_and_test_lxml():
print "Applied lxml patch"
subprocess.check_call([PYTHON_EXE, "setup.py", "build_ext", "-i", "--with-cython"], cwd=LXML_DIR)
expected = [{'ran': 1381}]
run_test([PYTHON_EXE, "test.py"], cwd=LXML_DIR, expected=expected)
expected_log_hash = '''
gAAAAAAAAQAAAAAIAAAAAAAAAAAAgAAAAAAAAABAAACCAEgAAAAAgAIAAAAAAACAAAAAoAAAAAAA
ABAAAAAAAAAAAAAigAAAAAAAAAAQAAAwAgAAAAAAAAAAAAAAAAIAAAAEAAAACAAAAAAAAABBAAAA
AAAAAAAAAAAAAAAAAAA=
'''
run_test([PYTHON_EXE, "test.py"], cwd=LXML_DIR, expected=expected, expected_log_hash=expected_log_hash)
create_virtenv(ENV_NAME, None, force_create = True)
install_and_test_lxml()
......@@ -16,8 +16,8 @@ def install_and_test_mysqldb():
subprocess.check_call(["git", "clone", "https://github.com/farcepest/MySQLdb1.git"], cwd=SRC_DIR)
MYSQLDB_DIR = os.path.abspath(os.path.join(SRC_DIR, "MySQLdb1"))
subprocess.check_call(["git", "checkout", "MySQLdb-1.2.5"], cwd=MYSQLDB_DIR)
subprocess.check_call(["git", "checkout", "MySQLdb-1.2.5"], cwd=MYSQLDB_DIR)
nosetests_exe = os.path.abspath(ENV_NAME + "/bin/nosetests")
#apply patch
......@@ -29,9 +29,14 @@ def install_and_test_mysqldb():
subprocess.check_call([PYTHON_EXE, "setup.py", "install"], cwd=MYSQLDB_DIR)
env = os.environ
env["TESTDB"] = "travis.cnf"
env["TESTDB"] = "travis.cnf"
expected = [{"ran": 69}]
run_test([nosetests_exe], cwd=MYSQLDB_DIR, expected=expected, env=env)
expected_log_hash = '''
gAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgAAAAAAA
AAAAAAAEAAAAAAAAAAAAAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAQAAAAAAA=
'''
run_test([nosetests_exe], cwd=MYSQLDB_DIR, expected=expected, env=env, expected_log_hash=expected_log_hash)
packages = ["nose==1.3.7"]
create_virtenv(ENV_NAME, packages, force_create = True)
......
......@@ -77,8 +77,13 @@ except:
raise
expected_log_hash = '''
gAAAAAAAAAAACAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAACAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACAQAAAAAABAAACAAAAAAAAAAAAA
AAAgAAAAAAAAAAAAAAA=
'''
test_helper.run_test(['sh', '-c', '. %s/bin/activate && python %s/numpy/tools/test-installed-numpy.py' % (ENV_DIR, ENV_DIR)],
ENV_NAME, [dict(ran=6139, failures=1)])
ENV_NAME, [dict(ran=6139, failures=1)], expected_log_hash=expected_log_hash)
print
print "PASSED"
......@@ -37,7 +37,10 @@ print ">> "
# - no shiftjis encoding
# - slightly different error messages
expected = [{"failed" : 22, "passed" : 112}]
run_test([PYTEST_EXE], cwd=PASTE_TEST_DIR, expected=expected)
expected_log_hash = '''
ghA0YC2ZyyvAxlQASAgCMAglIjZ2pwSCB8wuCIgiAYGKAITIQgIQLRgRYwA0e1BchxcHGsShlUij
OBXOA0E4AQIkzEKIOCikJYUgRF5hh8YJVAioAI6FDJB8waBqoGC7hEIDCdEZkACMA4IDaLABMFSi
n7AogIFoJ4iCeUEEggM=
'''
run_test([PYTEST_EXE], cwd=PASTE_TEST_DIR, expected=expected, expected_log_hash=expected_log_hash)
......@@ -28,7 +28,12 @@ def install_and_test_protobuf():
subprocess.check_call([PYTHON_EXE, "setup.py", "build"], cwd=PROTOBUF_PY_DIR, env=env)
expected = [{"ran": 216}]
run_test([PYTHON_EXE, "setup.py", "test"], cwd=PROTOBUF_PY_DIR, expected=expected, env=env)
expected_log_hash = '''
gAAQSBxQEAxCwBwkAAREFCAUCQAAiAsIBggpNIQAAIBBBAAEAAQQAAADDEgABFI9QpcAlQAAgwEi
HEAJAESKkAKBGAAlpAAIAMggcAgAQQsQMwCkEgAisDKIAhEhABCMEE4CBAAEQQQAgIAIiIAEJBIy
gUBSkjAAIAUAQA8EIAI=
'''
run_test([PYTHON_EXE, "setup.py", "test"], cwd=PROTOBUF_PY_DIR, expected=expected, env=env, expected_log_hash=expected_log_hash)
create_virtenv(ENV_NAME, None, force_create = True)
install_and_test_protobuf()
......@@ -15,12 +15,12 @@ def install_and_test_pyicu():
subprocess.check_call(["wget", url], cwd=SRC_DIR)
subprocess.check_call(["tar", "-zxf", "icu4c-4_2_1-src.tgz"], cwd=SRC_DIR)
ICU_DIR = os.path.abspath(os.path.join(SRC_DIR, "icu", "source"))
INSTALL_DIR = os.path.join(SRC_DIR, "icu_install")
subprocess.check_call(["./runConfigureICU", "Linux", "--prefix=" + INSTALL_DIR], cwd=ICU_DIR)
subprocess.check_call(["make", "-j4"], cwd=ICU_DIR)
subprocess.check_call(["make", "install"], cwd=ICU_DIR)
url = "https://pypi.python.org/packages/source/P/PyICU/PyICU-1.0.1.tar.gz"
subprocess.check_call(["wget", url], cwd=SRC_DIR)
subprocess.check_call(["tar", "-zxf", "PyICU-1.0.1.tar.gz"], cwd=SRC_DIR)
......@@ -37,9 +37,14 @@ def install_and_test_pyicu():
env["LD_LIBRARY_PATH"] = LIB_DIR
subprocess.check_call([PYTHON_EXE, "setup.py", "build"], cwd=PYICU_DIR, env=env)
subprocess.check_call([PYTHON_EXE, "setup.py", "install"], cwd=PYICU_DIR, env=env)
expected = [{'ran': 17}]
run_test([PYTHON_EXE, "setup.py", "test"], cwd=PYICU_DIR, expected=expected)
expected_log_hash = '''
gAAAAQAAABQAACBAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAARAAAAAAAAAAAAAAAIAIAAgAAAAAAA
AAAAAAgAAAACAAAAAAAAAAIAAAiAAAgAAQQAAAAAABAAAIEBAAAAAAAAACAAAAAAAAAAIIAIAAAA
AAAAAAAAAAAAAAACgAA=
'''
run_test([PYTHON_EXE, "setup.py", "test"], cwd=PYICU_DIR, expected=expected, expected_log_hash=expected_log_hash)
create_virtenv(ENV_NAME, None, force_create = True)
install_and_test_pyicu()
......@@ -18,12 +18,17 @@ def install_and_test_pylons():
PYLONS_DIR = os.path.abspath(os.path.join(SRC_DIR, "Pylons-0.9.6.2"))
subprocess.check_call([PYTHON_EXE, "setup.py", "install"], cwd=PYLONS_DIR)
# most of the errors are because of our coerceUnicodeToStr which raises a TypeError instead of a UnicodeError
# but as we don't support the unicode string correctly every where I don't want to change this currently.
expected = [{ "ran": 50, "errors": 7}]
run_test([NOSE_EXE], cwd=PYLONS_DIR, expected=expected)
expected_log_hash = '''
wLKBAAEAEQAABEAgAAUAYBABtBACiIFIAoAIIAiAYAIUBADgCOIAggAIBACQCAgIgAGBgCAsAIAB
FCIAQAAQAQQAmQoAAACEMQAiAaIAFIgAEEAAAUgAAGAIQAEAAEBQQABQAEAAAAAAAiEiIEAAAEIC
ECBAiigwIAAABAQIAQE=
'''
run_test([NOSE_EXE], cwd=PYLONS_DIR, expected=expected, expected_log_hash=expected_log_hash)
pkg = [ "Mako==1.0.3",
"decorator==4.0.9",
"simplejson==3.8.2",
......
......@@ -25,4 +25,9 @@ subprocess.check_call(["sed", "-i", 's/\\(def test_export_text.*\\)/\\1\\n
print os.path.join(PYOPENSSL_DIR, "test", "test_crypto.py")
expected = [{'ran': 438}]
run_test([NOSETESTS_EXE], cwd=PYOPENSSL_DIR, expected=expected)
expected_log_hash = '''
gAAAAAAAAAAAAAAgAgAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAIAAAAAAgAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAgAAAAAAAAAAAAAAEAAAAAAAA
AAAAACAAAAAAgAAAAAA=
'''
run_test([NOSETESTS_EXE], cwd=PYOPENSSL_DIR, expected=expected, expected_log_hash=expected_log_hash)
......@@ -13,4 +13,9 @@ create_virtenv(ENV_NAME, pkg, force_create = True)
ROUTES_DIR = os.path.abspath(os.path.join(SRC_DIR, "routes"))
expected = [{ "ran" : 141 }]
run_test([PYTHON_EXE, "setup.py", "test"], cwd=ROUTES_DIR, expected=expected)
expected_log_hash = '''
ggSACQMCAQZABAAUIIMCEABCAAAgQAACDAUABAKgAAgEAAECIAAgIAgBABQICCDoIkNQgQAAIQQE
xACSGAIAoIAAAgAQAQEIAAAQAFhWACgBBHAEYAAgIBQAUGAAAAIABCAEQEgAAAFAACAAAo5EgBAA
AAAQCSBIURAAwDCAkQA=
'''
run_test([PYTHON_EXE, "setup.py", "test"], cwd=ROUTES_DIR, expected=expected, expected_log_hash=expected_log_hash)
......@@ -15,5 +15,9 @@ packages += ["-e", "git+https://github.com/dahlia/libsass-python@0.8.3#egg=libsa
create_virtenv(ENV_NAME, packages, force_create = True)
expected = [{'ran': 75}]
run_test([PYTHON_EXE, "setup.py", "test"], cwd=SASS_DIR, expected=expected)
expected_log_hash = '''
wEAIQAAAAAQEhBAAAAgQIF0QAAAQQAAAQigEAAwwABACAARUEBEAACTAAAQAEAAIAABJgQAEACAC
AEAAUQQACBAAAAEBBABAARAAAQgCAHAWBQQAAABkaDABQAAYYAAEJtgCAIAgcoASgAwgUAAIRAQR
QAQgIAQACCAgFEDEiFA=
'''
run_test([PYTHON_EXE, "setup.py", "test"], cwd=SASS_DIR, expected=expected, expected_log_hash=expected_log_hash)
......@@ -10,7 +10,7 @@ PYTHON_EXE = os.path.abspath(os.path.join(ENV_NAME, "bin", "python"))
def install_and_test_simplejson():
shutil.rmtree(SRC_DIR, ignore_errors=True)
os.makedirs(SRC_DIR)
url = "https://pypi.python.org/packages/source/s/simplejson/simplejson-2.6.2.tar.gz"
subprocess.check_call(["wget", url], cwd=SRC_DIR)
subprocess.check_call(["tar", "-zxf", "simplejson-2.6.2.tar.gz"], cwd=SRC_DIR)
......@@ -18,9 +18,15 @@ def install_and_test_simplejson():
subprocess.check_call([PYTHON_EXE, "setup.py", "build"], cwd=SIMPLEJSON_DIR)
subprocess.check_call([PYTHON_EXE, "setup.py", "install"], cwd=SIMPLEJSON_DIR)
expected = [{'ran': 170}]
run_test([PYTHON_EXE, "setup.py", "test"], cwd=SIMPLEJSON_DIR, expected=expected)
expected_log_hash = '''
gAAAAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgAAAAAAA
AAAAAAAAAAAAAAAAAEAAAAQAAAgAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAIAAAAAAAAAAAAAAAA=
'''
run_test([PYTHON_EXE, "setup.py", "test"], cwd=SIMPLEJSON_DIR, expected=expected, expected_log_hash=expected_log_hash)
create_virtenv(ENV_NAME, None, force_create = True)
install_and_test_simplejson()
......@@ -20,7 +20,12 @@ def install_and_test_unidecode():
subprocess.check_call([PYTHON_EXE, "setup.py", "install"], cwd=UNIDECODE_DIR)
expected = [{'ran': 8}]
run_test([PYTHON_EXE, "setup.py", "test"], cwd=UNIDECODE_DIR, expected=expected)
expected_log_hash = '''
gECAAAAAAAAAAABAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgAAAAAAA
AAAAGAAAAAAAAAAAAAAAAAQAAggAAAAAAAAAAAAAABAAAAIAAAAAAAAAAAAAAAggAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAA=
'''
run_test([PYTHON_EXE, "setup.py", "test"], cwd=UNIDECODE_DIR, expected=expected, expected_log_hash=expected_log_hash)
create_virtenv(ENV_NAME, None, force_create = True)
install_and_test_unidecode()
......@@ -59,7 +59,13 @@ assert enc_data != test_string
assert key.decrypt(enc_data) == test_string
expected = [{'ran': 1891}]
test_helper.run_test([sys.executable, "setup.py", "test"], pycrypto_dir, expected)
expected_log_hash = '''
gAAAAAAAAAAAAABAAAAAAAAACAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACAAAAAAIAAIAgAAAAAAA
BAABAABAAAAAAAAAAAAAAAQAAAgAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAAgAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAA=
'''
test_helper.run_test([sys.executable, "setup.py", "test"], pycrypto_dir, expected, expected_log_hash=expected_log_hash)
print "-- Tests finished"
......
......@@ -57,7 +57,8 @@ def parse_output(output):
result[-1][res_type] = int(m.group(1))
return result
def run_test(cmd, cwd, expected, env = None):
def run_test(cmd, cwd, expected, expected_log_hash="", env=None):
    assert isinstance(expected_log_hash, str)
    print "Running", cmd
    process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=cwd, env=env)
    output, unused_err = process.communicate()
......@@ -68,12 +69,114 @@ def run_test(cmd, cwd, expected, env = None):
print "Return code:", errcode
assert errcode in (0, 1), "\n\n%s\nTest process crashed" % output
expected_log_hash = expected_log_hash.strip()
this_log_hash = log_hash(output)
if expected_log_hash == "":
raise Exception("please set the expected log hash: \nexpected_log_hash = '''\n%s\n'''" % (this_log_hash,))
if expected == result:
print "Received expected output"
different = check_hash(output, expected_log_hash)
# These checks are useful for making sure that we have the right expected
# hashes in our test files, but I don't think it's worth failing the build for them:
# assert not different, "expected_log_hash = '''\n%s\n'''" % (this_log_hash,)
# assert this_log_hash == expected_log_hash, "expected_log_hash = '''\n%s\n'''" % (this_log_hash,)
else:
print >> sys.stderr, '\n'.join(output.split('\n')[-500:])
print >> sys.stderr, '\n'
different = check_hash(output, expected_log_hash)
print >> sys.stderr, '\n'
print >> sys.stderr, "WRONG output"
print >> sys.stderr, "is:", result
print >> sys.stderr, "expected:", expected
if not different:
print >> sys.stderr, "(log hash can't detect missing lines)"
if this_log_hash != expected_log_hash:
print >> sys.stderr, "expected_log_hash = '''\n%s\n'''" % (this_log_hash,)
assert result == expected
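# Usage note (illustrative, not part of the diff): the adoption workflow is to call
# run_test() without expected_log_hash the first time; the exception above prints a
# ready-to-paste
#     expected_log_hash = '''...'''
# block, which then gets committed into the test script, as in the hunks above.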
# Try to canonicalize the log to remove most spurious differences.
# We won't be able to get 100% of them, since there will always be differences in the number of
# python warnings or compiler messages.
# But try to remove the most egregious things (filename differences, timing differences) so that the output is easier to parse.
def process_log(log):
    r = []
    for l in log.split('\n'):
        # Remove timing data:
        l = re.sub("tests in ([\\d\\.])+s", "", l)
        l = re.sub("in ([\\d\\.])+ seconds", "", l)
        # Remove filenames:
        # log = re.sub("/[^ ]*.py:\\d", "", log)
        # log = re.sub("/[^ ]*.py.*line \\d", "", log)
        if "http://" not in l:
            l = re.sub("(^|[ \"\'/])/[^ :\"\']*($|[ \":\'])", "", l)
        # Remove pointer ids:
        l = re.sub('0x([0-9a-f]{8,})', "", l)
        r.append(l)
    return r
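# Illustrative examples of the canonicalization (not part of the diff):
#   'Ran 235 tests in 1.204s'                        -> 'Ran 235 '
#   'File "/tmp/env/lib/python2.7/foo.py", line 12'  -> 'File , line 12'
#   '<socket object at 0x7f3a2b1c4d50>'              -> '<socket object at >'
# so reruns that differ only in timings, paths, or pointer ids hash to the same bucket.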
# Build a single-hash-function Bloom filter over the canonicalized log lines:
# one bit per line bucket, packed MSB-first into bytes and base64-encoded.
def log_hash(log, nbits=1024):
    log_lines = process_log(log)

    bits = [0] * nbits
    for l in log_lines:
        bits[hash(l) % nbits] = 1
    assert sum(bits) < nbits * .67, "hash is very full!"

    l = []
    for i in xrange(0, nbits, 8):
        t = 0
        for j in xrange(8):
            if bits[i + j]:
                t += 1 << (7 - j)
        l.append(chr(t))
    return ''.join(l).encode('base64').strip()
# Decode the expected filter and report which lines of this log do not appear in it.
def check_hash(log, expected_hash):
    orig_log_lines = log.split('\n')
    log_lines = process_log(log)

    # Unpack the base64 string back into a bit array.
    s = expected_hash.decode('base64')
    nbits = len(s) * 8
    bits = [0] * nbits
    for i in xrange(len(s)):
        c = ord(s[i])
        for j in xrange(8):
            bit = (c >> (7 - j)) & 1
            if bit:
                bits[i * 8 + j] = True

    # A line is flagged if its bucket bit is unset, ie the expected log never hashed to it.
    missing = [False] * len(log_lines)
    for i, l in enumerate(log_lines):
        if not bits[hash(l) % nbits]:
            missing[i] = True

    ncontext = 2
    def ismissing(idx, within):
        for i in xrange(max(0, idx-within), min(len(log_lines), idx+within+1)):
            if missing[i]:
                return True
        return False

    # Print the new lines, plus a couple lines of surrounding context.
    different = False
    for i in xrange(len(log_lines)):
        if ismissing(i, 0):
            different = True
            if orig_log_lines[i] != log_lines[i]:
                print >>sys.stderr, "\033[30m+ % 4d: %s\033[0m" % (i + 1, orig_log_lines[i])
                print >>sys.stderr, "+ % 4d: %s" % (i + 1, log_lines[i])
            else:
                print >>sys.stderr, "+ % 4d: %s" % (i + 1, orig_log_lines[i])
        elif ismissing(i, ncontext):
            print >>sys.stderr, " % 4d: %s" % (i + 1, orig_log_lines[i])

    assert different == any(missing)
    return any(missing)
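For a rough sense of the false-positive rate (a back-of-the-envelope estimate, not from the commit): with a single hash function and a 1024-bit filter, a genuinely new line goes undetected with probability equal to the fraction of bits already set, which is presumably why log_hash asserts the filter stays below ~67% full.

nbits = 1024.0
for nlines in (100, 300, 500):
    frac_set = 1 - (1 - 1 / nbits) ** nlines   # expected fraction of bits set
    print "%d distinct lines -> ~%.0f%% of bits set (= chance a new line slips through)" % (
        nlines, 100 * frac_set)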
......@@ -535,7 +535,8 @@ def main(orig_dir):
TEST_DIR = os.path.join(orig_dir, opts.test_dir)
EXTMODULE_DIR_PYSTON = os.path.abspath(os.path.dirname(os.path.realpath(IMAGE)) + "/test/test_extension/")
EXTMODULE_DIR = os.path.abspath(os.path.dirname(os.path.realpath(IMAGE)) + "/test/test_extension/build/lib.linux-x86_64-2.7/")
# EXTMODULE_DIR = os.path.abspath(os.path.dirname(os.path.realpath(IMAGE)) + "/test/test_extension/build/lib.linux-x86_64-2.7/")
EXTMODULE_DIR = os.path.abspath(orig_dir) + "/test/test_extension/build/lib.linux-x86_64-2.7/"
patterns = opts.pattern
IS_OPTIMIZED = int(subprocess.check_output([IMAGE, "-c", 'import sysconfig; print int("-O0" not in sysconfig.get_config_var(\"CFLAGS\"))']))
......