Carlos Ramos Carreño / neoppod / Commits / a5f2f604

Commit a5f2f604, authored Jan 16, 2014 by Julien Muchembled
tests: review report and mark known failures as expected
Parent: d250deca

Showing 7 changed files with 105 additions and 134 deletions
neo/scripts/runner.py               +63  -118
neo/tests/__init__.py               +20  -6
neo/tests/functional/testClient.py  +2   -0
neo/tests/threaded/test.py          +8   -2
neo/tests/zodb/testPack.py          +7   -6
neo/tests/zodb/testUndo.py          +4   -1
tools/test_bot                      +1   -1
neo/scripts/runner.py

@@ -22,7 +22,9 @@ import time
 import sys
 import neo
 import os
+from collections import Counter, defaultdict
+from cStringIO import StringIO
+from unittest.runner import _WritelnDecorator
 from neo.tests import getTempDirectory, __dict__ as neo_tests__dict__
 from neo.tests.benchmark import BenchmarkRunner
@@ -91,17 +93,22 @@ ZODB_TEST_MODULES = [
 ]

-class NeoTestRunner(unittest.TestResult):
+class NeoTestRunner(unittest.TextTestResult):
     """ Custom result class to build report with statistics per module """

-    def __init__(self, title):
-        unittest.TestResult.__init__(self)
+    def __init__(self, title, verbosity):
+        super(NeoTestRunner, self).__init__(
+            _WritelnDecorator(sys.stderr), False, verbosity)
         self._title = title
         self.modulesStats = {}
         self.failedImports = {}
-        self.lastStart = None
+        self.run_dict = defaultdict(int)
+        self.time_dict = defaultdict(int)
         self.temp_directory = getTempDirectory()

+    def wasSuccessful(self):
+        return not (self.failures or self.errors or self.unexpectedSuccesses)
+
     def run(self, name, modules):
         print '\n', name
         suite = unittest.TestSuite()
@@ -123,137 +130,74 @@ class NeoTestRunner(unittest.TestResult):
             suite.addTests(loader.loadTestsFromModule(test_module))
         suite.run(self)

-    class ModuleStats(object):
-        run = 0
-        errors = 0
-        success = 0
-        failures = 0
-        time = 0.0
-
-    def _getModuleStats(self, test):
-        module = test.__class__.__module__
-        module = tuple(module.split('.'))
-        try:
-            return self.modulesStats[module]
-        except KeyError:
-            self.modulesStats[module] = self.ModuleStats()
-            return self.modulesStats[module]
-
-    def _updateTimer(self, stats):
-        stats.time += time.time() - self.lastStart
-
     def startTest(self, test):
-        unittest.TestResult.startTest(self, test)
-        stats = self._getModuleStats(test)
-        stats.run += 1
-        self.lastStart = time.time()
+        super(NeoTestRunner, self).startTest(test)
+        logging.info(" * TEST %s", test)
+        self.run_dict[test.__class__.__module__] += 1
+        self.start_time = time.time()

-    def addSuccess(self, test):
-        print "OK"
-        unittest.TestResult.addSuccess(self, test)
-        stats = self._getModuleStats(test)
-        stats.success += 1
-        self._updateTimer(stats)
-
-    def addError(self, test, err):
-        print "ERROR"
-        unittest.TestResult.addError(self, test, err)
-        stats = self._getModuleStats(test)
-        stats.errors += 1
-        self._updateTimer(stats)
-
-    def addFailure(self, test, err):
-        print "FAIL"
-        unittest.TestResult.addFailure(self, test, err)
-        stats = self._getModuleStats(test)
-        stats.failures += 1
-        self._updateTimer(stats)
+    def stopTest(self, test):
+        self.time_dict[test.__class__.__module__] += \
+            time.time() - self.start_time
+        super(NeoTestRunner, self).stopTest(test)

     def _buildSummary(self, add_status):
-        success = self.testsRun - len(self.errors) - len(self.failures)
+        unexpected_count = len(self.errors) + len(self.failures) \
+                         + len(self.unexpectedSuccesses)
+        expected_count = len(self.expectedFailures)
+        success = self.testsRun - unexpected_count - expected_count
         add_status('Directory', self.temp_directory)
         if self.testsRun:
             add_status('Status', '%.3f%%' % (success * 100.0 / self.testsRun))
-        for var in os.environ.iterkeys():
+        for var in os.environ:
             if var.startswith('NEO_TEST'):
                 add_status(var, os.environ[var])
         # visual
-        header = "%25s | run | success | errors | fails | time \n" % 'Test Module'
-        separator = "%25s-+---------+---------+---------+---------+----------\n" % ('-' * 25)
+        header = "%25s | run | unexpected | expected | skipped | time \n" % 'Test Module'
+        separator = "%25s-+-------+------------+----------+---------+----------\n" % ('-' * 25)
         format = "%25s | %3s | %3s | %3s | %3s | %6.2fs \n"
         group_f = "%25s | | | | | \n"
         # header
-        s = ' ' * 30 + ' NEO TESTS REPORT'
-        s += '\n'
-        s += '\n' + header + separator
+        s = ' ' * 30 + ' NEO TESTS REPORT\n\n' + header + separator
         group = None
-        t_success = 0
+        unexpected = Counter(x[0].__class__.__module__
+            for x in (self.errors, self.failures) for x in x)
+        unexpected.update(x.__class__.__module__
+            for x in self.unexpectedSuccesses)
+        expected = Counter(x[0].__class__.__module__
+            for x in self.expectedFailures)
+        skipped = Counter(x[0].__class__.__module__ for x in self.skipped)
+        total_time = 0
         # for each test case
-        for k, v in sorted(self.modulesStats.items()):
+        for k, v in sorted(self.run_dict.iteritems()):
             # display group below its content
-            _group = '.'.join(k[:-1])
-            if group is None:
-                group = _group
+            _group, name = k.rsplit('.', 1)
             if _group != group:
+                if group:
-                s += separator + group_f % group + separator
+                    s += separator + group_f % group + separator
                 group = _group
-            # test case stats
-            t_success += v.success
-            run, success = v.run or '.', v.success or '.'
-            errors, failures = v.errors or '.', v.failures or '.'
-            name = k[-1].lstrip('test')
-            args = (name, run, success, errors, failures, v.time)
-            s += format % args
+            t = self.time_dict[k]
+            total_time += t
+            s += format % (name.lstrip('test'), v, unexpected.get(k, '.'),
+                           expected.get(k, '.'), skipped.get(k, '.'), t)
         # the last group
         s += separator + group_f % group + separator
         # the final summary
-        errors, failures = len(self.errors) or '.', len(self.failures) or '.'
-        args = ("Summary", self.testsRun, t_success, errors, failures, self.time)
-        s += format % args + separator + '\n'
-        return s
+        s += format % ("Summary", self.testsRun, unexpected_count or '.',
+                       expected_count or '.', len(self.skipped) or '.',
+                       total_time) + separator + '\n'
+        return "%s Tests, %s Failed" % (self.testsRun, unexpected_count), s

-    def _buildErrors(self):
-        s = ''
-        test_formatter = lambda t: t.id()
-        if len(self.errors):
-            s += '\nERRORS:\n'
-            for test, trace in self.errors:
-                s += "%s\n" % test_formatter(test)
-                s += "-------------------------------------------------------------\n"
-                s += trace
-                s += "-------------------------------------------------------------\n"
-                s += '\n'
-        if len(self.failures):
-            s += '\nFAILURES:\n'
-            for test, trace in self.failures:
-                s += "%s\n" % test_formatter(test)
-                s += "-------------------------------------------------------------\n"
-                s += trace
-                s += "-------------------------------------------------------------\n"
-                s += '\n'
-        return s
-
-    def _buildWarnings(self):
-        s = '\n'
-        if self.failedImports:
-            s += 'Failed imports :\n'
-            for module, err in self.failedImports.items():
-                s += '%s:\n%s' % (module, err)
-        s += '\n'
-        return s
-
     def buildReport(self, add_status):
-        self.time = sum([s.time for s in self.modulesStats.values()])
-        # TODO: Add 'Broken' for known failures (not a regression)
-        #       and 'Fixed' for unexpected successes.
-        self.subject = "%s Tests, %s Failed" % (
-            self.testsRun, len(self.errors) + len(self.failures))
-        summary = self._buildSummary(add_status)
-        errors = self._buildErrors()
-        warnings = self._buildWarnings()
-        report = '\n'.join([summary, errors, warnings])
-        return (self.subject, report)
+        subject, summary = self._buildSummary(add_status)
+        body = StringIO()
+        body.write(summary)
+        for test in self.unexpectedSuccesses:
+            body.write("UNEXPECTED SUCCESS: %s\n" % self.getDescription(test))
+        self.stream = _WritelnDecorator(body)
+        self.printErrors()
+        return subject, body.getvalue()

 class TestRunner(BenchmarkRunner):
@@ -264,6 +208,8 @@ class TestRunner(BenchmarkRunner):
             help='Unit & threaded tests')
         parser.add_option('-z', '--zodb', action='store_true',
             help='ZODB test suite running on a NEO')
+        parser.add_option('-v', '--verbose', action='store_true',
+            help='Verbose output')
         parser.format_epilog = lambda _: """
Environment Variables:
  NEO_TESTS_ADAPTER         Default is SQLite for threaded clusters,
@@ -291,14 +237,13 @@ Environment Variables:
             unit = options.unit,
             functional = options.functional,
             zodb = options.zodb,
+            verbosity = 2 if options.verbose else 1,
         )

     def start(self):
         config = self._config
         # run requested tests
-        runner = NeoTestRunner(
-            title = config.title or 'Neo',
-        )
+        runner = NeoTestRunner(config.title or 'Neo', config.verbosity)
         try:
             if config.unit:
                 runner.run('Unit tests', UNIT_TEST_MODULES)
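For readers skimming the runner change: the custom result class now relies on the bookkeeping that unittest.TextTestResult already does (it fills errors, failures, expectedFailures, unexpectedSuccesses and skipped as the suite runs), and the per-module columns of the new report are just those lists grouped with collections.Counter. The following standalone sketch is not NEO code; it uses a made-up test case under Python 2.7 to show the same grouping idea:

    import unittest
    from collections import Counter

    class Demo(unittest.TestCase):              # hypothetical test case
        def test_ok(self):
            pass
        @unittest.expectedFailure               # stdlib marker for a known failure
        def test_known_bug(self):
            self.fail("tracked bug")

    suite = unittest.TestLoader().loadTestsFromTestCase(Demo)
    result = unittest.TextTestRunner(verbosity=0).run(suite)

    # Group outcomes per test module, as the reworked summary table does.
    unexpected = Counter(t[0].__class__.__module__
        for l in (result.errors, result.failures) for t in l)
    unexpected.update(t.__class__.__module__ for t in result.unexpectedSuccesses)
    expected = Counter(t[0].__class__.__module__ for t in result.expectedFailures)
    print 'unexpected:', dict(unexpected), 'expected:', dict(expected)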
neo/tests/__init__.py

@@ -16,6 +16,7 @@
 import __builtin__
 import errno
+import functools
 import os
 import random
 import socket
@@ -31,11 +32,28 @@ from neo.lib.protocol import NodeTypes, Packets, UUID_NAMESPACES
 from neo.lib.util import getAddressType
 from time import time
 from struct import pack, unpack
+from unittest.case import _ExpectedFailure, _UnexpectedSuccess
 try:
     from ZODB.utils import newTid
 except ImportError:
     pass

+def expectedFailure(exception=AssertionError):
+    def decorator(func):
+        def wrapper(*args, **kw):
+            try:
+                func(*args, **kw)
+            except exception, e:
+                # XXX: passing sys.exc_info() causes deadlocks
+                raise _ExpectedFailure((type(e), None, None))
+            raise _UnexpectedSuccess
+        return functools.wraps(func)(wrapper)
+    if callable(exception) and not isinstance(exception, type):
+        func = exception
+        exception = Exception
+        return decorator(func)
+    return decorator
+
 DB_PREFIX = os.getenv('NEO_DB_PREFIX', 'test_neo')
 DB_ADMIN = os.getenv('NEO_DB_ADMIN', 'root')
 DB_PASSWD = os.getenv('NEO_DB_PASSWD', '')
@@ -117,8 +135,6 @@ def setupMySQLdb(db_list, user=DB_USER, password='', clear_databases=True):
 class NeoTestBase(unittest.TestCase):
     def setUp(self):
-        sys.stdout.write(' * %s ' % (self.id(), ))
-        sys.stdout.flush()
         logging.name = self.setupLog()
         unittest.TestCase.setUp(self)
@@ -126,17 +142,15 @@ class NeoTestBase(unittest.TestCase):
         test_case, logging.name = self.id().rsplit('.', 1)
         logging.setup(os.path.join(getTempDirectory(), test_case + '.log'))

-    def tearDown(self,
-            success='ok' if sys.version_info < (2, 7) else 'success'):
+    def tearDown(self):
         assert self.tearDown.im_func is NeoTestBase.tearDown.im_func
-        self._tearDown(sys._getframe(1).f_locals[success])
+        self._tearDown(sys._getframe(1).f_locals['success'])

     def _tearDown(self, success):
         # Kill all unfinished transactions for next test.
         # Note we don't even abort them because it may require a valid
        # connection to a master node (see Storage.sync()).
         transaction.manager.__init__()
-        print

     class failureException(AssertionError):
         def __init__(self, msg=None):
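The expectedFailure helper added above is used in two ways by the rest of this commit: as a decorator with an explicit exception class, or called directly on a bound assertion to mark a single check inside an otherwise passing test. A usage sketch under those assumptions (hypothetical test class and method names, Python 2.7):

    from neo.tests import NeoTestBase, expectedFailure

    class ExampleTests(NeoTestBase):            # hypothetical test case

        @expectedFailure(AttributeError)        # whole test is a known failure;
        def testKnownBug(self):                 # passing again is reported as an
            raise AttributeError                # unexpected success

        def testPartiallyBroken(self):
            self.assertTrue(True)               # this part must keep passing
            # only this one check is expected to fail (any exception counts
            # once the decorator is given a callable instead of a class)
            expectedFailure(self.assertEqual)(1, 2)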
neo/tests/functional/testClient.py

@@ -26,6 +26,7 @@ from ZODB.FileStorage import FileStorage
 from ZODB.POSException import ConflictError
 from ZODB.tests.StorageTestBase import zodb_pickle
 from persistent import Persistent
+from .. import expectedFailure
 from . import NEOCluster, NEOFunctionalTest

 TREE_SIZE = 6
@@ -220,6 +221,7 @@ class ClientTests(NEOFunctionalTest):
         self.__checkTree(root['trees'])

+    @expectedFailure(AttributeError)
     def testExportFileStorageBug(self):
         # currently fails due to a bug in ZODB.FileStorage
         self.testExport(True)
neo/tests/threaded/test.py

@@ -26,6 +26,7 @@ from neo.storage.transactions import TransactionManager, \
 from neo.lib.connection import ConnectionClosed, MTClientConnection
 from neo.lib.protocol import CellStates, ClusterStates, NodeStates, Packets, \
     ZERO_TID
+from .. import expectedFailure, _UnexpectedSuccess
 from . import ClientApplication, NEOCluster, NEOThreadedTest, Patch
 from neo.lib.util import add64, makeChecksum
 from neo.client.pool import CELL_CONNECTED, CELL_GOOD
@@ -237,6 +238,7 @@ class Test(NEOThreadedTest):
         self.assertEqual(self._testDeadlockAvoidance([2, 4]),
             [DelayedError, DelayedError, ConflictError, ConflictError])

+    @expectedFailure(POSException.ConflictError)
     def testDeadlockAvoidance(self):
         # This test fail because deadlock avoidance is not fully implemented.
         # 0: C1 -> S1
@@ -717,9 +719,13 @@ class Test(NEOThreadedTest):
             # XXX: This is an expected failure. A ttid column was added to
             #      'trans' table to permit recovery, by checking that the
             #      transaction was really committed.
-            self.assertRaises(ConnectionClosed, t.commit)
+            try:
+                t.commit()
+                raise _UnexpectedSuccess
+            except ConnectionClosed:
+                pass
             t.begin()
-            c.root()['x']
+            expectedFailure(self.assertIn)('x', c.root())
         finally:
             cluster.stop()
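The last hunk marks a single commit() as a known failure by hand, because the failing statement sits in the middle of a test that must otherwise keep running. The same raise-or-swallow pattern could be factored into a small helper; the sketch below is only an illustration of that pattern, with hypothetical names, and relies on the same private unittest API the decorator uses (Python 2.7):

    from unittest.case import _UnexpectedSuccess

    def expect_to_raise(callable_, *exceptions):
        # Run one step that is known to fail: swallow the expected exception,
        # but surface an unexpected success if it ever stops failing.
        try:
            callable_()
        except exceptions:
            return
        raise _UnexpectedSuccess

    # hypothetical usage inside such a test:
    #   expect_to_raise(t.commit, ConnectionClosed)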
neo/tests/zodb/testPack.py

@@ -16,14 +16,11 @@
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.

 import unittest
-try:
-    from ZODB.tests.PackableStorage import PackableStorageWithOptionalGC
-except ImportError:
-    from ZODB.tests.PackableStorage import PackableStorage as \
-        PackableStorageWithOptionalGC
-from ZODB.tests.PackableStorage import PackableUndoStorage
+from ZODB.tests.PackableStorage import \
+    PackableStorageWithOptionalGC, PackableUndoStorage
 from ZODB.tests.StorageTestBase import StorageTestBase
+from .. import expectedFailure
 from . import ZODBTestCase

 class PackableTests(ZODBTestCase, StorageTestBase,
@@ -32,6 +29,10 @@ class PackableTests(ZODBTestCase, StorageTestBase,
     def setUp(self):
         super(PackableTests, self).setUp(cluster_kw={'adapter': 'MySQL'})

+    checkPackAllRevisions = expectedFailure()(
+        PackableStorageWithOptionalGC.checkPackAllRevisions)
+    checkPackUndoLog = expectedFailure()(
+        PackableUndoStorage.checkPackUndoLog)
 if __name__ == "__main__":
     suite = unittest.makeSuite(PackableTests, 'check')
     unittest.main(defaultTest='suite')
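testPack.py and testUndo.py use the same idiom: a test inherited from the ZODB test suite is rebound through expectedFailure() at class level, so its failure lands in the "expected" column of the report instead of failing the run. A minimal standalone sketch of that idiom, with made-up class names (Python 2.7):

    import unittest
    from neo.tests import expectedFailure

    class UpstreamChecks(unittest.TestCase):    # stands in for a ZODB test mixin
        def checkSomething(self):
            self.fail("not supported by this backend yet")

    class BackendChecks(UpstreamChecks):
        # Rebind the inherited test so its AssertionError is reported as an
        # expected failure rather than a regression.
        checkSomething = expectedFailure()(UpstreamChecks.checkSomething)

    if __name__ == "__main__":
        suite = unittest.makeSuite(BackendChecks, 'check')
        unittest.main(defaultTest='suite')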
neo/tests/zodb/testUndo.py

@@ -19,11 +19,14 @@ from ZODB.tests.StorageTestBase import StorageTestBase
 from ZODB.tests.TransactionalUndoStorage import TransactionalUndoStorage
 from ZODB.tests.ConflictResolution import ConflictResolvingTransUndoStorage
+from .. import expectedFailure
 from . import ZODBTestCase

 class UndoTests(ZODBTestCase, StorageTestBase, TransactionalUndoStorage,
         ConflictResolvingTransUndoStorage):
-    pass
+
+    checkTransactionalUndoAfterPack = expectedFailure()(
+        TransactionalUndoStorage.checkTransactionalUndoAfterPack)

 # Don't run this test. It cannot run with pipelined store, and is not executed
 # on Zeo - but because Zeo doesn't have an iterator, while Neo has.
tools/test_bot

@@ -85,7 +85,7 @@ def main():
                 revision[:7], os.path.basename(test_home), backend)
             if tests:
                 subprocess.call([os.path.join(bin, 'neotestrunner'),
-                    '-' + tests, '--title', 'NEO tests ' + title,
+                    '-v' + tests, '--title', 'NEO tests ' + title,
                     ] + sys.argv[1:arg_count])
             if 'm' in tasks:
                 subprocess.call([os.path.join(bin, 'python'),
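For the test_bot change: the bot now prepends -v to the selected suite letters, so its logs contain the per-test lines that the verbose TextTestResult output provides. Assuming the neotestrunner script is on PATH and using only the -u/-z suite switches visible in runner.py's option parser, an equivalent manual run would look like this (optparse accepts grouped short flags):

    import subprocess

    # Hypothetical manual run mirroring tools/test_bot: unit/threaded ('u')
    # and ZODB ('z') suites, verbosely, with a custom report title.
    subprocess.call(['neotestrunner', '-vuz', '--title', 'NEO tests (local)'])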