Eteri / erp5_fork

Commit f5ded6d8
authored Sep 10, 2013 by Benjamin Blanc

runScalabilityTestSuite: add test repetition

parent ee12673f
Showing 2 changed files with 54 additions and 91 deletions:

erp5/util/scalability/runScalabilityTestSuite.py   +51 -91
tests/__init__.py                                  +3 -0
erp5/util/scalability/runScalabilityTestSuite.py

@@ -289,75 +289,39 @@ class ScalabilityLauncher(object):
     # Main loop
     while True:
+      # Loop for getting new test case
       current_test = self.getRunningTest()
       while not current_test:
         time.sleep(15)
         current_test = self.getRunningTest()
-      self.log("Test Case %s is running..." %(current_test.title))
+      self.log("Test Case %s going to be run." %(current_test.title))
+      error_count = 1
+      # Do not run a test while there are pending activities
+      waitFor0PendingActivities(self.__argumentNamespace.erp5_url, self.log)
+      previous_document_number = 0
       try:
-        # Prepare configuration
+        # Prepare command parameters
         current_test_number = int(current_test.title)
         test_duration = suite.getTestDuration(current_test_number)
         benchmarks_path = os.path.join(self.__argumentNamespace.erp5_location, suite.getTestPath())
         user_file_full_path = os.path.join(self.__argumentNamespace.erp5_location, suite.getUsersFilePath())
         user_file_path = os.path.split(user_file_full_path)[0]
         user_file = os.path.split(user_file_full_path)[1]
         tester_path = self.__argumentNamespace.runner_path
         user_number = suite.getUserNumber(current_test_number)
+        repetition = suite.getTestRepetition(current_test_number)
         self.log("user_number: %s" % str(user_number))
-        # WARMING UP
-        self.log("Warming up run.. for 180s")
-        # Generate commands to run
-        command_list = []
-        user_index = 0
-        for test_suite in test_suite_list:
-          command_list.append([tester_path,
-                               self.__argumentNamespace.erp5_url,
-                               str(user_number/len(test_suite_list)),
-                               test_suite,
-                               '--benchmark-path-list', benchmarks_path,
-                               '--users-file-path', user_file_path,
-                               '--users-file', user_file,
-                               '--filename-prefix', "%s_%s_" %(LOG_FILE_PREFIX, current_test.title),
-                               '--report-directory', self.__argumentNamespace.log_path,
-                               '--repeat', "%s" % str(MAX_DOCUMENTS),
-                               '--max-errors', str(1000000),
-                               '--user-index', str(user_index),
-                              ])
-          user_index += user_number/len(test_suite_list)
-        # Launch
-        tester_process_list = []
-        for command in command_list:
-          self.log("command: %s" % str(command))
-          tester_process_list.append(subprocess.Popen(command))
-        # Sleep
-        time.sleep(180)
-        # Stop
-        for tester_process in tester_process_list:
-          tester_process.send_signal(signal.SIGINT)
-        self.log("%s signal send to tester" % str(signal.SIGINT))
-        # /WARMING UP
-        # Wait for 0 activities
-        waitFor0PendingActivities(self.__argumentNamespace.erp5_url, self.log)
-        # Get the number of documents present before running the test.
-        previous_document_number = getCreatedDocumentNumberFromERP5(self.__argumentNamespace.erp5_url, self.log)
-        self.log("previous_document_number: %d" % previous_document_number)
-        self.log("test_duration: %ss" % str(test_duration))
-        # Generate commands to run
-        command_list = []
-        user_index = 0
+        self.log("test_duration: %s seconds" % str(test_duration))
+        # Store the number of documents generated for each iteration
+        document_number = []
+        # Repeat the same test several times to accurate test result
+        for i in range(1, repetition+1):
+          self.log("Repetition: %d/%d" %(i, repetition))
+          # Get the number of documents present before running the test.
+          waitFor0PendingActivities(self.__argumentNamespace.erp5_url, self.log)
+          previous_document_number = getCreatedDocumentNumberFromERP5(self.__argumentNamespace.erp5_url, self.log)
+          self.log("previous_document_number: %d" % previous_document_number)
+          # Generate commands to run
+          command_list = []
+          user_index = 0
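
This first hunk is the core of the change: the fixed 180-second warm-up pass is dropped, and the launcher instead asks the suite for a repetition count and wraps the whole measurement in a loop. A minimal sketch of the pattern, assuming hypothetical stand-ins count_documents() and run_benchmark() for getCreatedDocumentNumberFromERP5() and the spawned tester processes:

# Sketch of the repeat-and-measure pattern, not the module's actual code.
# count_documents() and run_benchmark() are hypothetical stand-ins.
def run_with_repetition(repetition, count_documents, run_benchmark):
  created_per_run = []
  for i in range(1, repetition + 1):
    print("Repetition: %d/%d" % (i, repetition))
    before = count_documents()                  # baseline before each run
    run_benchmark()                             # one full benchmark pass
    created_per_run.append(count_documents() - before)
  return created_per_run                        # one count per repetition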
@@ -369,7 +333,7 @@ class ScalabilityLauncher(object):
                                 '--benchmark-path-list', benchmarks_path,
                                 '--users-file-path', user_file_path,
                                 '--users-file', user_file,
-                                '--filename-prefix', "%s_%s_" %(LOG_FILE_PREFIX, current_test.title),
+                                '--filename-prefix', "%s_%s_repetition%d" %(LOG_FILE_PREFIX, current_test.title, i),
                                 '--report-directory', self.__argumentNamespace.log_path,
                                 '--repeat', "%s" % str(MAX_DOCUMENTS),
                                 '--max-errors', str(1000000),
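
Since the same test case now runs several times, each repetition needs its own result files; embedding the repetition index in the --filename-prefix keeps them apart. For illustration (the prefix value is assumed, only the format string comes from the diff):

LOG_FILE_PREFIX = "scalability"   # assumed value, for illustration only
title = "001"
for i in range(1, 4):
  # yields scalability_001_repetition1, ..._repetition2, ..._repetition3
  print("%s_%s_repetition%d" % (LOG_FILE_PREFIX, title, i))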
@@ -377,7 +341,7 @@ class ScalabilityLauncher(object):
                                ])
            user_index += user_number/len(test_suite_list)
-          # Launch
+          # Launch commands
           tester_process_list = []
           for command in command_list:
             self.log("command: %s" % str(command))
@@ -389,31 +353,29 @@ class ScalabilityLauncher(object):
           # Stop
           for tester_process in tester_process_list:
             tester_process.send_signal(signal.SIGINT)
-          self.log("%s signal send to tester" % str(signal.SIGINT))
-          # Ok
-          error_count = 0
+          self.log("End signal sent to the tester.")
+          # Count created documents
+          # Wait for 0 pending activities before counting
+          waitFor0PendingActivities(self.__argumentNamespace.erp5_url, self.log)
+          current_document_number = getCreatedDocumentNumberFromERP5(self.__argumentNamespace.erp5_url, self.log)
+          created_document_number = current_document_number - previous_document_number
+          self.log("previous_document_number: %d" % previous_document_number)
+          self.log("current_document_number: %d" % current_document_number)
+          self.log("created_document_number: %d" % created_document_number)
+          document_number[i] = created_document_number
+          # Move csv/logs
+          self.moveLogs(current_test.title)
       except:
         self.log("Error during tester call.")
         raise ValueError("Tester call failed")
       self.log("Test Case %s is finish" %(current_test.title))
-      self.log("Going to count the number of created documents")
-      # Wait for 0 pending activities before counting
-      waitFor0PendingActivities(self.__argumentNamespace.erp5_url, self.log)
-      # Count created documents
-      current_document_number = getCreatedDocumentNumberFromERP5(self.__argumentNamespace.erp5_url, self.log)
-      created_document_number = current_document_number - previous_document_number
-      self.log("previous_document_number: %d" % previous_document_number)
-      self.log("current_document_number: %d" % current_document_number)
-      self.log("created_document_number: %d" % created_document_number)
-      created_document_per_hour_number = ((float(created_document_number)*60*60)/float(test_duration))
-      # Move csv/logs
-      self.moveLogs(current_test.title)
-      # Make a connection with ERP5 master
+      # Get the maximum as choice
+      maximum = 0
+      for i in range(0, len(document_number)):
+        if document_number[i] > maximum:
+          maximum = document_number[i]
+      # Send results to ERP5 master
       retry_time = 2.0
       proxy = taskdistribution.ServerProxy(
           self.__argumentNamespace.test_suite_master_url,
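
The counting now happens once per repetition, and after the loop the launcher keeps the maximum count rather than an average, which damps one-off slow runs (leftover activities, cold caches). The selection loop is an explicit spelling of max(); a self-contained sketch with made-up counts:

document_number = [1520, 1610, 1586]    # made-up counts from three repetitions
maximum = 0
for i in range(0, len(document_number)):
  if document_number[i] > maximum:
    maximum = document_number[i]
assert maximum == max(document_number)  # the loop is equivalent to max()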
@@ -424,29 +386,27 @@ class ScalabilityLauncher(object):
             current_test.relative_path,
             current_test.title)
+      # Generate output
       results = "created docs=%d\n" \
                 "duration=%d\n" \
                 "number of tests=%d\n" \
                 "number of users=%d\n" \
                 "tests=%s\n" \
-                % (created_document_number,
+                % (maximum,
                    test_duration,
                    len(test_suite_list),
                    (user_number/len(test_suite_list))*len(test_suite_list),
                    '_'.join(test_suite_list)
                   )
-      self.log("results: %s" % results)
+      self.log("Results: %s" % results)
-      self.log("%s doc in %s secs = %s docs per hour" %(created_document_number, test_duration, created_document_per_hour_number))
+      # Stop test case
       test_result_line_test.stop(stdout=results,
                                  test_count=len(test_suite_list),
+                                 error_count=error_count,
                                  duration=test_duration)
       self.log("Test Case Stopped")
       #
       error_message_set = None
       exit_status = 0
       return error_message_set, exit_status

 def main():
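
The results block reported to the test suite master is plain key=value text built from continued string literals, now fed with the maximum across repetitions. A standalone sketch of the same formatting (all values made up; the original is Python 2, so // stands in here for its integer division):

maximum = 1610
test_duration = 600
test_suite_list = ['createPerson', 'createSaleOrder']   # made-up suite names
user_number = 20
results = "created docs=%d\n" \
          "duration=%d\n" \
          "number of tests=%d\n" \
          "number of users=%d\n" \
          "tests=%s\n" \
          % (maximum,
             test_duration,
             len(test_suite_list),
             (user_number // len(test_suite_list)) * len(test_suite_list),
             '_'.join(test_suite_list))
print(results)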
tests/__init__.py
@@ -142,3 +142,6 @@ class ERP5_scalability(_ERP5):
   # Test duration in seconds
   def getTestDuration(self, test_number):
     return 60*10
+
+  def getTestRepetition(self, test_number):
+    return 3
\ No newline at end of file
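
getTestRepetition() gives the suite a single knob for how many times each scalability test case is run; here it is a flat 3, mirroring getTestDuration()'s flat ten minutes. A hypothetical override, purely illustrative and not part of the commit, could vary it per test number:

class ERP5_scalability_long(ERP5_scalability):   # hypothetical subclass
  def getTestRepetition(self, test_number):
    # repeat later, heavier test cases more often
    return 5 if test_number >= 3 else 3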