Commit dbc82bf0
Authored Nov 15, 2017 by Julien Muchembled
Committed by Klaus Wölfel, Aug 28, 2018
testnode: some code clean up
A lot was found with pylint.
parent 08bc6f71
Showing 9 changed files with 218 additions and 336 deletions
erp5/tests/testERP5TestNode.py                   +7   -7
erp5/util/testnode/NodeTestSuite.py              +19  -30
erp5/util/testnode/ProcessManager.py             +12  -9
erp5/util/testnode/SlapOSControler.py            +62  -102
erp5/util/testnode/SlapOSMasterCommunicator.py   +17  -33
erp5/util/testnode/UnitTestRunner.py             +23  -35
erp5/util/testnode/Updater.py                    +12  -16
erp5/util/testnode/Utils.py                      +15  -25
erp5/util/testnode/testnode.py                   +51  -79
erp5/tests/testERP5TestNode.py

@@ -237,7 +237,7 @@ class ERP5TestNode(TestCase):
                       node_test_suite.custom_profile_path)
     profile = open(node_test_suite.custom_profile_path, 'r')
     if my_test_type == 'UnitTest':
-      expected_profile = """
+      expected_profile = """\
 [buildout]
 extends = %(temp_dir)s/testnode/foo/rep0/software.cfg
@@ -258,7 +258,7 @@ shared = true
     else:
       revision1 = "azerty"
       revision2 = "qwerty"
-      expected_profile = """
+      expected_profile = """\
 [buildout]
 extends = %(temp_dir)s/testnode/foo/rep0/software.cfg
@@ -802,17 +802,17 @@ shared = true
                       test_node.node_test_suite_dict
     rand_part_set = set()
     self.assertEquals(2, len(test_node.node_test_suite_dict))
-    assert(test_node.suite_log is not None)
+    self.assertIsNot(test_node.suite_log, None)
-    assert(isinstance(test_node.suite_log, types.MethodType))
+    self.assertTrue(isinstance(test_node.suite_log, types.MethodType))
     for ref, suite in test_node.node_test_suite_dict.items():
       self.assertTrue('var/log/testnode/%s' % suite.reference in \
                       suite.suite_log_path,
                       "Incorrect suite log path : %r" % suite.suite_log_path)
-      assert(suite.suite_log_path.endswith('suite.log'))
+      self.assertTrue(suite.suite_log_path.endswith('suite.log'))
       m = re.match('.*\-(.*)\/suite.log', suite.suite_log_path)
       rand_part = m.groups()[0]
-      assert(len(rand_part) == 32)
+      self.assertEqual(len(rand_part), 32)
-      assert(rand_part not in rand_part_set)
+      self.assertNotIn(rand_part, rand_part_set)
       rand_part_set.add(rand_part)
       suite_log = open(suite.suite_log_path, 'r')
       self.assertEquals(1, len([x for x in suite_log.readlines() \
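The hunk above swaps bare assert statements for unittest's assert* helpers, which keep working under python -O and report the compared values on failure. A minimal self-contained sketch of the same pattern, with placeholder values rather than the real test fixtures:

    import unittest

    class RandPartSketch(unittest.TestCase):
      def test_rand_part(self):
        rand_part = 'a' * 32      # placeholder for the random folder suffix
        seen = set()
        # instead of: assert(len(rand_part) == 32)
        self.assertEqual(len(rand_part), 32)
        # instead of: assert(rand_part not in seen)
        self.assertNotIn(rand_part, seen)

    if __name__ == '__main__':
      unittest.main()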
erp5/util/testnode/NodeTestSuite.py

@@ -24,23 +24,13 @@
 # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 #
 ##############################################################################
-from datetime import datetime, timedelta
+import errno
 import os
-import subprocess
-import sys
-import time
 import glob
-import SlapOSControler
-import json
-import time
 import shutil
-import logging
 import string
 import random
-from ProcessManager import SubprocessError, ProcessManager, CancellationError
-from subprocess import CalledProcessError
-from Updater import Updater
-from erp5.util import taskdistribution
+from .Utils import createFolder

 class SlapOSInstance(object):
   """
@@ -57,7 +47,7 @@ class SlapOSInstance(object):
   def _checkData(self):
     pass

 class NodeTestSuite(SlapOSInstance):
   """
@@ -67,22 +57,19 @@ class NodeTestSuite(SlapOSInstance):
     self.reference = reference
     self.cluster_configuration = {}

-  def edit(self, **kw):
-    super(NodeTestSuite, self).edit(**kw)
-
   def _checkData(self):
     if getattr(self, "working_directory", None) is not None:
       if not(self.working_directory.endswith(os.path.sep + self.reference)):
         self.working_directory = os.path.join(self.working_directory,
                                               self.reference)
-      SlapOSControler.createFolder(self.working_directory)
+      createFolder(self.working_directory)
       self.test_suite_directory = os.path.join(
                                    self.working_directory, "test_suite")
       self.custom_profile_path = os.path.join(self.working_directory,
                                               'software.cfg')
     if getattr(self, "vcs_repository_list", None) is not None:
       for vcs_repository in self.vcs_repository_list:
-        buildout_section_id = vcs_repository.get('buildout_section_id', None)
+        buildout_section_id = vcs_repository.get('buildout_section_id')
         repository_id = buildout_section_id or \
                         vcs_repository.get('url').split('/')[-1].split('.')[0]
         repository_path = os.path.join(self.working_directory, repository_id)
@@ -92,20 +79,22 @@ class NodeTestSuite(SlapOSInstance):
   def createSuiteLog(self):
     # /srv/slapgrid/slappartXX/srv/var/log/testnode/az-mlksjfmlk234Sljssdflkj23KSdfslj/suite.log
     alphabets = string.digits + string.letters
-    rand_part = ''.join(random.choice(alphabets) for i in xrange(32))
-    random_suite_folder_id = '%s-%s' % (self.reference, rand_part)
-    suite_log_directory = os.path.join(self.log_directory,
-                                       random_suite_folder_id)
-    SlapOSControler.createFolders(suite_log_directory)
+    while 1:
+      log_folder_name = '%s-%s' % (self.reference,
+        ''.join(random.choice(alphabets) for i in xrange(32)))
+      log_folder_path = os.path.join(self.log_directory, log_folder_name)
+      try:
+        os.makedirs(log_folder_path)
+      except OSError, e:
+        if e.errno != errno.EEXIST:
+          raise
+      else:
+        break
     # XXX copy the whole content of the log viewer app
     for fname in glob.glob(os.path.join(os.path.dirname(__file__), 'js-logtail', '*')):
-      shutil.copy(fname, suite_log_directory)
+      shutil.copy(fname, log_folder_path)
-    self.suite_log_path = os.path.join(suite_log_directory,
+    self.suite_log_path = os.path.join(log_folder_path,
                                        'suite.log')
-    return self.getSuiteLogPath(), random_suite_folder_id
-
-  def getSuiteLogPath(self):
-    return getattr(self, "suite_log_path", None)
+    return self.suite_log_path, log_folder_name

   @property
   def revision(self):
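The createSuiteLog rewrite stops pre-creating the folder and instead retries random names until os.makedirs succeeds, treating EEXIST as "pick another name". A standalone sketch of that idiom using only the standard library (function and argument names are illustrative, and it uses the Python 3 style except syntax rather than the Python 2 form in the diff):

    import errno
    import os
    import random
    import string

    def make_unique_dir(base_dir, prefix, length=32):
      alphabet = string.ascii_letters + string.digits
      while 1:
        name = '%s-%s' % (prefix,
          ''.join(random.choice(alphabet) for _ in range(length)))
        path = os.path.join(base_dir, name)
        try:
          os.makedirs(path)
        except OSError as e:
          if e.errno != errno.EEXIST:
            raise
          # name collision: loop and pick another suffix
        else:
          return path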
erp5/util/testnode/ProcessManager.py

@@ -131,14 +131,14 @@ class ProcessManager(object):

   stdin = file(os.devnull)

-  def __init__(self, log, *args, **kw):
+  def __init__(self, log, max_timeout=MAX_TIMEOUT):
     self.log = log
     self.process_pid_set = set()
     signal.signal(signal.SIGTERM, self.sigterm_handler)
     self.under_cancellation = False
     self.p = None
     self.result = None
-    self.max_timeout = kw.get("max_timeout") or MAX_TIMEOUT
+    self.max_timeout = max_timeout
     self.timer_set = set()

   def spawn(self, *args, **kw):
@@ -187,7 +187,7 @@ class ProcessManager(object):
     return result

   def getSupportedParameterList(self, program_path):
-    return re.findall('^ (--\w+)',
+    return re.findall(r'^ (--\w+)',
       self.spawn(program_path, '--help')['stdout'], re.M)

   def killall(self, name):
@@ -211,13 +211,15 @@ class ProcessManager(object):
           continue
       except (psutil.AccessDenied, psutil.NoSuchProcess):
         continue
-      self.log('ProcesssManager, killall on %s having pid %s' % (name, process.pid))
+      self.log('ProcesssManager, killall on %s having pid %s',
+               name, process.pid)
       to_kill_list.append(process.pid)
     for pid in to_kill_list:
       killCommand(pid, self.log)

   def killPreviousRun(self, cancellation=False):
-    self.log('ProcessManager killPreviousRun, going to kill %r' % (self.process_pid_set,))
+    self.log('ProcessManager killPreviousRun, going to kill %r',
+             self.process_pid_set)
     if cancellation:
       self.under_cancellation = True
     for timer in self.timer_set:
@@ -226,12 +228,13 @@ class ProcessManager(object):
         killCommand(pgpid, self.log)
     try:
       if os.path.exists(self.supervisord_pid_file):
-        supervisor_pid = int(open(self.supervisord_pid_file).read().strip())
-        self.log('ProcessManager killPreviousRun, going to kill supervisor with pid %r' % supervisor_pid)
+        with open(self.supervisord_pid_file) as f:
+          supervisor_pid = int(f.read().strip())
+        self.log('ProcessManager killPreviousRun, going to kill supervisor with pid %r',
+                 supervisor_pid)
         os.kill(supervisor_pid, signal.SIGTERM)
-    except:
+    except Exception:
       self.log('ProcessManager killPreviousRun, exception when killing supervisor')
-      pass
     self.process_pid_set.clear()

   def sigterm_handler(self, signal, frame):
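Most log changes in this file drop eager %-interpolation in favour of passing the arguments to the logging call, so the string is only built if the record is actually emitted (testnode's self.log is bound to a logging method elsewhere in this commit). A reduced sketch of the two styles with the standard logging module:

    import logging

    logging.basicConfig(level=logging.INFO)
    log = logging.getLogger('testnode').info

    process_pid_set = {1234, 5678}
    # before: the message is formatted even if the level is filtered out
    log('killPreviousRun, going to kill %r' % (process_pid_set,))
    # after: formatting is deferred to the logging machinery
    log('killPreviousRun, going to kill %r', process_pid_set)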
erp5/util/testnode/SlapOSControler.py

@@ -30,40 +30,14 @@ import slapos.slap
 import subprocess
 import time
 import xml_marshaller
-import shutil
 import sys
-import glob
 import argparse
-import json
 from slapos import client
+from .Utils import createFolder

-MAX_PARTIONS = 10
+MAX_PARTITIONS = 10
 MAX_SR_RETRIES = 3

-def createFolder(folder, clean=False):
-  if clean and os.path.exists(folder):
-    shutil.rmtree(folder)
-  if not(os.path.exists(folder)):
-    os.mkdir(folder)
-
-def createFolders(folder):
-  if not(os.path.exists(folder)):
-    os.makedirs(folder)
-
-def isDir(folder):
-  return os.path.isdir(folder)
-
-def createFile(path, mode, content):
-  f = open(path, mode)
-  if os.path.exists(path):
-    f.write(content)
-    f.close()
-  else:
-    # error
-    pass
-
 class SlapOSControler(object):

   def __init__(self, working_directory, config, log):
@@ -91,9 +65,12 @@ class SlapOSControler(object):
                               slapos_url,
                               slapos_account_certificate_path,
                               slapos_account_key_path)
-    createFile(slapos_account_key_path, "w", key)
-    createFile(slapos_account_certificate_path, "w", certificate)
-    createFile(configuration_file_path, "w", configuration_file_value)
+    with open(slapos_account_key_path, "w") as f:
+      f.write(key)
+    with open(slapos_account_certificate_path, "w") as f:
+      f.write(certificate)
+    with open(configuration_file_path, "w") as f:
+      f.write(configuration_file_value)
     self.configuration_file_path = configuration_file_path
     return slapos_account_key_path, slapos_account_certificate_path, configuration_file_path
@@ -115,26 +92,17 @@ class SlapOSControler(object):
       try:
         local = client.init(config)
         local['supply'](software_url, computer_guid=computer_id, state=state)
-        self.log('SlapOSControler : supply %s %s %s' % (software_url, computer_id, state))
-      except:
-        self.log("SlapOSControler.supply, \
-                  exception in registerOpenOrder", exc_info=sys.exc_info())
+        self.log('SlapOSControler : supply %s %s %s', software_url, computer_id, state)
+      except Exception:
+        self.log("SlapOSControler.supply", exc_info=sys.exc_info())
         raise ValueError("Unable to supply (or remove)")
     else:
       raise ValueError("Configuration file not found.")

-  def destroy(self, software_url, computer_id):
-    """
-    Request Deletetion of a software release on a specific node
-    Ex :
-    my_controler.destroy('kvm.cfg', 'COMP-726')
-    """
-    self.supply(self, software_url, computer_id, state="destroyed")
-
   def getInstanceRequestedState(self, reference):
     try:
       return self.instance_config[reference]['requested_state']
-    except:
+    except Exception:
      raise ValueError("Instance '%s' not exist" % self.instance_config[reference])

   def request(self, reference, software_url, software_type=None,
@@ -182,11 +150,11 @@ class SlapOSControler(object):
         self.instance_config[reference]['partition'] = partition
         if state == 'destroyed':
           del self.instance_config[reference]
-        if state == 'started':
-          self.log('Instance started with configuration: %s' % str(software_configuration))
-    except:
-      self.log("SlapOSControler.request, \
-                exception in registerOpenOrder", exc_info=sys.exc_info())
+        elif state == 'started':
+          self.log('Instance started with configuration: %s',
+                   software_configuration)
+    except Exception:
+      self.log("SlapOSControler.request", exc_info=sys.exc_info())
       raise ValueError("Unable to do this request")
     else:
       raise ValueError("Configuration file not found.")
@@ -204,31 +172,30 @@ class SlapOSControler(object):
     self.log('SlapOSControler : delete instance')
     try:
       self._requestSpecificState(reference, 'destroyed')
-    except:
-      raise ValueError("Can't delete instance '%s' (instance may not been created?)" % reference)
+    except Exception:
+      raise ValueError("Can't delete instance %r (instance not created?)" % reference)

   def stopInstance(self, reference):
     self.log('SlapOSControler : stop instance')
     try:
       self._requestSpecificState(reference, 'stopped')
-    except:
-      raise ValueError("Can't stop instance '%s' (instance may not been created?)" % reference)
+    except Exception:
+      raise ValueError("Can't stop instance %r (instance not created?)" % reference)

   def startInstance(self, reference):
     self.log('SlapOSControler : start instance')
     try:
       self._requestSpecificState(reference, 'started')
-    except:
-      raise ValueError("Can't start instance '%s' (instance may not been created?)" % reference)
+    except Exception:
+      raise ValueError("Can't start instance %r (instance not created?)" % reference)

   def updateInstanceXML(self, reference, software_configuration):
     """
     Update the XML configuration of an instance
     # Request same instance with different parameters.
     """
-    self.log('SlapOSControler : updateInstanceXML')
     self.log('SlapOSControler : updateInstanceXML will request same'
-             'instance with new XML configuration...')
+             ' instance with new XML configuration...')
     try:
       self.request(reference,
@@ -238,32 +205,28 @@ class SlapOSControler(object):
                    self.instance_config[reference]['computer_guid'],
                    state='started'
                    )
-    except:
+    except Exception:
       raise ValueError("Can't update instance '%s' (may not exist?)" % reference)

   def _resetSoftware(self):
-    self.log('SlapOSControler : GOING TO RESET ALL SOFTWARE : %r' %
-             (self.software_root,))
-    if os.path.exists(self.software_root):
-      shutil.rmtree(self.software_root)
-    os.mkdir(self.software_root)
-    os.chmod(self.software_root, 0750)
+    self.log('SlapOSControler : GOING TO RESET ALL SOFTWARE : %r',
+             self.software_root)
+    createFolder(self.software_root, True)

   def initializeSlapOSControler(self, slapproxy_log=None, process_manager=None,
       reset_software=False, software_path_list=None):
     self.process_manager = process_manager
     self.software_path_list = software_path_list
-    self.log('SlapOSControler, initialize, reset_software: %r' % reset_software)
+    self.log('SlapOSControler, initialize, reset_software: %r', reset_software)
     config = self.config
     slapos_config_dict = self.config.copy()
     slapos_config_dict.update(software_root=self.software_root,
                               instance_root=self.instance_root,
                               proxy_database=self.proxy_database)
-    open(self.slapos_config, 'w').write(pkg_resources.resource_string(
+    with open(self.slapos_config, 'w') as f:
+      f.write(pkg_resources.resource_string(
         'erp5.util.testnode', 'template/slapos.cfg.in') %
         slapos_config_dict)
-    createFolder(self.software_root)
-    createFolder(self.instance_root)
     # By erasing everything, we make sure that we are able to "update"
     # existing profiles. This is quite dirty way to do updates...
     if os.path.exists(self.proxy_database):
@@ -282,32 +245,29 @@ class SlapOSControler(object):
     # connections
     time.sleep(20)
     try:
-      slap = slapos.slap.slap()
-      self.slap = slap
-      self.slap.initializeConnection(config['master_url'])
+      slap = self.slap = slapos.slap.slap()
+      slap.initializeConnection(config['master_url'])
       # register software profile
       for path in self.software_path_list:
        slap.registerSupply().supply(
           path,
           computer_guid=config['computer_id'])
      computer = slap.registerComputer(config['computer_id'])
-    except:
-      self.log("SlapOSControler.initializeSlapOSControler, \
-                exception in registerSupply", exc_info=sys.exc_info())
+    except Exception:
+      self.log("SlapOSControler.initializeSlapOSControler",
+               exc_info=sys.exc_info())
      raise ValueError("Unable to registerSupply")

     # Reset all previously generated software if needed
     if reset_software:
       self._resetSoftware()
+    else:
+      createFolder(self.software_root)

     instance_root = self.instance_root
-    if os.path.exists(instance_root):
-      # delete old paritions which may exists in order to not get its data
-      # (ex. MySQL db content) from previous testnode's runs
-      # In order to be able to change partition naming scheme, do this at
-      # instance_root level (such change happened already, causing problems).
-      shutil.rmtree(instance_root)
-    if not(os.path.exists(instance_root)):
-      os.mkdir(instance_root)
-    for i in range(0, MAX_PARTIONS):
+    # Delete any existing partition in order to not get its data (ex.
+    # MySQL DB content) from previous runs. To support changes of partition
+    # naming scheme (which already happened), do this at instance_root level.
+    createFolder(instance_root, True)
+    for i in xrange(MAX_PARTITIONS):
       # create partition and configure computer
       # XXX: at the moment all partitions do share same virtual interface address
       # this is not a problem as usually all services are on different ports
@@ -336,17 +296,17 @@ class SlapOSControler(object):
   def runSoftwareRelease(self, config, environment, **kw):
     self.log("SlapOSControler.runSoftwareRelease")
-    cpu_count = os.sysconf("SC_NPROCESSORS_ONLN")
-    os.putenv('MAKEFLAGS', '-j%s' % cpu_count)
-    os.putenv('NPY_NUM_BUILD_JOBS', '%s' % cpu_count)
-    os.putenv('BUNDLE_JOBS', '%s' % cpu_count)
+    cpu_count = str(os.sysconf("SC_NPROCESSORS_ONLN"))
+    os.environ['MAKEFLAGS'] = '-j' + cpu_count
+    os.environ['NPY_NUM_BUILD_JOBS'] = cpu_count
+    os.environ['BUNDLE_JOBS'] = cpu_count
     os.environ['PATH'] = environment['PATH']
     # a SR may fail for number of reasons (incl. network failures)
     # so be tolerant and run it a few times before giving up
-    for runs in range(0, MAX_SR_RETRIES):
+    for _ in xrange(MAX_SR_RETRIES):
       status_dict = self.spawn(config['slapos_binary'],
                      'node', 'software', '--all',
-                     '--pidfile', '%s/software.pid' % self.software_root,
+                     '--pidfile', os.path.join(self.software_root, 'slapos-node.pid'),
                      '--cfg', self.slapos_config, raise_error_if_fail=False,
                      log_prefix='slapgrid_sr', get_output=False)
       if status_dict['status_code'] == 0:
@@ -355,32 +315,32 @@ class SlapOSControler(object):
   def runComputerPartition(self, config, environment,
                            stdout=None, stderr=None, cluster_configuration=None, **kw):
-    self.log("SlapOSControler.runComputerPartition with cluster_config: %r" % (cluster_configuration,))
+    self.log("SlapOSControler.runComputerPartition with cluster_config: %r",
+             cluster_configuration)
     for path in self.software_path_list:
       try:
         self.slap.registerOpenOrder().request(path,
                 partition_reference='testing partition %s' % \
                   self.software_path_list.index(path),
                 partition_parameter_kw=cluster_configuration)
-      except:
-        self.log("SlapOSControler.runComputerPartition, \
-                  exception in registerOpenOrder", exc_info=sys.exc_info())
+      except Exception:
+        self.log("SlapOSControler.runComputerPartition", exc_info=sys.exc_info())
         raise ValueError("Unable to registerOpenOrder")

     # try to run for all partitions as one partition may in theory request another one
     # this not always is required but curently no way to know how "tree" of partitions
     # may "expand"
-    sleep_time = 0
-    for runs in range(0, MAX_PARTIONS):
+    for _ in xrange(MAX_PARTITIONS):
       status_dict = self.spawn(config['slapos_binary'], 'node', 'instance',
-                     '--pidfile', '%s/instance.pid' % self.software_root,
+                     '--pidfile', os.path.join(self.instance_root, 'slapos-node.pid'),
                      '--cfg', self.slapos_config, raise_error_if_fail=False,
                      log_prefix='slapgrid_cp', get_output=False)
-      self.log('slapgrid_cp status_dict : %r' % (status_dict,))
-      if status_dict['status_code'] in (0,):
+      self.log('slapgrid_cp status_dict : %r', status_dict)
+      if not status_dict['status_code']:
         break
-    # some hack to handle promise issues (should be only one of the two
-    # codes, but depending on slapos versions, we have inconsistent status
-    if status_dict['status_code'] in (1,2):
-      status_dict['status_code'] = 0
+    else:
+      # some hack to handle promise issues (should be only one of the two
+      # codes, but depending on slapos versions, we have inconsistent status
+      if status_dict['status_code'] in (1,2):
+        status_dict['status_code'] = 0
     return status_dict
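Two recurring fixes in this file are the switch from the removed createFile helper to plain with open(...) blocks, and the narrowing of bare except: clauses to except Exception: so that KeyboardInterrupt and SystemExit are not swallowed. A small combined sketch of both patterns (path and logger names are placeholders, not taken from the module):

    import logging
    import os
    import sys
    import tempfile

    logging.basicConfig(level=logging.INFO)
    log = logging.getLogger('sketch').error

    def write_config(path, content):
      # replaces: f = open(path, 'w'); f.write(content); f.close()
      with open(path, 'w') as f:
        f.write(content)

    def safe_write(path, content):
      try:
        write_config(path, content)
      except Exception:            # not a bare "except:"
        log("write_config failed", exc_info=sys.exc_info())
        raise

    safe_write(os.path.join(tempfile.gettempdir(), 'sketch-slapos.cfg'), '[slapos]\n')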
erp5/util/testnode/SlapOSMasterCommunicator.py

 import datetime
 import json
-import sys
 import traceback
 import time
 #import feedparser
+from functools import wraps
 from uritemplate import expand

 import slapos.slap
 from slapos.slap import SoftwareProductCollection
-from slapos.slap.slap import ConnectionError
 from requests.exceptions import HTTPError
-from erp5.util.taskdistribution import SAFE_RPC_EXCEPTION_LIST
+from ..taskdistribution import SAFE_RPC_EXCEPTION_LIST

 # max time to instance changing state: 2 hour
 MAX_INSTANCE_TIME = 60*60*2
@@ -41,36 +39,29 @@ TESTER_STATE_INSTANCE_UNINSTALLED = "TESTER_STATE_INSTANCE_UNINSTALLED"
 # Simple decorator to prevent raise due small
 # network failures.
-def retryOnNetworkFailure(func):
-  def wrapper(*args, **kwargs):
+def retryOnNetworkFailure(func,
+    _except_list=SAFE_RPC_EXCEPTION_LIST + (HTTPError, slapos.slap.ConnectionError),
+    ):
+  def wrapper(*args, **kw):
     retry_time = 64
     while True:
       try:
-        return func(*args, **kwargs)
-      except SAFE_RPC_EXCEPTION_LIST, e:
-        print 'Network failure: %s , %s' % (sys.exc_info(), e)
-      except HTTPError, e:
-        print 'Network failure: %s , %s' % (sys.exc_info(), e)
-      except ConnectionError, e:
-        print 'Network failure: %s , %s' % (sys.exc_info(), e)
-      except slapos.slap.ConnectionError, e:
-        print 'Network failure: %s , %s' % (sys.exc_info(), e)
-      print 'Retry method %s in %i seconds' % (func, retry_time)
+        return func(*args, **kw)
+      except _except_list:
+        traceback.print_exc()
+      print 'Network failure. Retry method %s in %i seconds' % (func, retry_time)
       time.sleep(retry_time)
       retry_time = min(retry_time*1.5, 640)
-  wrapper.__name__ = func.__name__
-  wrapper.__doc__ = func.__doc__
-  return wrapper
+  return wraps(func)(wrapper)

 class SlapOSMasterCommunicator(object):
   latest_state = None

   def __init__(self, slap, slap_supply, slap_order, url, logger):
     self._logger = logger
     self.slap = slap
     self.slap_order = slap_order
@@ -102,8 +93,7 @@ class SlapOSMasterCommunicator(object):
     if instance_title is not None:
       self.name = instance_title
     if request_kw is not None:
-      if isinstance(request_kw, str) or \
-         isinstance(request_kw, unicode):
+      if isinstance(request_kw, basestring):
         self.request_kw = json.loads(request_kw)
       else:
         self.request_kw = request_kw
@@ -116,12 +106,11 @@ class SlapOSMasterCommunicator(object):
                **self.request_kw)

   def isInstanceRequested(self, instance_title):
-    hateoas = getattr(self.slap, '_hateoas_navigator', None)
+    hateoas = self._hateoas_navigator
     return instance_title in hateoas.getHostingSubscriptionDict()

   @retryOnNetworkFailure
   def _hateoas_getComputer(self, reference):
     root_document = self.hateoas_navigator.getRootDocument()
     search_url = root_document["_links"]['raw_search']['href']
@@ -147,7 +136,6 @@ class SlapOSMasterCommunicator(object):
   @retryOnNetworkFailure
   def getSoftwareInstallationList(self):
     # XXX Move me to slap.py API
     computer = self._hateoas_getComputer(self.computer_guid)
     # Not a list ?
@@ -191,7 +179,6 @@ class SlapOSMasterCommunicator(object):
   @retryOnNetworkFailure
   def getInstanceUrlList(self):
     if self.hosting_subscription_url is None:
       hosting_subscription_dict = self.hateoas_navigator._hateoas_getHostingSubscriptionDict()
       for hs in hosting_subscription_dict:
@@ -207,7 +194,6 @@ class SlapOSMasterCommunicator(object):
   @retryOnNetworkFailure
   def getNewsFromInstance(self, url):
     result = self.hateoas_navigator.GET(url)
     result = json.loads(result)
     if result['_links'].get('action_object_slap', None) is None:
@@ -221,7 +207,6 @@ class SlapOSMasterCommunicator(object):
   @retryOnNetworkFailure
   def getInformationFromInstance(self, url):
     result = self.hateoas_navigator.GET(url)
     result = json.loads(result)
     if result['_links'].get('action_object_slap', None) is None:
@@ -329,7 +314,7 @@ class SlapOSMasterCommunicator(object):
       self._logger('Got an error requesting partition for '
                    'its state')
       return INSTANCE_STATE_UNKNOWN
-    except:
+    except Exception:
       self._logger("ERROR getting instance state")
       return INSTANCE_STATE_UNKNOWN
@@ -377,6 +362,7 @@ class SlapOSMasterCommunicator(object):
     return {'error_message': None}

 class SlapOSTester(SlapOSMasterCommunicator):

   def __init__(self,
                name,
                logger,
@@ -387,7 +373,6 @@ class SlapOSTester(SlapOSMasterCommunicator):
                computer_guid=None, # computer for supply if desired
                request_kw=None
       ):
     super(SlapOSTester, self).__init__(
       slap, slap_supply, slap_order, url, logger)
@@ -464,7 +449,6 @@ class SoftwareReleaseTester(SlapOSTester):
                software_timeout=3600,
                instance_timeout=3600,
       ):
     super(SoftwareReleaseTester, self).__init__(
       name, logger, slap, slap_order, slap_supply, url, computer_guid, request_kw)
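The retry decorator is rebuilt around functools.wraps, which copies __name__, __doc__ and the rest of the wrapped function's metadata in one call, and around a single exception tuple instead of four near-identical except blocks. A reduced sketch of the same shape (exception types, delays and the demo function are illustrative):

    import time
    from functools import wraps

    def retryOnFailure(func, _except_list=(IOError, OSError)):
      def wrapper(*args, **kw):
        retry_time = 0.1
        while True:
          try:
            return func(*args, **kw)
          except _except_list:
            time.sleep(retry_time)
            retry_time = min(retry_time * 1.5, 1.0)
      return wraps(func)(wrapper)

    @retryOnFailure
    def fetch():
      """Pretend network call."""
      return 'ok'

    print(fetch())   # 'ok'; fetch.__doc__ is preserved by wraps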
erp5/util/testnode/UnitTestRunner.py

@@ -24,28 +24,22 @@
 # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 #
 ##############################################################################
-from datetime import datetime, timedelta
 import os
-import subprocess
-import sys
-import time
 import glob
-import SlapOSControler
 import json
-import time
-import shutil
-import logging
-import string
-import random
-from ProcessManager import SubprocessError, ProcessManager, CancellationError
-from subprocess import CalledProcessError
-from NodeTestSuite import SlapOSInstance
-from Updater import Updater
-from Utils import dealShebang
-from erp5.util import taskdistribution
+from .ProcessManager import SubprocessError
+from .SlapOSControler import SlapOSControler
+from .Utils import createFolder
 from slapos.grid.utils import md5digest

-class UnitTestRunner():
+def dealShebang(run_test_suite_path):
+  with open(run_test_suite_path) as f:
+    if f.read(2) == '#!':
+      return f.readline().split(None, 1)
+  return []
+
+class UnitTestRunner(object):

   def __init__(self, testnode):
     self.testnode = testnode
@@ -53,12 +47,11 @@ class UnitTestRunner():
     """
     Create a SlapOSControler
     """
-    return SlapOSControler.SlapOSControler(
+    return SlapOSControler(
                           working_directory,
                           self.testnode.config,
                           self.testnode.log)

   def _prepareSlapOS(self, working_directory, slapos_instance, log,
           create_partition=1, software_path_list=None, **kw):
     """
@@ -66,11 +59,11 @@ class UnitTestRunner():
     """
     slapproxy_log = os.path.join(self.testnode.config['log_directory'],
                                   'slapproxy.log')
-    log('Configured slapproxy log to %r' % slapproxy_log)
+    log('Configured slapproxy log to %r', slapproxy_log)
     reset_software = slapos_instance.retry_software_count > 10
     if reset_software:
       slapos_instance.retry_software_count = 0
-    log('testnode, retry_software_count : %r' % \
+    log('testnode, retry_software_count : %r',
         slapos_instance.retry_software_count)
     # XXX Create a new controler because working_directory can be
@@ -108,26 +101,22 @@ class UnitTestRunner():
     """
     # report-url, report-project and suite-url are required to seleniumrunner
     # instance. This is a hack which must be removed.
-    cluster_configuration = {}
     config = self.testnode.config
-    cluster_configuration['report-url'] = config.get("report-url", "")
-    cluster_configuration['report-project'] = config.get("report-project", "")
-    cluster_configuration['suite-url'] = config.get("suite-url", "")
-    return self._prepareSlapOS(self.testnode.config['slapos_directory'],
+    return self._prepareSlapOS(config['slapos_directory'],
               test_node_slapos, self.testnode.log, create_partition=0,
-              software_path_list = self.testnode.config.get("software_list"),
-              cluster_configuration=cluster_configuration
-              )
+              software_path_list=config.get("software_list"),
+              cluster_configuration={
+                'report-url': config.get("report-url", ""),
+                'report-project': config.get("report-project", ""),
+                'suite-url': config.get("suite-url", ""),
+              })

   def prepareSlapOSForTestSuite(self, node_test_suite):
     """
     Build softwares needed by testsuites
     """
-    log = self.testnode.log
-    if log is None:
-      log = self.testnode.log
     return self._prepareSlapOS(node_test_suite.working_directory,
-                               node_test_suite, log,
+                               node_test_suite, self.testnode.log,
                                software_path_list=[node_test_suite.custom_profile_path],
                                cluster_configuration={'_': json.dumps(node_test_suite.cluster_configuration)})
@@ -171,8 +160,7 @@ class UnitTestRunner():
       # From this point, test runner becomes responsible for updating test
       # result. We only do cleanup if the test runner itself is not able
      # to run.
-      SlapOSControler.createFolder(node_test_suite.test_suite_directory,
+      createFolder(node_test_suite.test_suite_directory,
                                    clean=True)
      self.testnode.process_manager.spawn(*invocation_list,
          cwd=node_test_suite.test_suite_directory,
          log_prefix='runTestSuite', get_output=False)
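dealShebang, moved here from Utils.py, now reads the script through a context manager: the first two bytes decide whether a shebang is present, and only then is the rest of the line read and split. A self-contained check of that behaviour against a temporary file:

    import os
    import tempfile

    def dealShebang(run_test_suite_path):
      with open(run_test_suite_path) as f:
        if f.read(2) == '#!':
          return f.readline().split(None, 1)
      return []

    fd, path = tempfile.mkstemp()
    os.write(fd, b'#!/usr/bin/python\nprint(1)\n')
    os.close(fd)
    print(dealShebang(path))   # ['/usr/bin/python']
    os.remove(path)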
erp5/util/testnode/Updater.py

@@ -28,11 +28,8 @@ import errno
 import os
 import re
 import shutil
-import subprocess
 import sys
-import threading
-from ProcessManager import SubprocessError
+from .ProcessManager import SubprocessError

 SVN_UP_REV = re.compile(r'^(?:At|Updated to) revision (\d+).$')
 SVN_CHANGED_REV = re.compile(r'^Last Changed Rev.*:\s*(\d+)', re.MULTILINE)
@@ -44,9 +41,8 @@ SVN_TYPE = 'svn'
 class Updater(object):

   _git_cache = {}
-  stdin = file(os.devnull)

-  def __init__(self, repository_path, log, revision=None, git_binary=None,
+  def __init__(self, repository_path, log, revision=None, git_binary='git',
                branch=None, realtime_output=True, process_manager=None, url=None,
                working_directory=None):
     self.log = log
@@ -81,7 +77,7 @@ class Updater(object):
   def deletePycFiles(self, path):
     """Delete *.pyc files so that deleted/moved files can not be imported"""
-    for path, dir_list, file_list in os.walk(path):
+    for path, _, file_list in os.walk(path):
       for file in file_list:
         if file[-4:] in ('.pyc', '.pyo'):
           # allow several processes clean the same folder at the same time
@@ -115,17 +111,17 @@ class Updater(object):
     git_repository_path = os.path.join(self.getRepositoryPath(), '.git')
     name = os.path.basename(os.path.normpath(self.getRepositoryPath()))
     git_repository_link_path = os.path.join(self.getRepositoryPath(), '%s.git' %name)
-    self.log("checking link %s -> %s.."
-              %(git_repository_link_path,git_repository_path))
+    self.log("checking link %s -> %s..",
+             git_repository_link_path, git_repository_path)
     if ( not os.path.lexists(git_repository_link_path) and \
          not os.path.exists(git_repository_link_path) ):
       try:
         os.symlink(git_repository_path, git_repository_link_path)
-        self.log("link: %s -> %s created"
-                  %(git_repository_link_path,git_repository_path))
-      except:
-        self.log("Cannot create link from %s -> %s"
-                  %(git_repository_link_path,git_repository_path))
+        self.log("link: %s -> %s created",
+                 git_repository_link_path, git_repository_path)
+      except OSError:
+        self.log("Cannot create link from %s -> %s",
+                 git_repository_link_path, git_repository_path)

   def _git_find_rev(self, ref):
     try:
@@ -152,7 +148,7 @@ class Updater(object):
     raise NotImplementedError

   def deleteRepository(self):
-    self.log("Wrong repository or wrong url, deleting repos %s" % \
+    self.log("Wrong repository or wrong url, deleting repos %s",
              self.repository_path)
     shutil.rmtree(self.repository_path)
@@ -165,7 +161,7 @@ class Updater(object):
       remote_url = self._git("config", "--get", "remote.origin.url")
       if remote_url == self.url:
         correct_url = True
-    except (SubprocessError,) as e:
+    except SubprocessError:
       self.log("SubprocessError", exc_info=sys.exc_info())
     if not(correct_url):
       self.deleteRepository()
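Updater's bare except: around os.symlink is narrowed to except OSError:, the only failure expected there, so unrelated errors keep propagating. A sketch of the pattern in isolation (paths and logger name are placeholders; log is assumed to be a logging method so it accepts deferred format arguments):

    import logging
    import os

    logging.basicConfig(level=logging.INFO)
    log = logging.getLogger('updater').info

    def ensure_link(target, link_path):
      try:
        os.symlink(target, link_path)
      except OSError:
        # the link already exists, permissions are missing, etc.;
        # anything unexpected still propagates
        log("Cannot create link from %s -> %s", link_path, target)
      else:
        log("link: %s -> %s created", link_path, target)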
erp5/util/testnode/Utils.py

-import sys
-import json
+import os
 import shutil
-import string
-from random import choice
+
+def createFolder(folder, clean=False):
+  if os.path.exists(folder):
+    if not clean:
+      return
+    shutil.rmtree(folder)
+  os.mkdir(folder)

 def deunicodeData(data):
   if isinstance(data, list):
-    new_data = []
-    for sub_data in data:
-      new_data.append(deunicodeData(sub_data))
-  elif isinstance(data, unicode):
-    new_data = data.encode('utf8')
-  elif isinstance(data, dict):
-    new_data = {}
-    for key, value in data.iteritems():
-      key = deunicodeData(key)
-      value = deunicodeData(value)
-      new_data[key] = value
-  else:
-    new_data = data
-  return new_data
-
-def dealShebang(run_test_suite_path):
-  line = open(run_test_suite_path, 'r').readline()
-  invocation_list = []
-  if line[:2] == '#!':
-    invocation_list = line[2:].split()
-  return invocation_list
+    return map(deunicodeData, data)
+  if isinstance(data, unicode):
+    return data.encode('utf8')
+  if isinstance(data, dict):
+    return {deunicodeData(key): deunicodeData(value)
+            for key, value in data.iteritems()}
+  return data
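The new createFolder is the single helper the other modules now import: it leaves an existing directory alone unless clean=True, in which case the directory is wiped and recreated. A short usage sketch against a temporary path (the helper body repeats the one added above):

    import os
    import shutil
    import tempfile

    def createFolder(folder, clean=False):
      if os.path.exists(folder):
        if not clean:
          return
        shutil.rmtree(folder)
      os.mkdir(folder)

    work = os.path.join(tempfile.gettempdir(), 'testnode-demo')
    createFolder(work)               # created
    createFolder(work)               # already there: no-op
    createFolder(work, clean=True)   # removed and recreated
    shutil.rmtree(work)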
erp5/util/testnode/testnode.py

@@ -26,30 +26,24 @@
 ##############################################################################
 import os
 import sys
-import time
 import json
 import time
 import shutil
 import logging
-import Utils
 from slapos.slap.slap import ConnectionError
-import traceback
-from ProcessManager import SubprocessError, ProcessManager, CancellationError
+from .ProcessManager import SubprocessError, ProcessManager, CancellationError
 from subprocess import CalledProcessError
-from Updater import Updater
-from NodeTestSuite import NodeTestSuite, SlapOSInstance
-from ScalabilityTestRunner import ScalabilityTestRunner
-from UnitTestRunner import UnitTestRunner
-from erp5.util import taskdistribution
+from .Updater import Updater
+from .NodeTestSuite import NodeTestSuite, SlapOSInstance
+from .ScalabilityTestRunner import ScalabilityTestRunner
+from .UnitTestRunner import UnitTestRunner
+from .Utils import deunicodeData
+from .. import taskdistribution

-DEFAULT_SLEEP_TIMEOUT = 120 # time in seconds to sleep
 MAX_LOG_TIME = 15 # time in days we should keep logs that we can see through
                   # httd
 MAX_TEMP_TIME = 0.01 # time in days we should keep temp files
-supervisord_pid_file = None
 PROFILE_PATH_KEY = 'profile_path'
@@ -76,7 +70,6 @@ class TestNode(object):

   def checkOldTestSuite(self, test_suite_data):
-    config = self.config
     installed_reference_set = set(os.listdir(self.working_directory))
     wished_reference_set = set([x['test_suite_reference'] for x in test_suite_data])
     to_remove_reference_set = installed_reference_set.difference(
@@ -84,11 +77,11 @@ class TestNode(object):
     for y in to_remove_reference_set:
       fpath = os.path.join(self.working_directory, y)
       self.delNodeTestSuite(y)
-      self.log("testnode.checkOldTestSuite, DELETING : %r" % (fpath,))
+      self.log("testnode.checkOldTestSuite, DELETING : %r", fpath)
       if os.path.isdir(fpath):
         shutil.rmtree(fpath)
       else:
         os.remove(fpath)

   def getNodeTestSuite(self, reference):
     node_test_suite = self.node_test_suite_dict.get(reference)
@@ -103,27 +96,22 @@ class TestNode(object):
     return node_test_suite

   def delNodeTestSuite(self, reference):
-    if self.node_test_suite_dict.has_key(reference):
-      self.node_test_suite_dict.pop(reference)
+    self.node_test_suite_dict.pop(reference, None)

   def constructProfile(self, node_test_suite, test_type, use_relative_path=False):
-    config = self.config
-    profile_content = ''
     assert len(node_test_suite.vcs_repository_list), "we must have at least one repository"
-    profile_path_count = 0
+    software_config_path = None
     profile_content_list = []
     revision_dict = dict(node_test_suite.revision_list)
     for vcs_repository in node_test_suite.vcs_repository_list:
-      url = vcs_repository['url']
-      buildout_section_id = vcs_repository.get('buildout_section_id', None)
+      buildout_section_id = vcs_repository.get('buildout_section_id')
       repository_path = vcs_repository['repository_path']
       try:
         profile_path = vcs_repository[PROFILE_PATH_KEY]
       except KeyError:
         pass
       else:
-        profile_path_count += 1
-        if profile_path_count > 1:
+        if software_config_path is not None:
           raise ValueError(PROFILE_PATH_KEY + ' defined more than once')
         # Absolute path to relative path
@@ -133,11 +121,6 @@
                                      node_test_suite.reference)
         software_config_path = os.path.relpath(software_config_path, from_path)
-        profile_content_list.append("""
-[buildout]
-extends = %(software_config_path)s
-""" % {'software_config_path': software_config_path})

       # Construct sections
       if not(buildout_section_id is None):
         # Absolute path to relative
@@ -168,14 +151,14 @@ shared = true
 """ % {'buildout_section_id': buildout_section_id,
        'repository_path' : repository_path,
        'branch' : vcs_repository.get('branch', 'master')})
-    if not profile_path_count:
+    if software_config_path is None:
       raise ValueError(PROFILE_PATH_KEY + ' not defined')
-    profile_content_list.sort()
     # Write file
-    custom_profile = open(node_test_suite.custom_profile_path, 'w')
-    custom_profile.write(''.join(profile_content_list))
-    custom_profile.close()
+    with open(node_test_suite.custom_profile_path, 'w') as f:
+      # sort to have buildout section first
+      profile_content_list.sort(key=lambda x: [x, ''][x.startswith('\n[buildout]')])
+      f.write("[buildout]\nextends = %s\n%s" % (software_config_path,
+              ''.join(profile_content_list)))
     sys.path.append(repository_path)

   def updateRevisionList(self, node_test_suite):
@@ -194,7 +177,7 @@ shared = true
                           url=vcs_repository["url"])
         updater.checkout()
         revision_list.append((repository_id, updater.getRevision()))
-      except SubprocessError, e:
+      except SubprocessError:
         log("Error while getting repository, ignoring this test suite",
             exc_info=1)
         return False
@@ -211,7 +194,7 @@ shared = true
     # TODO make the path into url
     test_result.reportStatus('LOG url', "%s/%s" % (self.config.get('httpd_url'),
                              folder_id), '')
-    self.log("going to switch to log %r" % suite_log_path)
+    self.log("going to switch to log %r", suite_log_path)
     self.process_manager.log = self.log = self.getSuiteLog()
     return suite_log_path
@@ -230,7 +213,7 @@ shared = true
     self.file_handler = logging.FileHandler(filename=suite_log_path)
     self.file_handler.setFormatter(formatter)
     logger.addHandler(self.file_handler)
-    logger.info('Activated logfile %r output' % suite_log_path)
+    logger.info('Activated logfile %r output', suite_log_path)
     self.suite_log = logger.info

   def checkRevision(self, test_result, node_test_suite):
@@ -257,14 +240,13 @@ shared = true
     node_test_suite.revision_list = revision_list

   def _cleanupLog(self):
-    config = self.config
     log_directory = self.config['log_directory']
-    now = time.time()
+    prune_time = time.time() - 86400 * self.max_log_time
     for log_folder in os.listdir(log_directory):
       folder_path = os.path.join(log_directory, log_folder)
       if os.path.isdir(folder_path):
-        if (now - os.stat(folder_path).st_mtime)/86400 > self.max_log_time:
+        if os.stat(folder_path
).
st_mtime
<
prune
_time
:
self
.
log
(
"deleting log directory %r"
%
(
folder_path
,)
)
self
.
log
(
"deleting log directory %r"
,
folder_path
)
shutil
.
rmtree
(
folder_path
)
shutil
.
rmtree
(
folder_path
)
def
_cleanupTemporaryFiles
(
self
):
def
_cleanupTemporaryFiles
(
self
):
...
@@ -273,17 +255,16 @@ shared = true
...
@@ -273,17 +255,16 @@ shared = true
missing disk space, remove old logs
missing disk space, remove old logs
"""
"""
temp_directory
=
self
.
config
[
"system_temp_folder"
]
temp_directory
=
self
.
config
[
"system_temp_folder"
]
now
=
time
.
time
()
user_id
=
os
.
geteuid
()
user_id
=
os
.
geteuid
()
prune_time
=
time
.
time
()
-
86400
*
self
.
max_temp_time
for
temp_folder
in
os
.
listdir
(
temp_directory
):
for
temp_folder
in
os
.
listdir
(
temp_directory
):
folder_path
=
os
.
path
.
join
(
temp_directory
,
temp_folder
)
folder_path
=
os
.
path
.
join
(
temp_directory
,
temp_folder
)
if
(
temp_folder
.
startswith
(
"tmp"
)
or
if
(
temp_folder
.
startswith
(
"tmp"
)
or
temp_folder
.
startswith
(
"buildout"
)):
temp_folder
.
startswith
(
"buildout"
)):
try
:
try
:
stat
=
os
.
stat
(
folder_path
)
stat
=
os
.
stat
(
folder_path
)
if
stat
.
st_uid
==
user_id
and
\
if
stat
.
st_uid
==
user_id
and
stat
.
st_mtime
<
prune_time
:
(
now
-
stat
.
st_mtime
)
/
86400
>
self
.
max_temp_time
:
self
.
log
(
"deleting temp directory %r"
,
folder_path
)
self
.
log
(
"deleting temp directory %r"
%
(
folder_path
,))
if
os
.
path
.
isdir
(
folder_path
):
if
os
.
path
.
isdir
(
folder_path
):
shutil
.
rmtree
(
folder_path
)
shutil
.
rmtree
(
folder_path
)
else
:
else
:
...
@@ -291,9 +272,8 @@ shared = true
...
@@ -291,9 +272,8 @@ shared = true
except
OSError
:
except
OSError
:
self
.
log
(
"_cleanupTemporaryFiles exception"
,
exc_info
=
1
)
self
.
log
(
"_cleanupTemporaryFiles exception"
,
exc_info
=
1
)
def
cleanUp
(
self
,
test_result
):
def
cleanUp
(
self
):
log
=
self
.
log
self
.
log
(
'Testnode.cleanUp'
)
log
(
'Testnode.cleanUp'
)
self
.
process_manager
.
killPreviousRun
()
self
.
process_manager
.
killPreviousRun
()
self
.
_cleanupLog
()
self
.
_cleanupLog
()
self
.
_cleanupTemporaryFiles
()
self
.
_cleanupTemporaryFiles
()
...
@@ -301,19 +281,15 @@ shared = true
...
@@ -301,19 +281,15 @@ shared = true
def
run
(
self
):
def
run
(
self
):
log
=
self
.
log
log
=
self
.
log
config
=
self
.
config
config
=
self
.
config
slapgrid
=
None
previous_revision_dict
=
{}
revision_dict
=
{}
test_result
=
None
test_node_slapos
=
SlapOSInstance
()
test_node_slapos
=
SlapOSInstance
()
test_node_slapos
.
edit
(
working_directory
=
self
.
config
[
'slapos_directory'
])
test_node_slapos
.
edit
(
working_directory
=
config
[
'slapos_directory'
])
try
:
try
:
while
True
:
while
True
:
test_result
=
None
try
:
try
:
node_test_suite
=
None
node_test_suite
=
None
self
.
log
=
self
.
process_manager
.
log
=
self
.
testnode_log
self
.
log
=
self
.
process_manager
.
log
=
self
.
testnode_log
self
.
cleanUp
(
None
)
self
.
cleanUp
()
remote_test_result_needs_cleanup
=
False
begin
=
time
.
time
()
begin
=
time
.
time
()
portal_url
=
config
[
'test_suite_master_url'
]
portal_url
=
config
[
'test_suite_master_url'
]
self
.
taskdistribution
=
taskdistribution
.
TaskDistributor
(
self
.
taskdistribution
=
taskdistribution
.
TaskDistributor
(
...
@@ -321,23 +297,23 @@ shared = true
...
@@ -321,23 +297,23 @@ shared = true
logger
=
DummyLogger
(
log
))
logger
=
DummyLogger
(
log
))
node_configuration
=
self
.
taskdistribution
.
subscribeNode
(
node_title
=
config
[
'test_node_title'
],
node_configuration
=
self
.
taskdistribution
.
subscribeNode
(
node_title
=
config
[
'test_node_title'
],
computer_guid
=
config
[
'computer_id'
])
computer_guid
=
config
[
'computer_id'
])
if
type
(
node_configuration
)
==
str
:
if
type
(
node_configuration
)
is
str
:
# Backward compatiblity
# Backward compatiblity
node_configuration
=
json
.
loads
(
node_configuration
)
node_configuration
=
json
.
loads
(
node_configuration
)
if
node_configuration
is
not
None
and
\
if
node_configuration
is
not
None
and
\
'process_timeout'
in
node_configuration
\
'process_timeout'
in
node_configuration
\
and
node_configuration
[
'process_timeout'
]
is
not
None
:
and
node_configuration
[
'process_timeout'
]
is
not
None
:
process_timeout
=
node_configuration
[
'process_timeout'
]
process_timeout
=
node_configuration
[
'process_timeout'
]
log
(
'Received and using process timeout from master: %i'
%
(
log
(
'Received and using process timeout from master: %i'
,
process_timeout
)
)
process_timeout
)
self
.
process_manager
.
max_timeout
=
process_timeout
self
.
process_manager
.
max_timeout
=
process_timeout
test_suite_data
=
self
.
taskdistribution
.
startTestSuite
(
test_suite_data
=
self
.
taskdistribution
.
startTestSuite
(
node_title
=
config
[
'test_node_title'
],
node_title
=
config
[
'test_node_title'
],
computer_guid
=
config
[
'computer_id'
])
computer_guid
=
config
[
'computer_id'
])
if
type
(
test_suite_data
)
==
str
:
if
type
(
test_suite_data
)
is
str
:
# Backward compatiblity
# Backward compatiblity
test_suite_data
=
json
.
loads
(
test_suite_data
)
test_suite_data
=
json
.
loads
(
test_suite_data
)
test_suite_data
=
Utils
.
deunicodeData
(
test_suite_data
)
test_suite_data
=
deunicodeData
(
test_suite_data
)
log
(
"Got following test suite data from master : %r"
,
log
(
"Got following test suite data from master : %r"
,
test_suite_data
)
test_suite_data
)
try
:
try
:
...
@@ -360,19 +336,18 @@ shared = true
...
@@ -360,19 +336,18 @@ shared = true
# Clean-up test suites
# Clean-up test suites
self
.
checkOldTestSuite
(
test_suite_data
)
self
.
checkOldTestSuite
(
test_suite_data
)
for
test_suite
in
test_suite_data
:
for
test_suite
in
test_suite_data
:
remote_test_result_needs_cleanup
=
False
node_test_suite
=
self
.
getNodeTestSuite
(
node_test_suite
=
self
.
getNodeTestSuite
(
test_suite
[
"test_suite_reference"
])
test_suite
[
"test_suite_reference"
])
node_test_suite
.
edit
(
node_test_suite
.
edit
(
working_directory
=
self
.
config
[
'working_directory'
],
working_directory
=
config
[
'working_directory'
],
log_directory
=
self
.
config
[
'log_directory'
])
log_directory
=
config
[
'log_directory'
])
node_test_suite
.
edit
(
**
test_suite
)
node_test_suite
.
edit
(
**
test_suite
)
if
my_test_type
==
'UnitTest'
:
if
my_test_type
==
'UnitTest'
:
runner
=
UnitTestRunner
(
node_test_suite
)
runner
=
UnitTestRunner
(
node_test_suite
)
elif
my_test_type
==
'ScalabilityTest'
:
elif
my_test_type
==
'ScalabilityTest'
:
runner
=
ScalabilityTestRunner
(
self
)
runner
=
ScalabilityTestRunner
(
self
)
else
:
else
:
log
(
"testnode, Runner type %s not implemented."
,
my_test_type
)
log
(
"testnode, Runner type %s not implemented."
,
my_test_type
)
raise
NotImplementedError
raise
NotImplementedError
...
@@ -380,7 +355,6 @@ shared = true
...
@@ -380,7 +355,6 @@ shared = true
# XXX: temporary hack to prevent empty test_suite
# XXX: temporary hack to prevent empty test_suite
if
not
hasattr
(
node_test_suite
,
'test_suite'
):
if
not
hasattr
(
node_test_suite
,
'test_suite'
):
node_test_suite
.
edit
(
test_suite
=
''
)
node_test_suite
.
edit
(
test_suite
=
''
)
run_software
=
True
# kill processes from previous loop if any
# kill processes from previous loop if any
self
.
process_manager
.
killPreviousRun
()
self
.
process_manager
.
killPreviousRun
()
if
not
self
.
updateRevisionList
(
node_test_suite
):
if
not
self
.
updateRevisionList
(
node_test_suite
):
...
@@ -394,7 +368,6 @@ shared = true
...
@@ -394,7 +368,6 @@ shared = true
config
[
'test_node_title'
],
False
,
config
[
'test_node_title'
],
False
,
node_test_suite
.
test_suite_title
,
node_test_suite
.
test_suite_title
,
node_test_suite
.
project_title
)
node_test_suite
.
project_title
)
remote_test_result_needs_cleanup
=
True
log
(
"testnode, test_result : %r"
,
test_result
)
log
(
"testnode, test_result : %r"
,
test_result
)
if
test_result
is
not
None
:
if
test_result
is
not
None
:
self
.
registerSuiteLog
(
test_result
,
node_test_suite
)
self
.
registerSuiteLog
(
test_result
,
node_test_suite
)
...
@@ -402,10 +375,10 @@ shared = true
...
@@ -402,10 +375,10 @@ shared = true
node_test_suite
.
edit
(
test_result
=
test_result
)
node_test_suite
.
edit
(
test_result
=
test_result
)
# get cluster configuration for this test suite, this is needed to
# get cluster configuration for this test suite, this is needed to
# know slapos parameters to user for creating instances
# know slapos parameters to user for creating instances
log
(
"Getting configuration from test suite "
+
str
(
node_test_suite
.
test_suite_title
))
log
(
"Getting configuration from test suite %s"
,
node_test_suite
.
test_suite_title
)
generated_config
=
self
.
taskdistribution
.
generateConfiguration
(
node_test_suite
.
test_suite_title
)
generated_config
=
self
.
taskdistribution
.
generateConfiguration
(
node_test_suite
.
test_suite_title
)
json_data
=
json
.
loads
(
generated_config
)
json_data
=
json
.
loads
(
generated_config
)
cluster_configuration
=
Utils
.
deunicodeData
(
json_data
[
'configuration_list'
][
0
])
cluster_configuration
=
deunicodeData
(
json_data
[
'configuration_list'
][
0
])
node_test_suite
.
edit
(
cluster_configuration
=
cluster_configuration
)
node_test_suite
.
edit
(
cluster_configuration
=
cluster_configuration
)
# Now prepare the installation of SlapOS and create instance
# Now prepare the installation of SlapOS and create instance
status_dict
=
runner
.
prepareSlapOSForTestSuite
(
node_test_suite
)
status_dict
=
runner
.
prepareSlapOSForTestSuite
(
node_test_suite
)
...
@@ -437,10 +410,10 @@ shared = true
...
@@ -437,10 +410,10 @@ shared = true
raise
NotImplementedError
raise
NotImplementedError
# break the loop to get latest priorities from master
# break the loop to get latest priorities from master
break
break
self
.
cleanUp
(
test_result
)
self
.
cleanUp
()
except
(
SubprocessError
,
CalledProcessError
,
ConnectionError
)
as
e
:
except
(
SubprocessError
,
CalledProcessError
,
ConnectionError
)
as
e
:
log
(
""
,
exc_info
=
1
)
log
(
""
,
exc_info
=
1
)
if
remote_test_result_needs_cleanup
:
if
test_result
is
not
None
:
status_dict
=
getattr
(
e
,
"status_dict"
,
None
)
or
{
status_dict
=
getattr
(
e
,
"status_dict"
,
None
)
or
{
'stderr'
:
"%s: %s"
%
(
e
.
__class__
.
__name__
,
e
)}
'stderr'
:
"%s: %s"
%
(
e
.
__class__
.
__name__
,
e
)}
test_result
.
reportFailure
(
test_result
.
reportFailure
(
...
@@ -454,7 +427,7 @@ shared = true
...
@@ -454,7 +427,7 @@ shared = true
log
(
""
,
exc_info
=
1
)
log
(
""
,
exc_info
=
1
)
if
node_test_suite
is
not
None
:
if
node_test_suite
is
not
None
:
node_test_suite
.
retry_software_count
+=
1
node_test_suite
.
retry_software_count
+=
1
if
remote_test_result_needs_cleanup
:
if
test_result
is
not
None
:
test_result
.
reportFailure
(
test_result
.
reportFailure
(
command
=
''
,
stdout
=
''
,
command
=
''
,
stdout
=
''
,
stderr
=
"ValueError was raised : %s"
%
(
e
,),
stderr
=
"ValueError was raised : %s"
%
(
e
,),
...
@@ -464,10 +437,9 @@ shared = true
...
@@ -464,10 +437,9 @@ shared = true
self
.
process_manager
.
under_cancellation
=
False
self
.
process_manager
.
under_cancellation
=
False
node_test_suite
.
retry
=
True
node_test_suite
.
retry
=
True
continue
continue
now
=
time
.
time
()
self
.
cleanUp
()
self
.
cleanUp
(
test_result
)
sleep_time
=
120
-
(
time
.
time
()
-
begin
)
if
(
now
-
begin
)
<
120
:
if
sleep_time
>
0
:
sleep_time
=
120
-
(
now
-
begin
)
log
(
"End of processing, going to sleep %s"
,
sleep_time
)
log
(
"End of processing, going to sleep %s"
,
sleep_time
)
time
.
sleep
(
sleep_time
)
time
.
sleep
(
sleep_time
)
except
Exception
:
except
Exception
:
...
@@ -480,5 +452,5 @@ shared = true
...
@@ -480,5 +452,5 @@ shared = true
# groups working only in POSIX compilant systems
# groups working only in POSIX compilant systems
# Exceptions are swallowed during cleanup phase
# Exceptions are swallowed during cleanup phase
log
(
"GENERAL EXCEPTION, QUITING"
)
log
(
"GENERAL EXCEPTION, QUITING"
)
self
.
cleanUp
(
test_result
)
self
.
cleanUp
()
log
(
"GENERAL EXCEPTION, QUITING, cleanup finished"
)
log
(
"GENERAL EXCEPTION, QUITING, cleanup finished"
)
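Most of the changes above replace eager `%` interpolation in log calls with lazy, logging-style argument passing. A minimal standalone sketch of the difference is shown below; the logger name and the value of `folder_path` are illustrative only, not taken from the testnode code:

```python
import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("example")  # illustrative logger, not the testnode one

folder_path = "/tmp/old-logs"  # illustrative value

# Eager: the message string is built even when DEBUG is disabled.
logger.debug("deleting log directory %r" % (folder_path,))

# Lazy: arguments are only interpolated if the record is actually emitted,
# which is the pattern the clean-up switches the log calls to.
logger.debug("deleting log directory %r", folder_path)
logger.info("deleting log directory %r", folder_path)
```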