wendelin.core

Commit ef6d0669 authored Apr 30, 2019 by Kirill Smelkov
.
parent e76f9f9a
Showing 1 changed file with 25 additions and 21 deletions

wcfs/wcfs_test.py  (+25 -21)
@@ -17,7 +17,7 @@
 #
 # See COPYING file for full licensing terms.
 # See https://www.nexedi.com/licensing for rationale and options.
-"""test wcfs filesystem from outside as python client process"""
+"""wcfs_test tests wcfs filesystem from outside as python client process"""
 from __future__ import print_function
@@ -116,7 +116,11 @@ def test_join_autostart():
     assert os.path.isdir(wc.mountpoint + "/head/bigfile")

-# tDB is database/wcfs testing environment.
+# tDB provides database/wcfs testing environment.
+#
+# XXX link -> tFile + tWatch.
+#
+# XXX print -> t.trace/debug() + t.verbose depending on py.test -v -v ?
 class tDB:
     def __init__(t):
         t.root = testdb.dbopen()
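Taken together with the later hunks, the reworded comment describes the test workflow that tDB drives. A minimal usage sketch, assembled only from calls that appear elsewhere in this diff (zf is a ZBigFile from the test database; f is a tFile opened for it outside this hunk):

    t = tDB()                        # open the test database and connect to wcfs
    t.change(zf, {2: b'alpha'})      # schedule writing b'alpha' into block #2 of zf
    at1 = t.commit()                 # commit; with this patch commit() also syncs wcfs
    f.assertCache([0, 0, 0])         # nothing of the file is in the OS cache yet
    f.assertData([b'', b'', b'alpha'], mtime=t.head)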
@@ -126,6 +130,7 @@ class tDB:
         t._changed = {}     # ZBigFile -> {} blk -> data

         # committed: head + head history
+        # XXX -> vδF (committed changes to files)
         t.head   = None
         t._headv = []
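For illustration only (hypothetical values, shape as documented by the comment above): after scheduling two block changes for one ZBigFile, t._changed would hold something like:

    t._changed = {
        zf: {2: b'beta', 3: b'gamma'},   # blocks scheduled for the next commit()
    }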
@@ -157,7 +162,9 @@ class tDB:
         assert len(data) <= zf.blksize
         zfDelta[blk] = data

-    # commit commits transaction and remembers/returns committed transaction ID.
+    # commit commits transaction and makes sure wcfs is synchronized to it.
+    #
+    # It remembers/returns committed transaction ID.
     def commit(t):
         # perform modifications scheduled by change.
         # use !wcfs mode so that we prepare data independently of wcfs code paths.
@@ -171,7 +178,7 @@ class tDB:
         t._changed = {}

         # NOTE there is no clean way to retrieve tid of just committed transaction
-        # we are using last._p_serial as workaround.
+        # we use last._p_serial as workaround.
         t.root['_last'] = last = Persistent()
         last._p_changed = 1
@@ -181,10 +188,14 @@ class tDB:
         t.ncommit += 1
         t.head = head
         t._headv.append(head)

+        # sync wcfs
+        t._wcsync()
+
         return head

-    # wcsync makes sure wcfs synchronized to latest committed transaction.
-    def wcsync(t):
+    # _wcsync makes sure wcfs is synchronized to latest committed transaction.
+    def _wcsync(t):
         while len(t._wc_zheadv) < len(t._headv):
             l = t._wc_zheadfh.readline()
             #print('> zhead read: %r' % l)
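The net effect of this hunk is that commit() now waits for wcfs itself. A rough sketch of the resulting control flow, reconstructed only from the lines visible above (the elided parts of both methods are kept as comments):

    def commit(t):
        # ... perform the scheduled modifications and commit the transaction ...
        t.ncommit += 1
        t.head = head
        t._headv.append(head)

        # sync wcfs before returning, so callers no longer need t.wcsync()
        t._wcsync()
        return head

    def _wcsync(t):
        # read the zhead file until wcfs has reported every transaction we committed
        while len(t._wc_zheadv) < len(t._headv):
            l = t._wc_zheadfh.readline()
            # ... parse l and append the reported head to t._wc_zheadv ...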
@@ -239,7 +250,7 @@ class tDB:
         return tWatch(t)

-# tFile is testing environment for one bigfile on wcfs.
+# tFile provides testing environment for one bigfile on wcfs.
 class tFile:
     # maximum number of pages we mmap for 1 file.
     # this should be not big not to exceed mlock limit.
@@ -251,10 +262,10 @@ class tFile:
         t.f = tdb._open(zf, at=at)
         t.blksize = zf.blksize

-        # mmap the file past the end up to XXX pages and lock the pages with
-        # MLOCK_ONFAULT. This way when a page is read by mmap access we have
-        # the guarantee from kernel that the page will stay in pagecache. We
-        # rely on this to verify OS cache state.
+        # mmap the file past the end up to _max_tracked pages and lock the
+        # pages with MLOCK_ONFAULT. This way when a page is read by mmap access
+        # we have the guarantee from kernel that the page will stay in
+        # pagecache. We rely on this to verify OS cache state.
         assert t.blksize % mm.PAGE_SIZE == 0
         t.fmmap = mm.map_ro(t.f.fileno(), 0, t._max_tracked*t.blksize)
         mm.lock(t.fmmap, mm.MLOCK_ONFAULT)
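The assert above is what makes the block/page bookkeeping work: each file block is a whole number of memory pages, so per-page cache residency can be aggregated per block. A small arithmetic sketch with assumed example sizes (2 MiB blocks, 4 KiB pages; the real values come from zf.blksize and mm.PAGE_SIZE):

    blksize   = 2*1024*1024          # assumed example block size
    PAGE_SIZE = 4*1024               # assumed example page size
    assert blksize % PAGE_SIZE == 0

    pages_per_block = blksize // PAGE_SIZE                       # 512 pages per block
    blk = 3
    byte_range = (blk*blksize, (blk+1)*blksize)                  # bytes of block #3 in fmmap
    page_range = (blk*pages_per_block, (blk+1)*pages_per_block)  # pages of block #3

    # MLOCK_ONFAULT locks a page only once it has been faulted in, so only blocks
    # the test actually reads become, and then stay, resident in the pagecache.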
@@ -302,7 +313,7 @@ class tFile:
         assert t.cached() == incorev

-    # blk returns bytearray connected to view of file[blk].
+    # blk returns bytearray view of file[blk].
     def blk(t, blk):
         assert blk <= t._max_tracked
         return bytearray(t.fmmap[blk*t.blksize:(blk+1)*t.blksize])
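A short usage sketch of blk(), assuming f is a tFile as in the test code further below; the point is that reading through the mmap is what faults the block's pages in, which cached()/assertCache() then observe:

    data = f.blk(2)                  # read block #2 through the locked mmap
    assert len(data) == f.blksize    # always a full block of blksize bytes
    # reading via the mmap faults block #2's pages in; e.g. for a 3-block file
    # with nothing else read yet, f.assertCache([0, 0, 1]) would then hold.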
@@ -348,9 +359,7 @@ class tFile:
     # Expected blocks may be given with size < zf.blksize. In such case they
     # are implicitly appended with trailing zeros.
     #
-    # It also check file size and optionally mtime.
-    #
-    # XXX also check pagecache state?
+    # It also checks file size and optionally mtime.
     def assertData(t, datav, mtime=None):
         st = os.fstat(t.f.fileno())
         assert st.st_size == len(datav)*t.blksize
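The docstring above defines how short expected blocks are compared. A minimal sketch of that padding rule, written as a standalone helper for illustration (the real check lives inside assertData):

    def _pad_expected(data, blksize):
        # an expected block shorter than blksize is compared as if it were
        # followed by trailing zeros, per the assertData docstring above
        assert len(data) <= blksize
        return data + b'\0' * (blksize - len(data))

    assert _pad_expected(b'alpha', 8) == b'alpha\x00\x00\x00'
    # so datav = [b'', b'', b'alpha'] describes a file of exactly 3*blksize bytes
    # whose block #2 reads as b'alpha' followed by zeros.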
@@ -364,7 +373,7 @@ class tFile:
         t.assertCache([1]*len(datav))

-# tWatch is testing environment for /head/watch opened on wcfs.
+# tWatch provides testing environment for /head/watch opened on wcfs.
 class tWatch:
     def __init__(t, tdb):
@@ -540,7 +549,6 @@ def test_wcfs():
     t.ncommit = 0   # so that atX in the code correspond with debug output
     at0_ = t.commit()
     assert tidtime(at0_) > tidtime(at0)
-    t.wcsync()

     # >>> lookup non-BigFile -> must be rejected
     with raises(OSError) as exc:
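This and the remaining hunks drop the explicit t.wcsync() calls that previously followed every t.commit(). With commit() now calling t._wcsync() itself (see above), the per-commit test pattern shrinks from:

    t.change(zf, {2: b'alpha'})
    at1 = t.commit()
    t.wcsync()          # before: wcfs had to be synced by hand after every commit

to simply:

    t.change(zf, {2: b'alpha'})
    at1 = t.commit()    # now: commit() returns only after wcfs has caught up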
@@ -556,7 +564,6 @@ def test_wcfs():
     t.change(zf, {2: b'alpha'})
     at1 = t.commit()
-    t.wcsync()

     f.assertCache([0,0,0])                  # initially not cached
     f.assertData ([b'',b'',b'alpha'], mtime=t.head)
@@ -564,8 +571,6 @@ def test_wcfs():
     t.change(zf, {2: b'beta', 3: b'gamma'})
     at2 = t.commit()
-    t.wcsync()
-
     # f @head
     f.assertCache([1,1,0,0])
     f.assertData ([b'',b'',b'beta',b'gamma'], mtime=t.head)
@@ -581,7 +586,6 @@ def test_wcfs():
     t.change(zf, {2: b'kitty'})
     at3 = t.commit()
-    t.wcsync()

     f.assertCache([1,1,0,1])
     # f @head is opened again -> cache must not be lost