neoppod · commit 4d571267, authored Dec 26, 2019 by Julien Muchembled

client: merge load optimizations

Parents: d4603189, 8ba42463

Showing 5 changed files with 156 additions and 102 deletions (+156 -102)
neo/client/app.py               +56 -66
neo/client/handlers/master.py   +14 -17
neo/debug.py                     +1  -4
neo/tests/threaded/__init__.py   +8  -6
neo/tests/threaded/test.py      +77  -9
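For orientation, the key data structure introduced by this merge is a per-oid "loading" registry protected by the client's cache lock, replacing the single _loading_oid/_loading_invalidated pair. A minimal sketch of the idea, with placeholder oids and tids (not NEO's API):

from collections import defaultdict
from threading import Lock

# One entry per oid currently being loaded: a per-oid lock owned by the first
# thread that misses the cache, plus the tids that invalidated the oid while
# the load was in flight.
loading = defaultdict(lambda: (Lock(), []))

load_lock, invalidated = loading['oid-1']   # entry created on first access
first = load_lock.acquire(False)            # non-blocking: True only for the first loader
loading['oid-1'][1].append('tid-2')         # an invalidation observed meanwhile
assert first and invalidated == ['tid-2']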
neo/client/app.py

@@ -80,7 +80,7 @@ class Application(ThreadedApplication):
         # no self-assigned NID, primary master will supply us one
         self._cache = ClientCache() if cache_size is None else \
             ClientCache(max_size=cache_size)
-        self._loading_oid = None
+        self._loading = defaultdict(lambda: (Lock(), []))
         self.new_oids = ()
         self.last_oid = '\0' * 8
         self.storage_event_handler = storage.StorageEventHandler(self)
@@ -91,19 +91,13 @@ class Application(ThreadedApplication):
         self.notifications_handler = master.PrimaryNotificationsHandler(self)
         self._txn_container = TransactionContainer()
         # Lock definition :
-        # _load_lock is used to make loading and storing atomic
-        lock = Lock()
-        self._load_lock_acquire = lock.acquire
-        self._load_lock_release = lock.release
         # _oid_lock is used in order to not call multiple oid
         # generation at the same time
         lock = Lock()
         self._oid_lock_acquire = lock.acquire
         self._oid_lock_release = lock.release
-        lock = Lock()
         # _cache_lock is used for the client cache
-        self._cache_lock_acquire = lock.acquire
-        self._cache_lock_release = lock.release
+        self._cache_lock = Lock()
         # _connecting_to_master_node is used to prevent simultaneous master
         # node connection attempts
         self._connecting_to_master_node = Lock()
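Much of the remaining churn in this commit is mechanical: call sites that kept bound acquire/release methods and wrapped them in try/finally now use the Lock itself as a context manager. The two forms are equivalent, as this trivial standalone snippet illustrates:

from threading import Lock

cache_lock = Lock()

# Old style: bound methods plus explicit try/finally.
acquire, release = cache_lock.acquire, cache_lock.release
acquire()
try:
    pass  # critical section
finally:
    release()

# New style: the lock object itself, used as a context manager.
with cache_lock:
    pass  # critical section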
@@ -402,21 +396,32 @@ class Application(ThreadedApplication):
         """
         # TODO:
         # - rename parameters (here? and in handlers & packet definitions)
-        acquire = self._cache_lock_acquire
-        release = self._cache_lock_release
-        # XXX: Consider using a more fine-grained lock.
-        self._load_lock_acquire()
+        acquired = False
+        lock = self._cache_lock
         try:
-            acquire()
-            try:
-                result = self._loadFromCache(oid, tid, before_tid)
-                if result:
-                    return result
-                self._loading_oid = oid
-                self._loading_invalidated = []
-            finally:
-                release()
+            while 1:
+                with lock:
+                    if tid:
+                        result = self._cache.load(oid, tid + '*')
+                        assert not result or result[1] == tid
+                    else:
+                        result = self._cache.load(oid, before_tid)
+                    if result:
+                        return result
+                    load_lock = self._loading[oid][0]
+                    acquired = load_lock.acquire(0)
+                # Several concurrent cache misses for the same oid are probably
+                # for the same tid so we use a per-oid lock to avoid asking the
+                # same data to the storage node.
+                if acquired:
+                    # The first thread does load from storage,
+                    # and fills cache with the response.
+                    break
+                # The other threads wait for the first one to complete and
+                # loop, possibly resulting in a new cache miss if a different
+                # tid is actually wanted or if the data was too big.
+                with load_lock:
+                    pass
             # While the cache lock is released, an arbitrary number of
             # invalidations may be processed, for this oid or not. And at this
             # precise moment, if both tid and before_tid are None (which is
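A self-contained sketch of the loop introduced above, assuming a dict-based cache and a stub load_from_storage (both placeholders, not NEO's classes): on a miss, the first thread takes the per-oid lock and performs the real load; the others block briefly on that lock and then retry the cache.

from collections import defaultdict
from threading import Lock, Thread

cache = {}                                  # placeholder for the client cache
cache_lock = Lock()
loading = defaultdict(lambda: (Lock(), []))

def load_from_storage(oid):
    # Placeholder for the storage round-trip.
    return 'data-for-%s' % oid

def load(oid):
    while True:
        with cache_lock:
            if oid in cache:
                return cache[oid]           # cache hit
            load_lock = loading[oid][0]
            acquired = load_lock.acquire(False)
        if acquired:
            break                           # first thread: do the real load
        with load_lock:                     # other threads: wait for it,
            pass                            # then loop and hit the cache
    try:
        data = load_from_storage(oid)
        with cache_lock:
            cache[oid] = data
            loading.pop(oid, None)
        return data
    finally:
        load_lock.release()

threads = [Thread(target=load, args=('oid-1',)) for _ in range(3)]
for t in threads:
    t.start()
for t in threads:
    t.join()
assert cache['oid-1'] == 'data-for-oid-1'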
@@ -432,20 +437,24 @@ class Application(ThreadedApplication):
             # we got from master.
             before_tid = p64(u64(self.last_tid) + 1)
             data, tid, next_tid, _ = self._loadFromStorage(oid, tid, before_tid)
-            acquire()
-            try:
-                if self._loading_oid:
+            with lock:
+                loading = self._loading.pop(oid, None)
+                if loading:
+                    assert loading[0] is load_lock
                     if not next_tid:
-                        for t in self._loading_invalidated:
+                        for t in loading[1]:
                             if tid < t:
                                 next_tid = t
                                 break
                     self._cache.store(oid, data, tid, next_tid)
                 # Else, we just reconnected to the master.
-            finally:
-                release()
-        finally:
-            self._load_lock_release()
+            load_lock.release()
+        except:
+            if acquired:
+                with lock:
+                    self._loading.pop(oid, None)
+                load_lock.release()
+            raise
         return data, tid, next_tid

     def _loadFromStorage(self, oid, at_tid, before_tid):
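The next_tid fixup above picks, among the tids that invalidated the oid while the load was in flight, the smallest one newer than the loaded serial. A hypothetical helper makes the rule explicit:

def fix_next_tid(tid, next_tid, invalidated):
    # If the storage answered without a next serial, the first invalidation
    # strictly newer than the loaded serial becomes the next serial.
    if not next_tid:
        for t in invalidated:
            if tid < t:
                return t
    return next_tid

# Record loaded at serial 5; tids 5 and 7 invalidated the oid during the load,
# so the cached record must be bounded by 7.
assert fix_next_tid(5, None, [5, 7]) == 7
# An invalidation for the loaded serial itself is ignored.
assert fix_next_tid(5, None, [5]) is None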
@@ -464,16 +473,6 @@ class Application(ThreadedApplication):
             Packets.AskObject(oid, at_tid, before_tid),
             askStorage)

-    def _loadFromCache(self, oid, at_tid=None, before_tid=None):
-        """
-        Load from local cache, return None if not found.
-        """
-        if at_tid:
-            result = self._cache.load(oid, at_tid + '*')
-            assert not result or result[1] == at_tid
-            return result
-        return self._cache.load(oid, before_tid)
-
     def tpc_begin(self, storage, transaction, tid=None, status=' '):
         """Begin a new transaction."""
         # First get a transaction, only one is allowed at a time
@@ -729,29 +728,23 @@ class Application(ThreadedApplication):
         txn_container = self._txn_container
         if not txn_container.get(transaction).voted:
             self.tpc_vote(transaction)
-        checked_list = []
-        self._load_lock_acquire()
-        try:
-            txn_context = txn_container.pop(transaction)
-            cache_dict = txn_context.cache_dict
-            checked_list = [oid for oid, data in cache_dict.iteritems()
-                            if data is CHECKED_SERIAL]
-            for oid in checked_list:
-                del cache_dict[oid]
-            ttid = txn_context.ttid
-            p = Packets.AskFinishTransaction(ttid, list(cache_dict),
-                                             checked_list)
-            try:
-                # Call finish on master
-                tid = self._askPrimary(p, cache_dict=cache_dict, callback=f)
-                assert tid
-            except ConnectionClosed:
-                tid = self._getFinalTID(ttid)
-                if not tid:
-                    raise
-            return tid
-        finally:
-            self._load_lock_release()
+        txn_context = txn_container.pop(transaction)
+        cache_dict = txn_context.cache_dict
+        checked_list = [oid for oid, data in cache_dict.iteritems()
+                        if data is CHECKED_SERIAL]
+        for oid in checked_list:
+            del cache_dict[oid]
+        ttid = txn_context.ttid
+        p = Packets.AskFinishTransaction(ttid, list(cache_dict),
+                                         checked_list)
+        try:
+            tid = self._askPrimary(p, cache_dict=cache_dict, callback=f)
+            assert tid
+        except ConnectionClosed:
+            tid = self._getFinalTID(ttid)
+            if not tid:
+                raise
+        return tid

     def _getFinalTID(self, ttid):
         try:
@@ -991,11 +984,8 @@ class Application(ThreadedApplication):
         # It should not be otherwise required (clients should be free to load
         # old data as long as it is available in cache, event if it was pruned
         # by a pack), so don't bother invalidating on other clients.
-        self._cache_lock_acquire()
-        try:
+        with self._cache_lock:
             self._cache.clear()
-        finally:
-            self._cache_lock_release()

     def getLastTID(self, oid):
         return self.load(oid)[1]
neo/client/handlers/master.py

@@ -45,8 +45,7 @@ class PrimaryNotificationsHandler(MTEventHandler):
             # Either we're connecting or we already know the last tid
             # via invalidations.
             assert app.master_conn is None, app.master_conn
-            app._cache_lock_acquire()
-            try:
+            with app._cache_lock:
                 if app_last_tid < ltid:
                     app._cache.clear_current()
                     # In the past, we tried not to invalidate the
@@ -60,9 +59,7 @@ class PrimaryNotificationsHandler(MTEventHandler):
                     app._cache.clear()
                     # Make sure a parallel load won't refill the cache
                     # with garbage.
-                    app._loading_oid = app._loading_invalidated = None
-            finally:
-                app._cache_lock_release()
+                    app._loading.clear()
             db = app.getDB()
             db is None or db.invalidateCache()
             app.last_tid = ltid
@@ -73,18 +70,20 @@ class PrimaryNotificationsHandler(MTEventHandler):
         app.last_tid = tid
         # Update cache
         cache = app._cache
-        app._cache_lock_acquire()
-        try:
+        with app._cache_lock:
+            invalidate = app._cache.invalidate
+            loading_get = app._loading.get
             for oid, data in cache_dict.iteritems():
                 # Update ex-latest value in cache
-                cache.invalidate(oid, tid)
+                invalidate(oid, tid)
+                loading = loading_get(oid)
+                if loading:
+                    loading[1].append(tid)
                 if data is not None:
                     # Store in cache with no next_tid
                     cache.store(oid, data, tid, None)
             if callback is not None:
                 callback(tid)
-        finally:
-            app._cache_lock_release()

     def connectionClosed(self, conn):
         app = self.app
@@ -113,19 +112,17 @@ class PrimaryNotificationsHandler(MTEventHandler):
         if app.ignore_invalidations:
             return
         app.last_tid = tid
-        app._cache_lock_acquire()
-        try:
+        with app._cache_lock:
             invalidate = app._cache.invalidate
-            loading = app._loading_oid
+            loading_get = app._loading.get
             for oid in oid_list:
                 invalidate(oid, tid)
-                if oid == loading:
-                    app._loading_invalidated.append(tid)
+                loading = loading_get(oid)
+                if loading:
+                    loading[1].append(tid)
             db = app.getDB()
             if db is not None:
                 db.invalidate(tid, oid_list)
-        finally:
-            app._cache_lock_release()

     def sendPartitionTable(self, conn, ptid, num_replicas, row_list):
         pt = self.app.pt = object.__new__(PartitionTable)
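On the notification side, each invalidation now also lands in the pending list of any oid that currently has a load in flight. A reduced sketch with illustrative names, mirroring the registry shown earlier:

from collections import defaultdict
from threading import Lock

cache_lock = Lock()
loading = defaultdict(lambda: (Lock(), []))
invalidated_in_cache = []                   # stand-in for ClientCache.invalidate

def invalidate_objects(tid, oid_list):
    with cache_lock:
        loading_get = loading.get
        for oid in oid_list:
            invalidated_in_cache.append((oid, tid))
            entry = loading_get(oid)        # only oids with a load in flight
            if entry:
                entry[1].append(tid)

loading['oid-1']                            # pretend a load of oid-1 is in flight
invalidate_objects('tid-2', ['oid-1', 'oid-9'])
assert loading['oid-1'][1] == ['tid-2']
assert 'oid-9' not in loading               # get() does not create entries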
neo/debug.py

@@ -197,8 +197,7 @@ elif IF == 'trace-cache':
     @defer
     def profile(app):
-        app._cache_lock_acquire()
-        try:
+        with app._cache_lock:
             cache = app._cache
             if type(cache) is ClientCache:
                 app._cache = CacheTracer(cache, '%s-%s.neo-cache-trace' %
@@ -206,5 +205,3 @@ elif IF == 'trace-cache':
                 app._cache.clear()
             else:
                 app._cache = cache.close()
-        finally:
-            app._cache_lock_release()
neo/tests/threaded/__init__.py

@@ -1105,8 +1105,7 @@ class NEOThreadedTest(NeoTestBase):
         def run(self):
             try:
-                apply(*self.__target)
-                self.__exc_info = None
+                self.__result = apply(*self.__target)
             except:
                 self.__exc_info = sys.exc_info()
                 if self.__exc_info[0] is NEOThreadedTest.failureException:
@@ -1114,10 +1113,13 @@ class NEOThreadedTest(NeoTestBase):
         def join(self, timeout=None):
             threading.Thread.join(self, timeout)
-            if not self.is_alive() and self.__exc_info:
-                etype, value, tb = self.__exc_info
-                del self.__exc_info
-                raise etype, value, tb
+            if not self.is_alive():
+                try:
+                    return self.__result
+                except AttributeError:
+                    etype, value, tb = self.__exc_info
+                    del self.__exc_info
+                    raise etype, value, tb

     class newThread(newPausedThread):
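The wrapper thread now stores the target's return value so that join() can hand it back, and re-raises only when no result was stored. A simplified standalone version of the same idea (NEO's helper keeps the Python 2 three-argument raise; this sketch re-raises the exception object instead):

import sys
import threading

class ResultThread(threading.Thread):
    """Illustrative only: join() returns the target's result or re-raises."""

    def __init__(self, target, *args):
        threading.Thread.__init__(self)
        self.__target = (target,) + args

    def run(self):
        try:
            self.__result = self.__target[0](*self.__target[1:])
        except:
            self.__exc_info = sys.exc_info()

    def join(self, timeout=None):
        threading.Thread.join(self, timeout)
        if not self.is_alive():
            try:
                return self.__result        # set only if run() succeeded
            except AttributeError:
                etype, value, tb = self.__exc_info
                del self.__exc_info
                raise value                 # simplified; the real helper keeps the traceback

t = ResultThread(lambda x: x * 2, 21)
t.start()
assert t.join() == 42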
neo/tests/threaded/test.py

@@ -902,6 +902,27 @@ class Test(NEOThreadedTest):
         self.assertEqual(c.root()['1'].value, 1)
         self.assertNotIn('2', c.root())

+    @with_cluster()
+    def testLoadVsFinish(self, cluster):
+        t1, c1 = cluster.getTransaction()
+        c1.root()['x'] = x1 = PCounter()
+        t1.commit()
+        t1.begin()
+        x1.value = 1
+        t2, c2 = cluster.getTransaction()
+        x2 = c2.root()['x']
+        cluster.client._cache.clear()
+        def _loadFromStorage(orig, *args):
+            r = orig(*args)
+            ll()
+            return r
+        with LockLock() as ll, Patch(cluster.client,
+                _loadFromStorage=_loadFromStorage):
+            t = self.newThread(x2._p_activate)
+            ll()
+            t1.commit()
+        t.join()
+
     @with_cluster()
     def testInternalInvalidation(self, cluster):
         def _handlePacket(orig, conn, packet, kw={}, handler=None):
@@ -989,6 +1010,8 @@ class Test(NEOThreadedTest):
         t.join()
         self.assertEqual(x2.value, 1)
         self.assertEqual(x1.value, 0)
+        self.assertEqual((x2._p_serial, x1._p_serial),
+            cluster.client._cache.load(x1._p_oid, x1._p_serial)[1:])

         def invalidations(conn):
             try:
@@ -1026,7 +1049,7 @@ class Test(NEOThreadedTest):
         x = r[''] = PCounter()
         t.commit()
         tid1 = x._p_serial
-        nonlocal_ = [0, 1]
+        nonlocal_ = [0, 0, 0]
         l1 = threading.Lock(); l1.acquire()
         l2 = threading.Lock(); l2.acquire()
         def invalidateObjects(orig, *args):
@@ -1036,27 +1059,72 @@ class Test(NEOThreadedTest):
                 nonlocal_[0] += 1
                 if nonlocal_[0] == 2:
                     l2.release()
-        def _cache_lock_release(orig):
-            orig()
-            if nonlocal_[1]:
-                nonlocal_[1] = 0
-                l1.release()
-                l2.acquire()
+        class CacheLock(object):
+            def __init__(self, client):
+                self._lock = client._cache_lock
+            def __enter__(self):
+                self._lock.acquire()
+            def __exit__(self, t, v, tb):
+                count = nonlocal_[1]
+                nonlocal_[1] = count + 1
+                self._lock.release()
+                if count == 0:
+                    load_same.start()
+                    l2.acquire()
+                elif count == 1:
+                    load_other.start()
+        def _loadFromStorage(orig, *args):
+            count = nonlocal_[2]
+            nonlocal_[2] = count + 1
+            if not count:
+                l1.release()
+            return orig(*args)
         with cluster.newClient() as client, \
              Patch(client.notifications_handler,
                    invalidateObjects=invalidateObjects):
             client.sync()
             with cluster.master.filterConnection(client) as mc2:
                 mc2.delayInvalidateObjects()
+                # A first client node (C1) modifies an oid whereas
+                # invalidations to the other node (C2) are delayed.
                 x._p_changed = 1
                 t.commit()
                 tid2 = x._p_serial
+            # C2 loads the most recent revision of this oid (last_tid=tid1).
             self.assertEqual((tid1, tid2), client.load(x._p_oid)[1:])
+            # C2 poll thread is frozen just before processing invalidation
+            # packet for tid2. C1 modifies something else -> tid3
             r._p_changed = 1
             t.commit()
-            with Patch(client, _cache_lock_release=_cache_lock_release):
-                self.assertEqual((tid2, None), client.load(x._p_oid)[1:])
-            self.assertEqual(nonlocal_, [2, 0])
+            self.assertEqual(tid1, client.last_tid)
+            load_same = self.newPausedThread(client.load, x._p_oid)
+            load_other = self.newPausedThread(client.load, r._p_oid)
+            with Patch(client, _cache_lock=CacheLock(client)), \
+                 Patch(client, _loadFromStorage=_loadFromStorage):
+                # 1. Just after having found nothing in cache, the worker
+                #    thread asks the poll thread to get notified about
+                #    invalidations for the loading oid.
+                # <context switch> (l1)
+                # 2. Both invalidations are processed. -> last_tid=tid3
+                # <context switch> (l2)
+                # 3. The worker thread loads before tid3+1.
+                #    The poll thread notified [tid2], which must be ignored.
+                # In parallel, 2 other loads are done (both cache misses):
+                # - one for the same oid, which waits for first load to
+                #   complete and in particular fill cache, in order to
+                #   avoid asking the same data to the storage node
+                # - another for a different oid, which doesn't wait, as shown
+                #   by the fact that it returns an old record (i.e. before any
+                #   invalidation packet is processed)
+                loaded = client.load(x._p_oid)
+            self.assertEqual((tid2, None), loaded[1:])
+            self.assertEqual(loaded, load_same.join())
+            self.assertEqual((tid1, r._p_serial), load_other.join()[1:])
+            # To summary:
+            # - 3 concurrent loads starting with cache misses
+            # - 2 loads from storage
+            # - 1 load ending with a cache hit
+            self.assertEqual(nonlocal_, [2, 8, 2])

     @with_cluster(storage_count=2, partitions=2)
     def testReadVerifyingStorage(self, cluster):
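The new test drives a precise interleaving with two pre-acquired locks used as a handshake between the worker and the poll thread. The mechanism in isolation, with dummy thread bodies standing in for the NEO code paths:

import threading

l1 = threading.Lock(); l1.acquire()
l2 = threading.Lock(); l2.acquire()
steps = []

def poll_thread():
    l1.acquire()                 # wait until the worker reaches its checkpoint
    steps.append('invalidations processed')
    l2.release()                 # let the worker resume

def worker():
    steps.append('registered for invalidations')
    l1.release()                 # checkpoint reached: wake the poll thread
    l2.acquire()                 # block until invalidations are processed
    steps.append('load from storage')

t = threading.Thread(target=poll_thread)
t.start()
worker()
t.join()
assert steps == ['registered for invalidations',
                 'invalidations processed', 'load from storage']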