neoppod, commit 48ff0513
authored Oct 27, 2013 by Vincent Pelletier
client.container: Reduce lock contention on queue.put .
parent 7cdd3f8f
Showing 3 changed files with 50 additions and 44 deletions:
neo/client/container.py       +49  -3
neo/lib/locking.py             +0  -38
neo/tests/testConnection.py    +1  -3
neo/client/container.py

@@ -15,7 +15,53 @@
 # along with this program. If not, see <http://www.gnu.org/licenses/>.

 from thread import get_ident
-from neo.lib.locking import Queue
+from neo.lib.locking import Lock, Empty
+from collections import deque
+
+
+class SimpleQueue(object):
+    """
+    Similar to Queue.Queue but with simpler locking scheme, reducing lock
+    contention on "put" (benchmark shows 60% less time spent in "put").
+    As a result:
+    - only a single consumer possible ("get" vs. "get" race condition)
+    - only a single producer possible ("put" vs. "put" race condition)
+    - no blocking size limit possible
+    - no consumer -> producer notifications (task_done/join API)
+
+    Queue is on the critical path: any moment spent here increases client
+    application wait for object data, transaction completion, etc.
+    As we have a single consumer (client application's thread) and a single
+    producer (lib.dispatcher, which can be called from several threads but
+    serialises calls internally) for each queue, Queue.Queue's locking scheme
+    can be relaxed to reduce latency.
+    """
+    __slots__ = ('_lock', '_unlock', '_popleft', '_append', '_queue')
+
+    def __init__(self):
+        lock = Lock()
+        self._lock = lock.acquire
+        self._unlock = lock.release
+        self._queue = queue = deque()
+        self._popleft = queue.popleft
+        self._append = queue.append
+
+    def get(self, block):
+        if block:
+            self._lock(False)
+        while True:
+            try:
+                return self._popleft()
+            except IndexError:
+                if not block:
+                    raise Empty
+                self._lock()
+
+    def put(self, item):
+        self._append(item)
+        self._lock(False)
+        self._unlock()
+
+    def empty(self):
+        return not self._queue
+
+
 class ContainerBase(object):
     def __init__(self):
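The put/get handshake above is easy to misread, so here is a standalone sketch of the same single-producer, single-consumer wake-up pattern (illustrative only, not part of the commit; the class name OneToOneQueue and the demo driver are made up). The producer appends to a plain deque, then does a non-blocking acquire followed by a release on a shared threading.Lock; the release either wakes a consumer blocked in acquire() or simply leaves the lock unlocked for the consumer's next attempt. CPython's deque supports thread-safe appends and pops, so no lock is needed around the container itself, and threading.Lock may be released by a thread other than the one that acquired it, which is what makes the cross-thread wake-up legal.

# Illustrative sketch only, not from the commit: same single-producer /
# single-consumer wake-up pattern as SimpleQueue, written standalone so it
# can be run directly (Python 2 or 3). Names are hypothetical.
from collections import deque
from threading import Lock, Thread
import time

class OneToOneQueue(object):
    def __init__(self):
        self._lock = Lock()
        self._queue = deque()      # CPython deque: append/popleft are thread-safe

    def get(self):                 # single consumer, always blocking here
        self._lock.acquire(False)  # swallow any wake-up left over from a past put
        while True:
            try:
                return self._queue.popleft()
            except IndexError:
                self._lock.acquire()   # sleep until the producer releases the lock

    def put(self, item):           # single producer
        self._queue.append(item)
        self._lock.acquire(False)  # make sure the lock is held (by anyone)...
        self._lock.release()       # ...so this release wakes a blocked consumer

def consume(q, out, n):
    for _ in range(n):
        out.append(q.get())

if __name__ == '__main__':
    q, results = OneToOneQueue(), []
    worker = Thread(target=consume, args=(q, results, 3))
    worker.start()
    for i in range(3):
        q.put(i)
        time.sleep(0.01)           # let the consumer block between puts
    worker.join()
    print(results)                 # [0, 1, 2]

Compared with Queue.Queue, a put here is just an append plus one non-blocking acquire and one release, with no condition-variable round trip, which is where the commit's reduced contention on put comes from.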
@@ -44,7 +90,7 @@ class ThreadContainer(ContainerBase):
     def _new(self):
         return {
-            'queue': Queue(0),
+            'queue': SimpleQueue(),
             'answer': None,
         }
@@ -65,7 +111,7 @@ class TransactionContainer(ContainerBase):
     def _new(self, txn):
         return {
-            'queue': Queue(0),
+            'queue': SimpleQueue(),
             'txn': txn,
             'ttid': None,
             'data_dict': {},
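Callers that previously received a Queue(0) under the 'queue' key now get a SimpleQueue, whose API is narrower: get() takes block as a mandatory positional argument, raises Empty when a non-blocking get finds nothing, and offers no timeout, task_done or join. A small usage sketch, assuming the neo package from this repository is importable under the paths shown in the diff:

# Assumes the neo package from this repository is on the path; not part of the commit.
from neo.lib.locking import Empty
from neo.client.container import SimpleQueue

q = SimpleQueue()
try:
    q.get(False)            # non-blocking poll: block must be passed explicitly
except Empty:
    print('queue is empty')
q.put('answer')             # put() is the producer side; in NEO it is lib.dispatcher
print(q.get(True))          # blocking get on the consumer side -> 'answer'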
neo/lib/locking.py

 from threading import Lock as threading_Lock
 from threading import RLock as threading_RLock
 from threading import currentThread
-from Queue import Queue as Queue_Queue
 from Queue import Empty

 """

@@ -137,46 +136,9 @@ class VerboseLock(VerboseLockBase):
         return self.lock.locked()
     _locked = locked

-
-class VerboseQueue(Queue_Queue):
-    def __init__(self, maxsize=0):
-        if maxsize <= 0:
-            self.put = self._verbose_put
-        Queue_Queue.__init__(self, maxsize=maxsize)
-
-    def _verbose_note(self, fmt, *args):
-        sys.stderr.write(fmt % args + '\n')
-        sys.stderr.flush()
-
-    def get(self, block=True, timeout=None):
-        note = self._verbose_note
-        me = '[%r]%s.get(block=%r, timeout=%r)' % (LockUser(), self, block, timeout)
-        note('%s waiting', me)
-        try:
-            result = Queue_Queue.get(self, block=block, timeout=timeout)
-        except Exception, exc:
-            note('%s got exeption %r', me, exc)
-            raise
-        note('%s got item', me)
-        return result
-
-    def _verbose_put(self, item, block=True, timeout=None):
-        note = self._verbose_note
-        me = '[%r]%s.put(..., block=%r, timeout=%r)' % (LockUser(), self, block, timeout)
-        try:
-            Queue_Queue.put(self, item, block=block, timeout=timeout)
-        except Exception, exc:
-            note('%s got exeption %r', me, exc)
-            raise
-        note('%s put item', me)
-
-    def __repr__(self):
-        return '<%s@%X>' % (self.__class__.__name__, id(self))
-
 if VERBOSE_LOCKING:
     Lock = VerboseLock
     RLock = VerboseRLock
-    Queue = VerboseQueue
 else:
     Lock = threading_Lock
     RLock = threading_RLock
-    Queue = Queue_Queue
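Since this file no longer defines VerboseQueue nor assigns a Queue name, importers lose the neo.lib.locking.Queue alias. The snippet below is an inference from the diff (not part of the commit) showing what remains importable:

# Inferred from the diff above, not taken from the commit itself.
from neo.lib.locking import Lock, RLock, Empty   # still exported
# from neo.lib.locking import Queue              # gone; use neo.client.container.SimpleQueue
#                                                # or the stdlib Queue module instead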
neo/tests/testConnection.py

@@ -27,7 +27,6 @@ from neo.lib.connector import ConnectorException, ConnectorTryAgainException, \
 from neo.lib.handler import EventHandler
 from neo.lib.protocol import Packets, PACKET_HEADER_FORMAT
 from . import NeoUnitTestBase
-from neo.lib.locking import Queue


 class ConnectionTests(NeoUnitTestBase):

@@ -850,12 +849,11 @@ class MTConnectionTests(ConnectionTests):
             dispatcher=self.dispatcher)

     def test_MTClientConnectionQueueParameter(self):
-        queue = Queue()
         ask = self._makeClientConnection().ask
         packet = Packets.AskPrimary() # Any non-Ping simple "ask" packet
         # One cannot "ask" anything without a queue
         self.assertRaises(TypeError, ask, packet)
-        ask(packet, queue=queue)
+        ask(packet, queue=object())
         # ... except Ping
         ask(Packets.Ping())