Commit 10d05517
authored May 28, 2020 by Jason Madden

Catch greenlet.error when destroying a hub from another thread.

Fixes #1631
parent 30f63ed2

Showing 3 changed files with 86 additions and 22 deletions (+86 -22)

docs/changes/1631.bugfix            +3  -0
src/gevent/hub.py                   +15 -2
src/gevent/tests/test__hub_join.py  +68 -20
docs/changes/1631.bugfix  (new file, 0 → 100644)

Forking a process that had used the threadpool to run tasks that
created their own hub would fail to clean up the threadpool by raising
``greenlet.error``.
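To make the scenario concrete, here is a minimal sketch of the precondition described above: every threadpool worker that runs blocking gevent code ends up with its own hub, bound to that native worker thread. This is illustrative only, not the project's reproducer for #1631; the pool size and task are arbitrary.

import gevent
from gevent.threadpool import ThreadPool

def task(_):
    # A blocking gevent call inside a worker thread creates a hub
    # bound to that worker thread.
    gevent.sleep(0.01)
    return id(gevent.get_hub())

pool = ThreadPool(2)
worker_hub_ids = set(pool.map(task, range(4)))
print(len(worker_hub_ids), "worker hub(s); main-thread hub id:", id(gevent.get_hub()))

# After a fork(), the child inherits these per-thread hubs but not the worker
# threads themselves; disposing of them from the child's main thread is the
# cross-thread destroy that used to raise ``greenlet.error``.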
src/gevent/hub.py
@@ -14,6 +14,7 @@ import traceback
 from greenlet import greenlet as RawGreenlet
 from greenlet import getcurrent
 from greenlet import GreenletExit
+from greenlet import error as GreenletError

 __all__ = [
     'getcurrent',
@@ -755,16 +756,22 @@ class Hub(WaitOperationsGreenlet):
         If you manually create hubs, or you use a hub or the gevent
         blocking API from multiple native threads, you *should* call this
-        method before disposing of the hub object reference.
+        method before disposing of the hub object reference. Ideally,
+        this should be called from the same thread running the hub, but
+        it can be called from other threads after that thread has exited.

         Once this is done, it is impossible to continue running the
         hub. Attempts to use the blocking gevent API with pre-existing
         objects from this native thread and bound to this hub will fail.

         .. versionchanged:: 20.5.1
-           Ensure that Python stack frames and greenlets referenced by this
+           Attempt to ensure that Python stack frames and greenlets referenced by this
            hub are cleaned up. This guarantees that switching to the hub again
            is not safe after this. (It was never safe, but it's even less safe.)

+           Note that this only works if the hub is destroyed in the same thread it
+           is running in. If the hub is destroyed by a different thread
+           after a ``fork()``, for example, expect some garbage to leak.
         """
         if self.periodic_monitoring_thread is not None:
             self.periodic_monitoring_thread.kill()
@@ -786,6 +793,12 @@ class Hub(WaitOperationsGreenlet):
         try:
             self.throw(GreenletExit)
         except LoopExit:
             # Expected.
             pass
+        except GreenletError:
+            # Must be coming from a different thread.
+            # Note that python stack frames are likely to leak
+            # in this case.
+            pass

         if destroy_loop is None:
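The docstring above says ``destroy()`` may now be called from a thread other than the one that ran the hub, once that thread has exited, and the new ``except GreenletError`` branch is what tolerates the cross-thread ``throw(GreenletExit)``. A short usage sketch of that pattern, modeled on the new test added below (the ``Worker`` class name is illustrative):

import threading
import gevent

class Worker(threading.Thread):
    hub = None
    def run(self):
        g = gevent.Greenlet(run=lambda: 0)
        g.start()
        g.join()
        hub = gevent.get_hub()   # the hub bound to this native thread
        hub.join()
        self.hub = hub

t = Worker()
t.start()
t.join()
# The worker thread has exited; destroying its hub from the main thread is a
# cross-thread operation. With this commit, a greenlet.error raised while
# throwing GreenletExit into the dead thread's hub is swallowed, at the cost
# of possibly leaking some frames, as the docstring notes.
t.hub.destroy(destroy_loop=True)
t.hub = None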
src/gevent/tests/test__hub_join.py
from contextlib import contextmanager
import unittest
import gevent
from gevent.testing import ignores_leakcheck
@@ -20,18 +22,36 @@ class TestJoin(unittest.TestCase):
         res = gevent.get_hub().join()
         self.assertTrue(res)

+    @staticmethod
+    def __clean():
+        import gc
+        for _ in range(2):
+            while gc.collect():
+                pass
+
+    @contextmanager
+    def assert_no_greenlet_growth(self):
+        from gevent._greenlet_primitives import get_reachable_greenlets
+        clean = self.__clean
+        clean()
+        count_before = len(get_reachable_greenlets())
+
+        yield
+
+        count_after = len(get_reachable_greenlets())
+        if count_after > count_before:
+            # We could be off by exactly 1. Not entirely clear where.
+            # But it only happens the first time.
+            count_after -= 1
+        # If we were run in multiple processes, our count could actually have
+        # gone down due to the GC's we did.
+        self.assertEqual(count_after, count_before)

     @ignores_leakcheck
     def test_join_in_new_thread_doesnt_leak_hub_or_greenlet(self):
         # https://github.com/gevent/gevent/issues/1601
         import threading
-        import gc
-        from gevent._greenlet_primitives import get_reachable_greenlets
-        def _clean():
-            for _ in range(2):
-                while gc.collect():
-                    pass
-        _clean()
-        count_before = len(get_reachable_greenlets())
+        clean = self.__clean

         def thread_main():
             g = gevent.Greenlet(run=lambda: 0)
@@ -47,22 +67,50 @@ class TestJoin(unittest.TestCase):
         t.start()
         t.join()
-        _clean()
+        clean()
+
+        with self.assert_no_greenlet_growth():
+            for _ in range(10):
+                tester(thread_main)
+
+        del tester
+        del thread_main
+
+    @ignores_leakcheck
+    def test_destroy_in_main_thread_from_new_thread(self):
+        # https://github.com/gevent/gevent/issues/1631
+        import threading
+        clean = self.__clean
+
+        class Thread(threading.Thread):
+            hub = None
+            def run(self):
+                g = gevent.Greenlet(run=lambda: 0)
+                g.start()
+                g.join()
+                del g
+                hub = gevent.get_hub()
+                hub.join()
+                self.hub = hub
+
+        def tester(Thread, clean):
+            t = Thread()
+            t.start()
+            t.join()
+
+            t.hub.destroy(destroy_loop=True)
+            t.hub = None
+            del t
+            clean()
+
+        # Unfortunately, this WILL leak greenlets,
+        # at least on CPython. The frames of the dead threads
+        # are referenced by the hub in some sort of cycle, and
+        # greenlets don't participate in GC.
         for _ in range(10):
-            tester(thread_main)
+            tester(Thread, clean)

         del tester
-        del thread_main
-
-        count_after = len(get_reachable_greenlets())
-        if count_after > count_before:
-            # We could be off by exactly 1. Not entirely clear where.
-            # But it only happens the first time.
-            count_after -= 1
-        # If we were run in multiple process, our count could actually have
-        # gone down due to the GC's we did.
-        self.assertEqual(count_after, count_before)
+        del Thread

 if __name__ == '__main__':