Commit baf9a2a3 authored by Jason Madden

Avoid closing the same libuv watcher object twice.

Under some circumstances (only seen on Windows), that could cause the program to crash.
parent 487b3d02
Avoid closing the same Python libuv IO watcher object twice. Under
some circumstances (only seen on Windows), that could lead to program
crashes.
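The fix boils down to making sure each native watcher reaches uv_close at most once, and that the Python side only acts on a .data handle that still belongs to it. The diff below shows gevent's actual bookkeeping; purely as an illustration of the "close at most once" invariant, here is a minimal, library-free sketch (every name in it is invented for the example, not a gevent or libuv API):

import sys

# Everything here is invented for the illustration; it is not gevent's code.
_natively_closed = set()   # ids of native watchers already handed to "uv_close"

def fake_uv_close(native_watcher):
    # Stand-in for libuv: a real double uv_close aborts the process
    # (the Windows crash the commit message describes); raise instead.
    if id(native_watcher) in _natively_closed:
        raise SystemError('uv_close called twice on the same handle')
    _natively_closed.add(id(native_watcher))

def close_watcher(native_watcher):
    # The invariant being enforced: no matter how many times closing is
    # requested, the native watcher is passed to uv_close at most once.
    if id(native_watcher) not in _natively_closed:
        fake_uv_close(native_watcher)

if __name__ == '__main__':
    w = object()
    close_watcher(w)
    close_watcher(w)   # second request is ignored instead of crashing
    print('closed exactly once', file=sys.stderr)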
@@ -92,7 +92,7 @@ class AbstractCallbacks(object):
  :func:`_python_handle_error` to deal with it. The Python watcher
  object will have the exception tuple saved in ``_exc_info``.
  - 1
- Everything went according to plan. You should check to see if the libev
+ Everything went according to plan. You should check to see if the native
  watcher is still active, and call :func:`python_stop` if it is not. This will
  clean up the memory. Finding the watcher still active at the event loop level,
  but not having stopped itself at the gevent level is a buggy scenario and
@@ -182,8 +182,9 @@ class AbstractCallbacks(object):
  and the_watcher in the_watcher.loop._keepaliveset
  and the_watcher._watcher is orig_ffi_watcher):
  # It didn't stop itself, *and* it didn't stop itself, reset
- # its watcher, and start itself again. libuv's io watchers MAY
- # do that.
+ # its watcher, and start itself again. libuv's io watchers
+ # multiplex and may do this.
  # The normal, expected scenario when we find the watcher still
  # in the keepaliveset is that it is still active at the event loop
  # level, so we don't expect that python_stop gets called.
@@ -483,19 +483,19 @@ class loop(AbstractLoop):
  val = _callbacks.python_callback(handle, arg)
  if val == -1: # Failure.
      _callbacks.python_handle_error(handle, arg)
- elif val == 1: # Success
+ elif val == 1: # Success, and we may need to close the Python watcher.
      if not libuv.uv_is_active(watcher_ptr):
-         # The callback closed the watcher in C. Good.
-         # It's supposed to also reset the pointer to NULL at
+         # The callback closed the native watcher resources. Good.
+         # It's *supposed* to also reset the .data handle to NULL at
          # that same time. If it resets it to something else, we're
          # re-using the same watcher object, and that's not correct either.
-         # Prevoiusly we checked for that case, but we shouldn't need to.
+         # On Windows in particular, if the .data handle is changed because
+         # the IO multiplexer is being restarted, trying to dereference the
+         # *old* handle can crash with an FFI error.
          handle_after_callback = watcher_ptr.data
          try:
-             if handle_after_callback:
+             if handle_after_callback and handle_after_callback == handle:
                  _callbacks.python_stop(handle_after_callback)
-             if handle_after_callback != handle:
-                 _callbacks.python_stop(handle)
          finally:
              watcher_ptr.data = ffi.NULL
  return True
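The guard added here only dereferences watcher_ptr.data when it is still the very handle the callback was dispatched with. A standalone sketch of that comparison using CFFI's new_handle/from_handle, the usual way a Python object is attached to a void* slot like .data (the Watcher class and variable names are made up; this illustrates the check, it is not gevent's code, and it assumes the cffi package is installed):

from cffi import FFI

ffi = FFI()

class Watcher(object):
    def __init__(self, name):
        self.name = name

original = Watcher('original')
replacement = Watcher('replacement')

# What the callback was dispatched with, and what .data might hold afterwards
# if the IO multiplexer was restarted and a new handle was installed.
handle = ffi.new_handle(original)
data_after_callback = ffi.new_handle(replacement)

# CFFI void* handles compare by pointer value, so a replaced handle is
# detected and the stale one is never dereferenced with ffi.from_handle().
if data_after_callback and data_after_callback == handle:
    print('same watcher:', ffi.from_handle(data_after_callback).name)
else:
    print('handle changed; skip python_stop for the old handle')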
@@ -18,7 +18,23 @@ from gevent._ffi import _dbg
  # A set of uv_handle_t* CFFI objects. Kept around
  # to keep the memory alive until libuv is done with them.
- _closing_watchers = set()
+ class _ClosingWatchers(dict):
+     __slots__ = ()
+     def remove(self, obj):
+         try:
+             del self[obj]
+         except KeyError: # pragma: no cover
+             # This has been seen to happen if the module is executed twice
+             # and so the callback doesn't match the storage seen by watcher objects.
+             print(
+                 'gevent error: Unable to remove closing watcher from keepaliveset. '
+                 'Has the module state been corrupted or executed more than once?',
+                 file=sys.stderr
+             )
+ _closing_watchers = _ClosingWatchers()
  # In debug mode, it would be nice to be able to clear the memory of
  # the watcher (its size determined by
@@ -136,7 +152,7 @@ class watcher(_base.watcher):
      # and trying to close it results in libuv terminating the process.
      # Sigh. Same thing if it's already in the process of being
      # closed.
-     _closing_watchers.add(ffi_watcher)
+     _closing_watchers[ffi_handle_watcher] = ffi_watcher
      libuv.uv_close(ffi_watcher, libuv._uv_close_callback)

  def _watcher_ffi_set_init_ref(self, ref):
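Taken together, the watcher.py changes register the native watcher under its Python-side handle (ffi_handle_watcher) just before uv_close, which both keeps the CFFI memory alive and gives the close callback a key to release later. The diff does not show the callback side, so the functions below that drive remove() are assumptions made for the sketch, not gevent's API:

import sys

class _ClosingWatchers(dict):
    # Mirrors the class in the diff: a dict, so each handle maps to exactly
    # one native watcher kept alive until libuv reports the close finished.
    __slots__ = ()

    def remove(self, obj):
        try:
            del self[obj]
        except KeyError:  # pragma: no cover
            print('gevent error: unknown closing watcher', file=sys.stderr)

_closing_watchers = _ClosingWatchers()

def native_close(handle, ffi_watcher):
    # Invented stand-in for the code path around uv_close() in the diff:
    # register first so ffi_watcher cannot be garbage-collected too early.
    _closing_watchers[handle] = ffi_watcher
    # ... libuv.uv_close(ffi_watcher, libuv._uv_close_callback) would go here ...

def on_native_closed(handle):
    # Invented stand-in for the close callback: releases the keep-alive entry.
    _closing_watchers.remove(handle)

if __name__ == '__main__':
    native_close('handle-1', object())
    on_native_closed('handle-1')
    assert 'handle-1' not in _closing_watchers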
@@ -40,4 +40,7 @@ class Test(unittest.TestCase):
  if __name__ == '__main__':
+     # This should not be combined with other tests in the same process
+     # because it messes with global shared state.
+     # pragma: testrunner-no-combine
      main()