Commit acc67c32 authored by Jason Madden

Fix Popen.communicate() to raise exceptions from reading the streams.

Also a general cleanup of how the streams are read, ensuring there is only one reader greenlet per stream. This avoids the 'reentrant call' errors that could prevent the streams from being closed promptly.

Fixes #1510
parent 785b7b55
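As a minimal sketch of the user-visible change (assuming this patch is applied, Python 3, and a UTF-8 locale so that b"\xff" is undecodable), communicate() now re-raises the UnicodeDecodeError hit while decoding the child's output instead of letting it escape in a background greenlet:

    import sys
    import gevent.subprocess as subprocess

    # The child writes a byte that is not valid UTF-8 to its stdout.
    with subprocess.Popen(
            [sys.executable, '-c',
             r'import os, sys; os.write(sys.stdout.fileno(), b"\xff")'],
            stdout=subprocess.PIPE,
            universal_newlines=True,  # text mode: output is decoded on read
    ) as p:
        try:
            p.communicate()
        except UnicodeDecodeError as ex:
            # Previously the error surfaced in the reader greenlet and the
            # output was silently lost; now communicate() raises it here.
            print('communicate() raised:', ex)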
@@ -39,7 +39,21 @@ Library and Dependency Updates
with debugging. The event libraries allocate small amounts of memory
at startup. The allocation functions have to take the GIL, but
because of the limited amount of actual allocation that gets done
this is not expected to be a concern.
this is not expected to be a bottleneck.
Other
-----
- Make `gevent.subprocess.Popen.communicate` propagate exceptions
raised while reading from the process, like the standard library. In
particular, under Python 3, if the process output is being decoded
as text, this can now raise ``UnicodeDecodeError``. Reported in
:issue:`1510` by Ofer Koren.
- Make `gevent.subprocess.Popen.communicate` more careful about
closing files. Previously, if a timeout error happened, a second call
to ``communicate`` might not close the pipe.
1.5a3 (2020-01-01)
==================
@@ -371,7 +371,13 @@ class FileObjectBase(object):
return getattr(self._io, name)
def __repr__(self):
return '<%s _fobj=%r%s>' % (self.__class__.__name__, self.io, self._extra_repr())
return '<%s at 0x%x %s_fobj=%r%s>' % (
self.__class__.__name__,
id(self),
'closed' if self.closed else '',
self.io,
self._extra_repr()
)
def _extra_repr(self):
return ''
@@ -39,7 +39,6 @@ import sys
import traceback
from gevent.event import AsyncResult
from gevent.exceptions import ConcurrentObjectUseError
from gevent.hub import _get_hub_noargs as get_hub
from gevent.hub import linkproxy
from gevent.hub import sleep
@@ -264,6 +263,13 @@ else:
fork = monkey.get_original('os', 'fork')
from gevent.os import fork_and_watch
try:
BrokenPipeError
except NameError: # Python 2
class BrokenPipeError(Exception):
"Never raised, never caught."
def call(*popenargs, **kwargs):
"""
call(args, *, stdin=None, stdout=None, stderr=None, shell=False, timeout=None) -> returncode
@@ -437,6 +443,95 @@ def FileObject(*args, **kwargs):
globals()['FileObject'] = _FileObject
return _FileObject(*args)
class _CommunicatingGreenlets(object):
# At most one of these objects may be created for a given Popen
# object. This ensures that only one background greenlet at a time
# will be reading from each file object. This matters because
# if a timeout exception is raised, the user may call back into communicate() to
# get the output (usually after killing the process; see run()). We must not
# lose output in that case (Python 3 specifically documents that raising a timeout
# doesn't lose output). Also, attempting to read from a pipe while it's already
# being read from results in `RuntimeError: reentrant call in io.BufferedReader`;
# the same thing happens if you attempt to close() it while that's in progress.
__slots__ = (
'stdin',
'stdout',
'stderr',
'_all_greenlets',
)
def __init__(self, popen, input_data):
self.stdin = self.stdout = self.stderr = None
if popen.stdin: # Even if no data, we need to close
self.stdin = spawn(self._write_and_close, popen.stdin, input_data)
# If the timeout parameter is used, and the caller calls back after
# getting a TimeoutExpired exception, we can wind up with multiple
# greenlets trying to run and read from and close stdout/stderr.
# That's bad because it can lead to 'RuntimeError: reentrant call in io.BufferedReader'.
# We can't just kill the previous greenlets when a timeout happens,
# though, because we risk losing the output collected by that greenlet
# (and Python 3, where timeout is an official parameter, explicitly says
# that no output should be lost in the event of a timeout.) Instead, we're
# watching for the exception and ignoring it. It's not elegant,
# but it works
if popen.stdout:
self.stdout = spawn(self._read_and_close, popen.stdout)
if popen.stderr:
self.stderr = spawn(self._read_and_close, popen.stderr)
all_greenlets = []
for g in self.stdin, self.stdout, self.stderr:
if g is not None:
all_greenlets.append(g)
self._all_greenlets = tuple(all_greenlets)
def __iter__(self):
return iter(self._all_greenlets)
def __bool__(self):
return bool(self._all_greenlets)
__nonzero__ = __bool__
def __len__(self):
return len(self._all_greenlets)
@staticmethod
def _write_and_close(fobj, data):
try:
if data:
fobj.write(data)
if hasattr(fobj, 'flush'):
# 3.6 started expecting flush to be called.
fobj.flush()
except (OSError, IOError, BrokenPipeError) as ex:
# Test cases from the stdlib can raise BrokenPipeError
# without setting an errno value; give it EPIPE so the check
# below suppresses it. (On Python 2, BrokenPipeError is the
# placeholder class defined above and is never actually raised.)
if isinstance(ex, BrokenPipeError) and ex.errno is None:
ex.errno = errno.EPIPE
if ex.errno != errno.EPIPE and ex.errno != errno.EINVAL:
raise
finally:
try:
fobj.close()
except EnvironmentError:
pass
@staticmethod
def _read_and_close(fobj):
try:
return fobj.read()
finally:
try:
fobj.close()
except EnvironmentError:
pass
class Popen(object):
"""
The underlying process creation and management in this module is
@@ -706,13 +801,17 @@ class Popen(object):
self._devnull = os.open(os.devnull, os.O_RDWR)
return self._devnull
_stdout_buffer = None
_stderr_buffer = None
_communicating_greenlets = None
def communicate(self, input=None, timeout=None):
"""Interact with process: Send data to stdin. Read data from
stdout and stderr, until end-of-file is reached. Wait for
process to terminate. The optional input argument should be a
"""
Interact with process and return its output and error.
- Send *input* data to stdin.
- Read data from stdout and stderr, until end-of-file is reached.
- Wait for process to terminate.
The optional *input* argument should be a
string to be sent to the child process, or None, if no data
should be sent to the child.
@@ -731,57 +830,9 @@ class Popen(object):
Honor a *timeout* even if there's no way to communicate with the child
(stdin, stdout, and stderr are not pipes).
"""
greenlets = []
if self.stdin:
greenlets.append(spawn(write_and_close, self.stdin, input))
# If the timeout parameter is used, and the caller calls back after
# getting a TimeoutExpired exception, we can wind up with multiple
# greenlets trying to run and read from and close stdout/stderr.
# That's bad because it can lead to 'RuntimeError: reentrant call in io.BufferedReader'.
# We can't just kill the previous greenlets when a timeout happens,
# though, because we risk losing the output collected by that greenlet
# (and Python 3, where timeout is an official parameter, explicitly says
# that no output should be lost in the event of a timeout.) Instead, we're
# watching for the exception and ignoring it. It's not elegant,
# but it works
def _make_pipe_reader(pipe_name):
pipe = getattr(self, pipe_name)
buf_name = '_' + pipe_name + '_buffer'
def _read():
try:
data = pipe.read()
except (
# io.Buffered* can raise RuntimeError: 'reentrant call'
RuntimeError,
# unbuffered Posix IO that we're already waiting on
# can raise this. Closing the pipe will free those greenlets up.
ConcurrentObjectUseError
):
return
if not data:
return
the_buffer = getattr(self, buf_name)
if the_buffer:
the_buffer.append(data)
else:
setattr(self, buf_name, [data])
return _read
if self.stdout:
_read_out = _make_pipe_reader('stdout')
stdout = spawn(_read_out)
greenlets.append(stdout)
else:
stdout = None
if self.stderr:
_read_err = _make_pipe_reader('stderr')
stderr = spawn(_read_err)
greenlets.append(stderr)
else:
stderr = None
if self._communicating_greenlets is None:
self._communicating_greenlets = _CommunicatingGreenlets(self, input)
greenlets = self._communicating_greenlets
# If we were given stdin=stdout=stderr=None, we have no way to
# communicate with the child, and thus no greenlets to wait
@@ -793,9 +844,18 @@ class Popen(object):
self.wait(timeout=timeout, _raise_exc=True)
done = joinall(greenlets, timeout=timeout)
if timeout is not None and len(done) != len(greenlets):
# Allow finished greenlets, if any, to raise. This takes priority over
# the timeout exception.
for greenlet in done:
greenlet.get()
if timeout is not None and len(done) != len(self._communicating_greenlets):
raise TimeoutExpired(self.args, timeout)
# Close only after we're sure that everything is done
# (there was no timeout, or there was, but everything finished).
# There should be no greenlets still running, even from a prior
# attempt. If there are, then this can raise RuntimeError: 'reentrant call'.
# So we ensure that previous greenlets are dead.
for pipe in (self.stdout, self.stderr):
if pipe:
try:
@@ -805,21 +865,8 @@ class Popen(object):
self.wait()
def _get_output_value(pipe_name):
buf_name = '_' + pipe_name + '_buffer'
buf_value = getattr(self, buf_name)
setattr(self, buf_name, None)
if buf_value:
buf_value = self._communicate_empty_value.join(buf_value)
else:
buf_value = self._communicate_empty_value
return buf_value
stdout_value = _get_output_value('stdout')
stderr_value = _get_output_value('stderr')
return (None if stdout is None else stdout_value,
None if stderr is None else stderr_value)
return (None if greenlets.stdout is None else greenlets.stdout.get(),
None if greenlets.stderr is None else greenlets.stderr.get())
def poll(self):
"""Check if child process has terminated. Set and return :attr:`returncode` attribute."""
@@ -1648,22 +1695,6 @@ class Popen(object):
self.send_signal(signal.SIGKILL)
def write_and_close(fobj, data):
try:
if data:
fobj.write(data)
if hasattr(fobj, 'flush'):
# 3.6 started expecting flush to be called.
fobj.flush()
except (OSError, IOError) as ex:
if ex.errno != errno.EPIPE and ex.errno != errno.EINVAL:
raise
finally:
try:
fobj.close()
except EnvironmentError:
pass
def _with_stdout_stderr(exc, stderr):
# Prior to Python 3.5, most exceptions didn't have stdout
# and stderr attributes and can't take the stderr attribute in their
@@ -85,6 +85,7 @@ from .skipping import skipOnPurePython
from .skipping import skipWithCExtensions
from .skipping import skipOnLibuvOnTravisOnCPython27
from .skipping import skipOnPy37
from .skipping import skipOnPy3
from .skipping import skipWithoutResource
from .skipping import skipWithoutExternalNetwork
from .skipping import skipOnPy2
@@ -22,16 +22,52 @@ from __future__ import absolute_import, print_function, division
import os
import unittest
import re
import gc
import functools
from . import sysinfo
# Linux/OS X/BSD platforms can implement this by calling out to lsof
# Linux/OS X/BSD platforms /can/ implement this by calling out to lsof.
# However, if psutil is available (it is cross-platform), use that.
# It is *much* faster than shelling out to lsof each time
# (running 14 tests takes 3.964s with lsof and 0.046s with psutil).
# Even so, it doesn't completely solve the issue on Windows: fds are reported
# as -1 there, so we can't fully check those.
def _collects(func):
# We've seen OSError: No such file or directory /proc/PID/fd/NUM.
# This occurs in the loop that checks open files. It first does
# listdir() and then tries readlink() on each file. But the file
# went away. This must be because of async GC in PyPy running
# destructors at arbitrary times. This became an issue in PyPy 7.2
# but could theoretically be an issue with any objects caught in a
# cycle. This is one reason we GC before we begin. (The other is
# to clean up outstanding objects that will close files in
# __del__.)
#
# Note that this can hide errors, though, by causing greenlets to get
# collected and drop references and thus close files. We should be deterministic
# and careful about closing things.
@functools.wraps(func)
def f():
gc.collect()
gc.collect()
enabled = gc.isenabled()
gc.disable()
try:
return func()
finally:
if enabled:
gc.enable()
return f
if sysinfo.WIN:
def _run_lsof():
raise unittest.SkipTest("lsof not expected on Windows")
else:
@_collects
def _run_lsof():
import tempfile
pid = os.getpid()
@@ -70,6 +106,7 @@ def default_get_open_files(pipes=False):
results['data'] = data
return results
@_collects
def default_get_number_open_files():
if os.path.exists('/proc/'):
# Linux only
@@ -91,12 +128,8 @@ except ImportError:
get_open_files = default_get_open_files
get_number_open_files = default_get_number_open_files
else:
# If psutil is available (it is cross-platform) use that.
# It is *much* faster than shelling out to lsof each time
# (Running 14 tests takes 3.964s with lsof and 0.046 with psutil)
# However, it still doesn't completely solve the issue on Windows: fds are reported
# as -1 there, so we can't fully check those.
@_collects
def get_open_files():
"""
Return a list of popenfile and pconn objects.
@@ -108,37 +141,26 @@ else:
(socket.listen(1)). Unlike the lsof implementation, this will only
return sockets in a state like that.
"""
# We've seen OSError: No such file or directory
# /proc/PID/fd/NUM. This occurs in the loop that checks open
# files. It first does listdir() and then tries readlink() on
# each file. But the file went away. This must be because of
# async GC in PyPy running destructors at arbitrary times.
# This became an issue in PyPy 7.2 but could theoretically be
# an issue with any objects caught in a cycle. Try to clean
# that up before we begin.
import gc
gc.collect()
gc.collect()
results = dict()
gc.disable()
try:
for _ in range(3):
try:
process = psutil.Process()
results['data'] = process.open_files() + process.connections('all')
break
except OSError:
pass
else:
# No break executed
raise unittest.SkipTest("Unable to read open files")
finally:
gc.enable()
for _ in range(3):
try:
process = psutil.Process()
results['data'] = process.open_files() + process.connections('all')
break
except OSError:
pass
else:
# No break executed
raise unittest.SkipTest("Unable to read open files")
for x in results['data']:
results[x.fd] = x
results['data'] += ['From psutil', process]
return results
@_collects
def get_number_open_files():
process = psutil.Process()
try:
@@ -146,3 +168,28 @@ else:
except AttributeError:
# num_fds is unix only. Is num_handles close enough on Windows?
return 0
class DoesNotLeakFilesMixin(object): # pragma: no cover
"""
A test case mixin that helps find a method that's leaking an
open file.
Only mix this in when needed to debug, it slows tests down.
"""
def setUp(self):
self.__open_files_count = get_number_open_files()
super(DoesNotLeakFilesMixin, self).setUp()
def tearDown(self):
super(DoesNotLeakFilesMixin, self).tearDown()
after = get_number_open_files()
if after > self.__open_files_count:
raise AssertionError(
"Too many open files. Before: %s < After: %s.\n%s" % (
self.__open_files_count,
after,
get_open_files()
)
)
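A debugging-only usage sketch for the new mixin; the import path and the test class below are illustrative assumptions, not part of this commit:

    import os
    import unittest

    # Module path assumed; the diff does not show which file this mixin lands in.
    from gevent.testing.openfiles import DoesNotLeakFilesMixin

    class TestPipesAreClosed(DoesNotLeakFilesMixin, unittest.TestCase):
        # tearDown() compares the current open-file count with the one
        # captured in setUp() and raises AssertionError if it grew.
        def test_pipe_is_closed(self):
            r, w = os.pipe()
            os.close(r)
            os.close(w)  # leaving either end open would fail the leak check

    if __name__ == '__main__':
        unittest.main()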
@@ -43,6 +43,7 @@ skipOnPyPy3 = _do_not_skip
skipOnPyPyOnWindows = _do_not_skip
skipOnPy2 = unittest.skip if sysinfo.PY2 else _do_not_skip
skipOnPy3 = unittest.skip if sysinfo.PY3 else _do_not_skip
skipOnPy37 = unittest.skip if sysinfo.PY37 else _do_not_skip
skipOnPurePython = unittest.skip if sysinfo.PURE_PYTHON else _do_not_skip
@@ -248,6 +248,11 @@ class TestCase(TestCaseMetaClass("NewBase",
__timeout__ = params.LOCAL_TIMEOUT if not sysinfo.RUNNING_ON_CI else params.CI_TIMEOUT
switch_expected = 'default'
#: Set this to true to cause errors that get reported to the hub to
#: always get propagated to the main greenlet. This can be done at the
#: class or method level.
#: .. caution:: This can hide errors and make it look like exceptions
#: are propagated even if they're not.
error_fatal = True
uses_handle_error = True
close_on_teardown = ()
@@ -257,7 +262,7 @@ class TestCase(TestCaseMetaClass("NewBase",
# pylint:disable=arguments-differ
if self.switch_expected == 'default':
self.switch_expected = get_switch_expected(self.fullname)
return BaseTestCase.run(self, *args, **kwargs)
return super(TestCase, self).run(*args, **kwargs)
def setUp(self):
super(TestCase, self).setUp()
@@ -4,7 +4,6 @@ import errno
import unittest
import time
import gc
import tempfile
import gevent.testing as greentest
@@ -34,12 +33,11 @@ python_universal_newlines = hasattr(sys.stdout, 'newlines')
python_universal_newlines_broken = PY3 and subprocess.mswindows
@greentest.skipWithoutResource('subprocess')
class Test(greentest.TestCase):
class TestPopen(greentest.TestCase):
def setUp(self):
greentest.TestCase.setUp(self)
gc.collect()
gc.collect()
# Use the normal error handling. Make sure that any background greenlets
# subprocess spawns propagate errors as expected.
error_fatal = False
def test_exit(self):
popen = subprocess.Popen([sys.executable, '-c', 'import sys; sys.exit(10)'])
@@ -51,12 +49,10 @@ class Test(greentest.TestCase):
self.assertEqual(popen.poll(), 11)
def test_child_exception(self):
try:
with self.assertRaises(OSError) as exc:
subprocess.Popen(['*']).wait()
except OSError as ex:
assert ex.errno == 2, ex
else:
raise AssertionError('Expected OSError: [Errno 2] No such file or directory')
self.assertEqual(exc.exception.errno, 2)
def test_leak(self):
num_before = greentest.get_number_open_files()
@@ -65,9 +61,6 @@ class Test(greentest.TestCase):
p.wait()
p.stdout.close()
del p
if PYPY:
gc.collect()
gc.collect()
num_after = greentest.get_number_open_files()
self.assertEqual(num_before, num_after)
@@ -106,7 +99,8 @@ class Test(greentest.TestCase):
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True)
universal_newlines=True
)
(stdout, stderr) = p.communicate('banana\r\n\xff\xff\xf2\xf9\r\n')
self.assertIsInstance(stdout, str)
self.assertIsInstance(stderr, str)
@@ -116,25 +110,50 @@ class Test(greentest.TestCase):
self.assertEqual(stderr,
'pineapple\n\xff\xff\xf2\xf9\n')
@greentest.skipOnPy2("Only Python 3 decodes")
def test_communicate_undecodable(self):
# If the subprocess writes non-decodable data, `communicate` raises the
# same UnicodeDecodeError that the stdlib does, instead of
# printing it to the hub. This only applies to Python 3, because only it
# will actually use text mode.
# See https://github.com/gevent/gevent/issues/1510
with subprocess.Popen(
[
sys.executable,
'-W', 'ignore',
'-c',
"import os, sys; "
r'os.write(sys.stdout.fileno(), b"\xff")'
],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
text=True, universal_newlines=True
) as p:
with self.assertRaises(UnicodeDecodeError):
p.communicate()
@greentest.skipOnLibuvOnPyPyOnWin("hangs")
def test_universal1(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;' + SETBINARY +
'sys.stdout.write("line1\\n");'
'sys.stdout.flush();'
'sys.stdout.write("line2\\r");'
'sys.stdout.flush();'
'sys.stdout.write("line3\\r\\n");'
'sys.stdout.flush();'
'sys.stdout.write("line4\\r");'
'sys.stdout.flush();'
'sys.stdout.write("\\nline5");'
'sys.stdout.flush();'
'sys.stdout.write("\\nline6");'],
stdout=subprocess.PIPE,
universal_newlines=1,
bufsize=1)
try:
with subprocess.Popen(
[
sys.executable, "-c",
'import sys,os;' + SETBINARY +
'sys.stdout.write("line1\\n");'
'sys.stdout.flush();'
'sys.stdout.write("line2\\r");'
'sys.stdout.flush();'
'sys.stdout.write("line3\\r\\n");'
'sys.stdout.flush();'
'sys.stdout.write("line4\\r");'
'sys.stdout.flush();'
'sys.stdout.write("\\nline5");'
'sys.stdout.flush();'
'sys.stdout.write("\\nline6");'
],
stdout=subprocess.PIPE,
universal_newlines=1,
bufsize=1
) as p:
stdout = p.stdout.read()
if python_universal_newlines:
# Interpreter with universal newline support
@@ -149,26 +168,27 @@ class Test(greentest.TestCase):
# Interpreter without universal newline support
self.assertEqual(stdout,
"line1\nline2\rline3\r\nline4\r\nline5\nline6")
finally:
p.stdout.close()
@greentest.skipOnLibuvOnPyPyOnWin("hangs")
def test_universal2(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;' + SETBINARY +
'sys.stdout.write("line1\\n");'
'sys.stdout.flush();'
'sys.stdout.write("line2\\r");'
'sys.stdout.flush();'
'sys.stdout.write("line3\\r\\n");'
'sys.stdout.flush();'
'sys.stdout.write("line4\\r\\nline5");'
'sys.stdout.flush();'
'sys.stdout.write("\\nline6");'],
stdout=subprocess.PIPE,
universal_newlines=1,
bufsize=1)
try:
with subprocess.Popen(
[
sys.executable, "-c",
'import sys,os;' + SETBINARY +
'sys.stdout.write("line1\\n");'
'sys.stdout.flush();'
'sys.stdout.write("line2\\r");'
'sys.stdout.flush();'
'sys.stdout.write("line3\\r\\n");'
'sys.stdout.flush();'
'sys.stdout.write("line4\\r\\nline5");'
'sys.stdout.flush();'
'sys.stdout.write("\\nline6");'
],
stdout=subprocess.PIPE,
universal_newlines=1,
bufsize=1
) as p:
stdout = p.stdout.read()
if python_universal_newlines:
# Interpreter with universal newline support
@@ -183,60 +203,54 @@ class Test(greentest.TestCase):
# Interpreter without universal newline support
self.assertEqual(stdout,
"line1\nline2\rline3\r\nline4\r\nline5\nline6")
finally:
p.stdout.close()
@greentest.skipOnWindows("Uses 'grep' command")
def test_nonblock_removed(self):
# see issue #134
r, w = os.pipe()
stdin = subprocess.FileObject(r)
p = subprocess.Popen(['grep', 'text'], stdin=stdin)
try:
# Closing one half of the pipe causes Python 3 on OS X to terminate the
# child process; it exits with code 1 and the assert that p.poll is None
# fails. Removing the close lets it pass under both Python 3 and 2.7.
# If subprocess.Popen._remove_nonblock_flag is changed to a noop, then
# the test fails (as expected) even with the close removed
#os.close(w)
time.sleep(0.1)
self.assertEqual(p.poll(), None)
finally:
if p.poll() is None:
p.kill()
stdin.close()
os.close(w)
with subprocess.Popen(['grep', 'text'], stdin=stdin) as p:
try:
# Closing one half of the pipe causes Python 3 on OS X to terminate the
# child process; it exits with code 1 and the assert that p.poll is None
# fails. Removing the close lets it pass under both Python 3 and 2.7.
# If subprocess.Popen._remove_nonblock_flag is changed to a noop, then
# the test fails (as expected) even with the close removed
#os.close(w)
time.sleep(0.1)
self.assertEqual(p.poll(), None)
finally:
if p.poll() is None:
p.kill()
stdin.close()
os.close(w)
def test_issue148(self):
for _ in range(7):
try:
subprocess.Popen('this_name_must_not_exist')
except OSError as ex:
if ex.errno != errno.ENOENT:
raise
else:
raise AssertionError('must fail with ENOENT')
with self.assertRaises(OSError) as exc:
with subprocess.Popen('this_name_must_not_exist'):
pass
self.assertEqual(exc.exception.errno, errno.ENOENT)
@greentest.skipOnLibuvOnPyPyOnWin("hangs")
def test_check_output_keyword_error(self):
try:
with self.assertRaises(subprocess.CalledProcessError) as exc: # pylint:disable=no-member
subprocess.check_output([sys.executable, '-c', 'import sys; sys.exit(44)'])
except subprocess.CalledProcessError as e: # pylint:disable=no-member
self.assertEqual(e.returncode, 44)
else:
raise AssertionError('must fail with CalledProcessError')
self.assertEqual(exc.exception.returncode, 44)
@greentest.skipOnPy3("The default buffering changed in Py3")
def test_popen_bufsize(self):
# Test that subprocess has unbuffered output by default
# (as the vanilla subprocess module)
if PY3:
# The default changed under python 3.
return
p = subprocess.Popen([sys.executable, '-u', '-c',
'import sys; sys.stdout.write(sys.stdin.readline())'],
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
p.stdin.write(b'foobar\n')
r = p.stdout.readline()
with subprocess.Popen(
[sys.executable, '-u', '-c',
'import sys; sys.stdout.write(sys.stdin.readline())'],
stdin=subprocess.PIPE, stdout=subprocess.PIPE
) as p:
p.stdin.write(b'foobar\n')
r = p.stdout.readline()
self.assertEqual(r, b'foobar\n')
@greentest.ignores_leakcheck
@@ -253,7 +267,6 @@ class Test(greentest.TestCase):
def fn():
with self.assertRaises(TypeError) as exc:
gevent.subprocess.Popen('echo 123', shell=True)
raise AssertionError("Should not be able to construct Popen")
ex.append(exc.exception)
thread = Thread(target=fn)
@@ -267,10 +280,12 @@ class Test(greentest.TestCase):
@greentest.skipOnLibuvOnPyPyOnWin("hangs")
def __test_no_output(self, kwargs, kind):
proc = subprocess.Popen([sys.executable, '-c', 'pass'],
stdout=subprocess.PIPE,
**kwargs)
stdout, stderr = proc.communicate()
with subprocess.Popen(
[sys.executable, '-c', 'pass'],
stdout=subprocess.PIPE,
**kwargs
) as proc:
stdout, stderr = proc.communicate()
self.assertIsInstance(stdout, kind)
self.assertIsNone(stderr)