Commit e1261521, authored Feb 18, 2011 by Lisandro Dalcin
Python 3: fix NumPy support
Parent: a7506d0c

Showing 4 changed files with 48 additions and 30 deletions (+48 -30)
Cython/Compiler/Buffer.py        +10 -2
Cython/Compiler/Interpreter.py    +7 -1
Cython/Includes/numpy.pxd         +5 -2
tests/run/numpy_test.pyx         +26 -25

Cython/Compiler/Buffer.py
@@ -527,8 +527,7 @@ def use_py2_buffer_functions(env):
         #if PY_MAJOR_VERSION < 3
         static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) {
           #if PY_VERSION_HEX >= 0x02060000
-          if (Py_TYPE(obj)->tp_flags & Py_TPFLAGS_HAVE_NEWBUFFER)
-              return PyObject_GetBuffer(obj, view, flags);
+          if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags);
           #endif
     """)
     if len(types) > 0:
@@ -548,11 +547,15 @@ def use_py2_buffer_functions(env):
         static void __Pyx_ReleaseBuffer(Py_buffer *view) {
           PyObject* obj = view->obj;
           if (obj) {
+            #if PY_VERSION_HEX >= 0x02060000
+            if (PyObject_CheckBuffer(obj)) {PyBuffer_Release(view); return;}
+            #endif
     """)
     if len(types) > 0:
         clause = "if"
         for t, get, release in types:
             if release:
+                code += "        "
                 code += "%s (PyObject_TypeCheck(obj, %s)) %s(obj, view);" % (clause, t, release)
                 clause = "else if"
     code += dedent("""
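Note: both hunks above swap the old `tp_flags & Py_TPFLAGS_HAVE_NEWBUFFER` test for `PyObject_CheckBuffer()`, the CPython 2.6+ call that asks whether an object's type implements the new (PEP 3118) buffer protocol. A rough Python-level analogue of that check, for readers who do not want to trace the generated C, is sketched below; the helper name is ours, not Cython's, and `memoryview()` is simply the Python-visible entry point to the same protocol.

    # Rough Python-level analogue (not the generated C): PyObject_CheckBuffer(obj)
    # asks whether obj's type implements the PEP 3118 buffer protocol, which is
    # exactly what memoryview() exercises from Python code.
    def supports_new_buffer_protocol(obj):   # hypothetical helper, illustration only
        try:
            memoryview(obj)   # TypeError if the type has no buffer interface
            return True
        except TypeError:
            return False

    print(supports_new_buffer_protocol(bytearray(b"abc")))  # True
    print(supports_new_buffer_protocol(42))                 # False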
@@ -1114,6 +1117,11 @@ static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const cha
         ctx->new_count = 1;
         got_Z = 0;
         break;
+      case ':':
+        ++ts;
+        while(*ts != ':') ++ts;
+        ++ts;
+        break;
       default:
         {
           ctx->new_count = __Pyx_BufFmt_ParseNumber(&ts);
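Note: the new `case ':'` handles embedded field names in PEP 3118 struct-mode format strings (for example `"T{i:x:f:y:}"`): the parser skips from the opening ':' past the name to the character after the closing ':'. A purely illustrative Python sketch of that skip (the helper name is ours, not part of the commit):

    # Illustrative only: skip a ":name:" field annotation in a PEP 3118-style
    # struct format string, mirroring the new C case above.
    def skip_field_name(fmt, i):
        assert fmt[i] == ':'
        i += 1                    # step past the opening ':'
        while fmt[i] != ':':      # scan over the field name
            i += 1
        return i + 1              # step past the closing ':'

    fmt = "T{i:x:f:y:}"
    print(skip_field_name(fmt, 3))   # 6 -> now pointing at the 'f' element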

Cython/Compiler/Interpreter.py
@@ -6,6 +6,7 @@ For now this only covers parse tree to value conversion of
 compile-time values.
 """
+import sys
 from Nodes import *
 from ExprNodes import *
 from Errors import CompileError
@@ -44,6 +45,10 @@ def interpret_compiletime_options(optlist, optdict, type_env=None, type_args=())
             else:
                 raise CompileError(node.pos, "Type not allowed here.")
         else:
+            if (sys.version_info[0] >= 3 and
+                isinstance(node, StringNode) and
+                node.unicode_value is not None):
+                return (node.unicode_value, node.pos)
             return (node.compile_time_value(empty_scope), node.pos)

     if optlist:
@@ -52,6 +57,7 @@ def interpret_compiletime_options(optlist, optdict, type_env=None, type_args=())
         assert isinstance(optdict, DictNode)
         new_optdict = {}
         for item in optdict.key_value_pairs:
-            new_optdict[item.key.value] = interpret(item.value, item.key.value)
+            new_key, dummy = interpret(item.key, None)
+            new_optdict[new_key] = interpret(item.value, item.key.value)
         optdict = new_optdict
     return (optlist, new_optdict)
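Note: the effect of the `unicode_value` branch is that, when Cython itself runs on Python 3, string-valued buffer options (such as `mode='c'` or a dtype spelled as a plain string literal) are interpreted as text rather than bytes. A simplified stand-alone sketch of that intent follows; the `FakeStringNode` class and its fields are ours for illustration, not Cython's actual node API.

    # Simplified sketch of the intent, not Cython's actual classes.
    import sys

    class FakeStringNode(object):          # stand-in for Cython's StringNode
        def __init__(self, byte_value, unicode_value, pos=("mod.pyx", 1, 0)):
            self.byte_value = byte_value
            self.unicode_value = unicode_value
            self.pos = pos
        def compile_time_value(self, scope):
            return self.byte_value

    def interpret(node):
        # Prefer the text form on Python 3, as the patched Interpreter.py does.
        if sys.version_info[0] >= 3 and node.unicode_value is not None:
            return (node.unicode_value, node.pos)
        return (node.compile_time_value(None), node.pos)

    print(interpret(FakeStringNode(b"c", u"c"))[0])   # 'c' on Python 3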

Cython/Includes/numpy.pxd
@@ -737,11 +737,14 @@ ctypedef double complex complex128_t
 
+# numpy.int corresponds to 'l' and numpy.long to 'q'
 ctypedef npy_long       int_t
 ctypedef npy_longlong   long_t
-ctypedef npy_intp       intp_t
-ctypedef npy_uintp      uintp_t
+ctypedef npy_longlong   longlong_t
 
 ctypedef npy_ulong      uint_t
 ctypedef npy_ulonglong  ulong_t
+ctypedef npy_ulonglong  ulonglong_t
+ctypedef npy_intp       intp_t
+ctypedef npy_uintp      uintp_t
 
 ctypedef npy_double     float_t
 ctypedef npy_double     double_t
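Note: the comment added above refers to NumPy's scalar-type/format-character mapping; `np.int_` is the C long (character code 'l') and `np.longlong` is 'q', which is why distinct `longlong_t`/`ulonglong_t` typedefs are useful alongside `int_t`/`long_t`. A quick, platform-dependent check (not part of the commit; uses the modern `np.int_`/`np.longlong` spellings):

    # Quick check of the type/char correspondence; sizes are platform-dependent.
    import numpy as np

    print(np.dtype(np.int_).char)       # 'l' (C long), usually 64-bit on Linux
    print(np.dtype(np.longlong).char)   # 'q' (C long long)
    print(np.dtype(np.intp).itemsize)   # pointer-sized integer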

tests/run/numpy_test.pyx
@@ -72,23 +72,23 @@ try:
     Write to slices
     >>> b = a.copy()
     >>> put_range_long_1d(b[:, 3])
-    >>> print b
+    >>> print (b)
     [[0 1 2 0 4]
      [5 6 7 1 9]]
     >>> put_range_long_1d(b[::-1, 3])
-    >>> print b
+    >>> print (b)
     [[0 1 2 1 4]
      [5 6 7 0 9]]
 
     >>> a = np.zeros(9, dtype='l')
     >>> put_range_long_1d(a[1::3])
-    >>> print a
+    >>> print (a)
     [0 0 0 0 1 0 0 2 0]
 
     Write to picked subarrays. This should NOT change the original
     array as picking creates a new mutable copy.
     >>> a = np.zeros(10, dtype='l').reshape(2, 5)
     >>> put_range_long_1d(a[[0, 0, 1, 1, 0], [0, 1, 2, 4, 3]])
-    >>> print a
+    >>> print (a)
     [[0 0 0 0 0]
      [0 0 0 0 0]]
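Note: the `print b` to `print (b)` rewrites keep these doctests runnable on both major versions: with a single argument, `print (x)` is a print statement with redundant parentheses on Python 2 and an ordinary function call on Python 3, so the expected output lines stay identical. For example:

    # Portable on Python 2 and 3 when printing a single value.
    x = [0, 1, 2]
    print (x)   # -> [0, 1, 2] under either version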
@@ -103,18 +103,18 @@ try:
     0 1 2 3
     4 5 6 7
     8 9 10 11
-    >>> test_c_contig(f_arr)
+    >>> test_c_contig(f_arr) #doctest: +ELLIPSIS
     Traceback (most recent call last):
        ...
-    ValueError: ndarray is not C contiguous
-    >>> test_f_contig(c_arr)
+    ValueError: ndarray is not C...contiguous
+    >>> test_f_contig(c_arr) #doctest: +ELLIPSIS
     Traceback (most recent call last):
        ...
     ValueError: ndarray is not Fortran contiguous
-    >>> test_c_contig(c_arr[::2,::2])
+    >>> test_c_contig(c_arr[::2,::2]) #doctest: +ELLIPSIS
     Traceback (most recent call last):
        ...
-    ValueError: ndarray is not C contiguous
+    ValueError: ndarray is not C...contiguous
 
     >>> test_dtype('b', inc1_byte)
     >>> test_dtype('B', inc1_ubyte)
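Note: the `#doctest: +ELLIPSIS` directives added here let the expected `ValueError` text be abbreviated with `...`, so the same doctest passes even when the exception message is rendered slightly differently across Python versions. A small self-contained illustration of the idiom (the sample string is ours, not taken from the test suite):

    # Stand-alone illustration of the "#doctest: +ELLIPSIS" idiom used above.
    import doctest

    sample = '''
    >>> raise ValueError("ndarray is not C contiguous")  #doctest: +ELLIPSIS
    Traceback (most recent call last):
       ...
    ValueError: ndarray is not C...contiguous
    '''
    test = doctest.DocTestParser().get_doctest(sample, {}, "ellipsis_demo", None, 0)
    failed, attempted = doctest.DocTestRunner(verbose=False).run(test)
    print(failed, attempted)   # 0 1 -> the abbreviated message matched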
@@ -137,7 +137,7 @@ try:
     >>> test_dtype('G', inc1_clongdouble_struct)
 
     >>> test_dtype(np.int, inc1_int_t)
-    >>> test_dtype(np.long, inc1_long_t)
+    >>> test_dtype(np.longlong, inc1_longlong_t)
     >>> test_dtype(np.float, inc1_float_t)
     >>> test_dtype(np.double, inc1_double_t)
     >>> test_dtype(np.intp, inc1_intp_t)
@@ -150,10 +150,10 @@ try:
     Endian tests:
     >>> test_dtype('%si' % my_endian, inc1_int)
-    >>> test_dtype('%si' % other_endian, inc1_int)
+    >>> test_dtype('%si' % other_endian, inc1_int) #doctest: +ELLIPSIS
     Traceback (most recent call last):
        ...
-    ValueError: Non-native byte order not supported
+    ValueError: ...
@@ -181,15 +181,15 @@ try:
     array([(22, 23)],
           dtype=[('f0', '|i1'), ('', '|V3'), ('f1', '!i4')])
 
-    >>> print(test_packed_align(np.zeros((1,), dtype=np.dtype('b,i', align=True))))
+    >>> print(test_packed_align(np.zeros((1,), dtype=np.dtype('b,i', align=True)))) #doctest: +ELLIPSIS
     Traceback (most recent call last):
        ...
-    ValueError: Buffer dtype mismatch; next field is at offset 4 but 1 expected
+    ValueError: ...
 
-    >>> print(test_unpacked_align(np.zeros((1,), dtype=np.dtype('b,i', align=False))))
+    >>> print(test_unpacked_align(np.zeros((1,), dtype=np.dtype('b,i', align=False)))) #doctest: +ELLIPSIS
     Traceback (most recent call last):
        ...
-    ValueError: Buffer dtype mismatch; next field is at offset 1 but 4 expected
+    ValueError: ...
 
     >>> test_good_cast()
@@ -235,17 +235,17 @@ def ndarray_str(arr):
     return unicode(arr).replace(u'\n\n', u'\n<_BLANKLINE_>\n')
 
 def basic():
-    cdef object[int, ndim=2] buf = np.arange(10, dtype=b'i').reshape((2, 5))
+    cdef object[int, ndim=2] buf = np.arange(10, dtype='i').reshape((2, 5))
     print buf
     print buf[0, 2], buf[0, 0], buf[1, 4], buf[1, 0]
 
 def three_dim():
-    cdef object[double, ndim=3] buf = np.arange(24, dtype=b'd').reshape((3, 2, 4))
+    cdef object[double, ndim=3] buf = np.arange(24, dtype='d').reshape((3, 2, 4))
     print ndarray_str(buf)
     print buf[0, 1, 2], buf[0, 0, 0], buf[1, 1, 1], buf[1, 0, 0]
 
 def obj_array():
-    cdef object[object, ndim=1] buf = np.array([b"a", 1, {}])
+    cdef object[object, ndim=1] buf = np.array(["a", 1, {}])
     print buf
     print buf[0], buf[1], buf[2]
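Note: the recurring `b'i'` to `'i'`, `b"a"` to `"a"` and `mode=b'c'` to `mode='c'` edits in this file drop byte-string literals in favour of plain string literals: on Python 2 a plain literal is already a byte string, while on Python 3 NumPy dtype codes and Cython buffer `mode` options are expected as text, so the unprefixed spelling works under both interpreters. A minimal check of the portable spelling (illustrative, not from the test suite):

    # The unprefixed dtype code works the same under Python 2 and Python 3.
    import numpy as np

    a = np.arange(10, dtype='i').reshape((2, 5))   # 'i' = C int
    print(a.dtype, a.shape)                        # int32 (2, 5) on most platforms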
@@ -262,12 +262,12 @@ def put_range_long_1d(np.ndarray[long] arr):
         arr[i] = value
         value += 1
 
-def test_c_contig(np.ndarray[int, ndim=2, mode=b'c'] arr):
+def test_c_contig(np.ndarray[int, ndim=2, mode='c'] arr):
     cdef int i, j
     for i in range(arr.shape[0]):
         print u" ".join([unicode(arr[i, j]) for j in range(arr.shape[1])])
 
-def test_f_contig(np.ndarray[int, ndim=2, mode=b'fortran'] arr):
+def test_f_contig(np.ndarray[int, ndim=2, mode='fortran'] arr):
     cdef int i, j
     for i in range(arr.shape[0]):
         print u" ".join([unicode(arr[i, j]) for j in range(arr.shape[1])])
@@ -314,6 +314,7 @@ def inc1_object(np.ndarray[object] arr):
 def inc1_int_t(np.ndarray[np.int_t] arr):               arr[1] += 1
 def inc1_long_t(np.ndarray[np.long_t] arr):             arr[1] += 1
+def inc1_longlong_t(np.ndarray[np.longlong_t] arr):     arr[1] += 1
 def inc1_float_t(np.ndarray[np.float_t] arr):           arr[1] += 1
 def inc1_double_t(np.ndarray[np.double_t] arr):         arr[1] += 1
 def inc1_longdouble_t(np.ndarray[np.longdouble_t] arr): arr[1] += 1
@@ -330,7 +331,7 @@ def test_dtype(dtype, inc1):
                  "G", np.clongdouble):
         if sizeof(double) == sizeof(long double): # MSVC
             return
-    if dtype in (b'F', b'D', b'G'):
+    if dtype in ('F', 'D', 'G'):
         a = np.array([0, 10+10j], dtype=dtype)
         inc1(a)
         if a[1] != (11 + 11j): print u"failed!", a[1]
@@ -344,7 +345,7 @@ cdef struct DoubleInt:
 def test_recordarray():
     cdef object[DoubleInt] arr
-    arr = np.array([(5, 5), (4, 6)], dtype=np.dtype(b'i,i'))
+    arr = np.array([(5, 5), (4, 6)], dtype=np.dtype('i,i'))
     cdef DoubleInt rec
     rec = arr[0]
     if rec.x != 5: print u"failed"
@@ -384,13 +385,13 @@ def test_bad_nested_dtypes():
 def test_good_cast():
     # Check that a signed int can round-trip through casted unsigned int access
-    cdef np.ndarray[unsigned int, cast=True] arr = np.array([-100], dtype=b'i')
+    cdef np.ndarray[unsigned int, cast=True] arr = np.array([-100], dtype='i')
     cdef unsigned int data = arr[0]
     return -100 == <int>data
 
 def test_bad_cast():
     # This should raise an exception
-    cdef np.ndarray[int, cast=True] arr = np.array([1], dtype=b'b')
+    cdef np.ndarray[int, cast=True] arr = np.array([1], dtype='b')
 
 cdef packed struct PackedStruct:
     char a