Commit f6c09675 authored by Stefan Behnel

merge

parents 0cba51af 7c0ea3b0
@@ -115,6 +115,7 @@ class IntroduceBufferAuxiliaryVars(CythonTransform):
 #
 buffer_options = ("dtype", "ndim", "mode") # ordered!
 buffer_defaults = {"ndim": 1, "mode": "full"}
+buffer_positional_options_count = 1 # anything beyond this needs keyword argument
 ERR_BUF_OPTION_UNKNOWN = '"%s" is not a buffer option'
 ERR_BUF_TOO_MANY = 'Too many buffer options'
@@ -140,11 +141,12 @@ def analyse_buffer_options(globalpos, env, posargs, dictargs, defaults=None, nee
 posargs, dictargs = Interpreter.interpret_compiletime_options(posargs, dictargs, type_env=env)
-if len(posargs) > len(buffer_options):
+if len(posargs) > buffer_positional_options_count:
 raise CompileError(posargs[-1][1], ERR_BUF_TOO_MANY)
 options = {}
-for name, (value, pos) in dictargs.iteritems():
+for unicode_name, (value, pos) in dictargs.iteritems():
+name = str(unicode_name)
 if not name in buffer_options:
 raise CompileError(pos, ERR_BUF_OPTION_UNKNOWN % name)
 options[name] = value
@@ -457,10 +459,10 @@ def get_ts_check_item(dtype, writer):
 code = dedent("""\
 if (*ts == '1') ++ts;
 if (*ts != '%s') {
-PyErr_Format(PyExc_ValueError, "Buffer datatype mismatch (rejecting on '%%s')", ts);
+PyErr_Format(PyExc_ValueError, "Buffer datatype mismatch (expected '%s', got '%%s')", ts);
 return NULL;
 } else return ts + 1;
-""", 2) % char
+""", 2) % (char, char)
 else:
 # Cannot trust declared size; but rely on int vs float and
 # signed/unsigned to be correctly declared
@@ -474,6 +476,11 @@ def get_ts_check_item(dtype, writer):
 ('b', 'char'), ('h', 'short'), ('i', 'int'),
 ('l', 'long'), ('q', 'long long')
 ]
+elif dtype.is_float:
+types = [('f', 'float'), ('d', 'double'), ('g', 'long double')]
+else:
+assert dtype.is_error
+return name
 if dtype.signed == 0:
 code += "".join(["\n case '%s': ok = (sizeof(%s) == sizeof(%s) && (%s)-1 > 0); break;" %
 (char.upper(), ctype, against, ctype) for char, against in types])
@@ -488,6 +495,8 @@ def get_ts_check_item(dtype, writer):
 return NULL;
 } else return ts + 1;
 """, 2)
 writer.globalstate.use_utility_code([dedent("""\
 static const char* %s(const char* ts); /*proto*/
 """) % name, dedent("""
@@ -537,7 +546,7 @@ def get_getbuffer_code(dtype, code):
 ts = __Pyx_ConsumeWhitespace(ts);
 if (*ts != 0) {
 PyErr_Format(PyExc_ValueError,
-"Expected non-struct buffer data type (rejecting on '%%s')", ts);
+"Expected non-struct buffer data type (expected end, got '%%s')", ts);
 goto fail;
 }
 if (buf->suboffsets == NULL) buf->suboffsets = __Pyx_minusones;
...
@@ -38,7 +38,7 @@ Options:
 -a, --annotate Produce a colorized HTML version of the source.
 --convert-range Convert for loops using range() function to for...from loops.
 --cplus Output a c++ rather than c file.
- -O, --option <name>=<value>[,<name=value,...] Overrides an optimization/code generation option
+ -X, --directive <name>=<value>[,<name=value,...] Overrides a compiler directive
 """
 #The following experimental options are supported only on MacOSX:
 # -C, --compile Compile generated .c file to .o file
@@ -114,11 +114,11 @@ def parse_command_line(args):
 Options.annotate = True
 elif option == "--convert-range":
 Options.convert_range = True
-elif option in ("-O", "--option"):
+elif option in ("-X", "--directive"):
 try:
 options.pragma_overrides = Options.parse_option_list(pop_arg())
 except ValueError, e:
-sys.stderr.write("Error in option string: %s\n" % e.message)
+sys.stderr.write("Error in compiler directive: %s\n" % e.message)
 sys.exit(1)
 else:
 bad_usage()
...
@@ -1369,6 +1369,10 @@ class IndexNode(ExprNode):
 # Note: This might be cleaned up by having IndexNode
 # parsed in a saner way and only construct the tuple if
 # needed.
+# Note that this function must leave IndexNode in a cloneable state.
+# For buffers, self.index is packed out on the initial analysis, and
+# when cloning self.indices is copied.
 self.is_buffer_access = False
 self.base.analyse_types(env)
@@ -1379,7 +1383,12 @@ class IndexNode(ExprNode):
 skip_child_analysis = False
 buffer_access = False
 if self.base.type.is_buffer:
-assert isinstance(self.base, NameNode)
+assert hasattr(self.base, "entry") # Must be a NameNode-like node
+if self.indices:
+indices = self.indices
+else:
+# On cloning, indices is cloned. Otherwise, unpack index into indices
+assert not isinstance(self.index, CloneNode)
 if isinstance(self.index, TupleNode):
 indices = self.index.args
 else:
@@ -1469,7 +1478,7 @@ class IndexNode(ExprNode):
 def generate_subexpr_evaluation_code(self, code):
 self.base.generate_evaluation_code(code)
-if self.index is not None:
+if not self.indices:
 self.index.generate_evaluation_code(code)
 else:
 for i in self.indices:
@@ -1477,7 +1486,7 @@ class IndexNode(ExprNode):
 def generate_subexpr_disposal_code(self, code):
 self.base.generate_disposal_code(code)
-if self.index is not None:
+if not self.indices:
 self.index.generate_disposal_code(code)
 else:
 for i in self.indices:
@@ -1526,9 +1535,8 @@ class IndexNode(ExprNode):
 self.index_unsigned_parameter(),
 code.error_goto(self.pos)))
-def generate_assignment_code(self, rhs, code):
-self.generate_subexpr_evaluation_code(code)
-if self.is_buffer_access:
+def generate_buffer_setitem_code(self, rhs, code, op=""):
+# Used from generate_assignment_code and InPlaceAssignmentNode
 ptrexpr = self.buffer_lookup_code(code)
 if self.buffer_type.dtype.is_pyobject:
 # Must manage refcounts. Decref what is already there
@@ -1542,13 +1550,18 @@ class IndexNode(ExprNode):
 code.putln("Py_DECREF(*%s); Py_INCREF(%s);" % (
 ptr, rhs_code
 ))
-code.putln("*%s = %s;" % (ptr, rhs_code))
+code.putln("*%s %s= %s;" % (ptr, op, rhs_code))
 if rhs.is_temp:
 code.funcstate.release_temp(rhs_code)
 code.funcstate.release_temp(ptr)
 else:
 # Simple case
-code.putln("*%s = %s;" % (ptrexpr, rhs.result_code))
+code.putln("*%s %s= %s;" % (ptrexpr, op, rhs.result_code))
+def generate_assignment_code(self, rhs, code):
+self.generate_subexpr_evaluation_code(code)
+if self.is_buffer_access:
+self.generate_buffer_setitem_code(rhs, code)
 elif self.type.is_pyobject:
 self.generate_setitem_code(rhs.py_result(), code)
 else:
@@ -1582,6 +1595,9 @@ class IndexNode(ExprNode):
 code.putln("%s = %s;" % (temp, index.result_code))
 # Generate buffer access code using these temps
 import Buffer
+assert self.options is not None
+# The above could happen because child_attrs is wrong somewhere so that
+# options are not propagated.
 return Buffer.put_buffer_lookup_code(entry=self.base.entry,
 index_signeds=[i.type.signed for i in self.indices],
 index_cnames=index_temps,
@@ -2564,6 +2580,8 @@ class ListComprehensionNode(SequenceNode):
 subexprs = []
 is_sequence_constructor = 0 # not unpackable
+child_attrs = ["loop", "append"]
 def analyse_types(self, env):
 self.type = list_type
 self.is_temp = 1
@@ -2589,6 +2607,8 @@ class ListComprehensionNode(SequenceNode):
 class ListComprehensionAppendNode(ExprNode):
+# Need to be careful to avoid infinite recursion:
+# target must not be in child_attrs/subexprs
 subexprs = ['expr']
 def analyse_types(self, env):
@@ -3937,6 +3957,7 @@ class CoercionNode(ExprNode):
 def __init__(self, arg):
 self.pos = arg.pos
 self.arg = arg
+self.options = arg.options
 if debug_coercion:
 print("%s Coercing %s" % (self, self.arg))
...
@@ -79,9 +79,7 @@ def make_lexicon():
 escaped_newline = Str("\\\n")
 lineterm = Eol + Opt(Str("\n"))
-comment_start = Str("#")
-comment = comment_start + Rep(AnyBut("\n"))
-option_comment = comment_start + Str("cython:") + Rep(AnyBut("\n"))
+comment = Str("#") + Rep(AnyBut("\n"))
 return Lexicon([
 (name, 'IDENT'),
@@ -98,13 +96,12 @@ def make_lexicon():
 #(stringlit, 'STRING'),
 (beginstring, Method('begin_string_action')),
-(option_comment, Method('option_comment')),
 (comment, IGNORE),
 (spaces, IGNORE),
 (escaped_newline, IGNORE),
 State('INDENT', [
-(option_comment + lineterm, Method('option_comment')),
+(comment + lineterm, Method('commentline')),
 (Opt(spaces) + Opt(comment) + lineterm, IGNORE),
 (indentation, Method('indentation_action')),
 (Eof, Method('eof_action'))
...
@@ -422,7 +422,7 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
 code.putln("#if PY_VERSION_HEX < 0x02060000")
 code.putln(" #define Py_REFCNT(ob) (((PyObject*)(ob))->ob_refcnt)")
 code.putln(" #define Py_TYPE(ob) (((PyObject*)(ob))->ob_type)")
-code.putln(" #define Py_SIZE(ob) ((PyVarObject*)(ob))->ob_size)")
+code.putln(" #define Py_SIZE(ob) (((PyVarObject*)(ob))->ob_size)")
 code.putln(" #define PyVarObject_HEAD_INIT(type, size) \\")
 code.putln(" PyObject_HEAD_INIT(type) size,")
 code.putln(" #define PyType_Modified(t)")
@@ -489,12 +489,17 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
 code.putln(" #define PyMethod_New(func, self, klass) PyInstanceMethod_New(func)")
 code.putln("#endif")
-code.putln("#ifndef __stdcall")
+code.putln("#if !defined(WIN32) && !defined(MS_WINDOWS)")
+code.putln(" #ifndef __stdcall")
 code.putln(" #define __stdcall")
-code.putln("#endif")
-code.putln("#ifndef __cdecl")
+code.putln(" #endif")
+code.putln(" #ifndef __cdecl")
 code.putln(" #define __cdecl")
+code.putln(" #endif")
+code.putln("#else")
+code.putln(" #define _USE_MATH_DEFINES")
 code.putln("#endif")
 self.generate_extern_c_macro_definition(code)
 code.putln("#include <math.h>")
 code.putln("#define %s" % Naming.api_guard_prefix + self.api_name(env))
...
@@ -13,7 +13,7 @@ from PyrexTypes import py_object_type, error_type, CTypedefType, CFuncType
 from Symtab import ModuleScope, LocalScope, GeneratorLocalScope, \
 StructOrUnionScope, PyClassScope, CClassScope
 from Cython.Utils import open_new_file, replace_suffix
-from StringEncoding import EncodedString, escape_byte_string
+from StringEncoding import EncodedString, escape_byte_string, split_docstring
 import Options
 import ControlFlow
@@ -71,10 +71,12 @@ class Node(object):
 # pos (string, int, int) Source file position
 # is_name boolean Is a NameNode
 # is_literal boolean Is a ConstNode
+# options dict Compiler directives in effect for this node
 is_name = 0
 is_literal = 0
 temps = None
+options = None
 # All descandants should set child_attrs to a list of the attributes
 # containing nodes considered "children" in the tree. Each such attribute
@@ -174,10 +176,18 @@ class Node(object):
 self._end_pos = pos
 return pos
-def dump(self, level=0, filter_out=("pos",)):
+def dump(self, level=0, filter_out=("pos",), cutoff=100, encountered=None):
+if cutoff == 0:
+return "<...nesting level cutoff...>"
+if encountered is None:
+encountered = set()
+if id(self) in encountered:
+return "<%s (%d) -- already output>" % (self.__class__.__name__, id(self))
+encountered.add(id(self))
 def dump_child(x, level):
 if isinstance(x, Node):
-return x.dump(level)
+return x.dump(level, filter_out, cutoff-1, encountered)
 elif isinstance(x, list):
 return "[%s]" % ", ".join([dump_child(item, level) for item in x])
 else:
@@ -591,6 +601,7 @@ class CBufferAccessTypeNode(CBaseTypeNode):
 def analyse(self, env):
 base_type = self.base_type_node.analyse(env)
+if base_type.is_error: return base_type
 import Buffer
 options = Buffer.analyse_buffer_options(
@@ -1522,7 +1533,7 @@ class DefNode(FuncDefNode):
 code.putln(
 'static char %s[] = "%s";' % (
 self.entry.doc_cname,
-escape_byte_string(docstr)))
+split_docstring(escape_byte_string(docstr))))
 if with_pymethdef:
 code.put(
 "static PyMethodDef %s = " %
@@ -2140,15 +2151,6 @@ class CClassDefNode(ClassDefNode):
 if self.doc and Options.docstrings:
 scope.doc = embed_position(self.pos, self.doc)
-if has_body and not self.in_pxd:
-# transforms not yet run on pxd files
-from ParseTreeTransforms import AnalyseDeclarationsTransform
-transform = AnalyseDeclarationsTransform(None)
-for entry in scope.var_entries:
-if hasattr(entry, 'needs_property'):
-property = transform.create_Property(entry)
-self.body.stats.append(property)
 if has_body:
 self.body.analyse_declarations(scope)
 if self.in_pxd:
@@ -2517,12 +2519,16 @@ class InPlaceAssignmentNode(AssignmentNode):
 def generate_execution_code(self, code):
 self.rhs.generate_evaluation_code(code)
 self.dup.generate_subexpr_evaluation_code(code)
-self.dup.generate_result_code(code)
+# self.dup.generate_result_code is run only if it is not buffer access
 if self.operator == "**":
 extra = ", Py_None"
 else:
 extra = ""
+import ExprNodes
 if self.lhs.type.is_pyobject:
+if isinstance(self.lhs, ExprNodes.IndexNode) and self.lhs.is_buffer_access:
+error(self.pos, "In-place operators not allowed on object buffers in this release.")
+self.dup.generate_result_code(code)
 code.putln(
 "%s = %s(%s, %s%s); %s" % (
 self.result.result_code,
@@ -2545,6 +2551,10 @@ class InPlaceAssignmentNode(AssignmentNode):
 else:
 error(self.pos, "No C inplace power operator")
 # have to do assignment directly to avoid side-effects
+if isinstance(self.lhs, ExprNodes.IndexNode) and self.lhs.is_buffer_access:
+self.lhs.generate_buffer_setitem_code(self.rhs, code, c_op)
+else:
+self.dup.generate_result_code(code)
 code.putln("%s %s= %s;" % (self.lhs.result_code, c_op, self.rhs.result_code) )
 self.rhs.generate_disposal_code(code)
 if self.dup.is_temp:
@@ -2555,11 +2565,32 @@ class InPlaceAssignmentNode(AssignmentNode):
 self.dup = self.lhs
 self.dup.analyse_types(env)
 if isinstance(self.lhs, ExprNodes.NameNode):
-target_lhs = ExprNodes.NameNode(self.dup.pos, name = self.dup.name, is_temp = self.dup.is_temp, entry = self.dup.entry)
+target_lhs = ExprNodes.NameNode(self.dup.pos,
+name = self.dup.name,
+is_temp = self.dup.is_temp,
+entry = self.dup.entry,
+options = self.dup.options)
 elif isinstance(self.lhs, ExprNodes.AttributeNode):
-target_lhs = ExprNodes.AttributeNode(self.dup.pos, obj = ExprNodes.CloneNode(self.lhs.obj), attribute = self.dup.attribute, is_temp = self.dup.is_temp)
+target_lhs = ExprNodes.AttributeNode(self.dup.pos,
+obj = ExprNodes.CloneNode(self.lhs.obj),
+attribute = self.dup.attribute,
+is_temp = self.dup.is_temp,
+options = self.dup.options)
 elif isinstance(self.lhs, ExprNodes.IndexNode):
-target_lhs = ExprNodes.IndexNode(self.dup.pos, base = ExprNodes.CloneNode(self.dup.base), index = ExprNodes.CloneNode(self.lhs.index), is_temp = self.dup.is_temp)
+if self.lhs.index:
+index = ExprNodes.CloneNode(self.lhs.index)
+else:
+index = None
+if self.lhs.indices:
+indices = [ExprNodes.CloneNode(x) for x in self.lhs.indices]
+else:
+indices = []
+target_lhs = ExprNodes.IndexNode(self.dup.pos,
+base = ExprNodes.CloneNode(self.dup.base),
+index = index,
+indices = indices,
+is_temp = self.dup.is_temp,
+options = self.dup.options)
 self.lhs = target_lhs
 return self.dup
...
@@ -63,6 +63,30 @@ option_defaults = {
 'boundscheck' : True
 }
+def parse_option_value(name, value):
+"""
+Parses value as an option value for the given name and returns
+the interpreted value. None is returned if the option does not exist.
+>>> print parse_option_value('nonexisting', 'asdf asdfd')
+None
+>>> parse_option_value('boundscheck', 'True')
+True
+>>> parse_option_value('boundscheck', 'true')
+Traceback (most recent call last):
+...
+ValueError: boundscheck directive must be set to True or False
+"""
+type = option_types.get(name)
+if not type: return None
+if type is bool:
+if value == "True": return True
+elif value == "False": return False
+else: raise ValueError("%s directive must be set to True or False" % name)
+else:
+assert False
 def parse_option_list(s):
 """
 Parses a comma-seperated list of pragma options. Whitespace
...
@@ -2346,14 +2346,20 @@ def p_code(s, level=None):
 repr(s.sy), repr(s.systring)))
 return body
-def p_option_comments(s):
+COMPILER_DIRECTIVE_COMMENT_RE = re.compile(r"^#\s*([a-z]+)\s*=(.*)$")
+def p_compiler_directive_comments(s):
 result = {}
-while s.sy == 'option_comment':
-opts = s.systring[len("#cython:"):]
+while s.sy == 'commentline':
+m = COMPILER_DIRECTIVE_COMMENT_RE.match(s.systring)
+if m:
+name = m.group(1)
 try:
-result.update(Options.parse_option_list(opts))
+value = Options.parse_option_value(str(name), str(m.group(2).strip()))
 except ValueError, e:
-s.error(e.message, fatal=False)
+s.error(e.args[0], fatal=False)
+if value is not None: # can be False!
+result[name] = value
 s.next()
 return result
@@ -2367,8 +2373,8 @@ def p_module(s, pxd, full_module_name):
 else:
 level = 'module'
-option_comments = p_option_comments(s)
-s.parse_option_comments = False
+option_comments = p_compiler_directive_comments(s)
+s.parse_comments = False
 body = p_statement_list(s, Ctx(level = level), first_statement = 1)
 if s.sy != 'EOF':
 s.error("Syntax error in statement [%s,%s]" % (
...
@@ -306,7 +306,7 @@ class PyrexScanner(Scanner):
 self.compile_time_env = initial_compile_time_env()
 self.compile_time_eval = 1
 self.compile_time_expr = 0
-self.parse_option_comments = True
+self.parse_comments = True
 self.source_encoding = source_encoding
 self.trace = trace_scanner
 self.indentation_stack = [0]
@@ -316,12 +316,9 @@ class PyrexScanner(Scanner):
 self.sy = ''
 self.next()
-def option_comment(self, text):
-# #cython:-comments should be treated as literals until
-# parse_option_comments is set to False, at which point
-# they should be ignored.
-if self.parse_option_comments:
-self.produce('option_comment', text)
+def commentline(self, text):
+if self.parse_comments:
+self.produce('commentline', text)
 def current_level(self):
 return self.indentation_stack[-1]
...
@@ -142,3 +142,8 @@ def escape_byte_string(s):
 else:
 append(c)
 return ''.join(l)
+def split_docstring(s):
+if len(s) < 2047:
+return s
+return '\\n\"\"'.join(s.split(r'\n'))
...
@@ -3,79 +3,107 @@ cdef extern from "Python.h":
 cdef extern from "numpy/arrayobject.h":
 ctypedef Py_intptr_t npy_intp
-ctypedef struct PyArray_Descr:
-int elsize
+cdef enum:
+NPY_BOOL,
+NPY_BYTE, NPY_UBYTE,
+NPY_SHORT, NPY_USHORT,
+NPY_INT, NPY_UINT,
+NPY_LONG, NPY_ULONG,
+NPY_LONGLONG, NPY_ULONGLONG,
+NPY_FLOAT, NPY_DOUBLE, NPY_LONGDOUBLE,
+NPY_CFLOAT, NPY_CDOUBLE, NPY_CLONGDOUBLE,
+NPY_OBJECT,
+NPY_STRING, NPY_UNICODE,
+NPY_VOID,
+NPY_NTYPES,
+NPY_NOTYPE,
+NPY_CHAR,
+NPY_USERDEF
 ctypedef class numpy.ndarray [object PyArrayObject]:
+cdef __cythonbufferdefaults__ = {"mode": "strided"}
 cdef:
 char *data
-int nd
-npy_intp *dimensions
+int ndim "nd"
+npy_intp *shape "dimensions"
 npy_intp *strides
-object base
-# descr not implemented yet here...
-int flags
-int itemsize
-object weakreflist
-PyArray_Descr* descr
+# Note: This syntax (function definition in pxd files) is an
+# experimental exception made for __getbuffer__ and __releasebuffer__
+# -- the details of this may change.
 def __getbuffer__(ndarray self, Py_buffer* info, int flags):
+# This implementation of getbuffer is geared towards Cython
+# requirements, and does not yet fullfill the PEP (specifically,
+# Cython always requests and we always provide strided access,
+# so the flags are not even checked).
 if sizeof(npy_intp) != sizeof(Py_ssize_t):
 raise RuntimeError("Py_intptr_t and Py_ssize_t differs in size, numpy.pxd does not support this")
-cdef int typenum = PyArray_TYPE(self)
-info.buf = <void*>self.data
-info.ndim = 2
-info.strides = <Py_ssize_t*>self.strides
-info.shape = <Py_ssize_t*>self.dimensions
+info.buf = PyArray_DATA(self)
+info.ndim = PyArray_NDIM(self)
+info.strides = <Py_ssize_t*>PyArray_STRIDES(self)
+info.shape = <Py_ssize_t*>PyArray_DIMS(self)
 info.suboffsets = NULL
-info.format = "i"
-info.itemsize = self.descr.elsize
+info.itemsize = PyArray_ITEMSIZE(self)
 info.readonly = not PyArray_ISWRITEABLE(self)
-# PS TODO TODO!: Py_ssize_t vs Py_intptr_t
-## PyArrayObject *arr = (PyArrayObject*)obj;
-## PyArray_Descr *type = (PyArray_Descr*)arr->descr;
-## int typenum = PyArray_TYPE(obj);
-## if (!PyTypeNum_ISNUMBER(typenum)) {
-## PyErr_Format(PyExc_TypeError, "Only numeric NumPy types currently supported.");
-## return -1;
-## }
-## /*
-## NumPy format codes doesn't completely match buffer codes;
-## seems safest to retranslate.
-## 01234567890123456789012345*/
-## const char* base_codes = "?bBhHiIlLqQfdgfdgO";
-## char* format = (char*)malloc(4);
-## char* fp = format;
-## *fp++ = type->byteorder;
-## if (PyTypeNum_ISCOMPLEX(typenum)) *fp++ = 'Z';
-## *fp++ = base_codes[typenum];
-## *fp = 0;
-## view->buf = arr->data;
-## view->readonly = !PyArray_ISWRITEABLE(obj);
-## view->ndim = PyArray_NDIM(arr);
-## view->strides = PyArray_STRIDES(arr);
-## view->shape = PyArray_DIMS(arr);
-## view->suboffsets = NULL;
-## view->format = format;
-## view->itemsize = type->elsize;
-## view->internal = 0;
-## return 0;
-## print "hello" + str(43) + "asdf" + "three"
-## pass
+# Formats that are not tested and working in Cython are not
+# made available from this pxd file yet.
+cdef int t = PyArray_TYPE(self)
+cdef char* f = NULL
+if t == NPY_BYTE: f = "b"
+elif t == NPY_UBYTE: f = "B"
+elif t == NPY_SHORT: f = "h"
+elif t == NPY_USHORT: f = "H"
+elif t == NPY_INT: f = "i"
+elif t == NPY_UINT: f = "I"
+elif t == NPY_LONG: f = "l"
+elif t == NPY_ULONG: f = "L"
+elif t == NPY_LONGLONG: f = "q"
+elif t == NPY_ULONGLONG: f = "Q"
+elif t == NPY_FLOAT: f = "f"
+elif t == NPY_DOUBLE: f = "d"
+elif t == NPY_LONGDOUBLE: f = "g"
+elif t == NPY_OBJECT: f = "O"
+if f == NULL:
+raise ValueError("only objects, int and float dtypes supported for ndarray buffer access so far (dtype is %d)" % t)
+info.format = f
+cdef void* PyArray_DATA(ndarray arr)
 cdef int PyArray_TYPE(ndarray arr)
+cdef int PyArray_NDIM(ndarray arr)
 cdef int PyArray_ISWRITEABLE(ndarray arr)
+cdef npy_intp PyArray_STRIDES(ndarray arr)
+cdef npy_intp PyArray_DIMS(ndarray arr)
+cdef Py_ssize_t PyArray_ITEMSIZE(ndarray arr)
+ctypedef signed int npy_byte
+ctypedef signed int npy_short
+ctypedef signed int npy_int
+ctypedef signed int npy_long
+ctypedef signed int npy_longlong
+ctypedef unsigned int npy_ubyte
+ctypedef unsigned int npy_ushort
+ctypedef unsigned int npy_uint
+ctypedef unsigned int npy_ulong
+ctypedef unsigned int npy_ulonglong
+ctypedef float npy_float
+ctypedef float npy_double
+ctypedef float npy_longdouble
+ctypedef signed int npy_int8
+ctypedef signed int npy_int16
+ctypedef signed int npy_int32
+ctypedef signed int npy_int64
+ctypedef signed int npy_int96
+ctypedef signed int npy_int128
 ctypedef unsigned int npy_uint8
 ctypedef unsigned int npy_uint16
@@ -83,7 +111,6 @@ cdef extern from "numpy/arrayobject.h":
 ctypedef unsigned int npy_uint64
 ctypedef unsigned int npy_uint96
 ctypedef unsigned int npy_uint128
-ctypedef signed int npy_int64
 ctypedef float npy_float32
 ctypedef float npy_float64
@@ -91,5 +118,40 @@ cdef extern from "numpy/arrayobject.h":
 ctypedef float npy_float96
 ctypedef float npy_float128
+# Typedefs that matches the runtime dtype objects in
+# the numpy module.
+# The ones that are commented out needs an IFDEF function
+# in Cython to enable them only on the right systems.
+ctypedef npy_int8 int8_t
+ctypedef npy_int16 int16_t
+ctypedef npy_int32 int32_t
+ctypedef npy_int64 int64_t
+#ctypedef npy_int96 int96_t
+#ctypedef npy_int128 int128_t
+ctypedef npy_uint8 uint8_t
+ctypedef npy_uint16 uint16_t
+ctypedef npy_uint32 uint32_t
+ctypedef npy_uint64 uint64_t
+#ctypedef npy_uint96 uint96_t
+#ctypedef npy_uint128 uint128_t
+ctypedef npy_float32 float32_t
+ctypedef npy_float64 float64_t
+#ctypedef npy_float80 float80_t
+#ctypedef npy_float128 float128_t
+# The int types are mapped a bit surprising --
+# numpy.int corresponds to 'l' and numpy.long to 'q'
+ctypedef npy_long int_t
+ctypedef npy_longlong long_t
+ctypedef npy_ulong uint_t
+ctypedef npy_ulonglong ulong_t
+ctypedef npy_double float_t
+ctypedef npy_double double_t
+ctypedef npy_longdouble longdouble_t
+ctypedef npy_int64 int64
...
Metadata-Version: 1.0
Name: pyximport
Version: 1.0
Summary: Hooks to build and run Pyrex files as if they were simple Python files
Home-page: http://www.prescod.net/pyximport
Author: Paul Prescod
Author-email: paul@prescod.net
License: Python
Description: UNKNOWN
Keywords: pyrex import hook
Platform: UNKNOWN
== Pyximport ==
Download: pyx-import-1.0.tar.gz
<http://www.prescod.net/pyximport/pyximport-1.0.tar.gz>
Pyrex is a compiler. Therefore it is natural that people tend to go
through an edit/compile/test cycle with Pyrex modules. But my personal
opinion is that one of the deep insights in Python's implementation is
that a language can be compiled (Python modules are compiled to .pyc files)
and hide that compilation process from the end-user so that they
do not have to worry about it. Pyximport does this for Pyrex modules.
For instance if you write a Pyrex module called "foo.pyx", with
Pyximport you can import it in a regular Python module like this:
import pyximport; pyximport.install()
import foo
Doing so will result in the compilation of foo.pyx (with appropriate
exceptions if it has an error in it).
If you would always like to import Pyrex files without building them
specially, you can also add the first line above to your sitecustomize.py.
That will install the hook every time you run Python. Then you can use
Pyrex modules just with simple import statements. I like to test my
Pyrex modules like this:
python -c "import foo"
== Dependency Handling ==
In Pyximport 1.1 it is possible to declare that your module depends on
multiple files, (likely ".h" and ".pxd" files). If your Pyrex module is
named "foo" and thus has the filename "foo.pyx" then you should make
another file in the same directory called "foo.pyxdep". The
"modname.pyxdep" file can be a list of filenames or "globs" (like
"*.pxd" or "include/*.h"). Each filename or glob must be on a separate
line. Pyximport will check the file date for each of those files before
deciding whether to rebuild the module. In order to keep track of the
fact that the dependency has been handled, Pyximport updates the
modification time of your ".pyx" source file. Future versions may do
something more sophisticated like informing distutils of the
dependencies directly.
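
To make that concrete, suppose a hypothetical module foo.pyx depends on
foo.h and on the .pxd files next to it (these file names are made up for
the example). Its foo.pyxdep then lists one filename or glob per line, and
could be written out the same way the test suite does:

    # Sketch only: create foo.pyxdep declaring foo.pyx's dependencies.
    # Each line is a filename or glob that pyximport checks before rebuilding.
    depfile = open("foo.pyxdep", "w")
    depfile.write("foo.h\n")
    depfile.write("*.pxd\n")
    depfile.close()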
== Limitations ==
Pyximport does not give you any control over how your Pyrex file is
compiled. Usually the defaults are fine. You might run into problems if
you wanted to write your program in half-C, half-Pyrex and build them
into a single library. Pyximport 1.2 will probably do this.
Pyximport does not hide the Distutils/GCC warnings and errors generated
by the import process. Arguably this will give you better feedback if
something went wrong and why. And if nothing went wrong it will give you
the warm fuzzy that pyximport really did rebuild your module as it was
supposed to.
== For further thought and discussion ==
I don't think that Python's "reload" will do anything for changed .SOs
on some (all?) platforms. It would require some (easy) experimentation
that I haven't gotten around to. But reload is rarely used in
applications outside of the Python interactive interpreter and certainly
not used much for C extension modules. Info about Windows
<http://mail.python.org/pipermail/python-list/2001-July/053798.html>
"setup.py install" does not modify sitecustomize.py for you. Should it?
Modifying Python's "standard interpreter" behaviour may be more than
most people expect of a package they install.
Pyximport puts your ".c" file beside your ".pyx" file (analogous to
".pyc" beside ".py"). But it puts the platform-specific binary in a
build directory as per normal for Distutils. If I could wave a magic
wand and get Pyrex or distutils or whoever to put the build directory I
might do it but not necessarily: having it at the top level is VERY
HELPFUL for debugging Pyrex problems.
from distutils.core import setup
import sys, os
from StringIO import StringIO
if "sdist" in sys.argv:
try:
os.remove("MANIFEST")
except (IOError, OSError):
pass
import html2text
out = StringIO()
html2text.convert_files(open("index.html"), out)
out.write("\n\n")
open("README", "w").write(out.getvalue())
setup(
name = "pyximport",
fullname = "Pyrex Import Hooks",
version = "1.0",
description = "Hooks to build and run Pyrex files as if they were simple Python files",
author = "Paul Prescod",
author_email = "paul@prescod.net",
url = "http://www.prescod.net/pyximport",
license = "Python",
keywords = "pyrex import hook",
# scripts = ["pyxrun"],
# data_files = [("examples/multi_file_extension",
# ["README", "ccode.c", "test.pyx", "test.pyxbld"]),
# ("examples/dependencies",
# ["README", "test.pyx", "test.pyxdep", "header.h",
# "header2.h", "header3.h", "header4.h"])
# ],
py_modules = ["pyximport", "pyxbuild"])
"""Build a Pyrex file from .pyx source to .so loadable module using
the installed distutils infrastructure. Call:
out_fname = pyx_to_dll("foo.pyx")
"""
import os, md5
import distutils
from distutils.dist import Distribution
from distutils.errors import DistutilsArgError, DistutilsError, CCompilerError
from distutils.extension import Extension
from distutils.util import grok_environment_error
from Cython.Distutils import build_ext
import shutil
DEBUG = 0
def pyx_to_dll(filename, ext = None, force_rebuild = 0):
"""Compile a PYX file to a DLL and return the name of the generated .so
or .dll ."""
assert os.path.exists(filename)
path, name = os.path.split(filename)
if not ext:
modname, extension = os.path.splitext(name)
assert extension == ".pyx", extension
ext = Extension(name=modname, sources=[filename])
if DEBUG:
quiet = "--verbose"
else:
quiet = "--quiet"
args = [quiet, "build_ext"]
if force_rebuild:
args.append("--force")
dist = Distribution({"script_name": None, "script_args": args})
if not dist.ext_modules:
dist.ext_modules = []
dist.ext_modules.append(ext)
dist.cmdclass = {'build_ext': build_ext}
build = dist.get_command_obj('build')
build.build_base = os.path.join(path, "_pyxbld")
try:
ok = dist.parse_command_line()
except DistutilsArgError, msg:
raise
if DEBUG:
print "options (after parsing command line):"
dist.dump_option_dicts()
assert ok
try:
dist.run_commands()
return dist.get_command_obj("build_ext").get_outputs()[0]
except KeyboardInterrupt:
raise SystemExit, "interrupted"
except (IOError, os.error), exc:
error = grok_environment_error(exc)
if DEBUG:
sys.stderr.write(error + "\n")
raise
else:
raise SystemExit, error
except (DistutilsError,
CCompilerError), msg:
if DEBUG:
raise
else:
raise SystemExit, "error: " + str(msg)
if __name__=="__main__":
pyx_to_dll("dummy.pyx")
import test
"""
Import hooks; when installed (with the install()) function, these hooks
allow importing .pyx files as if they were Python modules.
If you want the hook installed every time you run Python
you can add it to your Python version by adding these lines to
sitecustomize.py (which you can create from scratch in site-packages
if it doesn't exist there, or somewhere else on your Python path)
import pyximport
pyximport.install()
For instance on the Mac with Python 2.3 built from CVS, you could
create sitecustomize.py with only those two lines at
/usr/local/lib/python2.3/site-packages/sitecustomize.py .
Running this module as a top-level script will run a test and then print
the documentation.
This code was modeled on Quixote's ptl_import.
"""
import sys, os, shutil
import imp, ihooks, glob, md5
import __builtin__
import pyxbuild
from distutils.dep_util import newer
from distutils.extension import Extension
mod_name = "pyximport"
assert sys.hexversion >= 0x20000b1, "need Python 2.0b1 or later"
PYX_FILE_TYPE = 1011
PYX_EXT = ".pyx"
PYXDEP_EXT = ".pyxdep"
PYXBLD_EXT = ".pyxbld"
_test_files = []
class PyxHooks (ihooks.Hooks):
"""Import hook that declares our suffixes. Let install() install it."""
def get_suffixes (self):
# add our suffixes
return imp.get_suffixes() + [(PYX_EXT, "r", PYX_FILE_TYPE)]
# Performance problem: for every PYX file that is imported, we will
# invoke the whole distutils infrastructure even if the module is
# already built. It might be more efficient to only do it when the
# mod time of the .pyx is newer than the mod time of the .so but
# the question is how to get distutils to tell me the name of the .so
# before it builds it. Maybe it is easy...but maybe the performance
# issue isn't real.
def _load_pyrex(name, filename):
"Load a pyrex file given a name and filename."
def get_distutils_extension(modname, pyxfilename):
extra = "_" + md5.md5(open(pyxfilename).read()).hexdigest()
# modname = modname + extra
extension_mod = handle_special_build(modname, pyxfilename)
if not extension_mod:
extension_mod = Extension(name = modname, sources=[pyxfilename])
return extension_mod
def handle_special_build(modname, pyxfilename):
special_build = os.path.splitext(pyxfilename)[0] + PYXBLD_EXT
if not os.path.exists(special_build):
ext = None
else:
globls = {}
locs = {}
# execfile(special_build, globls, locs)
# ext = locs["make_ext"](modname, pyxfilename)
mod = imp.load_source("XXXX", special_build, open(special_build))
ext = mod.make_ext(modname, pyxfilename)
assert ext and ext.sources, ("make_ext in %s did not return Extension"
% special_build)
ext.sources = [os.path.join(os.path.dirname(special_build), source)
for source in ext.sources]
return ext
def handle_dependencies(pyxfilename):
dependfile = os.path.splitext(pyxfilename)[0] + PYXDEP_EXT
# by default let distutils decide whether to rebuild on its own
# (it has a better idea of what the output file will be)
# but we know more about dependencies so force a rebuild if
# some of the dependencies are newer than the pyxfile.
if os.path.exists(dependfile):
depends = open(dependfile).readlines()
depends = [depend.strip() for depend in depends]
# gather dependencies in the "files" variable
# the dependency file is itself a dependency
files = [dependfile]
for depend in depends:
fullpath = os.path.join(os.path.dirname(dependfile),
depend)
files.extend(glob.glob(fullpath))
# only for unit testing to see we did the right thing
_test_files[:] = []
# if any file that the pyxfile depends upon is newer than
# the pyx file, 'touch' the pyx file so that distutils will
# be tricked into rebuilding it.
for file in files:
if newer(file, pyxfilename):
print "Rebuilding because of ", file
filetime = os.path.getmtime(file)
os.utime(pyxfilename, (filetime, filetime))
_test_files.append(file)
def build_module(name, pyxfilename):
assert os.path.exists(pyxfilename), (
"Path does not exist: %s" % pyxfilename)
handle_dependencies(pyxfilename)
extension_mod = get_distutils_extension(name, pyxfilename)
so_path = pyxbuild.pyx_to_dll(pyxfilename, extension_mod)
assert os.path.exists(so_path), "Cannot find: %s" % so_path
junkpath = os.path.join(os.path.dirname(so_path), name+"_*")
junkstuff = glob.glob(junkpath)
for path in junkstuff:
if path!=so_path:
try:
os.remove(path)
except IOError:
print "Couldn't remove ", path
return so_path
def load_module(name, pyxfilename):
so_path = build_module(name, pyxfilename)
mod = imp.load_dynamic(name, so_path)
assert mod.__file__ == so_path, (mod.__file__, so_path)
return mod
class PyxLoader (ihooks.ModuleLoader):
"""Load a module. It checks whether a file is a .pyx and returns it.
Otherwise it lets the ihooks base class handle it. Let install()
install it."""
def load_module (self, name, stuff):
# If it's a Pyrex file, load it specially.
if stuff[2][2] == PYX_FILE_TYPE:
file, pyxfilename, info = stuff
(suff, mode, type) = info
if file:
file.close()
return load_module(name, pyxfilename)
else:
# Otherwise, use the default handler for loading
return ihooks.ModuleLoader.load_module( self, name, stuff)
try:
import cimport
except ImportError:
cimport = None
class cModuleImporter(ihooks.ModuleImporter):
"""This was just left in from the Quixote implementation. I think
it allows a performance enhancement if you have the cimport module
from Quixote. Let install() install it."""
def __init__(self, loader=None):
self.loader = loader or ihooks.ModuleLoader()
cimport.set_loader(self.find_import_module)
def find_import_module(self, fullname, subname, path):
stuff = self.loader.find_module(subname, path)
if not stuff:
return None
return self.loader.load_module(fullname, stuff)
def install(self):
self.save_import_module = __builtin__.__import__
self.save_reload = __builtin__.reload
if not hasattr(__builtin__, 'unload'):
__builtin__.unload = None
self.save_unload = __builtin__.unload
__builtin__.__import__ = cimport.import_module
__builtin__.reload = cimport.reload_module
__builtin__.unload = self.unload
_installed = 0
def install():
"""Main entry point. call this to install the import hook in your
for a single Python process. If you want it to be installed whenever
you use Python, add it to your sitecustomize (as described above).
"""
global _installed
if not _installed:
hooks = PyxHooks()
loader = PyxLoader(hooks)
if cimport is not None:
importer = cModuleImporter(loader)
else:
importer = ihooks.ModuleImporter(loader)
ihooks.install(importer)
_installed = 1
def on_remove_file_error(func, path, excinfo):
print "Sorry! Could not remove a temp file:", path
print "Extra information."
print func, excinfo
print "You may want to delete this yourself when you get a chance."
def show_docs():
import __main__
__main__.__name__ = mod_name
for name in dir(__main__):
item = getattr(__main__, name)
try:
setattr(item, "__module__", mod_name)
except (AttributeError, TypeError):
pass
help(__main__)
if __name__ == '__main__':
show_docs()
import pyximport; pyximport.install()
import os, sys
import time, shutil
import tempfile
def make_tempdir():
tempdir = os.path.join(tempfile.gettempdir(), "pyrex_temp")
if os.path.exists(tempdir):
remove_tempdir(tempdir)
os.mkdir(tempdir)
return tempdir
def remove_tempdir(tempdir):
shutil.rmtree(tempdir, 0, on_remove_file_error)
def on_remove_file_error(func, path, excinfo):
print "Sorry! Could not remove a temp file:", path
print "Extra information."
print func, excinfo
print "You may want to delete this yourself when you get a chance."
def test():
tempdir = make_tempdir()
sys.path.append(tempdir)
filename = os.path.join(tempdir, "dummy.pyx")
open(filename, "w").write("print 'Hello world from the Pyrex install hook'")
import dummy
reload(dummy)
depend_filename = os.path.join(tempdir, "dummy.pyxdep")
depend_file = open(depend_filename, "w")
depend_file.write("*.txt\nfoo.bar")
depend_file.close()
build_filename = os.path.join(tempdir, "dummy.pyxbld")
build_file = open(build_filename, "w")
build_file.write("""
from distutils.extension import Extension
def make_ext(name, filename):
return Extension(name=name, sources=[filename])
""")
build_file.close()
open(os.path.join(tempdir, "foo.bar"), "w").write(" ")
open(os.path.join(tempdir, "1.txt"), "w").write(" ")
open(os.path.join(tempdir, "abc.txt"), "w").write(" ")
reload(dummy)
assert len(pyximport._test_files)==1, pyximport._test_files
reload(dummy)
time.sleep(1) # sleep a second to get safer mtimes
open(os.path.join(tempdir, "abc.txt"), "w").write(" ")
print "Here goes the reolad"
reload(dummy)
assert len(pyximport._test_files) == 1, pyximport._test_files
reload(dummy)
assert len(pyximport._test_files) ==0, pyximport._test_files
remove_tempdir(tempdir)
if __name__=="__main__":
test()
# reload seems to work for Python 2.3 but not 2.2.
import time, os, sys
import test_pyximport
# debugging the 2.2 problem
if 1:
from distutils import sysconfig
try:
sysconfig.set_python_build()
except AttributeError:
pass
import pyxbuild
print pyxbuild.distutils.sysconfig == sysconfig
def test():
tempdir = test_pyximport.make_tempdir()
sys.path.append(tempdir)
hello_file = os.path.join(tempdir, "hello.pyx")
open(hello_file, "w").write("x = 1; print x; before = 'before'\n")
import hello
assert hello.x == 1
time.sleep(1) # sleep to make sure that new "hello.pyx" has later
# timestamp than object file.
open(hello_file, "w").write("x = 2; print x; after = 'after'\n")
reload(hello)
assert hello.x == 2, "Reload should work on Python 2.3 but not 2.2"
test_pyximport.remove_tempdir(tempdir)
if __name__=="__main__":
test()
...
@@ -101,3 +101,8 @@ setup(
 **setup_args
 )
+# Now install pyximport
+os.chdir('pyximport')
+os.spawnlp(os.P_WAIT, 'python', 'python', 'Setup.py', *sys.argv[1:])
+os.chdir('..')
...
-#cython: boundscheck=False
+# boundscheck = False
+# ignoreme = OK
+# This testcase is most useful if you inspect the generated C file
 print 3
 cimport python_dict as asadf, python_exc, cython as cy
+def e(object[int, ndim=2] buf):
+print buf[3, 2] # no bc
 @cy.boundscheck(False)
-def f(object[int, 2] buf):
-print buf[3, 2]
+def f(object[int, ndim=2] buf):
+print buf[3, 2] # no bc
 @cy.boundscheck(True)
-def g(object[int, 2] buf):
-# Please leave this comment,
-#cython: this should have no special meaning
+def g(object[int, ndim=2] buf):
+# The below line should have no meaning
+# boundscheck = False
 # even if the above line doesn't follow indentation.
-print buf[3, 2]
+print buf[3, 2] # bc
-def h(object[int, 2] buf):
-print buf[3, 2]
+def h(object[int, ndim=2] buf):
+print buf[3, 2] # no bc
 with cy.boundscheck(True):
-print buf[3,2]
+print buf[3,2] # bc
 from cython cimport boundscheck as bc
 def i(object[int] buf):
 with bc(True):
-print buf[3]
+print buf[3] # bs
...
-#cython: nonexistant
-#cython: some=9
-# The one below should NOT raise an error
-#cython: boundscheck=True
-# However this one should
-#cython: boundscheck=sadf
-print 3
-#cython: boundscheck=True
-_ERRORS = u"""
-2:0: Expected "=" in option "nonexistant"
-3:0: Unknown option: "some"
-10:0: Must pass a boolean value for option "boundscheck"
-"""
+# nonexistant = True
+# boundscheck = true
+# boundscheck = 9
+print 3
+# Options should not be interpreted any longer:
+# boundscheck = true
+_ERRORS = u"""
+3:0: boundscheck directive must be set to True or False
+4:0: boundscheck directive must be set to True or False
+"""
...
 /* See bufaccess.pyx */
-typedef short htypedef_short;
+typedef short td_h_short;
+typedef double td_h_double;
+typedef unsigned short td_h_ushort;
...@@ -41,13 +41,13 @@ def nousage(): ...@@ -41,13 +41,13 @@ def nousage():
""" """
The challenge here is just compilation. The challenge here is just compilation.
""" """
cdef object[int, 2] buf cdef object[int, ndim=2] buf
def printbuf(): def printbuf():
""" """
Just compilation. Just compilation.
""" """
cdef object[int, 2] buf cdef object[int, ndim=2] buf
print buf print buf
@testcase @testcase
...@@ -331,10 +331,10 @@ def explicitly_release_buffer(): ...@@ -331,10 +331,10 @@ def explicitly_release_buffer():
print "After release" print "After release"
# #
# Index bounds checking # Getting items and index bounds checking
# #
@testcase @testcase
def get_int_2d(object[int, 2] buf, int i, int j): def get_int_2d(object[int, ndim=2] buf, int i, int j):
""" """
>>> get_int_2d(C, 1, 1) >>> get_int_2d(C, 1, 1)
acquired C acquired C
...@@ -368,7 +368,7 @@ def get_int_2d(object[int, 2] buf, int i, int j): ...@@ -368,7 +368,7 @@ def get_int_2d(object[int, 2] buf, int i, int j):
return buf[i, j] return buf[i, j]
@testcase @testcase
def get_int_2d_uintindex(object[int, 2] buf, unsigned int i, unsigned int j): def get_int_2d_uintindex(object[int, ndim=2] buf, unsigned int i, unsigned int j):
""" """
Unsigned indexing: Unsigned indexing:
>>> get_int_2d_uintindex(C, 0, 0) >>> get_int_2d_uintindex(C, 0, 0)
...@@ -385,7 +385,7 @@ def get_int_2d_uintindex(object[int, 2] buf, unsigned int i, unsigned int j): ...@@ -385,7 +385,7 @@ def get_int_2d_uintindex(object[int, 2] buf, unsigned int i, unsigned int j):
return buf[i, j] return buf[i, j]
@testcase @testcase
def set_int_2d(object[int, 2] buf, int i, int j, int value): def set_int_2d(object[int, ndim=2] buf, int i, int j, int value):
""" """
Uses get_int_2d to read back the value afterwards. For pure Uses get_int_2d to read back the value afterwards. For pure
unit test, one should support reading in MockBuffer instead. unit test, one should support reading in MockBuffer instead.
...@@ -436,6 +436,15 @@ def set_int_2d(object[int, 2] buf, int i, int j, int value): ...@@ -436,6 +436,15 @@ def set_int_2d(object[int, 2] buf, int i, int j, int value):
""" """
buf[i, j] = value buf[i, j] = value
@testcase
def list_comprehension(object[int] buf, len):
"""
>>> list_comprehension(IntMockBuffer(None, [1,2,3]), 3)
1|2|3
"""
cdef int i
print "|".join([str(buf[i]) for i in range(len)])
# #
# Buffer type mismatch examples. Varying the type and access # Buffer type mismatch examples. Varying the type and access
# method simultaneously, the odds of an interaction is virtually # method simultaneously, the odds of an interaction is virtually
...@@ -447,7 +456,7 @@ def fmtst1(buf): ...@@ -447,7 +456,7 @@ def fmtst1(buf):
>>> fmtst1(IntMockBuffer("A", range(3))) >>> fmtst1(IntMockBuffer("A", range(3)))
Traceback (most recent call last): Traceback (most recent call last):
... ...
ValueError: Buffer datatype mismatch (rejecting on 'i') ValueError: Buffer datatype mismatch (expected 'f', got 'i')
""" """
cdef object[float] a = buf cdef object[float] a = buf
...@@ -457,11 +466,11 @@ def fmtst2(object[int] buf): ...@@ -457,11 +466,11 @@ def fmtst2(object[int] buf):
>>> fmtst2(FloatMockBuffer("A", range(3))) >>> fmtst2(FloatMockBuffer("A", range(3)))
Traceback (most recent call last): Traceback (most recent call last):
... ...
ValueError: Buffer datatype mismatch (rejecting on 'f') ValueError: Buffer datatype mismatch (expected 'i', got 'f')
""" """
@testcase @testcase
def ndim1(object[int, 2] buf): def ndim1(object[int, ndim=2] buf):
""" """
>>> ndim1(IntMockBuffer("A", range(3))) >>> ndim1(IntMockBuffer("A", range(3)))
Traceback (most recent call last): Traceback (most recent call last):
...@@ -483,7 +492,7 @@ def readonly(obj): ...@@ -483,7 +492,7 @@ def readonly(obj):
>>> [str(x) for x in R.recieved_flags] # Works in both py2 and py3 >>> [str(x) for x in R.recieved_flags] # Works in both py2 and py3
['FORMAT', 'INDIRECT', 'ND', 'STRIDES'] ['FORMAT', 'INDIRECT', 'ND', 'STRIDES']
""" """
cdef object[unsigned short int, 3] buf = obj cdef object[unsigned short int, ndim=3] buf = obj
print buf[2, 2, 1] print buf[2, 2, 1]
@testcase @testcase
...@@ -496,11 +505,11 @@ def writable(obj): ...@@ -496,11 +505,11 @@ def writable(obj):
>>> [str(x) for x in R.recieved_flags] # Py2/3 >>> [str(x) for x in R.recieved_flags] # Py2/3
['FORMAT', 'INDIRECT', 'ND', 'STRIDES', 'WRITABLE'] ['FORMAT', 'INDIRECT', 'ND', 'STRIDES', 'WRITABLE']
""" """
cdef object[unsigned short int, 3] buf = obj cdef object[unsigned short int, ndim=3] buf = obj
buf[2, 2, 1] = 23 buf[2, 2, 1] = 23
@testcase @testcase
def strided(object[int, 1, 'strided'] buf): def strided(object[int, ndim=1, mode='strided'] buf):
""" """
>>> A = IntMockBuffer("A", range(4)) >>> A = IntMockBuffer("A", range(4))
>>> strided(A) >>> strided(A)
...@@ -596,12 +605,23 @@ TODO ...@@ -596,12 +605,23 @@ TODO
uc[0] = <int>3.14 uc[0] = <int>3.14
print uc[0] print uc[0]
cdef char* ch = "asfd"
cdef object[object] objbuf
objbuf[3] = ch
# #
# Testing that accessing data using various types of buffer access # Testing that accessing data using various types of buffer access
# all works. # all works.
# #
def printbuf_int(object[int] buf, shape):
# Utility func
cdef int i
for i in range(shape[0]):
print buf[i],
print 'END'
@testcase @testcase
def printbuf_int_2d(o, shape): def printbuf_int_2d(o, shape):
...@@ -628,7 +648,7 @@ def printbuf_int_2d(o, shape): ...@@ -628,7 +648,7 @@ def printbuf_int_2d(o, shape):
released A released A
""" """
# should make shape builtin # should make shape builtin
cdef object[int, 2] buf cdef object[int, ndim=2] buf
buf = o buf = o
cdef int i, j cdef int i, j
for i in range(shape[0]): for i in range(shape[0]):
...@@ -654,22 +674,43 @@ def printbuf_float(o, shape): ...@@ -654,22 +674,43 @@ def printbuf_float(o, shape):
print "END" print "END"
#
# Test assignments
#
@testcase
def inplace_operators(object[int] buf):
"""
>>> buf = IntMockBuffer(None, [2, 2])
>>> inplace_operators(buf)
>>> printbuf_int(buf, (2,))
0 3 END
"""
cdef int j = 0
buf[1] += 1
buf[j] *= 2
buf[0] -= 4
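The same arithmetic on an ordinary Python list reproduces the "0 3" the doctest expects (a sketch for illustration, no typed buffer involved):

    buf = [2, 2]
    j = 0
    buf[1] += 1    # [2, 3]
    buf[j] *= 2    # [4, 3]
    buf[0] -= 4    # [0, 3]
    print(" ".join(str(x) for x in buf) + " END")   # 0 3 END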
# #
# Typedefs # Typedefs
# #
ctypedef int cytypedef_int # Test three layers of typedefs going through a h file for plain int, and
# simply a header file typedef for floats and unsigned.
ctypedef int td_cy_int
cdef extern from "bufaccess.h": cdef extern from "bufaccess.h":
ctypedef cytypedef_int htypedef_short # Defined as short, but Cython doesn't know this! ctypedef td_cy_int td_h_short # Defined as short, but Cython doesn't know this!
ctypedef htypedef_short cytypedef2 ctypedef float td_h_double # Defined as double
ctypedef unsigned int td_h_ushort # Defined as unsigned short
ctypedef td_h_short td_h_cy_short
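For orientation, these are the struct-module format characters behind the 'h'/'i'/'f'/'d' codes that the mismatch messages in these doctests refer to; a quick reference sketch, not part of the test file:

    import struct
    for code, ctype in [('h', 'short'), ('H', 'unsigned short'),
                        ('i', 'int'), ('f', 'float'), ('d', 'double')]:
        print("%s -> %s, %d bytes" % (code, ctype, struct.calcsize('=' + code)))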
@testcase @testcase
def printbuf_cytypedef_int(object[cytypedef_int] buf, shape): def printbuf_td_cy_int(object[td_cy_int] buf, shape):
""" """
>>> printbuf_cytypedef_int(IntMockBuffer("A", range(3)), (3,)) >>> printbuf_td_cy_int(IntMockBuffer(None, range(3)), (3,))
acquired A
0 1 2 END 0 1 2 END
released A >>> printbuf_td_cy_int(ShortMockBuffer(None, range(3)), (3,))
>>> printbuf_cytypedef_int(ShortMockBuffer("B", range(3)), (3,))
Traceback (most recent call last): Traceback (most recent call last):
... ...
ValueError: Buffer datatype mismatch (rejecting on 'h') ValueError: Buffer datatype mismatch (rejecting on 'h')
...@@ -681,41 +722,66 @@ def printbuf_cytypedef_int(object[cytypedef_int] buf, shape): ...@@ -681,41 +722,66 @@ def printbuf_cytypedef_int(object[cytypedef_int] buf, shape):
print 'END' print 'END'
@testcase @testcase
def printbuf_htypedef_short(object[htypedef_short] buf, shape): def printbuf_td_h_short(object[td_h_short] buf, shape):
""" """
>>> printbuf_htypedef_short(ShortMockBuffer("A", range(3)), (3,)) >>> printbuf_td_h_short(ShortMockBuffer(None, range(3)), (3,))
acquired A
0 1 2 END 0 1 2 END
released A >>> printbuf_td_h_short(IntMockBuffer(None, range(3)), (3,))
>>> printbuf_htypedef_short(IntMockBuffer("B", range(3)), (3,))
Traceback (most recent call last): Traceback (most recent call last):
... ...
ValueError: Buffer datatype mismatch (rejecting on 'i') ValueError: Buffer datatype mismatch (rejecting on 'i')
""" """
cdef int i cdef int i
for i in range(shape[0]): for i in range(shape[0]):
print buf[i], print buf[i],
print 'END' print 'END'
@testcase @testcase
def printbuf_cytypedef2(object[cytypedef2] buf, shape): def printbuf_td_h_cy_short(object[td_h_cy_short] buf, shape):
""" """
>>> printbuf_cytypedef2(ShortMockBuffer("A", range(3)), (3,)) >>> printbuf_td_h_cy_short(ShortMockBuffer(None, range(3)), (3,))
acquired A
0 1 2 END 0 1 2 END
released A >>> printbuf_td_h_cy_short(IntMockBuffer(None, range(3)), (3,))
>>> printbuf_cytypedef2(IntMockBuffer("B", range(3)), (3,))
Traceback (most recent call last): Traceback (most recent call last):
... ...
ValueError: Buffer datatype mismatch (rejecting on 'i') ValueError: Buffer datatype mismatch (rejecting on 'i')
""" """
cdef int i
for i in range(shape[0]):
print buf[i],
print 'END'
@testcase
def printbuf_td_h_ushort(object[td_h_ushort] buf, shape):
"""
>>> printbuf_td_h_ushort(UnsignedShortMockBuffer(None, range(3)), (3,))
0 1 2 END
>>> printbuf_td_h_ushort(ShortMockBuffer(None, range(3)), (3,))
Traceback (most recent call last):
...
ValueError: Buffer datatype mismatch (rejecting on 'h')
"""
cdef int i
for i in range(shape[0]):
print buf[i],
print 'END'
@testcase
def printbuf_td_h_double(object[td_h_double] buf, shape):
"""
>>> printbuf_td_h_double(DoubleMockBuffer(None, [0.25, 1, 3.125]), (3,))
0.25 1.0 3.125 END
>>> printbuf_td_h_double(FloatMockBuffer(None, [0.25, 1, 3.125]), (3,))
Traceback (most recent call last):
...
ValueError: Buffer datatype mismatch (rejecting on 'f')
"""
cdef int i cdef int i
for i in range(shape[0]): for i in range(shape[0]):
print buf[i], print buf[i],
print 'END' print 'END'
# #
# Object access # Object access
# #
...@@ -932,13 +998,6 @@ cdef class MockBuffer: ...@@ -932,13 +998,6 @@ cdef class MockBuffer:
cdef get_default_format(self): cdef get_default_format(self):
print "ERROR, not subclassed", self.__class__ print "ERROR, not subclassed", self.__class__
cdef class FloatMockBuffer(MockBuffer):
cdef int write(self, char* buf, object value) except -1:
(<float*>buf)[0] = <float>value
return 0
cdef get_itemsize(self): return sizeof(float)
cdef get_default_format(self): return b"=f"
cdef class IntMockBuffer(MockBuffer): cdef class IntMockBuffer(MockBuffer):
cdef int write(self, char* buf, object value) except -1: cdef int write(self, char* buf, object value) except -1:
(<int*>buf)[0] = <int>value (<int*>buf)[0] = <int>value
...@@ -960,6 +1019,21 @@ cdef class UnsignedShortMockBuffer(MockBuffer): ...@@ -960,6 +1019,21 @@ cdef class UnsignedShortMockBuffer(MockBuffer):
cdef get_itemsize(self): return sizeof(unsigned short) cdef get_itemsize(self): return sizeof(unsigned short)
cdef get_default_format(self): return b"=1H" # Try with repeat count cdef get_default_format(self): return b"=1H" # Try with repeat count
cdef class FloatMockBuffer(MockBuffer):
cdef int write(self, char* buf, object value) except -1:
(<float*>buf)[0] = <float>value
return 0
cdef get_itemsize(self): return sizeof(float)
cdef get_default_format(self): return b"f"
cdef class DoubleMockBuffer(MockBuffer):
cdef int write(self, char* buf, object value) except -1:
(<double*>buf)[0] = <double>value
return 0
cdef get_itemsize(self): return sizeof(double)
cdef get_default_format(self): return b"d"
cdef extern from *: cdef extern from *:
void* addr_of_pyobject "(void*)"(object) void* addr_of_pyobject "(void*)"(object)
...@@ -1002,10 +1076,10 @@ def typedbuffer1(obj): ...@@ -1002,10 +1076,10 @@ def typedbuffer1(obj):
... ...
TypeError: Cannot convert int to bufaccess.IntMockBuffer TypeError: Cannot convert int to bufaccess.IntMockBuffer
""" """
cdef IntMockBuffer[int, 1] buf = obj cdef IntMockBuffer[int, ndim=1] buf = obj
@testcase @testcase
def typedbuffer2(IntMockBuffer[int, 1] obj): def typedbuffer2(IntMockBuffer[int, ndim=1] obj):
""" """
>>> typedbuffer2(IntMockBuffer("A", range(10))) >>> typedbuffer2(IntMockBuffer("A", range(10)))
acquired A acquired A
...@@ -1022,7 +1096,7 @@ def typedbuffer2(IntMockBuffer[int, 1] obj): ...@@ -1022,7 +1096,7 @@ def typedbuffer2(IntMockBuffer[int, 1] obj):
# Test __cythonbufferdefaults__ # Test __cythonbufferdefaults__
# #
@testcase @testcase
def bufdefaults1(IntStridedMockBuffer[int, 1] buf): def bufdefaults1(IntStridedMockBuffer[int, ndim=1] buf):
""" """
For IntStridedMockBuffer, mode should be For IntStridedMockBuffer, mode should be
"strided" by defaults which should show "strided" by defaults which should show
......
...@@ -7,7 +7,21 @@ __doc__ = """ ...@@ -7,7 +7,21 @@ __doc__ = """
>>> s.e is s >>> s.e is s
True True
>>> s.e = None >>> s.e = None
>>> s = Bot()
>>> s.e = s
>>> s.e = 1
Traceback (most recent call last):
TypeError: Cannot convert int to extmember.Bot
>>> s.e is s
True
>>> s.e = None
""" """
# declared in the pxd
cdef class Spam: cdef class Spam:
cdef public Spam e pass
# not declared in the pxd
cdef class Bot:
cdef public Bot e
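A rough pure-Python analogue of the behaviour a cdef public Bot attribute shows in the doctest above: None and Bot instances are accepted, anything else raises TypeError. This is only a hypothetical sketch of the observable behaviour, not how Cython implements typed attributes:

    class Bot(object):
        def __init__(self):
            self._e = None
        @property
        def e(self):
            return self._e
        @e.setter
        def e(self, value):
            if value is not None and not isinstance(value, Bot):
                raise TypeError("Cannot convert %s to Bot" % type(value).__name__)
            self._e = value

    s = Bot()
    s.e = s
    s.e = None
    # s.e = 1 would raise TypeError, as in the doctest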
...@@ -7,6 +7,22 @@ __doc__ = u""" ...@@ -7,6 +7,22 @@ __doc__ = u"""
>>> h(56, 7) >>> h(56, 7)
105.0 105.0
>>> arrays()
19
>>> attributes()
26 26 26
>>> smoketest()
10
>>> test_side_effects()
side effect 1
c side effect 2
side effect 3
c side effect 4
([0, 11, 102, 3, 4], [0, 1, 2, 13, 104])
""" """
def f(a,b): def f(a,b):
...@@ -26,3 +42,71 @@ def h(double a, double b): ...@@ -26,3 +42,71 @@ def h(double a, double b):
a += b a += b
a *= b a *= b
return a return a
cimport stdlib
def arrays():
cdef char* buf = <char*>stdlib.malloc(10)
cdef int i = 2
cdef object j = 2
buf[2] = 0
buf[i] += 2
buf[2] *= 10
buf[j] -= 1
print buf[2]
stdlib.free(buf)
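The same sequence of in-place updates on a Python bytearray yields the 19 the doctest expects (a sketch without malloc/free):

    buf = bytearray(10)
    i = 2
    j = 2
    buf[2] = 0
    buf[i] += 2    # 2
    buf[2] *= 10   # 20
    buf[j] -= 1    # 19
    print(buf[2])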
cdef class A:
cdef attr
cdef int attr2
cdef char* buf
def __init__(self):
self.attr = 3
self.attr2 = 3
class B:
attr = 3
def attributes():
cdef A a = A()
b = B()
a.attr += 10
a.attr *= 2
a.attr2 += 10
a.attr2 *= 2
b.attr += 10
b.attr *= 2
print a.attr, a.attr2, b.attr
def get_2(): return 2
cdef int identity(int value): return value
def smoketest():
cdef char* buf = <char*>stdlib.malloc(10)
cdef A a = A()
a.buf = buf
a.buf[identity(1)] = 0
(a.buf + identity(4) - <int>(2*get_2() - 1))[get_2() - 2*identity(1)] += 10
print a.buf[1]
stdlib.free(buf)
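Worked out, the pointer arithmetic in smoketest lands on element 1 of the buffer, which is why 10 is printed; a quick check of the offsets:

    # (buf + identity(4) - (2*get_2() - 1))[get_2() - 2*identity(1)]
    #   = (buf + 4 - 3)[2 - 2] = (buf + 1)[0] = buf[1]
    print((4 - (2*2 - 1)) + (2 - 2*1))   # 1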
def side_effect(x):
print "side effect", x
return x
cdef int c_side_effect(int x):
print "c side effect", x
return x
def test_side_effects():
a = range(5)
a[side_effect(1)] += 10
a[c_side_effect(2)] += 100
cdef int i
cdef int b[5]
for i from 0 <= i < 5:
b[i] = i
b[side_effect(3)] += 10
b[c_side_effect(4)] += 100
return a, [b[i] for i from 0 <= i < 5]
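Plain Python gives the same guarantee that the index expression of an augmented assignment is evaluated exactly once, which is what the side-effect counts in the doctest verify; a small sketch with an ordinary list:

    def side_effect(x):
        print("side effect %d" % x)
        return x

    a = list(range(5))
    a[side_effect(1)] += 10    # "side effect 1" is printed exactly once
    print(a)                   # [0, 11, 2, 3, 4]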
# cannot be named "numpy" in order to no clash with the numpy module! # cannot be named "numpy" in order to not clash with the numpy module!
cimport numpy cimport numpy as np
try: try:
import numpy import numpy as np
__doc__ = """ __doc__ = """
>>> basic() >>> basic()
...@@ -11,12 +11,176 @@ try: ...@@ -11,12 +11,176 @@ try:
[5 6 7 8 9]] [5 6 7 8 9]]
2 0 9 5 2 0 9 5
>>> three_dim()
[[[ 0. 1. 2. 3.]
[ 4. 5. 6. 7.]]
<BLANKLINE>
[[ 8. 9. 10. 11.]
[ 12. 13. 14. 15.]]
<BLANKLINE>
[[ 16. 17. 18. 19.]
[ 20. 21. 22. 23.]]]
6.0 0.0 13.0 8.0
>>> obj_array()
[a 1 {}]
a 1 {}
Test various forms of slicing, picking etc.
>>> a = np.arange(10, dtype='l').reshape(2, 5)
>>> print_long_2d(a)
0 1 2 3 4
5 6 7 8 9
>>> print_long_2d(a[::-1, ::-1])
9 8 7 6 5
4 3 2 1 0
>>> print_long_2d(a[1:2, 1:3])
6 7
>>> print_long_2d(a[::2, ::2])
0 2 4
>>> print_long_2d(a[::4, :])
0 1 2 3 4
>>> print_long_2d(a[:, 1:5:2])
1 3
6 8
>>> print_long_2d(a[:, 5:1:-2])
4 2
9 7
>>> print_long_2d(a[:, [3, 1]])
3 1
8 6
>>> print_long_2d(a.T)
0 5
1 6
2 7
3 8
4 9
Write to slices
>>> b = a.copy()
>>> put_range_long_1d(b[:, 3])
>>> print b
[[0 1 2 0 4]
[5 6 7 1 9]]
>>> put_range_long_1d(b[::-1, 3])
>>> print b
[[0 1 2 1 4]
[5 6 7 0 9]]
>>> a = np.zeros(9, dtype='l')
>>> put_range_long_1d(a[1::3])
>>> print a
[0 0 0 0 1 0 0 2 0]
Write to picked subarrays. This should NOT change the original
array, as picking creates a new mutable copy (a plain NumPy sketch
of the copy-versus-view distinction follows after this doctest block).
>>> a = np.zeros(10, dtype='l').reshape(2, 5)
>>> put_range_long_1d(a[[0, 0, 1, 1, 0], [0, 1, 2, 4, 3]])
>>> print a
[[0 0 0 0 0]
[0 0 0 0 0]]
>>> test_dtype('b', inc1_byte)
>>> test_dtype('B', inc1_ubyte)
>>> test_dtype('h', inc1_short)
>>> test_dtype('H', inc1_ushort)
>>> test_dtype('i', inc1_int)
>>> test_dtype('I', inc1_uint)
>>> test_dtype('l', inc1_long)
>>> test_dtype('L', inc1_ulong)
>>> test_dtype('f', inc1_float)
>>> test_dtype('d', inc1_double)
>>> test_dtype('g', inc1_longdouble)
>>> test_dtype('O', inc1_object)
>>> test_dtype(np.int, inc1_int_t)
>>> test_dtype(np.long, inc1_long_t)
>>> test_dtype(np.float, inc1_float_t)
>>> test_dtype(np.double, inc1_double_t)
>>> test_dtype(np.longdouble, inc1_longdouble_t)
>>> test_dtype(np.int32, inc1_int32_t)
>>> test_dtype(np.float64, inc1_float64_t)
Unsupported types:
>>> test_dtype(np.complex, inc1_byte)
Traceback (most recent call last):
...
ValueError: only objects, int and float dtypes supported for ndarray buffer access so far (dtype is 15)
>>> a = np.zeros((10,), dtype=np.dtype('i4,i4'))
>>> inc1_byte(a)
Traceback (most recent call last):
...
ValueError: only objects, int and float dtypes supported for ndarray buffer access so far (dtype is 20)
""" """
except: except:
__doc__ = "" __doc__ = ""
def basic(): def basic():
cdef object[int, 2] buf = numpy.arange(10).reshape((2, 5)) cdef object[int, ndim=2] buf = np.arange(10, dtype='i').reshape((2, 5))
print buf print buf
print buf[0, 2], buf[0, 0], buf[1, 4], buf[1, 0] print buf[0, 2], buf[0, 0], buf[1, 4], buf[1, 0]
def three_dim():
cdef object[double, ndim=3] buf = np.arange(24, dtype='d').reshape((3,2,4))
print buf
print buf[0, 1, 2], buf[0, 0, 0], buf[1, 1, 1], buf[1, 0, 0]
def obj_array():
cdef object[object, ndim=1] buf = np.array(["a", 1, {}])
print buf
print buf[0], buf[1], buf[2]
def print_long_2d(np.ndarray[long, ndim=2] arr):
cdef int i, j
for i in range(arr.shape[0]):
print " ".join([str(arr[i, j]) for j in range(arr.shape[1])])
def put_range_long_1d(np.ndarray[long] arr):
"""Writes 0,1,2,... to array and returns array"""
cdef int value = 0, i
for i in range(arr.shape[0]):
arr[i] = value
value += 1
# Exhaustive dtype tests -- increments element [1] by 1 for all dtypes
def inc1_byte(np.ndarray[char] arr): arr[1] += 1
def inc1_ubyte(np.ndarray[unsigned char] arr): arr[1] += 1
def inc1_short(np.ndarray[short] arr): arr[1] += 1
def inc1_ushort(np.ndarray[unsigned short] arr): arr[1] += 1
def inc1_int(np.ndarray[int] arr): arr[1] += 1
def inc1_uint(np.ndarray[unsigned int] arr): arr[1] += 1
def inc1_long(np.ndarray[long] arr): arr[1] += 1
def inc1_ulong(np.ndarray[unsigned long] arr): arr[1] += 1
def inc1_longlong(np.ndarray[long long] arr): arr[1] += 1
def inc1_ulonglong(np.ndarray[unsigned long long] arr): arr[1] += 1
def inc1_float(np.ndarray[float] arr): arr[1] += 1
def inc1_double(np.ndarray[double] arr): arr[1] += 1
def inc1_longdouble(np.ndarray[long double] arr): arr[1] += 1
def inc1_object(np.ndarray[object] arr):
o = arr[1]
o += 1
arr[1] = o # unfortunately, += segfaults for objects
def inc1_int_t(np.ndarray[np.int_t] arr): arr[1] += 1
def inc1_long_t(np.ndarray[np.long_t] arr): arr[1] += 1
def inc1_float_t(np.ndarray[np.float_t] arr): arr[1] += 1
def inc1_double_t(np.ndarray[np.double_t] arr): arr[1] += 1
def inc1_longdouble_t(np.ndarray[np.longdouble_t] arr): arr[1] += 1
# The tests below only work on platforms that have the given types
def inc1_int32_t(np.ndarray[np.int32_t] arr): arr[1] += 1
def inc1_float64_t(np.ndarray[np.float64_t] arr): arr[1] += 1
def test_dtype(dtype, inc1):
a = np.array([0, 10], dtype=dtype)
inc1(a)
if a[1] != 11: print "failed!"
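A minimal pure-Python stand-in for the compiled inc1_* helpers, showing how test_dtype drives them across dtypes (hypothetical names, for illustration only):

    import numpy as np

    def inc1_plain(arr):
        arr[1] += 1     # same element-1 increment the typed variants perform

    def check(dtype):
        a = np.array([0, 10], dtype=dtype)
        inc1_plain(a)
        return a[1] == 11

    print(all(check(dt) for dt in ('b', 'h', 'i', 'l', 'f', 'd')))   # True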