Commit 50abb6db authored by Kevin Modzelewski

Copy over some code from CPython

parent 7f4bdb59
@@ -52,7 +52,7 @@ static PyObject* wrap_unaryfunc(PyObject* self, PyObject* args, void* wrapped) {
     return (*func)(self);
 }
-PyObject* Py_CallPythonNew(PyTypeObject* self, PyObject* args, PyObject* kwds) {
+PyObject* slot_tp_new(PyTypeObject* self, PyObject* args, PyObject* kwds) noexcept {
     try {
         // TODO: runtime ICs?
         Box* new_attr = typeLookup(self, _new_str, NULL);
@@ -65,7 +65,7 @@ PyObject* Py_CallPythonNew(PyTypeObject* self, PyObject* args, PyObject* kwds) {
     }
 }
-PyObject* Py_CallPythonCall(PyObject* self, PyObject* args, PyObject* kwds) {
+PyObject* slot_tp_call(PyObject* self, PyObject* args, PyObject* kwds) noexcept {
     try {
         Py_FatalError("this function is untested");
@@ -76,7 +76,7 @@ PyObject* Py_CallPythonCall(PyObject* self, PyObject* args, PyObject* kwds) {
     }
 }
-PyObject* Py_CallPythonRepr(PyObject* self) {
+PyObject* slot_tp_repr(PyObject* self) noexcept {
     try {
         return repr(self);
     } catch (Box* e) {
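These slot_tp_* handlers sit on a C ABI boundary: a CPython slot reports failure by returning NULL with the error indicator set, while Pyston's runtime raises exceptions as thrown Box* values. A minimal sketch of the bridging pattern this implies; the error-translation step (PyErr_SetObject on the thrown box) and the function name are assumptions for illustration, not what this commit does:

// Hypothetical sketch only; the real handlers above either call through or abort.
PyObject* slot_tp_repr_sketch(PyObject* self) noexcept {
    try {
        return repr(self); // Pyston-internal call; may throw a Box*
    } catch (Box* e) {
        // Assumed translation step, not from this commit: record the thrown
        // exception so the caller sees NULL with the error indicator set.
        PyErr_SetObject((PyObject*)e->cls, (PyObject*)e);
        return NULL;
    }
}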
@@ -86,16 +86,36 @@ PyObject* Py_CallPythonRepr(PyObject* self) {
 typedef wrapper_def slotdef;
-static void** slotptr(BoxedClass* self, int offset) {
-    // TODO handle indices into the indirected portions (tp_as_sequence, etc)
-    char* ptr = reinterpret_cast<char*>(self);
-    return reinterpret_cast<void**>(ptr + offset);
+static void** slotptr(BoxedClass* type, int offset) {
+    // We use the index into PyHeapTypeObject as the canonical way to represent offsets, even though we are not
+    // (currently) using that object representation
+    // copied from CPython:
+    /* Note: this depends on the order of the members of PyHeapTypeObject! */
+    assert(offset >= 0);
+    assert((size_t)offset < offsetof(PyHeapTypeObject, as_buffer));
+    char* ptr;
+    if ((size_t)offset >= offsetof(PyHeapTypeObject, as_sequence)) {
+        ptr = (char*)type->tp_as_sequence;
+        offset -= offsetof(PyHeapTypeObject, as_sequence);
+    } else if ((size_t)offset >= offsetof(PyHeapTypeObject, as_mapping)) {
+        ptr = (char*)type->tp_as_mapping;
+        offset -= offsetof(PyHeapTypeObject, as_mapping);
+    } else if ((size_t)offset >= offsetof(PyHeapTypeObject, as_number)) {
+        ptr = (char*)type->tp_as_number;
+        offset -= offsetof(PyHeapTypeObject, as_number);
+    } else {
+        ptr = (char*)type;
+    }
+    if (ptr != NULL)
+        ptr += offset;
+    return (void**)ptr;
 }
 static void update_one_slot(BoxedClass* self, const slotdef& p) {
     // TODO: CPython version is significantly more sophisticated
     void** ptr = slotptr(self, p.offset);
-    assert(ptr);
+    assert(ptr && "it is ok for this to be NULL (CPython handles that case) but I don't think it should happen?");
     if (typeLookup(self, p.name, NULL))
         *ptr = p.function;
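The rewritten slotptr follows CPython's offset scheme: slotdef offsets are expressed relative to PyHeapTypeObject, which lays out the sub-suites inline after the type object itself (as_number, then as_mapping, then as_sequence, then as_buffer), so testing against the highest-offset suite first identifies which indirected struct the offset lands in. A small self-contained check of that arithmetic, assuming CPython 2.x headers are available:

#include <Python.h> // for PyHeapTypeObject / PyNumberMethods
#include <cstddef>
#include <cstdio>

int main() {
    // An offset the slotdef table would store for an nb_add entry:
    size_t off = offsetof(PyHeapTypeObject, as_number.nb_add);

    // slotptr() sees off >= offsetof(PyHeapTypeObject, as_number) but below
    // as_mapping/as_sequence, so it rebases the offset onto tp_as_number:
    size_t rebased = off - offsetof(PyHeapTypeObject, as_number);

    // ...which is exactly the offset of nb_add within PyNumberMethods:
    std::printf("%d\n", rebased == offsetof(PyNumberMethods, nb_add)); // prints 1
    return 0;
}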
@@ -103,11 +123,62 @@ static void update_one_slot(BoxedClass* self, const slotdef& p) {
         *ptr = NULL;
 }
+// Copied from CPython:
+#define TPSLOT(NAME, SLOT, FUNCTION, WRAPPER, DOC) \
+    { NAME, offsetof(PyTypeObject, SLOT), (void*)(FUNCTION), WRAPPER, PyDoc_STR(DOC), 0 }
+#define FLSLOT(NAME, SLOT, FUNCTION, WRAPPER, DOC, FLAGS) \
+    { NAME, offsetof(PyTypeObject, SLOT), (void*)(FUNCTION), WRAPPER, PyDoc_STR(DOC), FLAGS }
+#define ETSLOT(NAME, SLOT, FUNCTION, WRAPPER, DOC) \
+    { NAME, offsetof(PyHeapTypeObject, SLOT), (void*)(FUNCTION), WRAPPER, PyDoc_STR(DOC), 0 }
+#define SQSLOT(NAME, SLOT, FUNCTION, WRAPPER, DOC) ETSLOT(NAME, as_sequence.SLOT, FUNCTION, WRAPPER, DOC)
+#define MPSLOT(NAME, SLOT, FUNCTION, WRAPPER, DOC) ETSLOT(NAME, as_mapping.SLOT, FUNCTION, WRAPPER, DOC)
+#define NBSLOT(NAME, SLOT, FUNCTION, WRAPPER, DOC) ETSLOT(NAME, as_number.SLOT, FUNCTION, WRAPPER, DOC)
+#define UNSLOT(NAME, SLOT, FUNCTION, WRAPPER, DOC) \
+    ETSLOT(NAME, as_number.SLOT, FUNCTION, WRAPPER, "x." NAME "() <==> " DOC)
+#define IBSLOT(NAME, SLOT, FUNCTION, WRAPPER, DOC) \
+    ETSLOT(NAME, as_number.SLOT, FUNCTION, WRAPPER, "x." NAME "(y) <==> x" DOC "y")
+#define BINSLOT(NAME, SLOT, FUNCTION, DOC) \
+    ETSLOT(NAME, as_number.SLOT, FUNCTION, wrap_binaryfunc_l, "x." NAME "(y) <==> x" DOC "y")
+#define RBINSLOT(NAME, SLOT, FUNCTION, DOC) \
+    ETSLOT(NAME, as_number.SLOT, FUNCTION, wrap_binaryfunc_r, "x." NAME "(y) <==> y" DOC "x")
+#define BINSLOTNOTINFIX(NAME, SLOT, FUNCTION, DOC) \
+    ETSLOT(NAME, as_number.SLOT, FUNCTION, wrap_binaryfunc_l, "x." NAME "(y) <==> " DOC)
+#define RBINSLOTNOTINFIX(NAME, SLOT, FUNCTION, DOC) \
+    ETSLOT(NAME, as_number.SLOT, FUNCTION, wrap_binaryfunc_r, "x." NAME "(y) <==> " DOC)
 static slotdef slotdefs[] = {
-    { "__repr__", offsetof(PyTypeObject, tp_repr), (void*)&Py_CallPythonRepr, wrap_unaryfunc, 0 },
-    { "__call__", offsetof(PyTypeObject, tp_call), (void*)&Py_CallPythonCall, (wrapperfunc)wrap_call,
-      PyWrapperFlag_KEYWORDS },
-    { "__new__", offsetof(PyTypeObject, tp_new), (void*)&Py_CallPythonNew, NULL, 0 },
+    TPSLOT("__repr__", tp_repr, slot_tp_repr, wrap_unaryfunc, "x.__repr__() <==> repr(x)"),
+    FLSLOT("__call__", tp_call, slot_tp_call, (wrapperfunc)wrap_call, "x.__call__(...) <==> x(...)",
+           PyWrapperFlag_KEYWORDS),
+    TPSLOT("__new__", tp_new, slot_tp_new, NULL, ""),
+#if 0
+    SQSLOT("__len__", sq_length, slot_sq_length, wrap_lenfunc, "x.__len__() <==> len(x)"),
+    /* Heap types defining __add__/__mul__ have sq_concat/sq_repeat == NULL.
+       The logic in abstract.c always falls back to nb_add/nb_multiply in
+       this case. Defining both the nb_* and the sq_* slots to call the
+       user-defined methods has unexpected side-effects, as shown by
+       test_descr.notimplemented() */
+    SQSLOT("__add__", sq_concat, NULL, wrap_binaryfunc, "x.__add__(y) <==> x+y"),
+    SQSLOT("__mul__", sq_repeat, NULL, wrap_indexargfunc, "x.__mul__(n) <==> x*n"),
+    SQSLOT("__rmul__", sq_repeat, NULL, wrap_indexargfunc, "x.__rmul__(n) <==> n*x"),
+    SQSLOT("__getitem__", sq_item, slot_sq_item, wrap_sq_item, "x.__getitem__(y) <==> x[y]"),
+    SQSLOT("__getslice__", sq_slice, slot_sq_slice, wrap_ssizessizeargfunc, "x.__getslice__(i, j) <==> x[i:j]\n\
+\n\
+Use of negative indices is not supported."),
+    SQSLOT("__setitem__", sq_ass_item, slot_sq_ass_item, wrap_sq_setitem, "x.__setitem__(i, y) <==> x[i]=y"),
+    SQSLOT("__delitem__", sq_ass_item, slot_sq_ass_item, wrap_sq_delitem, "x.__delitem__(y) <==> del x[y]"),
+    SQSLOT("__setslice__", sq_ass_slice, slot_sq_ass_slice, wrap_ssizessizeobjargproc,
+           "x.__setslice__(i, j, y) <==> x[i:j]=y\n\
+\n\
+Use of negative indices is not supported."),
+    SQSLOT("__delslice__", sq_ass_slice, slot_sq_ass_slice, wrap_delslice, "x.__delslice__(i, j) <==> del x[i:j]\n\
+\n\
+Use of negative indices is not supported."),
+    SQSLOT("__contains__", sq_contains, slot_sq_contains, wrap_objobjproc, "x.__contains__(y) <==> y in x"),
+    SQSLOT("__iadd__", sq_inplace_concat, NULL, wrap_binaryfunc, "x.__iadd__(y) <==> x+=y"),
+    SQSLOT("__imul__", sq_inplace_repeat, NULL, wrap_indexargfunc, "x.__imul__(y) <==> x*=y"),
+#endif
 };
 static void init_slotdefs() {
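Spelled out, these macros just build six-field initializers matching the wrapper_def change further down. An illustrative expansion; repr_entry is a made-up name, everything else comes from the diff:

// What TPSLOT expands to for the __repr__ entry:
static slotdef repr_entry = TPSLOT("__repr__", tp_repr, slot_tp_repr, wrap_unaryfunc,
                                   "x.__repr__() <==> repr(x)");
// i.e. { "__repr__",                      // attribute name looked up on the class
//        offsetof(PyTypeObject, tp_repr), // which C-level slot to patch
//        (void*)(slot_tp_repr),           // handler installed when __repr__ exists
//        wrap_unaryfunc,                  // wrapper exposing the raw slot to Python
//        PyDoc_STR("x.__repr__() <==> repr(x)"),
//        0 }                              // flags
// update_one_slot() then stores slot_tp_repr into type->tp_repr whenever
// typeLookup() finds a __repr__, so C callers of the slot reach the Python method.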
@@ -27,7 +27,7 @@ struct wrapper_def {
     int offset;
     void* function; // "generic" handler that gets put in the tp_* slot which proxies to the python version
     wrapperfunc wrapper; // "wrapper" that ends up getting called by the Python-visible WrapperDescr
-    // exists in CPython: const char* doc
+    const char* doc;
     int flags;
     // exists in CPython: PyObject *name_strobj
 };
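The struct's two function pointers run in opposite directions across the C/Python boundary; a schematic using the names from this diff:

// Python -> C (the `wrapper` field): calling obj.__repr__() hits the
// Python-visible WrapperDescr, which invokes
//     wrap_unaryfunc(obj, args, wrapped = type->tp_repr)
// so the raw C slot does the work.
//
// C -> Python (the `function` field): C code calling PyObject_Repr(obj) goes
// through type->tp_repr == slot_tp_repr, which does a typeLookup for the
// Python-level __repr__ and calls it.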