Commit 7c1ee4cc authored by Kevin Modzelewski, committed by GitHub

Merge pull request #1334 from kmod/threading

Threading fixes to get cffi-1.7 working
parents e9ced458 9b46190c
......@@ -20,12 +20,13 @@ typedef struct _is {
struct _is *next;
struct _ts *tstate_head;
PyObject *modules;
PyObject *builtins;
// Pyston change
// Note: any changes here need to show up in PyInterpreterState_Clear as well
#if 0
PyObject *modules;
PyObject *sysdict;
PyObject *builtins;
PyObject *modules_reloading;
PyObject *codec_search_path;
......@@ -136,10 +137,7 @@ PyAPI_FUNC(int) PyThreadState_SetAsyncExc(long, PyObject *) PYSTON_NOEXCEPT;
/* Variable and macro for in-line access to current thread state */
// Pyston change: use our internal name for this
//PyAPI_DATA(PyThreadState *) _PyThreadState_Current;
PyAPI_DATA(__thread PyThreadState) cur_thread_state;
#define _PyThreadState_Current (&cur_thread_state)
PyAPI_DATA(PyThreadState *) _PyThreadState_Current;
#ifdef Py_DEBUG
#define PyThreadState_GET() PyThreadState_Get()
......
......@@ -3164,6 +3164,17 @@ imp_load_dynamic(PyObject *self, PyObject *args)
if (!PyArg_ParseTuple(args, "ss|O!:load_dynamic", &name, &pathname,
&PyFile_Type, &fob))
return NULL;
// Pyston change: warn if it doesn't look like a pyston-built so.
// Some libraries (cffi) want to load things with names that don't even end
// in ".so", so allow those through.
int len = strlen(name);
if (len >= 3 && strcmp(&name[len - 3], ".so") == 0 &&
(len < 10 || strcmp(&name[len - 10], ".pyston.so") != 0)) {
PyErr_Format(PyExc_ValueError, "Pyston refusing to load a non-pyston .so \"%.200s\"", name);
return NULL;
}
if (fob) {
fp = get_file(pathname, fob, "r");
if (fp == NULL)
......
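The check added above refuses to load a plain ".so" that was not built against Pyston (it must end in ".pyston.so"), while names without a ".so" suffix at all, as cffi sometimes produces, pass through. A minimal standalone sketch of the same suffix logic (illustrative only, not part of this commit):

#include <stdio.h>
#include <string.h>

// Returns 1 if load_dynamic should accept this filename, 0 if it should refuse.
static int acceptable_extension_name(const char* name) {
    size_t len = strlen(name);
    if (len >= 3 && strcmp(&name[len - 3], ".so") == 0
        && (len < 10 || strcmp(&name[len - 10], ".pyston.so") != 0))
        return 0; // ends in ".so" but not ".pyston.so": refuse
    return 1;     // ".pyston.so", or no ".so" suffix at all (cffi case): allow
}

int main(void) {
    printf("%d %d %d\n",
           acceptable_extension_name("foo.so"),         // 0
           acceptable_extension_name("foo.pyston.so"),  // 1
           acceptable_extension_name("_cffi_backend")); // 1
    return 0;
}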
......@@ -1664,11 +1664,11 @@ Value ASTInterpreter::visit_dict(AST_Dict* node) {
}
Value ASTInterpreter::visit_set(AST_Set* node) {
BoxedSet* set = (BoxedSet*)createSet();
try {
// insert the elements in reverse like cpython does
// important for {1, 1L}
llvm::SmallVector<RewriterVar*, 8> items;
BoxedSet* set = (BoxedSet*)createSet();
for (auto it = node->elts.rbegin(), it_end = node->elts.rend(); it != it_end; ++it) {
Value v = visit_expr(*it);
_setAddStolen(set, v.o);
......@@ -1676,7 +1676,8 @@ Value ASTInterpreter::visit_set(AST_Set* node) {
}
return Value(set, jit ? jit->emitCreateSet(items) : NULL);
} catch (ExcInfo e) {
RELEASE_ASSERT(0, "this leaks in case of an exception");
Py_DECREF(set);
throw e;
}
}
......
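The RELEASE_ASSERT that previously just documented the leak on an exception is replaced by a decref-and-rethrow. A minimal sketch of that ownership pattern with stand-in types (illustrative only, not Pyston code):

#include <stdexcept>

struct Obj { int refcount = 1; };
static void decref(Obj* o) { if (--o->refcount == 0) delete o; }

// Build an object from elements; if evaluating an element throws, drop the
// owned reference before re-raising so nothing is leaked (cf. Py_DECREF(set)).
Obj* build_container(bool element_throws) {
    Obj* set = new Obj();              // owned, analogous to createSet()
    try {
        if (element_throws)
            throw std::runtime_error("element evaluation failed");
        return set;                    // ownership passes to the caller
    } catch (...) {
        decref(set);
        throw;                         // analogous to `throw e;` above
    }
}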
......@@ -37,8 +37,6 @@
namespace pyston {
DS_DEFINE_RWLOCK(codegen_rwlock);
FunctionMetadata::FunctionMetadata(int num_args, bool takes_varargs, bool takes_kwargs,
std::unique_ptr<SourceInfo> source)
: code_obj(NULL),
......
......@@ -90,8 +90,6 @@ extern GlobalState g;
// in runtime_hooks.cpp:
void initGlobalFuncs(GlobalState& g);
DS_DECLARE_RWLOCK(codegen_rwlock);
}
#endif
......@@ -341,30 +341,26 @@ CompiledFunction* compileFunction(FunctionMetadata* f, FunctionSpecialization* s
void compileAndRunModule(AST_Module* m, BoxedModule* bm) {
FunctionMetadata* md;
{ // scope for limiting the locked region:
LOCK_REGION(codegen_rwlock.asWrite());
Timer _t("for compileModule()");
Timer _t("for compileModule()");
const char* fn = PyModule_GetFilename(bm);
RELEASE_ASSERT(fn, "");
const char* fn = PyModule_GetFilename(bm);
RELEASE_ASSERT(fn, "");
FutureFlags future_flags = getFutureFlags(m->body, fn);
ScopingAnalysis* scoping = new ScopingAnalysis(m, true);
FutureFlags future_flags = getFutureFlags(m->body, fn);
ScopingAnalysis* scoping = new ScopingAnalysis(m, true);
auto fn_str = boxString(fn);
AUTO_DECREF(fn_str);
std::unique_ptr<SourceInfo> si(new SourceInfo(bm, scoping, future_flags, m, fn_str));
auto fn_str = boxString(fn);
AUTO_DECREF(fn_str);
std::unique_ptr<SourceInfo> si(new SourceInfo(bm, scoping, future_flags, m, fn_str));
static BoxedString* doc_str = getStaticString("__doc__");
bm->setattr(doc_str, autoDecref(si->getDocString()), NULL);
static BoxedString* doc_str = getStaticString("__doc__");
bm->setattr(doc_str, autoDecref(si->getDocString()), NULL);
static BoxedString* builtins_str = getStaticString("__builtins__");
if (!bm->hasattr(builtins_str))
bm->setattr(builtins_str, PyModule_GetDict(builtins_module), NULL);
static BoxedString* builtins_str = getStaticString("__builtins__");
if (!bm->hasattr(builtins_str))
bm->setattr(builtins_str, PyModule_GetDict(builtins_module), NULL);
md = new FunctionMetadata(0, false, false, std::move(si));
}
md = new FunctionMetadata(0, false, false, std::move(si));
UNAVOIDABLE_STAT_TIMER(t0, "us_timer_interpreted_module_toplevel");
Box* r = astInterpretFunction(md, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
......@@ -395,8 +391,6 @@ Box* evalOrExec(FunctionMetadata* md, Box* globals, Box* boxedLocals) {
static FunctionMetadata* compileForEvalOrExec(AST* source, llvm::ArrayRef<AST_stmt*> body, BoxedString* fn,
PyCompilerFlags* flags) {
LOCK_REGION(codegen_rwlock.asWrite());
Timer _t("for evalOrExec()");
ScopingAnalysis* scoping = new ScopingAnalysis(source, false);
......@@ -655,8 +649,6 @@ void exec(Box* boxedCode, Box* globals, Box* locals, FutureFlags caller_future_f
// TODO we should have logic like this at the CLFunc level that detects that we keep
// on creating functions with failing speculations, and then stop speculating.
void CompiledFunction::speculationFailed() {
LOCK_REGION(codegen_rwlock.asWrite());
this->times_speculation_failed++;
if (this->times_speculation_failed == 4) {
......@@ -735,8 +727,6 @@ ConcreteCompilerType* CompiledFunction::getReturnType() {
/// The cf must be an active version in its parents FunctionMetadata; the given
/// version will be replaced by the new version, which will be returned.
static CompiledFunction* _doReopt(CompiledFunction* cf, EffortLevel new_effort) {
LOCK_REGION(codegen_rwlock.asWrite());
assert(cf->md->versions.size());
assert(cf);
......@@ -773,8 +763,6 @@ static CompiledFunction* _doReopt(CompiledFunction* cf, EffortLevel new_effort)
static StatCounter stat_osrexits("num_osr_exits");
static StatCounter stat_osr_compiles("num_osr_compiles");
CompiledFunction* compilePartialFuncInternal(OSRExit* exit) {
LOCK_REGION(codegen_rwlock.asWrite());
assert(exit);
stat_osrexits.log();
......
......@@ -33,9 +33,15 @@
#include "runtime/objmodel.h" // _printStacktrace
#include "runtime/types.h"
extern "C" {
PyThreadState* _PyThreadState_Current;
}
namespace pyston {
namespace threading {
void _acquireGIL();
void _releaseGIL();
#ifdef WITH_THREAD
#include "pythread.h"
......@@ -56,15 +62,10 @@ static PyThread_type_lock head_mutex = NULL; /* Protects interp->tstate_head */
#define HEAD_UNLOCK() /* Nothing */
#endif
PyInterpreterState interpreter_state;
static PyInterpreterState interpreter_state;
std::unordered_set<PerThreadSetBase*> PerThreadSetBase::all_instances;
extern "C" {
__thread PyThreadState cur_thread_state = { NULL, &interpreter_state, NULL, 0, 1, NULL, NULL, NULL, NULL, 0,
NULL }; // not sure if we need to explicitly request zero-initialization
}
PthreadFastMutex threading_lock;
// Certain thread examination functions won't be valid for a brief
......@@ -75,50 +76,49 @@ PthreadFastMutex threading_lock;
// be checked while the threading_lock is held; might not be worth it.
int num_starting_threads(0);
// TODO: this is a holdover from our GC days, and now there's pretty much nothing left here
// and it should just get refactored out.
class ThreadStateInternal {
private:
bool saved;
ucontext_t ucontext;
bool holds_gil = false;
public:
void* stack_start;
pthread_t pthread_id;
PyThreadState* public_thread_state;
ThreadStateInternal(void* stack_start, pthread_t pthread_id, PyThreadState* tstate)
: saved(false), stack_start(stack_start), pthread_id(pthread_id), public_thread_state(tstate) {
ThreadStateInternal(pthread_t pthread_id, PyThreadState* tstate)
: pthread_id(pthread_id), public_thread_state(tstate) {
HEAD_LOCK();
tstate->next = interpreter_state.tstate_head;
interpreter_state.tstate_head = tstate;
HEAD_UNLOCK();
}
void saveCurrent() {
assert(!saved);
getcontext(&ucontext);
saved = true;
bool holdsGil() {
assert(pthread_self() == this->pthread_id);
return holds_gil;
}
void popCurrent() {
assert(saved);
saved = false;
}
void gilTaken() {
assert(pthread_self() == this->pthread_id);
bool isValid() { return saved; }
assert(!_PyThreadState_Current);
_PyThreadState_Current = public_thread_state;
// This is a quick and dirty way to determine if the current thread holds the gil:
// the only way it can't (at least for now) is if it had saved its threadstate.
// This only works when looking at a thread that is not actively acquiring or releasing
// the GIL, so for now just guard on it only being called for the current thread.
// TODO It's pretty brittle to reuse the saved flag like this.
bool holdsGil() {
assert(pthread_self() == this->pthread_id);
return !saved;
assert(!holds_gil);
holds_gil = true;
}
ucontext_t* getContext() { return &ucontext; }
void gilReleased() {
assert(pthread_self() == this->pthread_id);
assert(_PyThreadState_Current == public_thread_state);
_PyThreadState_Current = NULL;
assert(holds_gil);
holds_gil = false;
}
};
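Instead of inferring "holds the GIL" from whether the thread had saved its context, ThreadStateInternal now records it explicitly and also maintains _PyThreadState_Current. A tiny sketch of that bookkeeping idea with stand-in types (not Pyston code):

#include <cassert>
#include <mutex>

static std::mutex gil;                        // stand-in for the real GIL
static thread_local bool holds_gil = false;   // per-thread flag, like ThreadStateInternal

void gil_acquire() { gil.lock(); assert(!holds_gil); holds_gil = true; }   // cf. gilTaken()
void gil_release() { assert(holds_gil); holds_gil = false; gil.unlock(); } // cf. gilReleased()
bool this_thread_holds_gil() { return holds_gil; }  // only meaningful for the calling thread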
static std::unordered_map<pthread_t, ThreadStateInternal*> current_threads;
static __thread ThreadStateInternal* current_internal_thread_state = 0;
......@@ -137,24 +137,7 @@ static void registerThread(bool is_starting_thread) {
LOCK_REGION(&threading_lock);
pthread_attr_t thread_attrs;
int code = pthread_getattr_np(current_thread, &thread_attrs);
if (code)
err(1, NULL);
void* stack_start;
size_t stack_size;
code = pthread_attr_getstack(&thread_attrs, &stack_start, &stack_size);
RELEASE_ASSERT(code == 0, "");
pthread_attr_destroy(&thread_attrs);
#if STACK_GROWS_DOWN
void* stack_bottom = static_cast<char*>(stack_start) + stack_size;
#else
void* stack_bottom = stack_start;
#endif
current_internal_thread_state = new ThreadStateInternal(stack_bottom, current_thread, &cur_thread_state);
current_internal_thread_state = new ThreadStateInternal(current_thread, &cur_thread_state);
current_threads[current_thread] = current_internal_thread_state;
if (is_starting_thread)
......@@ -201,6 +184,7 @@ static void tstate_delete_common(PyThreadState* tstate) {
static void unregisterThread() {
tstate_delete_common(current_internal_thread_state->public_thread_state);
PyThreadState_Clear(current_internal_thread_state->public_thread_state);
assert(current_internal_thread_state->holdsGil());
{
pthread_t current_thread = pthread_self();
......@@ -210,6 +194,10 @@ static void unregisterThread() {
if (VERBOSITY() >= 2)
printf("thread tid=%ld exited\n", current_thread);
}
current_internal_thread_state->gilReleased();
_releaseGIL();
delete current_internal_thread_state;
current_internal_thread_state = 0;
}
......@@ -221,7 +209,7 @@ extern "C" PyGILState_STATE PyGILState_Ensure(void) noexcept {
if (current_internal_thread_state == NULL)
Py_FatalError("Couldn't create thread-state for new thread");
acquireGLRead();
endAllowThreads();
return PyGILState_UNLOCKED;
} else {
++cur_thread_state.gilstate_counter;
......@@ -241,20 +229,21 @@ extern "C" void PyGILState_Release(PyGILState_STATE oldstate) noexcept {
--cur_thread_state.gilstate_counter;
RELEASE_ASSERT(cur_thread_state.gilstate_counter >= 0, "");
if (oldstate == PyGILState_UNLOCKED) {
beginAllowThreads();
}
if (cur_thread_state.gilstate_counter == 0) {
assert(oldstate == PyGILState_UNLOCKED);
RELEASE_ASSERT(0, "this is currently untested");
// Pyston change:
unregisterThread();
return;
}
if (oldstate == PyGILState_UNLOCKED) {
beginAllowThreads();
}
}
extern "C" PyThreadState* PyGILState_GetThisThreadState(void) noexcept {
Py_FatalError("unimplemented");
return &cur_thread_state;
}
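The PyGILState bookkeeping adjusted above is what lets threads not started by Python call back into the interpreter. A sketch of the standard caller-side pattern it supports (not from this commit; assumes an already-initialized interpreter):

#include <Python.h>

void call_into_python_from_c_thread(void) {
    PyGILState_STATE st = PyGILState_Ensure();          /* registers the thread and takes the GIL */
    PyRun_SimpleString("print 'hello from a foreign thread'");
    PyGILState_Release(st);                              /* undoes exactly one Ensure */
}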
struct ThreadStartArgs {
......@@ -271,8 +260,7 @@ static void* _thread_start(void* _arg) {
delete arg;
registerThread(true);
threading::GLReadRegion _glock;
endAllowThreads();
assert(!PyErr_Occurred());
void* rtn = start_func(arg1, arg2, arg3);
......@@ -308,50 +296,6 @@ intptr_t start_thread(void* (*start_func)(Box*, Box*, Box*), Box* arg1, Box* arg
return thread_id;
}
// from https://www.sourceware.org/ml/guile/2000-07/msg00214.html
static void* find_stack() {
FILE* input;
char* line;
char* s;
size_t len;
char hex[9];
void* start;
void* end;
int dummy;
input = fopen("/proc/self/maps", "r");
if (input == NULL)
return NULL;
len = 0;
line = NULL;
while (getline(&line, &len, input) != -1) {
s = strchr(line, '-');
if (s == NULL)
return NULL;
*s++ = '\0';
start = (void*)strtoul(line, NULL, 16);
end = (void*)strtoul(s, NULL, 16);
if ((void*)&dummy >= start && (void*)&dummy <= end) {
free(line);
fclose(input);
#if STACK_GROWS_DOWN
return end;
#else
return start;
#endif
}
}
free(line);
fclose(input);
return NULL; /* not found =^P */
}
static long main_thread_id;
void registerMainThread() {
......@@ -363,8 +307,10 @@ void registerMainThread() {
assert(!interpreter_state.tstate_head);
assert(!current_internal_thread_state);
current_internal_thread_state = new ThreadStateInternal(find_stack(), pthread_self(), &cur_thread_state);
current_internal_thread_state = new ThreadStateInternal(pthread_self(), &cur_thread_state);
current_threads[pthread_self()] = current_internal_thread_state;
endAllowThreads();
}
/* Wait until threading._shutdown completes, provided
......@@ -408,35 +354,19 @@ bool isMainThread() {
// It also means that you're not allowed to do that much inside an AllowThreads region...
// TODO maybe we should let the client decide which way to handle it
extern "C" void beginAllowThreads() noexcept {
// I don't think it matters whether the GL release happens before or after the state
// saving; do it before, then, to reduce the amount we hold the GL:
releaseGLRead();
{
LOCK_REGION(&threading_lock);
assert(current_internal_thread_state);
current_internal_thread_state->gilReleased();
assert(current_internal_thread_state);
current_internal_thread_state->saveCurrent();
}
_releaseGIL();
}
extern "C" void endAllowThreads() noexcept {
{
LOCK_REGION(&threading_lock);
assert(current_internal_thread_state);
current_internal_thread_state->popCurrent();
}
_acquireGIL();
acquireGLRead();
assert(current_internal_thread_state);
current_internal_thread_state->gilTaken();
}
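beginAllowThreads()/endAllowThreads() now simply flip the per-thread holds_gil state and release or reacquire the GIL, with no context saving and no GRWL. A sketch of a hypothetical runtime call site bracketing blocking work with these hooks (not from this commit):

extern "C" void beginAllowThreads() noexcept;
extern "C" void endAllowThreads() noexcept;

void do_blocking_syscall() {
    beginAllowThreads();   // drops the GIL; another Python thread can run
    // ... read(), sleep(), waitpid(), etc. ...
    endAllowThreads();     // reacquires the GIL before touching Python objects again
}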
#if THREADING_USE_GIL
#if THREADING_USE_GRWL
#error "Can't turn on both the GIL and the GRWL!"
#endif
static pthread_mutex_t gil = PTHREAD_MUTEX_INITIALIZER;
std::atomic<int> threads_waiting_on_gil(0);
......@@ -494,7 +424,7 @@ extern "C" void PyEval_ReInitThreads() noexcept {
Py_DECREF(threading);
}
void acquireGLWrite() {
void _acquireGIL() {
threads_waiting_on_gil++;
pthread_mutex_lock(&gil);
threads_waiting_on_gil--;
......@@ -502,7 +432,7 @@ void acquireGLWrite() {
pthread_cond_signal(&gil_acquired);
}
void releaseGLWrite() {
void _releaseGIL() {
pthread_mutex_unlock(&gil);
}
......@@ -523,92 +453,16 @@ void _allowGLReadPreemption() {
if (!threads_waiting_on_gil.load(std::memory_order_seq_cst))
return;
current_internal_thread_state->gilReleased();
threads_waiting_on_gil++;
pthread_cond_wait(&gil_acquired, &gil);
threads_waiting_on_gil--;
pthread_cond_signal(&gil_acquired);
}
#elif THREADING_USE_GRWL
static pthread_rwlock_t grwl = PTHREAD_RWLOCK_WRITER_NONRECURSIVE_INITIALIZER_NP;
enum class GRWLHeldState {
N,
R,
W,
};
static __thread GRWLHeldState grwl_state = GRWLHeldState::N;
static std::atomic<int> writers_waiting(0);
void acquireGLRead() {
assert(grwl_state == GRWLHeldState::N);
pthread_rwlock_rdlock(&grwl);
grwl_state = GRWLHeldState::R;
}
void releaseGLRead() {
assert(grwl_state == GRWLHeldState::R);
pthread_rwlock_unlock(&grwl);
grwl_state = GRWLHeldState::N;
}
void acquireGLWrite() {
assert(grwl_state == GRWLHeldState::N);
writers_waiting++;
pthread_rwlock_wrlock(&grwl);
writers_waiting--;
grwl_state = GRWLHeldState::W;
}
void releaseGLWrite() {
assert(grwl_state == GRWLHeldState::W);
pthread_rwlock_unlock(&grwl);
grwl_state = GRWLHeldState::N;
}
void promoteGL() {
Timer _t2("promoting", /*min_usec=*/10000);
// Note: this is *not* the same semantics as normal promoting, on purpose.
releaseGLRead();
acquireGLWrite();
long promote_us = _t2.end();
static thread_local StatPerThreadCounter sc_promoting_us("grwl_promoting_us");
sc_promoting_us.log(promote_us);
current_internal_thread_state->gilTaken();
}
void demoteGL() {
releaseGLWrite();
acquireGLRead();
}
static __thread int gl_check_count = 0;
void allowGLReadPreemption() {
assert(grwl_state == GRWLHeldState::R);
// gl_check_count++;
// if (gl_check_count < 10)
// return;
// gl_check_count = 0;
if (__builtin_expect(!writers_waiting.load(std::memory_order_relaxed), 1))
return;
Timer _t2("preempted", /*min_usec=*/10000);
pthread_rwlock_unlock(&grwl);
// The GRWL is a writer-prefered rwlock, so this next statement will block even
// if the lock is in read mode:
pthread_rwlock_rdlock(&grwl);
long preempt_us = _t2.end();
static thread_local StatPerThreadCounter sc_preempting_us("grwl_preempt_us");
sc_preempting_us.log(preempt_us);
}
#endif
// We don't support CPython's TLS (yet?)
extern "C" void PyThread_ReInitTLS(void) noexcept {
// don't have to do anything since we don't support TLS
......@@ -655,10 +509,14 @@ extern "C" void PyInterpreterState_Clear(PyInterpreterState* interp) noexcept {
// Py_CLEAR(interp->codec_search_path);
// Py_CLEAR(interp->codec_search_cache);
// Py_CLEAR(interp->codec_error_registry);
// Py_CLEAR(interp->modules);
Py_CLEAR(interp->modules);
// Py_CLEAR(interp->modules_reloading);
// Py_CLEAR(interp->sysdict);
// Py_CLEAR(interp->builtins);
Py_CLEAR(interp->builtins);
}
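interp->modules and interp->builtins are now real references, so PyInterpreterState_Clear has to drop them. A small sketch of what Py_CLEAR does for those slots (not from this commit): the field is NULLed before the reference is released, so any code re-entered during the decref never sees a dangling pointer.

#include <Python.h>

static void clear_slot(PyObject** slot) {
    PyObject* tmp = *slot;
    *slot = NULL;          /* clear the field first */
    Py_XDECREF(tmp);       /* then drop the reference */
}
/* Py_CLEAR(interp->modules) behaves like clear_slot(&interp->modules). */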
extern "C" void PyThreadState_DeleteCurrent() noexcept {
Py_FatalError("unimplemented");
}
extern "C" void PyThreadState_Clear(PyThreadState* tstate) noexcept {
......@@ -684,5 +542,38 @@ extern "C" PyThreadState* PyThreadState_Next(PyThreadState* tstate) noexcept {
}
extern "C" void PyEval_AcquireThread(PyThreadState* tstate) noexcept {
RELEASE_ASSERT(tstate == &cur_thread_state, "");
endAllowThreads();
}
extern "C" void PyEval_ReleaseThread(PyThreadState* tstate) noexcept {
RELEASE_ASSERT(tstate == &cur_thread_state, "");
beginAllowThreads();
}
extern "C" PyThreadState* PyThreadState_Get(void) noexcept {
if (_PyThreadState_Current == NULL)
Py_FatalError("PyThreadState_Get: no current thread");
return _PyThreadState_Current;
}
extern "C" PyThreadState* PyEval_SaveThread(void) noexcept {
auto rtn = PyThreadState_GET();
assert(rtn);
beginAllowThreads();
return rtn;
}
extern "C" void PyEval_RestoreThread(PyThreadState* tstate) noexcept {
RELEASE_ASSERT(tstate == &cur_thread_state, "");
endAllowThreads();
}
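PyEval_SaveThread()/PyEval_RestoreThread() are the functions the Py_BEGIN_ALLOW_THREADS / Py_END_ALLOW_THREADS macros expand to; with them implemented here, extensions can release the GIL around blocking work in the usual way. A sketch of that standard C-API pattern (not from this commit):

#include <Python.h>

void do_blocking_work_with_gil_released(void) {
    PyThreadState* _save = PyEval_SaveThread();   /* releases the GIL */
    /* ... blocking I/O or long computation, no Python C API calls here ... */
    PyEval_RestoreThread(_save);                  /* reacquires the GIL */
}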
} // namespace threading
__thread PyThreadState cur_thread_state
= { NULL, &threading::interpreter_state, NULL, 0, 1, NULL, NULL, NULL, NULL, 0, NULL };
} // namespace pyston
......@@ -21,6 +21,8 @@
#include <ucontext.h>
#include <vector>
#include "Python.h"
#include "core/common.h"
#include "core/thread_utils.h"
......@@ -33,6 +35,8 @@ extern int sigprof_pending;
void _printStacktrace();
#endif
extern __thread PyThreadState cur_thread_state;
namespace threading {
// Whether or not a second thread was ever started:
......@@ -47,35 +51,10 @@ void finishMainThread();
bool isMainThread();
#ifndef THREADING_USE_GIL
#define THREADING_USE_GIL 1
#define THREADING_USE_GRWL 0
#endif
#define THREADING_SAFE_DATASTRUCTURES THREADING_USE_GRWL
#if THREADING_SAFE_DATASTRUCTURES
#define DS_DEFINE_MUTEX(name) pyston::threading::PthreadFastMutex name
#define DS_DECLARE_RWLOCK(name) extern pyston::threading::PthreadRWLock name
#define DS_DEFINE_RWLOCK(name) pyston::threading::PthreadRWLock name
#define DS_DEFINE_SPINLOCK(name) pyston::threading::PthreadSpinLock name
#else
#define DS_DEFINE_MUTEX(name) pyston::threading::NopLock name
#define DS_DECLARE_RWLOCK(name) extern pyston::threading::NopLock name
#define DS_DEFINE_RWLOCK(name) pyston::threading::NopLock name
#define DS_DEFINE_SPINLOCK(name) pyston::threading::NopLock name
#endif
void acquireGLRead();
void releaseGLRead();
void acquireGLWrite();
void releaseGLWrite();
void _allowGLReadPreemption();
#define GIL_CHECK_INTERVAL 1000
// Note: this doesn't need to be an atomic, since it should
// only be accessed by the thread that holds the gil:
extern int gil_check_count;
......@@ -104,26 +83,8 @@ extern "C" inline void allowGLReadPreemption() {
_allowGLReadPreemption();
}
// Note: promoteGL is free to drop the lock and then reacquire
void promoteGL();
void demoteGL();
// Helper macro for creating a RAII wrapper around two functions.
#define MAKE_REGION(name, start, end) \
class name { \
public: \
name() { start(); } \
~name() { end(); } \
};
MAKE_REGION(GLReadRegion, acquireGLRead, releaseGLRead);
MAKE_REGION(GLPromoteRegion, promoteGL, demoteGL);
// MAKE_REGION(GLReadReleaseRegion, releaseGLRead, acquireGLRead);
// MAKE_REGION(GLWriteReleaseRegion, releaseGLWrite, acquireGLWrite);
#undef MAKE_REGION
extern "C" void beginAllowThreads() noexcept;
extern "C" void endAllowThreads() noexcept;
......@@ -134,37 +95,6 @@ public:
};
#if THREADING_USE_GIL
inline void acquireGLRead() {
acquireGLWrite();
}
inline void releaseGLRead() {
releaseGLWrite();
}
inline void promoteGL() {
}
inline void demoteGL() {
}
#endif
#if !THREADING_USE_GIL && !THREADING_USE_GRWL
inline void acquireGLRead() {
}
inline void releaseGLRead() {
}
inline void acquireGLWrite() {
}
inline void releaseGLWrite() {
}
inline void promoteGL() {
}
inline void demoteGL() {
}
extern "C" inline void allowGLReadPreemption() __attribute__((visibility("default")));
extern "C" inline void allowGLReadPreemption() {
}
#endif
extern bool forgot_refs_via_fork;
} // namespace threading
......
......@@ -369,7 +369,6 @@ static int main(int argc, char** argv) noexcept {
const char* fn = NULL;
threading::registerMainThread();
threading::acquireGLRead();
Py_SetProgramName(argv[0]);
......@@ -540,11 +539,6 @@ static int main(int argc, char** argv) noexcept {
threading::finishMainThread();
// Acquire the GIL to make sure we stop the other threads, since we will tear down
// data structures they are potentially running on.
// Note: we will purposefully not release the GIL on exiting.
threading::promoteGL();
_t.split("Py_Finalize");
Py_Finalize();
......
......@@ -2376,6 +2376,7 @@ void setupBuiltins() {
builtins_module = createModule(autoDecref(boxString("__builtin__")), NULL,
"Built-in functions, exceptions, and other objects.\n\nNoteworthy: None is "
"the `nil' object; Ellipsis represents `...' in slices.");
PyThreadState_GET()->interp->builtins = incref(builtins_module->getAttrWrapper());
ellipsis_cls
= BoxedClass::create(type_cls, object_cls, 0, 0, sizeof(Box), false, "ellipsis", false, NULL, NULL, false);
......
......@@ -795,6 +795,8 @@ static int _check_and_flush(FILE* stream) {
void setupSys() {
sys_modules_dict = new BoxedDict();
PyThreadState_GET()->interp->modules = incref(sys_modules_dict);
constants.push_back(sys_modules_dict);
// This is ok to call here because we've already created the sys_modules_dict
......
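With interp->modules (set here) and interp->builtins (set in setupBuiltins above) populated at startup, extension code that reaches them through the interpreter state now finds real objects. A sketch of such an access (not from this commit):

#include <Python.h>

void peek_interp_state(void) {
    PyInterpreterState* interp = PyThreadState_GET()->interp;
    PyObject* modules  = interp->modules;   /* borrowed: the sys.modules dict */
    PyObject* builtins = interp->builtins;  /* borrowed: the __builtin__ namespace */
    (void)modules;
    (void)builtins;
}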
......@@ -1483,31 +1483,6 @@ extern "C" void PyEval_InitThreads(void) noexcept {
// nothing to do here
}
extern "C" void PyEval_AcquireThread(PyThreadState* tstate) noexcept {
Py_FatalError("Unimplemented");
}
extern "C" void PyEval_ReleaseThread(PyThreadState* tstate) noexcept {
Py_FatalError("Unimplemented");
}
extern "C" PyThreadState* PyThreadState_Get(void) noexcept {
if (_PyThreadState_Current == NULL)
Py_FatalError("PyThreadState_Get: no current thread");
return _PyThreadState_Current;
}
extern "C" PyThreadState* PyEval_SaveThread(void) noexcept {
beginAllowThreads();
return PyThreadState_GET();
}
extern "C" void PyEval_RestoreThread(PyThreadState* tstate) noexcept {
RELEASE_ASSERT(tstate == PyThreadState_GET(), "");
endAllowThreads();
}
extern "C" BORROWED(struct _frame*) PyEval_GetFrame(void) noexcept {
Box* frame = NULL;
try {
......
......@@ -139,8 +139,9 @@ extern "C" Box* import(int level, Box* from_imports, llvm::StringRef module_name
BoxedModule* importCExtension(BoxedString* full_name, const std::string& last_name, const std::string& path) {
void* handle = dlopen(path.c_str(), RTLD_NOW);
if (!handle) {
const char* s = dlerror();
// raiseExcHelper(ImportError, "%s", dlerror());
fprintf(stderr, "%s\n", dlerror());
fprintf(stderr, "%s\n", s);
exit(1);
}
assert(handle);
......
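A plausible motivation for capturing the message into a local first: dlerror() returns the pending error string once and then clears it, so it should be called a single time and the pointer reused. A minimal sketch of that pattern (not from this commit):

#include <dlfcn.h>
#include <stdio.h>

void report_dlopen_failure(const char* path) {
    void* handle = dlopen(path, RTLD_NOW);
    if (!handle) {
        const char* s = dlerror();   /* first call gets the real message */
        fprintf(stderr, "%s\n", s ? s : "unknown dlopen error");
    }
}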
......@@ -3967,8 +3967,6 @@ static StatCounter slowpath_pickversion("slowpath_pickversion");
template <ExceptionStyle S>
static CompiledFunction* pickVersion(FunctionMetadata* f, int num_output_args, Box* oarg1, Box* oarg2, Box* oarg3,
Box** oargs) {
LOCK_REGION(codegen_rwlock.asWrite());
// if always_use_version is set use it even if the exception style does not match.
// But prefer using the correct style if both are available
if (f->always_use_version.get(S))
......
# Note: the expected counts here are set to match the CI, and I can't reproduce them locally
import os, sys, subprocess, shutil
sys.path.append(os.path.dirname(__file__) + "/../lib")
from test_helper import create_virtenv, run_test
ENV_NAME = "cffi17_test_env_" + os.path.basename(sys.executable)
SRC_DIR = os.path.abspath(os.path.join(ENV_NAME, "src"))
PYTHON_EXE = os.path.abspath(os.path.join(ENV_NAME, "bin", "python"))
PYTEST_EXE = os.path.abspath(os.path.join(ENV_NAME, "bin", "py.test"))
def install_and_test_cffi():
shutil.rmtree(SRC_DIR, ignore_errors=True)
os.makedirs(SRC_DIR)
url = "https://pypi.python.org/packages/83/3c/00b553fd05ae32f27b3637f705c413c4ce71290aa9b4c4764df694e906d9/cffi-1.7.0.tar.gz#md5=34122a545060cee58bab88feab57006d"
subprocess.check_call(["wget", url], cwd=SRC_DIR)
subprocess.check_call(["tar", "-zxf", "cffi-1.7.0.tar.gz"], cwd=SRC_DIR)
CFFI_DIR = os.path.abspath(os.path.join(SRC_DIR, "cffi-1.7.0"))
subprocess.check_call([PYTHON_EXE, "setup.py", "install"], cwd=CFFI_DIR)
# looks like clang 3.5 causes more errors like: 214 != -42 doing casts
if os.environ.has_key("CC") and "clang" in os.environ["CC"]:
expected = [{'xfailed': 4, 'failed': 3, 'skipped': 10, 'passed': 539}]
else:
expected = [{'xfailed': 4, 'failed': 2, 'skipped': 10, 'passed': 540}]
# Ideally we would run all the tests, but they take ~10min to run.
# dir_to_test = "."
# I just picked a subdirectory; I don't really know what it's testing.
dir_to_test = os.path.join(CFFI_DIR, "testing", "cffi1")
run_test([PYTEST_EXE, dir_to_test], cwd=CFFI_DIR, expected=expected)
create_virtenv(ENV_NAME, ["pytest==2.8.7", "py==1.4.31", "pycparser==2.14"], force_create = True)
install_and_test_cffi()
# Note: the expected counts here are set to match the CI, and I can't re[rpduce them locally
# Note: the expected counts here are set to match the CI, and I can't reproduce them locally
import os, sys, subprocess, shutil
sys.path.append(os.path.dirname(__file__) + "/../lib")
......@@ -29,6 +29,6 @@ def install_and_test_cffi():
else:
expected = [{ "failed": 11, "passed": 1668, "skipped": 73, "xfailed": 4}]
run_test([PYTEST_EXE], cwd=CFFI_DIR, expected=expected)
create_virtenv(ENV_NAME, ["pytest==2.8.7", "py==1.4.31", "pycparser==2.14"], force_create = True)
install_and_test_cffi()
......@@ -66,6 +66,8 @@ def run_test(cmd, cwd, expected, env = None):
print
print "Return code:", errcode
assert errcode in (0, 1), "\n\n%s\nTest process crashed" % output
if expected == result:
print "Received expected output"
else:
......
......@@ -540,3 +540,4 @@ False
set([1L]) set([1]) set([1]) set([1L])
set([1])
4
unhashable type: 'dict'
......@@ -274,3 +274,8 @@ from api_test import set_size
s = set([1, 2, 3, 4])
print(set_size(s))
try:
{{}}
except Exception as e:
print e
......@@ -25,12 +25,10 @@ namespace pyston {
class PystonTestEnvironment : public testing::Environment {
void SetUp() override {
threading::registerMainThread();
threading::acquireGLRead();
}
void TearDown() override {
threading::releaseGLRead();
// threading::releaseGLRead();
}
};
......