Commit 877bab37 authored by Kirill Smelkov

.

parent badfddf3
@@ -50,7 +50,7 @@ static PyObject *pybuf_str;

 /* whether to pass old buffer instead of memoryview to .loadblk() / .storeblk()
  *
- * on python2 < 2.7.10 memoreview object is not accepted in a lot of
+ * on python2 < 2.7.10 memoryview object is not accepted in a lot of
  * places, see e.g. http://bugs.python.org/issue22113 for struct.pack_into()
  *
  * also on python 2.7.10, even latest numpy does not accept memoryview as
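For context, the choice this flag controls looks roughly like the sketch below. The wrapper name pybuf_new and the BIGFILE_USE_OLD_BUFFER switch are hypothetical stand-ins, but PyBuffer_FromReadWriteMemory, PyBuffer_FillInfo and PyMemoryView_FromBuffer are the real CPython C-API calls involved:

    #include <Python.h>

    /* hypothetical sketch: wrap raw block memory for .loadblk()/.storeblk().
     * BIGFILE_USE_OLD_BUFFER stands in for whatever condition selects the
     * py2 old-buffer fallback in the real code. */
    static PyObject *pybuf_new(void *buf, Py_ssize_t len)
    {
    #if PY_MAJOR_VERSION < 3 && defined(BIGFILE_USE_OLD_BUFFER)
        /* old-style buffer: accepted by struct.pack_into() on py2 < 2.7.10 */
        return PyBuffer_FromReadWriteMemory(buf, len);
    #else
        Py_buffer view;
        if (PyBuffer_FillInfo(&view, NULL, buf, len, /*readonly=*/0, PyBUF_WRITABLE) < 0)
            return NULL;
        return PyMemoryView_FromBuffer(&view);   /* memoryview over raw memory */
    #endif
    }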
@@ -171,7 +171,7 @@ void XPyObject_PrintReferrers(PyObject *obj, FILE *fp);
 static int XPyFrame_IsCalleeOf(PyFrameObject *f, PyFrameObject *top);

 /* buffer utilities: unpin buffer from its memory - make it zero-length
- * pointing to NULL but staying a vailid python object */
+ * pointing to NULL but staying a valid python object */
 #if PY_MAJOR_VERSION < 3
 void XPyBufferObject_Unpin(PyBufferObject *bufo);
 #endif
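A sketch of what such unpinning can look like, assuming the PyBufferObject layout from Python 2's Objects/bufferobject.c (the header keeps the type opaque, so users replicate the layout; the actual helper in this codebase may differ in detail):

    #include <Python.h>

    /* layout as in py2's Objects/bufferobject.c (assumption: not exported) */
    typedef struct {
        PyObject_HEAD
        PyObject *b_base;       /* object the buffer was created from, or NULL */
        void *b_ptr;            /* pointer into that object's memory */
        Py_ssize_t b_size;
        Py_ssize_t b_offset;
        int b_readonly;
        long b_hash;
    } PyBufferObject;

    void XPyBufferObject_Unpin(PyBufferObject *bufo)
    {
        bufo->b_ptr  = NULL;    /* detach from the underlying memory ...       */
        bufo->b_size = 0;       /* ... and become zero-length ...              */
        Py_CLEAR(bufo->b_base); /* ... dropping the reference that pinned it   */
    }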
@@ -348,7 +348,7 @@ static /*const*/ PyMethodDef pyvma_methods[] = {
     {NULL}
 };

-// XXX vvv better switch on various possibilities and find approptiate type
+// XXX vvv better switch on various possibilities and find appropriate type
 // (e.g. on X32 uintptr_t will be 4 while long will be 8)
 const int _ =
     BUILD_ASSERT_OR_ZERO(sizeof(uintptr_t) == sizeof(unsigned long));
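BUILD_ASSERT_OR_ZERO is a compile-time assertion usable in expression context; a typical definition (this is CCAN's formulation) makes a failed condition produce a negative array size, i.e. a compile error:

    /* evaluates to 0 if cond holds; fails to compile otherwise,
     * because char[1 - 2*1] = char[-1] is invalid */
    #define BUILD_ASSERT_OR_ZERO(cond) \
        (sizeof(char [1 - 2*!(cond)]) - 1)

Since it evaluates to 0, it can sit on the right-hand side of an initializer like the const int _ above.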
@@ -458,7 +458,7 @@ PyFunc(pyfileh_isdirty, "isdirty() - are there any changes to fileh memory at al
     if (!PyArg_ParseTuple(args, ""))
         return NULL;

-    /* NOTE not strictly neccessary to virt_lock() for checking ->dirty_pages not empty */
+    /* NOTE not strictly necessary to virt_lock() for checking ->dirty_pages not empty */
     return PyBool_FromLong(!list_empty(&pyfileh->dirty_pages));
 }

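list_empty here is the usual intrusive-list emptiness check; a kernel-style sketch (the list.h vendored in this codebase may differ in naming):

    struct list_head {
        struct list_head *next, *prev;
    };

    /* an empty list is a head that points back at itself */
    static inline int list_empty(const struct list_head *head)
    {
        return head->next == head;
    }

The check is a single aligned pointer read, which is why the NOTE above says taking virt_lock() is not strictly necessary just to test for emptiness.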
@@ -570,14 +570,14 @@ static int pybigfile_loadblk(BigFile *file, blk_t blk, void *buf)
      * as the result - _we_ are the thread which holds the GIL and can call
      * python capi. */
     // XXX assert PyGILState_GetThisThreadState() != NULL
-    //     (i.e. pyton already knows this thread?)
+    //     (i.e. python already knows this thread?)
     gstate = PyGILState_Ensure();

-    /* TODO write why we setup completly new thread state which looks like
+    /* TODO write why we setup completely new thread state which looks like
      * switching threads for python but stays at the same OS thread
      *
      * a) do not change current thread state in any way;
-     * b) to completly clear ts after loadblk (ex. for pybuf->refcnf to go to exactly 1)
+     * b) to completely clear ts after loadblk (ex. for pybuf->refcnf to go to exactly 1)
      */

     /* in python thread state - save what we'll possibly override
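The PyGILState API used above is the standard way to call into Python from a thread the interpreter may not know yet; a minimal sketch (the function name is hypothetical, and the fresh-thread-state dance the TODO describes is deliberately not reproduced here):

    #include <Python.h>

    /* run a Python callable from an arbitrary OS thread (sketch) */
    static int call_from_any_thread(PyObject *callable)
    {
        /* registers this OS thread with the interpreter if needed, takes GIL */
        PyGILState_STATE gstate = PyGILState_Ensure();

        PyObject *ret = PyObject_CallObject(callable, NULL);
        if (!ret)
            PyErr_Print();          /* report and clear the exception */
        int ok = (ret != NULL);
        Py_XDECREF(ret);

        PyGILState_Release(gstate); /* restore previous GIL/thread state */
        return ok ? 0 : -1;
    }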
@@ -690,13 +690,13 @@ out:
      *   come here with gc.collecting=1
      *
      * NOTE also: while collecting garbage even more garbage can be
-     *   created due to arbitrary code run from undel __del__ of released
+     *   created due to arbitrary code run from under __del__ of released
      *   objects and weakref callbacks. This way after here GC collect
      *   even a single allocation could trigger GC, and thus arbitrary
      *   python code run, again */
     PyGC_Collect();

-    /* garbage collection could result in running arbitraty code
+    /* garbage collection could result in running arbitrary code
      * because of finalizers. Print problems (if any) and make sure
      * once again exception state is clear */
     if (PyErr_Occurred())
...
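The step the @@ -690 hunk above describes - collect, then re-check the exception state because finalizers may have raised - boils down to a pattern like this (sketch; the wrapper name is hypothetical):

    #include <Python.h>

    /* force a GC pass and make sure no exception leaks out of it */
    static void xgc_collect(void)
    {
        PyGC_Collect();         /* __del__ / weakref callbacks may run arbitrary code */
        if (PyErr_Occurred())
            PyErr_PrintEx(0);   /* report the problem and clear the error indicator */
    }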
@@ -59,7 +59,7 @@ static int __ram_reclaim(RAM *ram);

 /* global lock which protects manipulating virtmem data structures
  *
  * NOTE not scalable, but this is temporary solution - as we are going to move
- * memory managment back into the kernel, where it is done properly. */
+ * memory management back into the kernel, where it is done properly. */
 static pthread_mutex_t virtmem_lock = PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP;
 static const VirtGilHooks *virtmem_gilhooks;
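PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP (a glibc extension) gives a mutex that reports EDEADLK on recursive locking instead of silently deadlocking. Ignoring the GIL interplay handled via virtmem_gilhooks, virt_lock/virt_unlock can be pictured as (sketch):

    #define _GNU_SOURCE     /* for the _NP initializer */
    #include <pthread.h>
    #include <stdlib.h>

    static pthread_mutex_t virtmem_lock = PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP;

    static void virt_lock(void)
    {
        if (pthread_mutex_lock(&virtmem_lock))     /* EDEADLK = locked twice */
            abort();
    }

    static void virt_unlock(void)
    {
        if (pthread_mutex_unlock(&virtmem_lock))   /* EPERM = not the owner */
            abort();
    }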
@@ -243,7 +243,7 @@ int fileh_mmap(VMA *vma, BigFileH *fileh, pgoff_t pgoffset, pgoff_t pglen)
     if (!vma->page_ismappedv)
         goto fail;

-    // XXX hardcoded - allow user choise?
+    // XXX hardcoded - allow user choice?
     vma->mmap_overlay = (fops->mmap_setup_read != NULL);

     if (vma->mmap_overlay) {
@@ -790,7 +790,7 @@ VMFaultResult vma_on_pagefault(VMA *vma, uintptr_t addr, int write)
     /* (5b) page is currently being loaded by another thread - wait for load to complete
      *
-     * NOTE a page is protected from being concurently loaded by two threads at
+     * NOTE a page is protected from being concurrently loaded by two threads at
      * the same time via:
      *
      *   - virtmem lock - we get/put pages from fileh->pagemap only under it
@@ -805,7 +805,7 @@ VMFaultResult vma_on_pagefault(VMA *vma, uintptr_t addr, int write)
         void *gilstate;
         virt_unlock();
         gilstate = virt_gil_ensure_unlocked();
-        usleep(10000);  // XXX with 1000 uslepp still busywaits
+        usleep(10000);  // XXX with 1000 usleep still busywaits
         virt_gil_retake_if_waslocked(gilstate);
         virt_lock();
         return VM_RETRY;
@@ -940,7 +940,7 @@ void page_decref(Page *page)
 void *page_mmap(Page *page, void *addr, int prot)
 {
     RAMH *ramh = page->ramh;

-    // XXX better call ramh_mmap_page() without tinkering wih ramh_ops?
+    // XXX better call ramh_mmap_page() without tinkering with ramh_ops?
     return ramh->ramh_ops->mmap_page(ramh, page->ramh_pgoffset, addr, prot);
 }
...
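The XXX note in the last hunk suggests hiding the ops-table indirection behind a helper; a sketch of what such a ramh_mmap_page() could look like, using only the types already visible in this diff (the helper itself is hypothetical):

    /* hypothetical helper: mmap page at ramh_pgoffset without callers
     * having to reach through ramh->ramh_ops themselves */
    static inline void *ramh_mmap_page(RAMH *ramh, pgoff_t ramh_pgoffset,
                                       void *addr, int prot)
    {
        return ramh->ramh_ops->mmap_page(ramh, ramh_pgoffset, addr, prot);
    }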