Commit 9f829258 authored by Kirill Smelkov

.

parent aae0714e
@@ -117,7 +117,7 @@
 //
 // XXX locking -> explain atMu + slaves and refer to "Locking" in wcfs.go
 //
-// Conn.atMu > Conn.filehMu > FileH.mu
+// Conn.atMu > Conn.filehMu > FileH.mmapMu
 //
 // Several locks are RWMutex instead of just Mutex not only to allow more
 // concurrency, but, in the first place for correctness: pinner thread being
@@ -150,7 +150,7 @@
 //
 // - wconn.atMu.R
 // - wconn.filehMu.R
-// - fileh.mu (R:.mmaps W:.pinned)
+// - fileh.mmapMu (R:.mmaps W:.pinned)
 //
 // - virt_lock
 //
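
The acquisition order listed in the hunk above can be sketched with standard C++ primitives for readers following the locking rules. Everything below is an illustrative stand-in (std::shared_mutex in place of sync::RWMutex, invented struct names), not code from this repository:

    #include <mutex>
    #include <shared_mutex>

    // Stand-ins for the real objects; only the lock members matter here.
    struct ConnSketch  { std::shared_mutex atMu, filehMu; };
    struct FileHSketch { std::mutex mmapMu; };

    void pinner_step(ConnSketch& wconn, FileHSketch& fileh) {
        std::shared_lock at(wconn.atMu);      // wconn.atMu.R
        std::shared_lock fh(wconn.filehMu);   // wconn.filehMu.R
        std::lock_guard  mm(fileh.mmapMu);    // fileh.mmapMu (R:.mmaps W:.pinned)
        // ... pin work; virt_lock comes last in the documented order ...
    }
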
@@ -442,12 +442,12 @@ error _Conn::__pin1(PinReq *req) {
     // Mapping.remmap_blk and Mapping.unmap under virt_lock locked. In those
     // functions the order of locks is
     //
-    // virt_lock, wconn.atMu.R, fileh.mu
+    // virt_lock, wconn.atMu.R, fileh.mmapMu
     //
     // So if we take virt_lock right around mmap._remmapblk(), the order of
     // locks in pinner would be
     //
-    // wconn.atMu.R, wconn.filehMu.R, fileh.mu, virt_lock
+    // wconn.atMu.R, wconn.filehMu.R, fileh.mmapMu, virt_lock
    //
     // which means there is AB-BA deadlock possibility.
     //
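
The AB-BA possibility mentioned in this comment is the classic two-lock deadlock: one thread takes A then B while another takes B then A, and each can end up waiting on the lock the other holds. A minimal self-contained illustration (the mutex names are placeholders for, e.g., fileh.mmapMu and virt_lock; this is not wcfs code):

    #include <mutex>
    #include <thread>

    std::mutex A, B;

    void pinner_like() { std::scoped_lock a(A); std::scoped_lock b(B); }  // A then B
    void remmap_like() { std::scoped_lock b(B); std::scoped_lock a(A); }  // B then A

    int main() {
        std::thread t1(pinner_like), t2(remmap_like);
        t1.join(); t2.join();   // with unlucky interleaving neither join ever returns
        return 0;
    }

Fixing one global acquisition order for all locks, as documented in the header comment of this file, is the standard way to rule this pattern out.
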
@@ -489,9 +489,9 @@ error _Conn::__pin1(PinReq *req) {
     // our view to requested @at, and "closing" - due to other clients
     // accessing wcfs/head/f simultaneously.
-    f->_mu.lock();
+    f->_mmapMu.lock();
     defer([&]() {
-        f->_mu.unlock();
+        f->_mmapMu.unlock();
     });
     for (auto mmap : f->_mmaps) {   // TODO use ↑blk_start for binary search
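
The TODO on the loop above hints that, because f->_mmaps is kept ordered by ↑blk_start, the linear scan could become a binary search. A hypothetical sketch of that idea with STL types (MappingSketch and its fields are invented for illustration, not the real wcfs types):

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    struct MappingSketch { int64_t blk_start, blk_stop; };   // covers [blk_start, blk_stop)

    // Return the mapping that covers blk, or nullptr if no mapping does.
    MappingSketch* find_mapping(std::vector<MappingSketch>& mmaps, int64_t blk) {
        // first element whose blk_start is strictly greater than blk ...
        auto it = std::upper_bound(mmaps.begin(), mmaps.end(), blk,
                      [](int64_t b, const MappingSketch& m) { return b < m.blk_start; });
        if (it == mmaps.begin())
            return nullptr;               // every mapping starts after blk
        --it;                             // ... so the candidate is the one just before it
        return blk < it->blk_stop ? &*it : nullptr;
    }
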
@@ -635,7 +635,7 @@ error _Conn::resync(zodb::Tid at) {
         return E(fmt::errorf("wcfs bug: head/file size %% blksize != 0"));
     // replace zero regions in f mappings in accordance to adjusted f._headfsize.
-    // NOTE it is ok to access f._mmaps without locking f._mu because we hold wconn.atMu.W
+    // NOTE it is ok to access f._mmaps without locking f._mmapMu because we hold wconn.atMu.W
     for (auto mmap : f->_mmaps) {
         //printf("  resync -> %s: unzero [%lu:%lu)", v(at), f->_headfsize/f->blksize, headfsize/f->blksize);
         uint8_t *mem_unzero_start = min(mmap->mem_stop,
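
The computation that begins on the last line of this hunk (cut off by the diff context) is, in essence, intersecting the newly valid file range with each mapping's memory. A rough, hypothetical illustration of that kind of clamping, not the actual wcfs arithmetic:

    #include <algorithm>
    #include <cstdint>

    struct Span { uint8_t *start, *stop; };    // half-open [start, stop)

    // Clamp [lo, hi) to the mapping's [mem_start, mem_stop); an empty span means
    // this mapping is untouched by the file-size change.
    Span clamp_to_mapping(uint8_t* mem_start, uint8_t* mem_stop,
                          uint8_t* lo, uint8_t* hi) {
        uint8_t* s = std::max(mem_start, lo);
        uint8_t* e = std::min(mem_stop,  hi);
        return s < e ? Span{s, e} : Span{nullptr, nullptr};
    }
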
@@ -789,7 +789,7 @@ retry:
     });
     // do the actuall open.
-    // we hold only wconn.atMu.R, but niether wconn.filehMu, nor f.mu .
+    // we hold only wconn.atMu.R, but niether wconn.filehMu, nor f.mmapMu .
     f->_openErr = f->_open();
     if (f->_openErr != nil)
         return make_pair(nil, E(f->_openErr));
@@ -815,7 +815,7 @@ retry:
 // Called with:
 // - wconn.atMu held
 // - wconn.filehMu not locked
-// - f.mu not locked
+// - f.mmapMu not locked
 error _FileH::_open() {
     _FileH* f = this;
     Conn wconn = f->wconn;
@@ -843,7 +843,7 @@ error _FileH::_open() {
             v(f->_headf->name()), f->_headfsize, f->blksize);
     // start watching f
-    // NOTE we are _not_ holding wconn.filehMu nor f.mu - only wconn.atMu to rely on wconn.at being stable.
+    // NOTE we are _not_ holding wconn.filehMu nor f.mmapMu - only wconn.atMu to rely on wconn.at being stable.
     // NOTE wcfs will reply "ok" only after wcfs/head/at ≥ wconn.at
     string ack;
     tie(ack, err) = wconn->_wlink->sendReq(context::background(),
@@ -941,9 +941,9 @@ error _FileH::_closeLocked(bool force) {
     reterr1(fileh._headf->close());
     // change all fileh.mmaps to cause EFAULT on any access after fileh.close
-    fileh._mu.lock();
+    fileh._mmapMu.lock();
     defer([&]() {
-        fileh._mu.unlock();
+        fileh._mmapMu.unlock();
     });
     for (auto mmap : fileh._mmaps) {
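
For context on the "cause EFAULT" comment above: a conventional way to make an already-mapped region fault on any further access is to replace it in place with a PROT_NONE mapping. This is only an illustration of the general technique, not necessarily how wcfs implements it internally:

    #include <sys/mman.h>
    #include <cstddef>

    // After this succeeds, loads/stores into [addr, addr+len) raise SIGSEGV and
    // system calls that touch the range fail with EFAULT.
    int make_region_fault(void* addr, size_t len) {
        void* p = mmap(addr, len, PROT_NONE,
                       MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        return p == MAP_FAILED ? -1 : 0;
    }
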
@@ -969,9 +969,9 @@ pair<Mapping, error> _FileH::mmap(int64_t blk_start, int64_t blk_len, VMA *vma)
     // NOTE virtmem lock is held by virtmem caller
     // XXX locking ok?
     f.wconn->_atMu.RLock();  // e.g. f._headfsize
-    f._mu.lock();            // f._pinned, f._mmaps
+    f._mmapMu.lock();        // f._pinned, f._mmaps
     defer([&]() {
-        f._mu.unlock();
+        f._mmapMu.unlock();
         f.wconn->_atMu.RUnlock();
     });
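
defer([&]{...}) in this code behaves like Go's defer: the lambda runs when the enclosing scope exits, which is what guarantees the unlocks above happen on every return path. A minimal stand-alone equivalent, illustrative only and not the actual helper wcfs uses:

    #include <functional>
    #include <utility>

    struct Defer {
        std::function<void()> fn;
        explicit Defer(std::function<void()> f) : fn(std::move(f)) {}
        ~Defer() { if (fn) fn(); }                 // runs at scope exit
        Defer(const Defer&) = delete;
        Defer& operator=(const Defer&) = delete;
    };

    // usage:
    //   mu.lock();
    //   Defer d([&]{ mu.unlock(); });   // unlock happens even on early return or throw
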
@@ -1066,9 +1066,9 @@ error _Mapping::unmap() {
     // NOTE virtmem lock is held by virtmem caller
     f->wconn->_atMu.RLock();
-    f->_mu.lock();  // f._mmaps
+    f->_mmapMu.lock();
     defer([&]() {
-        f->_mu.unlock();
+        f->_mmapMu.unlock();
         f->wconn->_atMu.RUnlock();
     });
@@ -1091,7 +1091,7 @@ error _Mapping::unmap() {
     // XXX clear other fields?
     // XXX do it first? (to avoid pinner going through f.mmaps and hitting unmapped memory)
-    //     -> no need: both pinner and unmap lock on f.mu
+    //     -> no need: both pinner and unmap lock on f.mmapMu
     //f->_mmaps.remove(mmap);
     f->_mmaps.erase(
         std::remove(f->_mmaps.begin(), f->_mmaps.end(), mmap),
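
The erase + std::remove pair above is the standard erase–remove idiom; std::vector has no member remove(), which is likely why the list-style call is left commented out. Shown complete on a plain vector (illustrative):

    #include <algorithm>
    #include <vector>

    void drop(std::vector<int>& v, int x) {
        // std::remove shifts the kept elements to the front and returns the new
        // logical end; erase then trims the leftover tail.
        v.erase(std::remove(v.begin(), v.end(), x), v.end());
    }
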
@@ -1107,7 +1107,7 @@ error _Mapping::unmap() {
 //
 // The following locks must be held by caller:
 // - f.wconn.atMu
-// - f._mu XXX not needed? (f._mmaps and f._pinned are not used)
+// - f._mmapMu XXX not needed? (f._mmaps and f._pinned are not used)
 error _Mapping::_remmapblk(int64_t blk, zodb::Tid at) {
     // XXX must not be called after Mapping is switched to efault?
@@ -1172,9 +1172,9 @@ error _Mapping::remmap_blk(int64_t blk) {
     // NOTE virtmem lock is held by virtmem caller
     f->wconn->_atMu.RLock();
-    f->_mu.lock();
+    f->_mmapMu.lock();
     defer([&]() {
-        f->_mu.unlock();
+        f->_mmapMu.unlock();
         f->wconn->_atMu.RUnlock();
     });
...
@@ -246,7 +246,7 @@ struct _FileH : object {
     // protected by .wconn._atMu
     off_t _headfsize;
-    sync::Mutex _mu;      // atMu.W | atMu.R + _mu    XXX -> mmapMu ?
+    sync::Mutex _mmapMu;  // atMu.W | atMu.R + _mmapMu
     dict<int64_t, zodb::Tid> _pinned;  // {} blk -> rev that wcfs already sent us for this file
     vector<Mapping> _mmaps;            // []Mapping ↑blk_start mappings of this file
...
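
The header hunk above, restated with plain STL containers to make the field comments concrete (types and names here are rough stand-ins, not the dict/vector templates the codebase actually uses):

    #include <cstdint>
    #include <map>
    #include <vector>

    using Tid = uint64_t;                        // stand-in for zodb::Tid
    struct MappingSketch { int64_t blk_start; }; // the real Mapping carries much more

    struct FileHSketch {
        std::map<int64_t, Tid>     pinned;       // blk -> rev wcfs already sent us for this file
        std::vector<MappingSketch> mmaps;        // kept sorted by ↑blk_start
    };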