Kirill Smelkov / wendelin.core / Commits / f447d0d1

Commit f447d0d1 authored Oct 08, 2018 by Kirill Smelkov
.
parent f6e10c9d
Showing 1 changed file with 29 additions and 26 deletions
wcfs/wcfs.go  +29  -26
@@ -253,12 +253,12 @@ package main
 import (
 	"context"
 	"flag"
+	"fmt"
-	"log"
+	stdlog "log"
 	"os"
 	"sync"
 	"syscall"
 
+	log "github.com/golang/glog"
 	"golang.org/x/sync/errgroup"
 	"lab.nexedi.com/kirr/go123/xcontext"
@@ -321,7 +321,7 @@ type BigFileData struct {
 	bigfile *BigFile
 
 	// inflight loadings of ZBigFile from ZODB.
-	// successfull load results are kept here until blkdata is put to OS pagecache.
+	// successfull load results are kept here until blkdata is put into OS pagecache.
 	loadMu  sync.Mutex
 	loading map[int64]*blkLoadState // #blk -> {... blkdata}
 }
@@ -347,7 +347,7 @@ type blkLoadState struct {
 func (bfroot *BigFileRoot) Mkdir(name string, mode uint32, fctx *fuse.Context) (_ *nodefs.Inode, status fuse.Status) {
 	oid, err := zodb.ParseOid(name)
 	if err != nil {
-		log.Printf("/bigfile: mkdir %q: not-oid", name)
+		log.Warningf("/bigfile: mkdir %q: not-oid", name)
 		return nil, fuse.EINVAL
 	}
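The log-level changes in this and the following hunks apply one convention: invalid client input is logged as a warning and answered with EINVAL, while internal failures are logged as errors and answered with EIO. A minimal standalone sketch of that convention (not part of the commit; it assumes the glog and go-fuse packages that wcfs.go already uses):

package sketch

import (
	log "github.com/golang/glog"

	"github.com/hanwen/go-fuse/fuse"
)

// replyBadInput is how client mistakes are reported: warn and return EINVAL.
func replyBadInput(what string) fuse.Status {
	log.Warningf("/bigfile: %s", what)
	return fuse.EINVAL
}

// replyInternalError is how our own failures are reported: error and return EIO.
func replyInternalError(err error) fuse.Status {
	log.Errorf("/bigfile: %s", err)
	return fuse.EIO
}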
@@ -382,7 +382,7 @@ func (bfroot *BigFileRoot) Mkdir(name string, mode uint32, fctx *fuse.Context) (
 	zdb := zodb.NewDB(bfroot.zstor)
 	zconn, err := zdb.Open(ctx, &zodb.ConnOptions{})	// XXX .NoSync=true ?
 	if err != nil {
-		log.Printf("/bigfile: mkdir %q: %s", name, err)
+		log.Errorf("/bigfile: mkdir %q: %s", name, err)
 		return nil, fuse.EIO
 	}
@@ -394,21 +394,21 @@ func (bfroot *BigFileRoot) Mkdir(name string, mode uint32, fctx *fuse.Context) (
 		case *zodb.NoDataError:
 			return nil, fuse.EINVAL // XXX what to do if it was existing and got deleted?
 		default:
-			log.Printf("/bigfile: mkdir %q: %s", name, err)
+			log.Errorf("/bigfile: mkdir %q: %s", name, err)
 			return nil, fuse.EIO
 		}
 	}
 
 	zbf, ok := xzbf.(*ZBigFile)
 	if !ok {
-		log.Printf("/bigfile: mkdir %q: %s is not a ZBigFile", name, typeOf(xzbf))
+		log.Warningf("/bigfile: mkdir %q: %s is not a ZBigFile", name, typeOf(xzbf))
 		return nil, fuse.EINVAL
 	}
 
 	// acticate ZBigFile and keep it this way
 	err = zbf.PActivate(ctx)
 	if err != nil {
-		log.Printf("/bigfile: mkdir %q: %s", name, err)
+		log.Errorf("/bigfile: mkdir %q: %s", name, err)
 		return nil, fuse.EIO
 	}
 
 	defer func() {
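The PActivate(ctx) / defer pair above follows the usual activate-then-deactivate discipline for persistent objects. A sketch of that discipline (not from the commit): PActivate is the call visible in the hunk, PDeactivate is assumed to be its counterpart, and the interface below is a hypothetical stand-in rather than the real zodb type.

package sketch

import "context"

// activator is a hypothetical stand-in for a persistent object such as zbf.
type activator interface {
	PActivate(ctx context.Context) error // bring the object state into RAM
	PDeactivate()                        // let the state be evicted again (assumed counterpart)
}

// withActivated runs use() with obj activated, and always deactivates it afterwards.
func withActivated(ctx context.Context, obj activator, use func() error) error {
	if err := obj.PActivate(ctx); err != nil {
		return err
	}
	defer obj.PDeactivate()
	return use()
}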
@@ -495,12 +495,13 @@ func (bfdata *BigFileData) Read(_ nodefs.File, dest []byte, off int64, fctx *fus
 	defer cancel()
 
 	// widen read request to be aligned with blksize granularity
+	// (we can load only whole ZBlk* blocks)
 	end := off + int64(len(dest))	// XXX overflow?
 	aoff := off - (off % zbf.blksize)
 	aend := end + (zbf.blksize - (end % zbf.blksize))
 	dest = make([]byte, aend-aoff)
 
-	// load all block(s) in parallel
+	// read/load all block(s) in parallel
 	wg, ctx := errgroup.WithContext(ctx)
 	for blkoff := aoff; blkoff < aend; blkoff += zbf.blksize {
 		blkoff := blkoff
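The new comment explains why the request is widened: only whole ZBlk* blocks can be loaded. A standalone sketch of the same alignment arithmetic (not from the commit), with a worked example: for blksize=4, a read at off=5 of length 10 ends at 15 and is widened to [4, 16), i.e. three whole blocks.

package sketch

// alignRead widens a read of [off, off+length) to whole blocks of size blksize,
// mirroring the aoff/aend computation in the hunk above.
func alignRead(off, length, blksize int64) (aoff, aend int64) {
	end := off + length
	aoff = off - (off % blksize)             // round start down to a block boundary
	aend = end + (blksize - (end % blksize)) // round end up to a block boundary
	return aoff, aend
}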
@@ -512,7 +513,7 @@ func (bfdata *BigFileData) Read(_ nodefs.File, dest []byte, off int64, fctx *fus
 	err := wg.Wait()
 	if err != nil {
-		log.Printf("%s", err)	// XXX + /bigfile/XXX: read [a,b): -> ...
+		log.Errorf("%s", err)	// XXX + /bigfile/XXX: read [a,b): -> ...
 		return nil, fuse.EIO
 	}
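wg here is the errgroup started by the loop in the previous hunk: one goroutine per block, with Wait returning the first error, which is now reported through glog. A generic sketch of that fan-out pattern (not from the commit; the load callback is hypothetical):

package sketch

import (
	"context"

	"golang.org/x/sync/errgroup"
)

// loadAll runs load for every block of [aoff, aend) in parallel and returns
// the first error, mirroring the errgroup usage in Read above.
func loadAll(ctx context.Context, aoff, aend, blksize int64,
	load func(ctx context.Context, blkoff int64) error) error {
	wg, ctx := errgroup.WithContext(ctx)
	for blkoff := aoff; blkoff < aend; blkoff += blksize {
		blkoff := blkoff // capture a per-iteration copy, as blkoff := blkoff does above
		wg.Go(func() error {
			return load(ctx, blkoff)
		})
	}
	return wg.Wait() // first error cancels ctx for the remaining goroutines
}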
@@ -571,14 +572,14 @@ func (bfdata *BigFileData) readBlk(ctx context.Context, blk int64, dest []byte)
 	// store to kernel pagecache whole block that we've just loaded from database.
 	// This way, even if the user currently requested to read only small portion from it,
 	// it will prevent next e.g. consecutive user read request to again hit
-	// the DB, and instead will be served by kernel from its cache.
+	// the DB, and instead will be served by kernel from its pagecache.
 	//
 	// We cannot do this directly from reading goroutine - while reading
 	// kernel FUSE is holding corresponging page in pagecache locked, and if
-	// we would try to update that same page in the cache it would result
+	// we would try to update that same page in pagecache it would result
 	// in deadlock inside kernel.
 	//
-	// .loading cleanup is done once we are finished with putting the data into OS cache.
+	// .loading cleanup is done once we are finished with putting the data into OS pagecache.
 	// If we do it earlier - a simultaneous read covered by the same block could result
 	// into missing both kernel pagecache (if not yet updated) and empty .loading[blk],
 	// and thus would trigger DB access again.
@@ -592,14 +593,15 @@ func (bfdata *BigFileData) readBlk(ctx context.Context, blk int64, dest []byte)
 		delete(bfdata.loading, blk)
 		bfdata.loadMu.Unlock()
 
-		// XXX where to report error (-> log)
-		// EINVAL | ENOENT -> bug
-		// ENOMEN - kernel is already under memory pressure - we must not keep here
-		if st != fuse.OK {
-			return fmt.Errorf("bigfile %s: blk %d: -> pagecache: %s", zbf.POid(), blk, st)
-		}
+		if st == fuse.OK {
+			return
+		}
+
+		// pagecache update failed, but it must not (we verified on startup that
+		// pagecache control is supported by kernel). We can correctly live on
+		// with the error, but data access will be likely very slow. Tell user
+		// about the problem.
+		log.Errorf("BUG: bigfile %s: blk %d: -> pagecache: %s (ignoring, reading from bigfile will be very slow)", zbf.POid(), blk, st)
 	}()
 
 	return nil
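The .loading map is what lets concurrent reads of the same block share a single in-flight load, and the comments above explain why its entry may be deleted only after the block has also been stored into the OS pagecache. A generic, self-contained sketch of that deduplication (not from the commit; names and field layout are illustrative, not the file's actual blkLoadState):

package sketch

import "sync"

// loadResult is an illustrative stand-in for a per-block load state.
type loadResult struct {
	ready chan struct{} // closed when the load is finished
	data  []byte
	err   error
}

type loader struct {
	mu      sync.Mutex
	loading map[int64]*loadResult // #blk -> in-flight or completed load
}

func newLoader() *loader {
	return &loader{loading: make(map[int64]*loadResult)}
}

// loadBlk loads block blk once, no matter how many goroutines ask for it concurrently.
func (l *loader) loadBlk(blk int64, load func() ([]byte, error)) ([]byte, error) {
	l.mu.Lock()
	r, inflight := l.loading[blk]
	if !inflight {
		r = &loadResult{ready: make(chan struct{})}
		l.loading[blk] = r
	}
	l.mu.Unlock()

	if inflight {
		<-r.ready // someone else is loading this block - wait and reuse the result
		return r.data, r.err
	}

	r.data, r.err = load()
	close(r.ready)
	// NOTE the code above keeps the entry in .loading until the data has also been
	// pushed into the kernel pagecache, and only then deletes it - otherwise a
	// simultaneous read could miss both the pagecache and .loading and hit the DB again.
	return r.data, r.err
}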
@@ -621,8 +623,8 @@ func (bf *BigFile) readAt() []byte {
 // LOBTree/LOBucket from live cache. We want to keep LOBTree/LOBucket always alive
 // becuse it is essentially the index where to find ZBigFile data.
 //
-// For the data itself - we put it to kernel cache and always deactivate from
-// ZODB right after that.
+// For the data itself - we put it to kernel pagecache and always deactivate
+// from ZODB right after that.
 //
 // TODO set it to Connection.CacheControl
 type zodbCacheControl struct {}
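The comment above states the cache policy: LOBTree/LOBucket objects are the index to ZBigFile data and must stay in the ZODB live cache, while data objects may be dropped because their payload already went to the kernel pagecache. A sketch of an eviction predicate with that shape (not from the commit; the types below are placeholders for the real persistent BTree types):

package sketch

// Placeholder types standing in for the real persistent LOBTree/LOBucket objects.
type LOBTree struct{}
type LOBucket struct{}

// wantEvict says whether an object may be dropped from the ZODB live cache.
func wantEvict(obj interface{}) bool {
	switch obj.(type) {
	case *LOBTree, *LOBucket:
		return false // keep the index to ZBigFile data always alive
	}
	return true // data blocks are already served from the kernel pagecache
}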
@@ -641,8 +643,6 @@ func (cc *zodbCacheControl) WantEvict(obj zodb.IPersistent) bool {
 	return false
 }
 
-// XXX option to prevent starting if wcfs was already started ?
-
 // FIXME gfsconn is tmp workaround for lack of way to retrieve FileSystemConnector from nodefs.Inode
 // TODO:
 //	- Inode += .Mount() -> nodefs.Mount
@@ -652,10 +652,14 @@ func (cc *zodbCacheControl) WantEvict(obj zodb.IPersistent) bool {
 var gfsconn *nodefs.FileSystemConnector
 
 func main() {
-	log.SetPrefix("wcfs: ")
+	stdlog.SetPrefix("wcfs: ")
+	log.CopyStandardLogTo("WARNING") // XXX -> "DEBUG" if -d ?
+	defer log.Flush()
+
 	debug := flag.Bool("d", false, "debug")
 	autoexit := flag.Bool("autoexit", false, "automatically stop service when there is no client activity")
 
+	// XXX option to prevent starting if wcfs was already started ?
 	flag.Parse()
 	if len(flag.Args()) != 2 {
 		log.Fatalf("Usage: %s [OPTIONS] zurl mntpt", os.Args[0])
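main() now wires the two loggers together: the stdlib logger keeps the wcfs: prefix but is routed into glog at WARNING severity, and glog's buffers are flushed on exit. A minimal standalone sketch of the same wiring (not part of the commit):

package main

import (
	"flag"
	stdlog "log"

	log "github.com/golang/glog"
)

func main() {
	stdlog.SetPrefix("wcfs: ")       // messages from the stdlib logger keep their prefix
	log.CopyStandardLogTo("WARNING") // ... and are forwarded into glog as warnings
	defer log.Flush()                // glog buffers output; flush it before exit

	flag.Parse() // glog registers its own flags (e.g. -logtostderr, -v)
	log.Infof("logging is set up")
}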
@@ -671,7 +675,6 @@ func main() {
 	}
 	defer zstor.Close()
 
 	// mount root
 	opts := &fuse.MountOptions{
 		FsName: zurl,