Commit 517db5de authored Jul 24, 2017 by Kirill Smelkov
.
parent ab494bf9
Showing 5 changed files with 52 additions and 43 deletions (+52 -43)
go/zodb/storage/fs1/filestorage.go                 +42 -32
go/zodb/storage/fs1/fs1tools/reindex.go            +7  -4
go/zodb/storage/fs1/fs1tools/tail_test.go          +2  -2
go/zodb/storage/fs1/fs1tools/testdata/1.fstail.ok  +0  -0
go/zodb/storage/fs1/index.go                       +1  -5
go/zodb/storage/fs1/filestorage.go

@@ -19,8 +19,8 @@
 // Package fs1 provides so-called FileStorage v1 ZODB storage.
 //
-// FileStorage is a single file organized as a append-only log of transactions
-// with data changes. Every transaction record consists of:
+// FileStorage is a single file organized as a simple append-only log of
+// transactions with data changes. Every transaction record consists of:
 //
 // - transaction record header represented by TxnHeader,
 // - several data records corresponding to modified objects,
@@ -279,7 +279,7 @@ const (
 // LenPrev == 0 prev record length could not be read
 // LenPrev == -1 EOF backward
 // LenPrev >= TxnHeaderFixSize LenPrev was read/checked normally
-func (txnh *TxnHeader) Load(r io.ReaderAt /* *os.File */, pos int64, flags TxnLoadFlags) error {
+func (txnh *TxnHeader) Load(r io.ReaderAt, pos int64, flags TxnLoadFlags) error {
 	if cap(txnh.workMem) < txnXHeaderFixSize {
 		txnh.workMem = make([]byte, txnXHeaderFixSize, 256 /* to later avoid allocation for typical strings */)
 	}

@@ -383,7 +383,7 @@ func (txnh *TxnHeader) Load(r io.ReaderAt /* *os.File */, pos int64, flags TxnLo
 }

 // loadStrings makes sure strings that are part of transaction header are loaded
-func (txnh *TxnHeader) loadStrings(r io.ReaderAt /* *os.File */) error {
+func (txnh *TxnHeader) loadStrings(r io.ReaderAt) error {
 	// XXX make it no-op if strings are already loaded?
 	// we rely on Load leaving len(workMem) = sum of all strings length ...

@@ -437,7 +437,7 @@ func (txnh *TxnHeader) LoadPrev(r io.ReaderAt, flags TxnLoadFlags) error {
 	return nil
 }

-// LoadNext reads and decodes next transaction record header
+// LoadNext reads and decodes next transaction record header.
 // prerequisite: txnh .Pos and .Len should be already initialized by: XXX also .Tid
 // - previous successful call to Load() initially XXX ^^^
 // - TODO
@@ -492,7 +492,7 @@ func (dh *DataHeader) Len() int64 {
 // Load reads and decodes data record header.
 // pos: points to data header start
 // no prerequisite requirements are made to previous dh state
-func (dh *DataHeader) Load(r io.ReaderAt /* *os.File */, pos int64) error {
+func (dh *DataHeader) Load(r io.ReaderAt, pos int64) error {
 	dh.Pos = pos
 	// XXX .Len = 0 = read error ?

@@ -548,7 +548,7 @@ func (dh *DataHeader) Load(r io.ReaderAt /* *os.File */, pos int64) error {
 // prerequisite: dh .Oid .Tid .PrevRevPos are initialized:
 // - TODO describe how
 // when there is no previous revision: io.EOF is returned
-func (dh *DataHeader) LoadPrevRev(r io.ReaderAt /* *os.File */) error {
+func (dh *DataHeader) LoadPrevRev(r io.ReaderAt) error {
 	if dh.PrevRevPos == 0 {
 		return io.EOF // no more previous revisions
 	}

@@ -563,7 +563,7 @@ func (dh *DataHeader) LoadPrevRev(r io.ReaderAt /* *os.File */) error {
 	return err
 }

-func (dh *DataHeader) loadPrevRev(r io.ReaderAt /* *os.File */) error {
+func (dh *DataHeader) loadPrevRev(r io.ReaderAt) error {
 	oid := dh.Oid
 	tid := dh.Tid

@@ -588,7 +588,7 @@ func (dh *DataHeader) loadPrevRev(r io.ReaderAt /* *os.File */) error {
 // LoadBack reads and decodes data header for revision linked via back-pointer.
 // prerequisite: dh XXX .DataLen == 0
 // if link is to zero (means deleted record) io.EOF is returned
-func (dh *DataHeader) LoadBack(r io.ReaderAt /* *os.File */) error {
+func (dh *DataHeader) LoadBack(r io.ReaderAt) error {
 	if dh.DataLen != 0 {
 		bug(dh, "LoadBack() on non-backpointer data header")
 	}

@@ -638,7 +638,7 @@ func (dh *DataHeader) LoadBack(r io.ReaderAt /* *os.File */) error {
 // LoadNext reads and decodes data header for next data record in the same transaction.
 // prerequisite: dh .Pos .DataLen are initialized
 // when there is no more data records: io.EOF is returned
-func (dh *DataHeader) LoadNext(r io.ReaderAt /* *os.File */, txnh *TxnHeader) error {
+func (dh *DataHeader) LoadNext(r io.ReaderAt, txnh *TxnHeader) error {
 	err := dh.loadNext(r, txnh)
 	if err != nil && err != io.EOF {
 		err = txnh.err("iterating", err)
@@ -646,7 +646,7 @@ func (dh *DataHeader) LoadNext(r io.ReaderAt /* *os.File */, txnh *TxnHeader) er
 	return err
 }

-func (dh *DataHeader) loadNext(r io.ReaderAt /* *os.File */, txnh *TxnHeader) error {
+func (dh *DataHeader) loadNext(r io.ReaderAt, txnh *TxnHeader) error {
 	// position of txn tail - right after last data record byte
 	txnTailPos := txnh.Pos + txnh.Len - 8

@@ -680,11 +680,11 @@ func (dh *DataHeader) loadNext(r io.ReaderAt /* *os.File */, txnh *TxnHeader) er
 }

 // LoadData loads data for the data record taking backpointers into account.
-// Data is loaded into *buf, which, if needed, is reallocated to hold all loading data size XXX
+// Data is loaded into *buf, which, if needed, is reallocated to hold whole loading data size.
 // NOTE on success dh state is changed to data header of original data transaction
 // NOTE "deleted" records are indicated via returning *buf=nil
 // TODO buf -> slab
-func (dh *DataHeader) LoadData(r io.ReaderAt /* *os.File */, buf *[]byte) error {
+func (dh *DataHeader) LoadData(r io.ReaderAt, buf *[]byte) error {
 	// scan via backpointers
 	for dh.DataLen == 0 {
 		err := dh.LoadBack(r)
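Note on the recurring signature change above: dropping the "/* *os.File */" annotation leaves the loaders taking any io.ReaderAt, not specifically a file. A minimal standalone sketch of why that is convenient (not part of this commit; names and data are illustrative): the same reading helper works against an *os.File and an in-memory bytes.Reader.

package main

import (
	"bytes"
	"fmt"
	"io"
	"os"
)

// readAt4 reads 4 bytes at offset off from any io.ReaderAt,
// mirroring how the fs1 header loaders above accept io.ReaderAt.
func readAt4(r io.ReaderAt, off int64) ([]byte, error) {
	buf := make([]byte, 4)
	_, err := r.ReadAt(buf, off)
	return buf, err
}

func main() {
	// in-memory reader
	mem := bytes.NewReader([]byte("hello world"))
	if b, err := readAt4(mem, 6); err == nil {
		fmt.Printf("%s\n", b) // worl
	}

	// file-backed reader (path is hypothetical)
	if f, err := os.Open("/tmp/example.bin"); err == nil {
		defer f.Close()
		if b, err := readAt4(f, 0); err == nil {
			fmt.Printf("% x\n", b)
		}
	}
}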
@@ -707,6 +707,12 @@ func (dh *DataHeader) LoadData(r io.ReaderAt /* *os.File */, buf *[]byte) error
 	return nil
 }

+// --- FileStorage ---
+
+func (fs *FileStorage) StorageName() string {
+	return "FileStorage v1"
+}
+
 // open opens FileStorage without loading index
 func open(path string) (*FileStorage, error) {
 	fs := &FileStorage{}

@@ -797,6 +803,16 @@ func Open(ctx context.Context, path string) (*FileStorage, error) {
 	return fs, nil
 }

+func (fs *FileStorage) Close() error {
+	// TODO dump index
+	err := fs.file.Close()
+	if err != nil {
+		return err
+	}
+	fs.file = nil
+	return nil
+}
+
 func (fs *FileStorage) LastTid() (zodb.Tid, error) {
 	// XXX check we have transactions at all
@@ -876,22 +892,8 @@ func (fs *FileStorage) Load(xid zodb.Xid) (data []byte, tid zodb.Tid, err error)
 	return data, tid, nil
 }

-func (fs *FileStorage) Close() error {
-	// TODO dump index
-	err := fs.file.Close()
-	if err != nil {
-		return err
-	}
-	fs.file = nil
-	return nil
-}
-
-func (fs *FileStorage) StorageName() string {
-	return "FileStorage v1"
-}
-
-// iteration
+// zodb.IStorage iteration

 type iterFlags int

 const (
@@ -1090,7 +1092,6 @@ func (fs *FileStorage) Iterate(tidMin, tidMax zodb.Tid) zodb.IStorageIterator {

 func (fs *FileStorage) computeIndex(ctx context.Context) (index *Index, err error) {
 	// TODO err ctx <file>: <reindex>:
-	// XXX handle ctx cancel
 	index = IndexNew()
 	index.TopPos = txnValidFrom

@@ -1099,12 +1100,18 @@ func (fs *FileStorage) computeIndex(ctx context.Context) (index *Index, err erro
 	fsSeq := xbufio.NewSeqReaderAt(fs.file)

 	// pre-setup txnh so that txnh.LoadNext starts loading from the beginning of file
 	//txnh := &TxnHeader{Pos: 0, Len: index.TopPos, TxnInfo: zodb.TxnInfo{Tid: 0}}
 	txnh := &TxnHeader{Pos: index.TopPos, Len: -2} // XXX -2
 	dh := &DataHeader{}

 loop:
 	for {
+		// check ctx cancel once per transaction
+		select {
+		case <-ctx.Done():
+			return nil, ctx.Err()
+		default:
+		}
+
 		// XXX merge logic into LoadNext/LoadPrev
 		switch txnh.Len {
 		case -2:

@@ -1118,6 +1125,8 @@ loop:
 			break
 		}

+		// XXX check txnh.Status != TxnInprogress
+
 		index.TopPos = txnh.Pos + txnh.Len

 		// first data iteration will go to first data record
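The select block added in computeIndex above is the standard non-blocking cancellation check. A generic sketch of the same pattern (not from this commit; scan and its arguments are illustrative), polling ctx.Done() once per loop iteration:

package main

import (
	"context"
	"fmt"
)

// scan processes n records and checks for cancellation once per iteration
// without blocking, like the per-transaction check added above.
func scan(ctx context.Context, n int) error {
	for i := 0; i < n; i++ {
		select {
		case <-ctx.Done():
			return ctx.Err() // canceled or deadline exceeded
		default:
		}
		// ... process one record ...
	}
	return nil
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	cancel() // cancel immediately, just for demonstration
	fmt.Println(scan(ctx, 10)) // context canceled
}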
@@ -1160,13 +1169,14 @@ func (fs *FileStorage) loadIndex() error {
 	}

 	fs.index = index
 	return nil
 }

+// saveIndex flushes in-RAM index to disk
 func (fs *FileStorage) saveIndex() error {
 	// XXX lock?
-	err = fs.index.SaveFile(fs.file.Name() + ".index")
+	err := fs.index.SaveFile(fs.file.Name() + ".index")
 	return err
 }

@@ -1195,7 +1205,7 @@ func (fs *FileStorage) VerifyIndex(ctx context.Context) error {
 	}

 	if !indexOk.Equal(fs.index) {
-		err = &ErrIndexCorrupt{index: fs.index, indexOk: indexOk}
+		err = &IndexCorruptError{index: fs.index, indexOk: indexOk}
 	}

 	return err
go/zodb/storage/fs1/fs1tools/reindex.go

@@ -20,36 +20,39 @@
 package fs1tools

 import (
 	"context"
 	"flag"
 	"fmt"
 	"io"
 	"os"
+	"log"

 	"lab.nexedi.com/kirr/neo/go/zodb/storage/fs1"
 )

 // Reindex rebuilds index for FileStorage file @ path
 func Reindex(path string) error {
 	// XXX open read-only
-	fs, err := fs1.Open(contex.Background(), path, fs1.OpenWithoutIndex)
+	fs, err := fs1.Open(context.Background(), path) // XXX , fs1.OpenWithoutIndex)
 	if err != nil {
 		return nil // XXX err ctx
 	}
 	defer fs.Close() // XXX err

-	err = fs.Reindex()
+	err = fs.Reindex(nil)
 	return err // XXX ok?
 }

 // VerifyIndexFor verifies that on-disk index for FileStorage file @ path is correct
 func VerifyIndexFor(path string) error {
 	// XXX open read-only
-	fs, err := fs1.Open(contex.Background(), path, 0)
+	fs, err := fs1.Open(context.Background(), path) // XXX , 0)
 	if err != nil {
 		return nil // XXX err ctx
 	}
 	defer fs.Close() // XXX err

-	err = fs.VerifyIndex()
+	err = fs.VerifyIndex(nil)
 	return err

 	//fs.Index()
 	//fs.ComputeIndex
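For context, a minimal sketch of how these fs1tools helpers might be driven from a small program (not part of the commit; the fs1tools import path and the data-file path are assumptions):

package main

import (
	"log"

	"lab.nexedi.com/kirr/neo/go/zodb/storage/fs1/fs1tools"
)

func main() {
	const path = "/tmp/data.fs" // hypothetical FileStorage data file

	// rebuild the index from the data, then verify the on-disk index
	if err := fs1tools.Reindex(path); err != nil {
		log.Fatal(err)
	}
	if err := fs1tools.VerifyIndexFor(path); err != nil {
		log.Fatal(err)
	}
}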
go/zodb/storage/fs1/fs1tools/tail_test.go

@@ -19,7 +19,7 @@
 package fs1tools

-//go:generate sh -c "python2 -m ZODB.scripts.fstail -n 1000000 ../../testdata/1.fs >testdata/1.fsdump.ok"
+//go:generate sh -c "python2 -m ZODB.scripts.fstail -n 1000000 ../testdata/1.fs >testdata/1.fstail.ok"

 import (
 	"bytes"

@@ -54,7 +54,7 @@ func TestTail(t *testing.T) {
 		t.Fatal(err)
 	}

-	dumpOk := loadFile(t, "testdata/1.fsdump.ok")
+	dumpOk := loadFile(t, "testdata/1.fstail.ok")

 	if dumpOk != buf.String() {
 		t.Errorf("dump different:\n%v", diff(dumpOk, buf.String()))
go/zodb/storage/fs1/fs1tools/testdata/1.fsdump.ok → go/zodb/storage/fs1/fs1tools/testdata/1.fstail.ok (file moved)
go/zodb/storage/fs1/index.go

@@ -17,9 +17,6 @@
 // See COPYING file for full licensing terms.
 // See https://www.nexedi.com/licensing for rationale and options.

-// XXX partly based on code from ZODB ?
-// TODO link to format in zodb/py
-
 package fs1

 // FileStorage v1. Index

@@ -75,7 +72,6 @@ func IndexNew() *Index {
 // fsBucket:
 // oid[6:8]oid[6:8]oid[6:8]...pos[2:8]pos[2:8]pos[2:8]...

 const (
 	oidPrefixMask  zodb.Oid = (1<<64 - 1) ^ (1<<16 - 1) // 0xffffffffffff0000
-
 	posInvalidMask uint64   = (1<<64 - 1) ^ (1<<48 - 1) // 0xffff000000000000

@@ -362,7 +358,7 @@ func (a *Index) Equal(b *Index) bool {
 	return treeEqual(a.Tree, b.Tree)
 }

-// treeEqual returns whether two trees are the same
+// treeEqual returns whether two fsb.Tree are the same
 func treeEqual(a, b *fsb.Tree) bool {
 	if a.Len() != b.Len() {
 		return false
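The oidPrefixMask / posInvalidMask constants in the hunk above encode the fsBucket layout described in its comment: the high 6 bytes of an oid select the bucket, the low 2 bytes are stored per entry, and file positions must fit into 6 bytes. A small standalone check of that arithmetic (not part of the commit; the sample oid and pos are illustrative):

package main

import "fmt"

const (
	oidPrefixMask  uint64 = (1<<64 - 1) ^ (1<<16 - 1) // 0xffffffffffff0000
	posInvalidMask uint64 = (1<<64 - 1) ^ (1<<48 - 1) // 0xffff000000000000
)

func main() {
	fmt.Printf("%#x\n", oidPrefixMask)  // 0xffffffffffff0000
	fmt.Printf("%#x\n", posInvalidMask) // 0xffff000000000000

	// split a sample oid into bucket prefix (high 6 bytes) and 2-byte suffix
	oid := uint64(0x0123456789abcdef)
	prefix := oid & oidPrefixMask // 0x123456789ab0000
	suffix := uint16(oid)         // 0xcdef
	fmt.Printf("%#x %#x\n", prefix, suffix)

	// a position fits in 6 bytes iff it has no bits under posInvalidMask
	pos := uint64(0x0000123456789abc)
	fmt.Println(pos&posInvalidMask == 0) // true
}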