Levin Zimmermann / neoppod · Commits · f71eebf7

Commit f71eebf7 authored Nov 07, 2017 by Kirill Smelkov
parent 5dd3d1ab

Commit message:
.

Showing 13 changed files with 84 additions and 99 deletions
go/zodb/storage/cache.go                 +4   -7
go/zodb/storage/cache_test.go            +0   -29
go/zodb/storage/doc.go                   +1   -1
go/zodb/storage/fs1/filestorage.go       +25  -30
go/zodb/storage/fs1/format.go            +21  -20
go/zodb/storage/fs1/fs1tools/dump.go     +5   -4
go/zodb/storage/fs1/fsb/fsb.go           +1   -0
go/zodb/storage/fs1/fsb/gen-fsbtree      +19  -0
go/zodb/storage/fs1/py/gen-testdata      +1   -1
go/zodb/zodbtools/catobj.go              +3   -3
go/zodb/zodbtools/dump_test.go           +1   -1
go/zodb/zodbtools/info.go                +1   -1
go/zodb/zodbtools/main.go                +2   -2
go/zodb/storage/cache.go

@@ -34,6 +34,7 @@ import (
     "lab.nexedi.com/kirr/neo/go/xcommon/xcontainer/list"
 )
 
+// XXX managing LRU under 1 big gcMu might be bad for scalability.
 // TODO maintain nhit / nmiss + way to read cache stats
 
 // Cache adds RAM caching layer over a storage.

@@ -67,10 +68,6 @@ type oidCacheEntry struct {
     //
     // NOTE ^^^ .serial = 0 while loading is in progress
     // NOTE ^^^ .serial = 0 if .err != nil
-    //
-    // XXX or?
-    //    cached revisions in descending order
-    //    .before > .serial >= next.before > next.serial ?
     rcev []*revCacheEntry
 }
 

@@ -105,7 +102,7 @@ type revCacheEntry struct {
 }
 
 // StorLoader represents loading part of a storage.
-// XXX -> zodb?
+// XXX -> zodb.IStorageLoader (or zodb.Loader ?) ?
 type StorLoader interface {
     Load(ctx context.Context, xid zodb.Xid) (buf *zodb.Buf, serial zodb.Tid, err error)
 }
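Note: the StorLoader interface above only requires a Load method. For orientation, here is a minimal, self-contained sketch of something with that shape; Oid, Tid, Xid and Buf are local stand-ins for the zodb types, and memLoader with its map contents is an illustrative assumption, not code from this commit.

package main

import (
    "context"
    "fmt"
)

// Stand-ins for zodb.Oid, zodb.Tid, zodb.Xid and zodb.Buf so the sketch is
// self-contained.
type (
    Oid uint64
    Tid uint64
)

type Xid struct {
    Oid Oid
    Tid Tid
}

type Buf struct{ Data []byte }

// StorLoader mirrors the interface from the hunk above, with stand-in types.
type StorLoader interface {
    Load(ctx context.Context, xid Xid) (buf *Buf, serial Tid, err error)
}

// memLoader serves object data from an in-memory map keyed by oid
// (illustrative only).
type memLoader struct {
    data map[Oid][]byte
}

func (m *memLoader) Load(ctx context.Context, xid Xid) (*Buf, Tid, error) {
    b, ok := m.data[xid.Oid]
    if !ok {
        return nil, 0, fmt.Errorf("oid %d: no data", xid.Oid)
    }
    return &Buf{Data: b}, 1, nil // serial 1 is a placeholder
}

func main() {
    var l StorLoader = &memLoader{data: map[Oid][]byte{7: []byte("hello")}}
    buf, serial, err := l.Load(context.Background(), Xid{Oid: 7})
    fmt.Println(string(buf.Data), serial, err) // hello 1 <nil>
}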
@@ -208,7 +205,7 @@ func (c *Cache) Prefetch(ctx context.Context, xid zodb.Xid) {
 // rce will become ready.
 //
 // rceNew indicates whether rce is new and so loading on it has not been
-// initiated yet. If so rce should be loaded with loadRCE.
+// initiated yet. If so the caller should proceed to loading rce via loadRCE.
 func (c *Cache) lookupRCE(xid zodb.Xid) (rce *revCacheEntry, rceNew bool) {
     // loadSerial(serial) -> loadBefore(serial+1)
     before := xid.Tid

@@ -463,7 +460,7 @@ func (c *Cache) gcsignal() {
     default:
         // also ok - .gcCh is created with size 1 so if we could not
         // put something to it - there is already 1 element in there
-        // and so gc will get signal to run
+        // and so gc will get signal to run.
     }
 }
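Note: the gcsignal comment above relies on the usual non-blocking wake-up through a buffered channel of capacity 1. A standalone sketch of that pattern (the gcCh name comes from the diff; the worker body is an illustrative assumption):

package main

import (
    "fmt"
    "sync"
)

func main() {
    // gcCh has capacity 1: an extra signal while one is already pending is
    // simply dropped, which is fine - the worker will run anyway.
    gcCh := make(chan struct{}, 1)

    var wg sync.WaitGroup
    wg.Add(1)
    go func() {
        defer wg.Done()
        for range gcCh {
            fmt.Println("gc: running")
        }
    }()

    gcsignal := func() {
        select {
        case gcCh <- struct{}{}:
            // ok - signal queued
        default:
            // also ok - there is already 1 element in gcCh,
            // so gc will get the signal to run.
        }
    }

    gcsignal()
    gcsignal() // coalesced with the already pending signal
    close(gcCh)
    wg.Wait()
}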
go/zodb/storage/cache_test.go

@@ -37,8 +37,6 @@ import (
 // tStorage implements read-only storage for cache testing
 type tStorage struct {
-    //txnv []tTxnRecord // transactions; .tid↑
-
     // oid -> [](.serial↑, .data)
     dataMap map[zodb.Oid][]tOidData
 }

@@ -139,7 +137,6 @@ func TestCache(t *testing.T) {
     __ := Checker{t}
     ok1 := func(v bool) { t.Helper(); __.ok1(v) }
-    //eq := func(a, b interface{}) { t.Helper(); __.assertEq(a, b) }
 
     hello := []byte("hello")
     world := []byte("world!!")

@@ -592,29 +589,3 @@ func (c *Checker) assertEq(a, b interface{}) {
         c.t.Fatal("!eq:\n", pretty.Compare(a, b))
     }
 }
-
-/*
-type tTxnRecord struct {
-    tid zodb.Tid
-
-    // data records for oid changed in transaction
-    // .oid↑
-    datav []tDataRecord
-}
-
-type tDataRecord struct {
-    oid  zodb.Oid
-    data []byte
-}
-
-    if xid.TidBefore {
-        // find max txn with .tid < xid.Tid
-        n := len(s.txnv)
-        i := n - 1 - sort.Search(n, func(i int) bool {
-            return s.txnv[n - 1 - i].tid < xid.Tid
-        })
-        if i == -1 {
-            // XXX xid.Tid < all .tid - no such transaction
-        }
-    }
-*/
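Note: the Checker/ok1 helpers touched by this hunk follow a common t.Helper()-based pattern. A standalone sketch, written as a _test.go file (TestExample and the failure texts are illustrative; pretty is github.com/kr/pretty, which the test already uses):

package example

import (
    "reflect"
    "testing"

    "github.com/kr/pretty"
)

// Checker wraps *testing.T; its methods call t.Helper() so that failures are
// reported at the caller's line.
type Checker struct {
    t *testing.T
}

func (c *Checker) ok1(v bool) {
    c.t.Helper()
    if !v {
        c.t.Fatal("!ok")
    }
}

func (c *Checker) assertEq(a, b interface{}) {
    c.t.Helper()
    if !reflect.DeepEqual(a, b) {
        c.t.Fatal("!eq:\n", pretty.Compare(a, b))
    }
}

func TestExample(t *testing.T) {
    __ := Checker{t}
    ok1 := func(v bool) { t.Helper(); __.ok1(v) }

    hello := []byte("hello")
    ok1(len(hello) == 5)
    __.assertEq(string(hello), "hello")
}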
go/zodb/storage/doc.go

@@ -17,5 +17,5 @@
 // See COPYING file for full licensing terms.
 // See https://www.nexedi.com/licensing for rationale and options.
 
-// Package storage provides common bits related to ZODB storages. XXX text
+// Package storage provides common infrastructure related to ZODB storages.
 package storage
go/zodb/storage/fs1/filestorage.go

@@ -51,7 +51,7 @@
 // https://github.com/zopefoundation/ZODB/blob/a89485c1/src/ZODB/fsIndex.py
 // https://github.com/zopefoundation/ZODB/commit/1bb14faf
 //
-// Unless one is doing something FileStorage-specific, it is advices not to use
+// Unless one is doing something FileStorage-specific, it is adviced not to use
 // fs1 package directly, and instead link-in lab.nexedi.com/kirr/neo/go/zodb/wks,
 // open storage by zodb.OpenStorage and use it by way of zodb.IStorage interface.
 //

@@ -89,10 +89,6 @@ type FileStorage struct {
     // XXX keep loaded with LoadNoStrings ?
     txnhMin TxnHeader
     txnhMax TxnHeader
-
-    // XXX topPos = txnhMax.Pos + txnhMax.Len
-    //topPos int64 // position pointing just past last committed transaction
-    //             // (= size(.file) when no commit is in progress)
 }
 
 // IStorage

@@ -103,12 +99,14 @@ func (fs *FileStorage) StorageName() string {
 }
 
 // open opens FileStorage without loading index
+//
+// TODO read-write support
 func open(path string) (*FileStorage, error) {
     fs := &FileStorage{}
-    f, err := os.Open(path) // XXX opens in O_RDONLY
+    f, err := os.Open(path)
     if err != nil {
-        return nil, err // XXX err more context ?
+        return nil, err
     }
     fs.file = f

@@ -119,22 +117,12 @@ func open(path string) (*FileStorage, error) {
         return nil, err
     }
 
-    /*
-    // TODO recreate index if missing / not sane (cancel this job on ctx.Done)
-    // TODO verify index sane / topPos matches
-    topPos, index, err := LoadIndexFile(path + ".index")
-    if err != nil {
-        panic(err) // XXX err
-    }
-    fs.index = index
-    */
-
     // determine topPos from file size
     // if it is invalid (e.g. a transaction committed only half-way) we'll catch it
     // while loading/recreating index  XXX recheck this logic
     fi, err := f.Stat()
     if err != nil {
-        return nil, err // XXX err ctx
+        return nil, err
     }
     topPos := fi.Size()

@@ -142,19 +130,19 @@ func open(path string) (*FileStorage, error) {
     // FIXME support empty file case -> then both txnhMin and txnhMax stays invalid
     err = fs.txnhMin.Load(f, txnValidFrom, LoadAll) // XXX txnValidFrom here -> ?
     if err != nil {
-        return nil, err // XXX +context
+        return nil, err
     }
     err = fs.txnhMax.Load(f, topPos, LoadAll)
     // expect EOF but .LenPrev must be good
     // FIXME ^^^ it will be no EOF if a txn was committed only partially
     if err != io.EOF {
         if err == nil {
-            err = fmt.Errorf("no EOF after topPos") // XXX err context
+            err = fmt.Errorf("%s: no EOF after topPos", f.Name())
         }
-        return nil, err // XXX +context
+        return nil, fmt.Errorf("%s: %s", f.Name(), err)
     }
 
     if fs.txnhMax.LenPrev <= 0 {
-        panic("could not read LenPrev @topPos") // XXX err
+        return nil, fmt.Errorf("%s: could not read LenPrev @%d (last transaction)", f.Name(), fs.txnhMax.Pos)
     }
 
     err = fs.txnhMax.LoadPrev(f, LoadAll)

@@ -166,35 +154,39 @@ func open(path string) (*FileStorage, error) {
     return fs, nil
 }
 
-// Open opens FileStorage XXX text
+// Open opens FileStorage @path.
+//
+// TODO read-write support
 func Open(ctx context.Context, path string) (*FileStorage, error) {
+    // open data file
     fs, err := open(path)
     if err != nil {
        return nil, err
     }
 
+    // load/rebuild index
     err = fs.loadIndex()
     if err != nil {
        log.Print(err)
        log.Printf("%s: index recompute...", path)
-       fs.index, err = fs.computeIndex(ctx) // XXX better .reindex() which saves it?
+       // XXX if !ro -> .reindex() which saves it
+       fs.index, err = fs.computeIndex(ctx)
        if err != nil {
            fs.file.Close() // XXX lclose
            return nil, err
        }
     }
 
-    // TODO verify index sane / topPos matches
+    // TODO verify index is sane / topPos matches
+    // XXX place
     if fs.index.TopPos != fs.txnhMax.Pos + fs.txnhMax.Len {
-        panic("inconsistent index topPos") // XXX
+        panic("TODO: inconsistent index topPos") // XXX
     }
 
     return fs, nil
 }
 
 func (fs *FileStorage) Close() error {
-    // TODO dump index
+    // TODO dump index (if !ro ?)
     err := fs.file.Close()
     if err != nil {
         return err
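Note: open() above takes topPos from the file size and then expects io.EOF exactly at that offset. A self-contained sketch of that check on a plain os.File (the data.fs path is a placeholder, not from the commit):

package main

import (
    "fmt"
    "io"
    "log"
    "os"
)

func main() {
    // Hypothetical data file path, for illustration only.
    f, err := os.Open("data.fs")
    if err != nil {
        log.Fatal(err)
    }
    defer f.Close()

    // topPos is taken from the file size, as in open() above.
    fi, err := f.Stat()
    if err != nil {
        log.Fatal(err)
    }
    topPos := fi.Size()

    // Reading at topPos must hit EOF if the file really ends at the last
    // committed transaction; anything else means trailing garbage.
    var b [1]byte
    _, err = f.ReadAt(b[:], topPos)
    if err != io.EOF {
        log.Fatalf("%s: no EOF after topPos", f.Name())
    }
    fmt.Printf("%s: topPos = %d\n", f.Name(), topPos)
}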
@@ -530,8 +522,8 @@ func (fs *FileStorage) computeIndex(ctx context.Context) (index *Index, err erro
 // loadIndex loads on-disk index to RAM
 func (fs *FileStorage) loadIndex() (err error) {
-    // XXX LoadIndexFile already contains "%s: index load"
     // XXX lock?
+    // XXX LoadIndexFile already contains "%s: index load"
     defer xerr.Contextf(&err, "%s", fs.file.Name())
 
     index, err := LoadIndexFile(fs.file.Name() + ".index")

@@ -563,7 +555,8 @@ func (fs *FileStorage) saveIndex() (err error) {
     return nil
 }
 
-// IndexCorruptError is the error returned when index verification fails
+// indexCorruptError is the error returned when index verification fails.
+//
 // XXX but io errors during verification return not this
 type indexCorruptError struct {
     index *Index

@@ -576,6 +569,7 @@ func (e *indexCorruptError) Error() string {
 }
 
 // VerifyIndex verifies that index is correct
+//
 // XXX -> not exported @ fs1
 func (fs *FileStorage) verifyIndex(ctx context.Context) error {
     // XXX lock appends?

@@ -596,6 +590,7 @@ func (fs *FileStorage) verifyIndex(ctx context.Context) error {
 // Reindex rebuilds the index
+//
 // XXX -> not exported @ fs1
 func (fs *FileStorage) reindex(ctx context.Context) error {
     // XXX lock appends?
go/zodb/storage/fs1/format.go

@@ -94,38 +94,38 @@ const (
     lenIterStart int64 = -0x1111111111111112 // = 0xeeeeeeeeeeeeeeee if unsigned
 )
 
-// ErrTxnRecord is returned on transaction record read / decode errors
-type ErrTxnRecord struct {
+// TxnError is returned on transaction record read / decode errors
+type TxnError struct {
     Pos  int64  // position of transaction record
     Subj string // about what .Err is
     Err  error  // actual error
 }
 
-func (e *ErrTxnRecord) Error() string {
+func (e *TxnError) Error() string {
     return fmt.Sprintf("transaction record @%v: %v: %v", e.Pos, e.Subj, e.Err)
 }
 
-// err creates ErrTxnRecord for transaction located at txnh.Pos
+// err creates TxnError for transaction located at txnh.Pos
 func (txnh *TxnHeader) err(subj string, err error) error {
-    return &ErrTxnRecord{txnh.Pos, subj, err}
+    return &TxnError{txnh.Pos, subj, err}
 }
 
-// ErrDataRecord is returned on data record read / decode errors
-type ErrDataRecord struct {
+// DataError is returned on data record read / decode errors
+type DataError struct {
     Pos  int64  // position of data record
     Subj string // about what .Err is
     Err  error  // actual error
 }
 
-func (e *ErrDataRecord) Error() string {
+func (e *DataError) Error() string {
     return fmt.Sprintf("data record @%v: %v: %v", e.Pos, e.Subj, e.Err)
 }
 
-// err creates ErrDataRecord for data record located at dh.Pos
+// err creates DataError for data record located at dh.Pos
 // XXX add link to containing txn? (check whether we can do it on data access) ?
 func (dh *DataHeader) err(subj string, err error) error {
-    return &ErrDataRecord{dh.Pos, subj, err}
+    return &DataError{dh.Pos, subj, err}
 }
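Note: the renamed TxnError/DataError types carry a file position, a subject and a wrapped cause, and nest naturally. A runnable sketch using the same fields and Error() format as in the hunk above (the positions and messages are made up for illustration):

package main

import (
    "errors"
    "fmt"
)

// DataError mirrors the renamed error type from the diff: it records where in
// the file the problem happened and wraps the underlying cause.
type DataError struct {
    Pos  int64  // position of data record
    Subj string // about what .Err is
    Err  error  // actual error
}

func (e *DataError) Error() string {
    return fmt.Sprintf("data record @%v: %v: %v", e.Pos, e.Subj, e.Err)
}

func main() {
    err := &DataError{Pos: 1024, Subj: "decode", Err: errors.New("oid mismatch")}
    fmt.Println(err) // data record @1024: decode: oid mismatch

    // Nested errors compose naturally, as in the "loading prev rev" case:
    outer := &DataError{Pos: 2048, Subj: "loading prev rev", Err: err}
    fmt.Println(outer) // data record @2048: loading prev rev: data record @1024: decode: oid mismatch
}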
@@ -159,12 +159,10 @@ func (fh *FileHeader) Load(r io.ReaderAt) error {
     _, err := r.ReadAt(fh.Magic[:], 0)
     err = okEOF(err)
     if err != nil {
-        //return fh.err("read", err)
-        return err
+        return err // XXX err more context
     }
     if string(fh.Magic[:]) != Magic {
-        return fmt.Errorf("%s: invalid magic %q", xio.Name(r), fh.Magic) // XXX -> decode err
+        return fmt.Errorf("%s: invalid magic %q", xio.Name(r), fh.Magic)
+        //return decodeErr(fh, "invalid magic %q", fh.Magic)
     }
     return nil

@@ -385,7 +383,10 @@ func (txnh *TxnHeader) LoadPrev(r io.ReaderAt, flags TxnLoadFlags) error {
     err := txnh.Load(r, txnh.Pos - lenPrev, flags)
     if err != nil {
         // EOF forward is unexpected here
-        return noEOF(err)
+        if err == io.EOF {
+            err = txnh.err("read", io.ErrUnexpectedEOF)
+        }
+        return err
     }
 
     if txnh.Len != lenPrev {

@@ -527,7 +528,7 @@ func (dh *DataHeader) LoadPrevRev(r io.ReaderAt) error {
     err := dh.loadPrevRev(r)
     if err != nil {
         // data record @...: loading prev rev: data record @...: ...
-        err = &ErrDataRecord{posCur, "loading prev rev", err}
+        err = &DataError{posCur, "loading prev rev", err}
     }
     return err
 }

@@ -542,12 +543,12 @@ func (dh *DataHeader) loadPrevRev(r io.ReaderAt) error {
     }
 
     if dh.Oid != oid {
-        // XXX vvv valid only if ErrDataRecord prints oid
+        // XXX vvv valid only if DataError prints oid
         return decodeErr(dh, "oid mismatch")
     }
 
     if dh.Tid >= tid {
-        // XXX vvv valid only if ErrDataRecord prints tid
+        // XXX vvv valid only if DataError prints tid
         return decodeErr(dh, "tid mismatch")
     }

@@ -614,7 +615,7 @@ func (dh *DataHeader) LoadBack(r io.ReaderAt) error {
     }()
 
     if err != nil {
-        err = &ErrDataRecord{posCur, "loading back rev", err}
+        err = &DataError{posCur, "loading back rev", err}
     }
     return err

@@ -644,7 +645,7 @@ func (dh *DataHeader) loadNext(r io.ReaderAt, txnh *TxnHeader) error {
     }
 
     if nextPos + DataHeaderSize > txnTailPos {
-        return &ErrDataRecord{nextPos, "decode", fmt.Errorf("data record header overlaps txn boundary")} // XXX
+        return &DataError{nextPos, "decode", fmt.Errorf("data record header overlaps txn boundary")} // XXX
     }
 
     err := dh.Load(r, nextPos)
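Note: the LoadPrev change above maps io.EOF hit in the middle of an expected record to io.ErrUnexpectedEOF with positional context. A self-contained sketch of that conversion on a plain io.ReaderAt (readHeaderAt and the 8-byte header size are illustrative assumptions, not the fs1 API):

package main

import (
    "bytes"
    "fmt"
    "io"
)

// readHeaderAt reads a fixed-size header at pos. io.EOF from a short read in
// the middle of a record is not "end of data" for the caller, so it is
// converted to io.ErrUnexpectedEOF and annotated with the position.
func readHeaderAt(r io.ReaderAt, pos int64) ([]byte, error) {
    hdr := make([]byte, 8) // assumed header size, for illustration
    _, err := r.ReadAt(hdr, pos)
    if err != nil {
        if err == io.EOF {
            err = io.ErrUnexpectedEOF
        }
        return nil, fmt.Errorf("header @%d: read: %v", pos, err)
    }
    return hdr, nil
}

func main() {
    data := bytes.NewReader([]byte("0123456789")) // only 10 bytes

    if _, err := readHeaderAt(data, 0); err != nil {
        fmt.Println(err)
    } else {
        fmt.Println("header @0: ok")
    }

    // Reading at offset 5 leaves only 5 bytes - a truncated record.
    _, err := readHeaderAt(data, 5)
    fmt.Println(err) // header @5: read: unexpected EOF
}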
go/zodb/storage/fs1/fs1tools/dump.go

@@ -58,7 +58,7 @@ type Dumper interface {
 // To do so it reads file header and then iterates over all transactions in the file.
 // The logic to actually output information and, if needed read/process data, is implemented by Dumper d.
 func Dump(w io.Writer, path string, dir fs1.IterDir, d Dumper) (err error) {
-    defer xerr.Contextf(&err, "%s: %s", path, d.DumperName()) // XXX ok?
+    defer xerr.Contextf(&err, "%s: %s", d.DumperName(), path)
 
     it, f, err := fs1.IterateFile(path, dir)
     if err != nil {

@@ -359,9 +359,10 @@ func (d *DumperFsTail) DumpTxn(buf *xfmt.Buffer, it *fs1.Iter) error {
     d.data = xbytes.Realloc64(d.data, dataLen)
     _, err := it.R.ReadAt(d.data, txnh.DataPos())
     if err != nil {
+        // XXX -> txnh.Err(...) ?
         if err == io.EOF {
-            // XXX err = noEOF(err)
-            err = io.ErrUnexpectedEOF
+            err = io.ErrUnexpectedEOF // XXX -> noEOF(err)
         }
-        return &fs1.ErrTxnRecord{txnh.Pos, "read data payload", err}
+        return &fs1.TxnError{txnh.Pos, "read data payload", err}
     }
 
     // print information about read txn record
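Note: the Dump hunk above fixes the argument order of a deferred xerr.Contextf call, which decorates the named return error on the way out. A standalone sketch of that idiom with a local helper (contextf here is a stand-in, not the actual xerr package API; dump and its arguments are illustrative):

package main

import (
    "errors"
    "fmt"
)

// contextf prefixes *errp with formatted context, if *errp is non-nil.
// It is meant to be used via defer on a named return error.
func contextf(errp *error, format string, argv ...interface{}) {
    if *errp == nil {
        return
    }
    *errp = fmt.Errorf(format+": %s", append(argv, *errp)...)
}

// dump mimics the shape of Dump(): the deferred call decorates whatever
// error the body returns with "<dumper>: <path>".
func dump(path, dumperName string) (err error) {
    defer contextf(&err, "%s: %s", dumperName, path)

    return errors.New("file header: invalid magic")
}

func main() {
    fmt.Println(dump("data.fs", "fstail"))
    // prints: fstail: data.fs: file header: invalid magic
}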
go/zodb/storage/fs1/fsb/fsb.go

@@ -18,6 +18,7 @@
 // See https://www.nexedi.com/licensing for rationale and options.
 
 // Package fsb specializes cznic/b.Tree for FileStorage index needs.
+//
 // See gen-fsbtree for details.
 package fsb
go/zodb/storage/fs1/fsb/gen-fsbtree

 #!/bin/bash -e
 # generate b.Tree with compile-time KEY=zodb.Oid, VALUE=int64, tuned kd and direct oidCmp calls
+
+# Copyright (C) 2017  Nexedi SA and Contributors.
+#                     Kirill Smelkov <kirr@nexedi.com>
+#
+# This program is free software: you can Use, Study, Modify and Redistribute
+# it under the terms of the GNU General Public License version 3, or (at your
+# option) any later version, as published by the Free Software Foundation.
+#
+# You can also Link and Combine this program with other software covered by
+# the terms of any of the Free Software licenses or any of the Open Source
+# Initiative approved licenses and Convey the resulting work. Corresponding
+# source of such a combination shall include the source code for all other
+# software used.
+#
+# This program is distributed WITHOUT ANY WARRANTY; without even the implied
+# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+#
+# See COPYING file for full licensing terms.
+# See https://www.nexedi.com/licensing for rationale and options.
 
 KEY=zodb.Oid
 VALUE=int64
go/zodb/storage/fs1/py/gen-testdata

@@ -18,7 +18,7 @@
 #
 # See COPYING file for full licensing terms.
 # See https://www.nexedi.com/licensing for rationale and options.
-"""generate reference database and index for tests"""
+"""generate reference fs1 database and index for tests"""
 
 from ZODB.FileStorage import FileStorage
 from zodbtools.test.gen_testdata import gen_testdb
go/zodb/zodbtools/catobj.go

@@ -41,9 +41,9 @@ func Catobj(ctx context.Context, w io.Writer, stor zodb.IStorage, xid zodb.Xid)
         return err
     }
 
     _, err = w.Write(buf.Data) // NOTE deleted data are returned as err by Load
     buf.Release()
     return err // XXX err ctx ?
 }
 
 // Dumpobj dumps content of one ZODB object with zodbdump-like header

@@ -59,7 +59,7 @@ func Dumpobj(ctx context.Context, w io.Writer, stor zodb.IStorage, xid zodb.Xid,
     objInfo.Oid = xid.Oid
     objInfo.Tid = tid
     objInfo.Data = buf.Data
     objInfo.DataTid = tid // XXX generally wrong
 
     d := dumper{W: w, HashOnly: hashOnly}
     err = d.DumpData(&objInfo)
go/zodb/zodbtools/dump_test.go

@@ -49,7 +49,7 @@ func loadZdumpPy(t *testing.T, path string) string {
     // bugs. Here we want to compare output ideally bit-to-bit but those
     // \v vs \x0b glitches prevents that to be done directly. So here we
     // are with this ugly hack:
     var pyNoBackLetter = []struct{ backNoLetterRe, backLetter string }{
         {`\\x07`, `\a`},
         {`\\x08`, `\b`},
         {`\\x0b`, `\v`},
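Note: the pyNoBackLetter table above exists because Python's repr writes \x07, \x08 and \x0b where Go writes \a, \b and \v. A standalone sketch of the normalization (the use of regexp and the sample input are assumptions for illustration):

package main

import (
    "fmt"
    "regexp"
)

// pyNoBackLetter mirrors the replacement table from the hunk above: each
// Python-style escape is rewritten to its backslash-letter form before
// comparing zodbdump output with the Python reference.
var pyNoBackLetter = []struct{ backNoLetterRe, backLetter string }{
    {`\\x07`, `\a`},
    {`\\x08`, `\b`},
    {`\\x0b`, `\v`},
}

func normalize(s string) string {
    for _, rule := range pyNoBackLetter {
        s = regexp.MustCompile(rule.backNoLetterRe).ReplaceAllString(s, rule.backLetter)
    }
    return s
}

func main() {
    fmt.Println(normalize(`b'\x0bhello\x07'`)) // b'\vhello\a'
}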
go/zodb/zodbtools/info.go

@@ -75,7 +75,7 @@ func Info(ctx context.Context, w io.Writer, stor zodb.IStorage, parameterv []str
         out := ""
         if wantnames {
             out += parameter + "="
         }
 
         value, err := getParam(ctx, stor)
         if err != nil {
go/zodb/zodbtools/main.go

@@ -27,8 +27,8 @@ var commands = prog.CommandRegistry{
     // NOTE the order commands are listed here is the order how they will appear in help
     // TODO analyze ?
     // TODO cmp
     {"info", infoSummary, infoUsage, infoMain},
     {"dump", dumpSummary, dumpUsage, dumpMain},
     {"catobj", catobjSummary, catobjUsage, catobjMain},
 }