Commit aebdd447 authored by Kirill Smelkov's avatar Kirill Smelkov

Merge branch 'master' into y/nodefs-cancel

No notable patches — we only sync to pick up fixes for the tests and a
debug print adjustment to print flags in a stable order.

See nexedi/wendelin.core!16 (comment 188001)
for context.

* master:
  fs: return children in insertion order
  fs: move children map into separate struct
  fuse: TestDirectMount: ignore `Optional` field
  fuse: Don't print default if flags != 0
  fuse: Print debug flags in stable order
  fs: increase the limit for TestFileFdLeak
  fuse/test: Fix TestLargeDirRead
  .github: also test GOMAXPROCS=1
parents eb4d413d 0b3e1fde
...@@ -16,6 +16,9 @@ jobs: ...@@ -16,6 +16,9 @@ jobs:
- "1.18.x" # Golang upstream stable - "1.18.x" # Golang upstream stable
- "1.19.x" # Golang upstream stable - "1.19.x" # Golang upstream stable
- "1.20.x" # Golang upstream stable - "1.20.x" # Golang upstream stable
GOMAXPROCS:
- "" # Use all cpus (default).
- "1" # Single-cpu mode. Some failures are only visible like this.
# Don't cancel everything when one Go version fails # Don't cancel everything when one Go version fails
fail-fast: false fail-fast: false
runs-on: ubuntu-latest runs-on: ubuntu-latest
...@@ -35,4 +38,4 @@ jobs: ...@@ -35,4 +38,4 @@ jobs:
- run: echo user_allow_other | sudo tee -a /etc/fuse.conf - run: echo user_allow_other | sudo tee -a /etc/fuse.conf
# Actual test steps are in all.bash # Actual test steps are in all.bash
- run: ./all.bash - run: GOMAXPROCS="${{ matrix.GOMAXPROCS }}" ./all.bash
...@@ -96,7 +96,7 @@ type Inode struct { ...@@ -96,7 +96,7 @@ type Inode struct {
// Children of this Inode. // Children of this Inode.
// When you change this, you MUST increment changeCounter. // When you change this, you MUST increment changeCounter.
children map[string]*Inode children inodeChildren
// Parents of this Inode. Can be more than one due to hard links. // Parents of this Inode. Can be more than one due to hard links.
// When you change this, you MUST increment changeCounter. // When you change this, you MUST increment changeCounter.
...@@ -122,7 +122,7 @@ func initInode(n *Inode, ops InodeEmbedder, attr StableAttr, bridge *rawBridge, ...@@ -122,7 +122,7 @@ func initInode(n *Inode, ops InodeEmbedder, attr StableAttr, bridge *rawBridge,
n.persistent = persistent n.persistent = persistent
n.nodeId = nodeId n.nodeId = nodeId
if attr.Mode == fuse.S_IFDIR { if attr.Mode == fuse.S_IFDIR {
n.children = make(map[string]*Inode) n.children.init()
} }
} }
...@@ -169,12 +169,8 @@ func modeStr(m uint32) string { ...@@ -169,12 +169,8 @@ func modeStr(m uint32) string {
func (n *Inode) String() string { func (n *Inode) String() string {
n.mu.Lock() n.mu.Lock()
defer n.mu.Unlock() defer n.mu.Unlock()
var ss []string
for nm, ch := range n.children {
ss = append(ss, fmt.Sprintf("%q=i%d[%s]", nm, ch.stableAttr.Ino, modeStr(ch.stableAttr.Mode)))
}
return fmt.Sprintf("i%d (%s): %s", n.stableAttr.Ino, modeStr(n.stableAttr.Mode), strings.Join(ss, ",")) return fmt.Sprintf("i%d (%s): %s", n.stableAttr.Ino, modeStr(n.stableAttr.Mode), n.children.String())
} }
// sortNodes rearranges inode group in consistent order. // sortNodes rearranges inode group in consistent order.
...@@ -319,17 +315,13 @@ func (n *Inode) Path(root *Inode) string { ...@@ -319,17 +315,13 @@ func (n *Inode) Path(root *Inode) string {
// but it could be also valid if only iparent is locked and ichild was just // but it could be also valid if only iparent is locked and ichild was just
// created and only one goroutine keeps referencing it. // created and only one goroutine keeps referencing it.
func (iparent *Inode) setEntry(name string, ichild *Inode) { func (iparent *Inode) setEntry(name string, ichild *Inode) {
newParent := parentData{name, iparent}
if ichild.stableAttr.Mode == syscall.S_IFDIR { if ichild.stableAttr.Mode == syscall.S_IFDIR {
// Directories cannot have more than one parent. Clear the map. // This special-case is necessary because ichild may still have a
// This special-case is necessary because ichild may still have a // parent that was forgotten (i.e. removed from bridge.inoMap).
// parent that was forgotten (i.e. removed from bridge.inoMap). // parent that was forgotten (i.e. removed from bridge.inoMap).
ichild.parents.clear() ichild.parents.clear()
} }
ichild.parents.add(newParent) iparent.children.set(iparent, name, ichild)
iparent.children[name] = ichild
ichild.changeCounter++
iparent.changeCounter++
} }
// NewPersistentInode returns an Inode whose lifetime is not in // NewPersistentInode returns an Inode whose lifetime is not in
...@@ -402,7 +394,7 @@ retry: ...@@ -402,7 +394,7 @@ retry:
lockme = append(lockme[:0], n) lockme = append(lockme[:0], n)
parents = parents[:0] parents = parents[:0]
nChange := n.changeCounter nChange := n.changeCounter
live = n.lookupCount > 0 || len(n.children) > 0 || n.persistent live = n.lookupCount > 0 || n.children.len() > 0 || n.persistent
for _, p := range n.parents.all() { for _, p := range n.parents.all() {
parents = append(parents, p) parents = append(parents, p)
lockme = append(lockme, p.parent) lockme = append(lockme, p.parent)
...@@ -422,15 +414,12 @@ retry: ...@@ -422,15 +414,12 @@ retry:
} }
for _, p := range parents { for _, p := range parents {
if p.parent.children[p.name] != n { if p.parent.children.get(p.name) != n {
// another node has replaced us already // another node has replaced us already
continue continue
} }
delete(p.parent.children, p.name) p.parent.children.del(p.parent, p.name)
p.parent.changeCounter++
} }
n.parents.clear()
n.changeCounter++
if n.lookupCount != 0 { if n.lookupCount != 0 {
log.Panicf("n%d %p lookupCount changed: %d", n.nodeId, n, n.lookupCount) log.Panicf("n%d %p lookupCount changed: %d", n.nodeId, n, n.lookupCount)
...@@ -453,7 +442,7 @@ retry: ...@@ -453,7 +442,7 @@ retry:
func (n *Inode) GetChild(name string) *Inode { func (n *Inode) GetChild(name string) *Inode {
n.mu.Lock() n.mu.Lock()
defer n.mu.Unlock() defer n.mu.Unlock()
return n.children[name] return n.children.get(name)
} }
// AddChild adds a child to this node. If overwrite is false, fail if // AddChild adds a child to this node. If overwrite is false, fail if
...@@ -466,13 +455,10 @@ func (n *Inode) AddChild(name string, ch *Inode, overwrite bool) (success bool) ...@@ -466,13 +455,10 @@ func (n *Inode) AddChild(name string, ch *Inode, overwrite bool) (success bool)
retry: retry:
for { for {
lockNode2(n, ch) lockNode2(n, ch)
prev, ok := n.children[name] prev := n.children.get(name)
parentCounter := n.changeCounter parentCounter := n.changeCounter
if !ok { if prev == nil {
n.children[name] = ch n.children.set(n, name, ch)
ch.parents.add(parentData{name, n})
n.changeCounter++
ch.changeCounter++
unlockNode2(n, ch) unlockNode2(n, ch)
return true return true
} }
...@@ -489,10 +475,7 @@ retry: ...@@ -489,10 +475,7 @@ retry:
} }
prev.parents.delete(parentData{name, n}) prev.parents.delete(parentData{name, n})
n.children[name] = ch n.children.set(n, name, ch)
ch.parents.add(parentData{name, n})
n.changeCounter++
ch.changeCounter++
prev.changeCounter++ prev.changeCounter++
unlockNodes(lockme[:]...) unlockNodes(lockme[:]...)
...@@ -504,16 +487,7 @@ retry: ...@@ -504,16 +487,7 @@ retry:
func (n *Inode) Children() map[string]*Inode { func (n *Inode) Children() map[string]*Inode {
n.mu.Lock() n.mu.Lock()
defer n.mu.Unlock() defer n.mu.Unlock()
r := make(map[string]*Inode, len(n.children)) return n.children.toMap()
for k, v := range n.children {
r[k] = v
}
return r
}
type childEntry struct {
Name string
Inode *Inode
} }
// childrenList returns the list of children of this directory Inode. // childrenList returns the list of children of this directory Inode.
...@@ -522,28 +496,7 @@ type childEntry struct { ...@@ -522,28 +496,7 @@ type childEntry struct {
func (n *Inode) childrenList() []childEntry { func (n *Inode) childrenList() []childEntry {
n.mu.Lock() n.mu.Lock()
defer n.mu.Unlock() defer n.mu.Unlock()
r := make([]childEntry, 0, 2*len(n.children)) return n.children.list()
// The spec doesn't guarantee this, but as long as maps remain
// backed by hash tables, the simplest mechanism for
// randomization is picking a random start index. We undo this
// here by picking a deterministic start index again. If the
// Go runtime ever implements a memory moving GC, we might
// have to look at the keys instead.
minNode := ^uintptr(0)
minIdx := -1
for k, v := range n.children {
if p := uintptr(unsafe.Pointer(v)); p < minNode {
minIdx = len(r)
minNode = p
}
r = append(r, childEntry{Name: k, Inode: v})
}
if minIdx > 0 {
r = append(r[minIdx:], r[:minIdx]...)
}
return r
} }
// Parents returns a parent of this Inode, or nil if this Inode is // Parents returns a parent of this Inode, or nil if this Inode is
...@@ -589,7 +542,7 @@ retry: ...@@ -589,7 +542,7 @@ retry:
lockme = append(lockme[:0], n) lockme = append(lockme[:0], n)
nChange := n.changeCounter nChange := n.changeCounter
for _, nm := range names { for _, nm := range names {
ch := n.children[nm] ch := n.children.get(nm)
if ch == nil { if ch == nil {
n.mu.Unlock() n.mu.Unlock()
return false, true return false, true
...@@ -599,21 +552,17 @@ retry: ...@@ -599,21 +552,17 @@ retry:
n.mu.Unlock() n.mu.Unlock()
lockNodes(lockme...) lockNodes(lockme...)
if n.changeCounter != nChange { if n.changeCounter != nChange {
unlockNodes(lockme...) unlockNodes(lockme...)
continue retry continue retry
} }
for _, nm := range names { for _, nm := range names {
ch := n.children[nm] n.children.del(n, nm)
delete(n.children, nm)
ch.parents.delete(parentData{nm, n})
ch.changeCounter++
} }
n.changeCounter++
live = n.lookupCount > 0 || len(n.children) > 0 || n.persistent live = n.lookupCount > 0 || n.children.len() > 0 || n.persistent
unlockNodes(lockme...) unlockNodes(lockme...)
// removal successful // removal successful
...@@ -642,8 +591,8 @@ retry: ...@@ -642,8 +591,8 @@ retry:
counter1 := n.changeCounter counter1 := n.changeCounter
counter2 := newParent.changeCounter counter2 := newParent.changeCounter
oldChild := n.children[old] oldChild := n.children.get(old)
destChild := newParent.children[newName] destChild := newParent.children.get(newName)
unlockNode2(n, newParent) unlockNode2(n, newParent)
if destChild != nil && !overwrite { if destChild != nil && !overwrite {
...@@ -657,27 +606,17 @@ retry: ...@@ -657,27 +606,17 @@ retry:
} }
if oldChild != nil { if oldChild != nil {
delete(n.children, old) n.children.del(n, old)
oldChild.parents.delete(parentData{old, n})
n.changeCounter++
oldChild.changeCounter++
} }
if destChild != nil { if destChild != nil {
// This can cause the child to be slated for // This can cause the child to be slated for
// removal; see below // removal; see below
delete(newParent.children, newName) newParent.children.del(newParent, newName)
destChild.parents.delete(parentData{newName, newParent})
destChild.changeCounter++
newParent.changeCounter++
} }
if oldChild != nil { if oldChild != nil {
newParent.children[newName] = oldChild newParent.children.set(newParent, newName, oldChild)
newParent.changeCounter++
oldChild.parents.add(parentData{newName, newParent})
oldChild.changeCounter++
} }
unlockNodes(n, newParent, oldChild, destChild) unlockNodes(n, newParent, oldChild, destChild)
...@@ -699,8 +638,8 @@ retry: ...@@ -699,8 +638,8 @@ retry:
counter1 := oldParent.changeCounter counter1 := oldParent.changeCounter
counter2 := newParent.changeCounter counter2 := newParent.changeCounter
oldChild := oldParent.children[oldName] oldChild := oldParent.children.get(oldName)
destChild := newParent.children[newName] destChild := newParent.children.get(newName)
unlockNode2(oldParent, newParent) unlockNode2(oldParent, newParent)
if destChild == oldChild { if destChild == oldChild {
...@@ -715,34 +654,20 @@ retry: ...@@ -715,34 +654,20 @@ retry:
// Detach // Detach
if oldChild != nil { if oldChild != nil {
delete(oldParent.children, oldName) oldParent.children.del(oldParent, oldName)
oldChild.parents.delete(parentData{oldName, oldParent})
oldParent.changeCounter++
oldChild.changeCounter++
} }
if destChild != nil { if destChild != nil {
delete(newParent.children, newName) newParent.children.del(newParent, newName)
destChild.parents.delete(parentData{newName, newParent})
destChild.changeCounter++
newParent.changeCounter++
} }
// Attach // Attach
if oldChild != nil { if oldChild != nil {
newParent.children[newName] = oldChild newParent.children.set(newParent, newName, oldChild)
newParent.changeCounter++
oldChild.parents.add(parentData{newName, newParent})
oldChild.changeCounter++
} }
if destChild != nil { if destChild != nil {
oldParent.children[oldName] = destChild oldParent.children.set(oldParent, oldName, destChild)
oldParent.changeCounter++
destChild.parents.add(parentData{oldName, oldParent})
destChild.changeCounter++
} }
unlockNodes(oldParent, newParent, oldChild, destChild) unlockNodes(oldParent, newParent, oldChild, destChild)
return return
......
// Copyright 2023 the Go-FUSE Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package fs
import (
"fmt"
"strings"
)
// childEntry is a single (name, inode) directory entry.
type childEntry struct {
	Name string
	Inode *Inode
}
// inodeChildren is a hashmap with deterministic ordering. It is
// important to return the children in a deterministic order for 2
// reasons:
//
// 1. if the ordering is non-deterministic, multiple concurrent
// readdirs can lead to cache corruption (see issue #391)
//
// 2. it simplifies the implementation of directory seeking: the NFS
// protocol doesn't open and close directories. Instead, a directory
// read must always be continued from a previously handed out offset.
//
// By storing the entries in insertion order, and marking them with an
// int64 logical timestamp, the logical timestamp can serve as readdir
// cookie.
type inodeChildren struct {
	// childrenMap maps a child name to its index into the children
	// slice. Only live (not deleted) entries have a map entry.
	childrenMap map[string]int
	// children holds the entries in insertion order. del() zeroes a
	// slot rather than removing it; the holes are reclaimed lazily by
	// compact().
	children []childEntry
}
// init prepares the container for use by allocating the empty
// name -> index lookup table.
func (c *inodeChildren) init() {
	c.childrenMap = map[string]int{}
}
// String renders the children as a comma-separated list of
// `"name"=i<ino>[<mode>]` entries, in insertion order.
func (c *inodeChildren) String() string {
	var ss []string
	for _, e := range c.children {
		ch := e.Inode
		if ch == nil {
			// Skip holes left behind by del(). toMap() and list()
			// guard the same way; without this check, dereferencing
			// a deleted slot's nil Inode would panic.
			continue
		}
		ss = append(ss, fmt.Sprintf("%q=i%d[%s]", e.Name, ch.stableAttr.Ino, modeStr(ch.stableAttr.Mode)))
	}
	return strings.Join(ss, ",")
}
// get returns the child registered under name, or nil if there is none.
func (c *inodeChildren) get(name string) *Inode {
	if idx, ok := c.childrenMap[name]; ok {
		return c.children[idx].Inode
	}
	return nil
}
// compact prunes deleted (hole) entries from the children slice and
// rebuilds the name -> index map to match, preserving the insertion
// order of the live entries.
func (c *inodeChildren) compact() {
	// Leave headroom so the caller's pending append does not have to
	// regrow the freshly compacted slice immediately.
	nc := make([]childEntry, 0, 2*len(c.childrenMap)+1)
	// Presize the map for the number of live entries. (The original
	// used len(nc) as the hint, which is always 0 at this point.)
	nm := make(map[string]int, len(c.childrenMap))
	for _, e := range c.children {
		if e.Inode == nil {
			// Skip holes left behind by del().
			continue
		}
		nm[e.Name] = len(nc)
		nc = append(nc, e)
	}
	c.childrenMap = nm
	c.children = nc
}
// set adds or replaces the child entry for name under parent, records
// the reverse (child -> parent) link, and bumps both change counters.
// Callers are expected to hold the relevant inode locks (see setEntry
// and AddChild).
func (c *inodeChildren) set(parent *Inode, name string, ch *Inode) {
	idx, ok := c.childrenMap[name]
	if !ok {
		// New name: append a fresh slot. Compact first when the slice
		// is full, so holes from earlier deletions are reclaimed
		// before we grow the backing array.
		if cap(c.children) == len(c.children) {
			c.compact()
		}
		idx = len(c.children)
		c.children = append(c.children, childEntry{})
	}
	c.childrenMap[name] = idx
	c.children[idx] = childEntry{Name: name, Inode: ch}
	parent.changeCounter++
	// NOTE(review): when overwriting an existing name, the previous
	// child's back-link is not removed here; callers such as AddChild
	// do prev.parents.delete themselves — confirm all callers do.
	ch.parents.add(parentData{name, parent})
	ch.changeCounter++
}
// len reports the number of live (not deleted) children; holes in the
// children slice are not counted because they have no map entry.
func (c *inodeChildren) len() int {
	return len(c.childrenMap)
}
// toMap copies the live children into a freshly allocated
// name -> inode map.
func (c *inodeChildren) toMap() map[string]*Inode {
	out := make(map[string]*Inode, len(c.childrenMap))
	for _, entry := range c.children {
		if entry.Inode == nil {
			continue // hole left by del()
		}
		out[entry.Name] = entry.Inode
	}
	return out
}
// del removes the child entry for name, clears the child's link back
// to parent, and bumps both change counters. It is a no-op when name
// is not present.
//
// The slot in the children slice is zeroed rather than removed, so the
// indices of the remaining entries stay valid; the hole is reclaimed
// later by compact().
func (c *inodeChildren) del(parent *Inode, name string) {
	idx, ok := c.childrenMap[name]
	if !ok {
		return
	}
	ch := c.children[idx].Inode
	delete(c.childrenMap, name)
	c.children[idx] = childEntry{}
	ch.parents.delete(parentData{name, parent})
	ch.changeCounter++
	parent.changeCounter++
}
// list returns the live children as a slice, in insertion order.
func (c *inodeChildren) list() []childEntry {
	live := make([]childEntry, 0, len(c.childrenMap))
	for i := range c.children {
		if e := c.children[i]; e.Inode != nil {
			live = append(live, e)
		}
	}
	return live
}
...@@ -163,7 +163,7 @@ func TestBasic(t *testing.T) { ...@@ -163,7 +163,7 @@ func TestBasic(t *testing.T) {
func TestFileFdLeak(t *testing.T) { func TestFileFdLeak(t *testing.T) {
tc := newTestCase(t, &testOptions{ tc := newTestCase(t, &testOptions{
suppressDebug: true, suppressDebug: false,
attrCache: true, attrCache: true,
entryCache: true, entryCache: true,
}) })
...@@ -174,8 +174,9 @@ func TestFileFdLeak(t *testing.T) { ...@@ -174,8 +174,9 @@ func TestFileFdLeak(t *testing.T) {
bridge := tc.rawFS.(*rawBridge) bridge := tc.rawFS.(*rawBridge)
tc = nil tc = nil
if got := len(bridge.files); got > 3 { // posixtest.FdLeak also uses 15 as a limit.
t.Errorf("found %d used file handles, should be <= 3", got) if got, want := len(bridge.files), 15; got > want {
t.Errorf("found %d used file handles, should be <= %d", got, want)
} }
} }
......
...@@ -138,7 +138,6 @@ func mountCheckOptions(t *testing.T, opts MountOptions) (info mountinfo.Info) { ...@@ -138,7 +138,6 @@ func mountCheckOptions(t *testing.T, opts MountOptions) (info mountinfo.Info) {
Source: orig.Source, Source: orig.Source,
FSType: orig.FSType, FSType: orig.FSType,
VFSOptions: orig.VFSOptions, VFSOptions: orig.VFSOptions,
Optional: orig.Optional,
} }
// server needs to run for Unmount to work // server needs to run for Unmount to work
go srv.Serve() go srv.Serve()
......
...@@ -12,14 +12,14 @@ import ( ...@@ -12,14 +12,14 @@ import (
) )
var ( var (
writeFlagNames = map[int64]string{ writeFlagNames = newFlagNames(map[int64]string{
WRITE_CACHE: "CACHE", WRITE_CACHE: "CACHE",
WRITE_LOCKOWNER: "LOCKOWNER", WRITE_LOCKOWNER: "LOCKOWNER",
} })
readFlagNames = map[int64]string{ readFlagNames = newFlagNames(map[int64]string{
READ_LOCKOWNER: "LOCKOWNER", READ_LOCKOWNER: "LOCKOWNER",
} })
initFlagNames = map[int64]string{ initFlagNames = newFlagNames(map[int64]string{
CAP_ASYNC_READ: "ASYNC_READ", CAP_ASYNC_READ: "ASYNC_READ",
CAP_POSIX_LOCKS: "POSIX_LOCKS", CAP_POSIX_LOCKS: "POSIX_LOCKS",
CAP_FILE_OPS: "FILE_OPS", CAP_FILE_OPS: "FILE_OPS",
...@@ -46,11 +46,11 @@ var ( ...@@ -46,11 +46,11 @@ var (
CAP_CACHE_SYMLINKS: "CACHE_SYMLINKS", CAP_CACHE_SYMLINKS: "CACHE_SYMLINKS",
CAP_NO_OPENDIR_SUPPORT: "NO_OPENDIR_SUPPORT", CAP_NO_OPENDIR_SUPPORT: "NO_OPENDIR_SUPPORT",
CAP_EXPLICIT_INVAL_DATA: "EXPLICIT_INVAL_DATA", CAP_EXPLICIT_INVAL_DATA: "EXPLICIT_INVAL_DATA",
} })
releaseFlagNames = map[int64]string{ releaseFlagNames = newFlagNames(map[int64]string{
RELEASE_FLUSH: "FLUSH", RELEASE_FLUSH: "FLUSH",
} })
openFlagNames = map[int64]string{ openFlagNames = newFlagNames(map[int64]string{
int64(os.O_WRONLY): "WRONLY", int64(os.O_WRONLY): "WRONLY",
int64(os.O_RDWR): "RDWR", int64(os.O_RDWR): "RDWR",
int64(os.O_APPEND): "APPEND", int64(os.O_APPEND): "APPEND",
...@@ -64,38 +64,81 @@ var ( ...@@ -64,38 +64,81 @@ var (
int64(syscall.O_CLOEXEC): "CLOEXEC", int64(syscall.O_CLOEXEC): "CLOEXEC",
int64(syscall.O_DIRECTORY): "DIRECTORY", int64(syscall.O_DIRECTORY): "DIRECTORY",
} })
fuseOpenFlagNames = map[int64]string{ fuseOpenFlagNames = newFlagNames(map[int64]string{
FOPEN_DIRECT_IO: "DIRECT", FOPEN_DIRECT_IO: "DIRECT",
FOPEN_KEEP_CACHE: "CACHE", FOPEN_KEEP_CACHE: "CACHE",
FOPEN_NONSEEKABLE: "NONSEEK", FOPEN_NONSEEKABLE: "NONSEEK",
FOPEN_CACHE_DIR: "CACHE_DIR", FOPEN_CACHE_DIR: "CACHE_DIR",
FOPEN_STREAM: "STREAM", FOPEN_STREAM: "STREAM",
} })
accessFlagName = map[int64]string{ accessFlagName = newFlagNames(map[int64]string{
X_OK: "x", X_OK: "x",
W_OK: "w", W_OK: "w",
R_OK: "r", R_OK: "r",
} })
getAttrFlagNames = map[int64]string{ getAttrFlagNames = newFlagNames(map[int64]string{
FUSE_GETATTR_FH: "FH", FUSE_GETATTR_FH: "FH",
} })
) )
func flagString(names map[int64]string, fl int64, def string) string { // flagNames associate flag bits to their names.
// flagNames associates each flag bit with the name of the flag that
// contains it.
type flagNames [64]flagNameEntry

// flagNameEntry describes one flag value.
//
// Usually a flag constitutes only one bit, but, for example at least O_SYNC and
// O_TMPFILE are represented by a value with two bits set. To handle such
// situations we map all bits of a flag to the same flagNameEntry.
type flagNameEntry struct {
	bits int64
	name string
}
// newFlagNames builds a flagNames table from a flag -> name map.
func newFlagNames(names map[int64]string) *flagNames {
	fn := &flagNames{}
	for flag, name := range names {
		fn.set(flag, name)
	}
	return fn
}
// set associates the flag value with name, registering the entry under
// every bit the flag occupies. It panics if any of those bits is
// already claimed by another flag.
func (names *flagNames) set(flag int64, name string) {
	entry := flagNameEntry{bits: flag, name: name}
	for bit := 0; bit < 64; bit++ {
		if flag&(1<<uint(bit)) == 0 {
			continue
		}
		if existing := names[bit]; existing.bits != 0 {
			panic(fmt.Sprintf("%s (%x) overlaps with %s (%x)", name, flag, existing.name, existing.bits))
		}
		names[bit] = entry
	}
}
func flagString(names *flagNames, fl int64, def string) string {
s := []string{} s := []string{}
for k, v := range names { // emit flags in their numeric order
if fl&k != 0 { for i := range names {
s = append(s, v) entry := &names[i]
fl ^= k if entry.bits == 0 {
continue
}
if fl&entry.bits == entry.bits {
s = append(s, entry.name)
fl ^= entry.bits
if fl == 0 {
break
} }
} }
if len(s) == 0 && def != "" {
s = []string{def}
} }
if fl != 0 { if fl != 0 {
s = append(s, fmt.Sprintf("0x%x", fl)) s = append(s, fmt.Sprintf("0x%x", fl))
} }
if len(s) == 0 && def != "" {
return def
}
return strings.Join(s, ",") return strings.Join(s, ",")
} }
......
...@@ -9,9 +9,9 @@ import ( ...@@ -9,9 +9,9 @@ import (
) )
func init() { func init() {
initFlagNames[CAP_XTIMES] = "XTIMES" initFlagNames.set(CAP_XTIMES, "XTIMES")
initFlagNames[CAP_VOL_RENAME] = "VOL_RENAME" initFlagNames.set(CAP_VOL_RENAME, "VOL_RENAME")
initFlagNames[CAP_CASE_INSENSITIVE] = "CASE_INSENSITIVE" initFlagNames.set(CAP_CASE_INSENSITIVE, "CASE_INSENSITIVE")
} }
func (a *Attr) string() string { func (a *Attr) string() string {
......
...@@ -10,9 +10,9 @@ import ( ...@@ -10,9 +10,9 @@ import (
) )
func init() { func init() {
openFlagNames[syscall.O_DIRECT] = "DIRECT" openFlagNames.set(syscall.O_DIRECT, "DIRECT")
openFlagNames[syscall.O_LARGEFILE] = "LARGEFILE" openFlagNames.set(syscall.O_LARGEFILE, "LARGEFILE")
openFlagNames[syscall_O_NOATIME] = "NOATIME" openFlagNames.set(syscall_O_NOATIME, "NOATIME")
} }
func (a *Attr) string() string { func (a *Attr) string() string {
......
// Copyright 2023 the Go-FUSE Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package fuse
import (
"testing"
)
// verify that flagString always formats flags in the same order.
func TestFlagStringOrder(t *testing.T) {
var flags int64 = CAP_ASYNC_READ | CAP_SPLICE_WRITE | CAP_READDIRPLUS | CAP_MAX_PAGES | CAP_EXPLICIT_INVAL_DATA
want := "ASYNC_READ,SPLICE_WRITE,READDIRPLUS,MAX_PAGES,EXPLICIT_INVAL_DATA"
// loop many times to check for sure the order is untied from map iteration order
for i := 0; i < 100; i++ {
have := flagString(initFlagNames, flags, "")
if have != want {
t.Fatalf("flagString:\nhave: %q\nwant: %q", have, want)
}
}
}
// verify how flagString handles provided default.
func TestFlagStringDefault(t *testing.T) {
names := newFlagNames(map[int64]string{
1: "AAA",
2: "BBB",
4: "CCC",
})
testv := []struct {
flags int64
def string
strok string
}{
{0, "", ""},
{0, "X", "X"},
{1, "X", "AAA"},
{5, "X", "AAA,CCC"},
{8, "X", "0x8"},
{9, "X", "AAA,0x8"},
}
for _, test := range testv {
str := flagString(names, test.flags, test.def)
if str != test.strok {
t.Errorf("flagString(%x, %q) -> got %q ; want %q",
test.flags, test.def, str, test.strok)
}
}
}
...@@ -648,10 +648,13 @@ func TestLargeDirRead(t *testing.T) { ...@@ -648,10 +648,13 @@ func TestLargeDirRead(t *testing.T) {
nameSet := make(map[string]bool) nameSet := make(map[string]bool)
for i := 0; i < created; i++ { for i := 0; i < created; i++ {
// Should vary file name length. // Should vary file name length.
base := fmt.Sprintf("file%d%s", i, base := fmt.Sprintf("file%d.%s", i,
randomLengthString(len(longname))) randomLengthString(len(longname)))
name := filepath.Join(subdir, base) name := filepath.Join(subdir, base)
if nameSet[base] {
panic(fmt.Sprintf("duplicate name %q", base))
}
nameSet[base] = true nameSet[base] = true
tc.WriteFile(name, []byte("bla"), 0777) tc.WriteFile(name, []byte("bla"), 0777)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment