Commit debf798b authored by Linus Torvalds

Merge git://oss.sgi.com:8090/oss/git/xfs-2.6

* git://oss.sgi.com:8090/oss/git/xfs-2.6: (71 commits)
  [XFS] Sync up one/two other minor changes missed in previous merges.
  [XFS] Reenable the noikeep (delete inode cluster space) option by default.
  [XFS] Check that a page has dirty buffers before finding it acceptable for
  [XFS] Fixup naming inconsistencies found by Pekka Enberg and one from Jan
  [XFS] Explain the race closed by the addition of vn_iowait() to the start
  [XFS] Fixing the error caused by the conflict between DIO Write's
  [XFS] Fixing KDB's xrwtrc command, also added the current process id into
  [XFS] Fix compiler warning from xfs_file_compat_invis_ioctl prototype. 
  [XFS] remove bogus INT_GET for u8 variables in xfs_dir_leaf.c 
  [XFS] endianess annotations for xfs_da_node_hdr_t 
  [XFS] endianess annotations for xfs_da_node_entry_t 
  [XFS] store xfs_attr_inactive_list_t in native endian 
  [XFS] store xfs_attr_sf_sort in native endian 
  [XFS] endianess annotations for xfs_attr_shortform_t 
  [XFS] endianess annotations for xfs_attr_leaf_name_remote_t 
  [XFS] endianess annotations for xfs_attr_leaf_name_local_t 
  [XFS] endianess annotations for xfs_attr_leaf_entry_t 
  [XFS] endianess annotations for xfs_attr_leaf_hdr_t 
  [XFS] remove bogus INT_GET on u8 variables in xfs_dir2_block.c 
  [XFS] endianess annotations for xfs_da_blkinfo_t 
  ...
parents b0e6e962 4d74f423
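Most of the commits in this series convert on-disk XFS structures from the old INT_GET()/ARCH_CONVERT macros to sparse-checkable __be16/__be32 fields read through be16_to_cpu()/be32_to_cpu(). A minimal sketch of that pattern, using an invented demo structure rather than a real XFS one:

#include <linux/types.h>
#include <asm/byteorder.h>

/* hypothetical on-disk header; all fields are stored big-endian */
typedef struct demo_disk_hdr {
	__be16	magic;		/* validity check on block */
	__be16	count;		/* count of active entries */
	__be32	first_hashval;	/* hash value of the first entry */
} demo_disk_hdr_t;

static int demo_hdr_count(demo_disk_hdr_t *hdr)
{
	/* be16_to_cpu() replaces INT_GET(hdr->count, ARCH_CONVERT) */
	return be16_to_cpu(hdr->count);
}

static void demo_hdr_set_hashval(demo_disk_hdr_t *hdr, __u32 cpu_hashval)
{
	/* cpu_to_be32() is the matching conversion on the write side */
	hdr->first_hashval = cpu_to_be32(cpu_hashval);
}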
#
# Copyright (c) 2000-2005 Silicon Graphics, Inc. All Rights Reserved.
# Copyright (c) 2000-2005 Silicon Graphics, Inc.
# All Rights Reserved.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of version 2 of the GNU General Public License as
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it would be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# This program is distributed in the hope that it would be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# Further, this software is distributed without any warranty that it is
# free of the rightful claim of any third person regarding infringement
# or the like. Any license provided herein, whether implied or
# otherwise, applies only to this software file. Patent licenses, if
# any, provided herein do not apply to combinations of this program with
# other software, or any other product whatsoever.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston MA 02111-1307, USA.
#
# Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
# Mountain View, CA 94043, or:
#
# http://www.sgi.com
#
# For further information regarding this notice, see:
#
# http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
# You should have received a copy of the GNU General Public License
# along with this program; if not, write the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
EXTRA_CFLAGS += -Ifs/xfs -Ifs/xfs/linux-2.6 -funsigned-char
......@@ -36,7 +22,7 @@ XFS_LINUX := linux-2.6
ifeq ($(CONFIG_XFS_DEBUG),y)
EXTRA_CFLAGS += -g -DSTATIC="" -DDEBUG
EXTRA_CFLAGS += -DPAGEBUF_LOCK_TRACKING
EXTRA_CFLAGS += -DXFS_BUF_LOCK_TRACKING
endif
ifeq ($(CONFIG_XFS_TRACE),y)
EXTRA_CFLAGS += -DXFS_ALLOC_TRACE
......@@ -50,7 +36,7 @@ ifeq ($(CONFIG_XFS_TRACE),y)
EXTRA_CFLAGS += -DXFS_ILOCK_TRACE
EXTRA_CFLAGS += -DXFS_LOG_TRACE
EXTRA_CFLAGS += -DXFS_RW_TRACE
EXTRA_CFLAGS += -DPAGEBUF_TRACE
EXTRA_CFLAGS += -DXFS_BUF_TRACE
EXTRA_CFLAGS += -DXFS_VNODE_TRACE
endif
......
......@@ -23,17 +23,8 @@
#include <linux/mm.h>
/*
* memory management routines
* Process flags handling
*/
#define KM_SLEEP 0x0001u
#define KM_NOSLEEP 0x0002u
#define KM_NOFS 0x0004u
#define KM_MAYFAIL 0x0008u
#define kmem_zone kmem_cache
#define kmem_zone_t struct kmem_cache
typedef unsigned long xfs_pflags_t;
#define PFLAGS_TEST_NOIO() (current->flags & PF_NOIO)
#define PFLAGS_TEST_FSTRANS() (current->flags & PF_FSTRANS)
......@@ -67,74 +58,102 @@ typedef unsigned long xfs_pflags_t;
*(NSTATEP) = *(OSTATEP); \
} while (0)
static __inline gfp_t kmem_flags_convert(unsigned int __nocast flags)
/*
* General memory allocation interfaces
*/
#define KM_SLEEP 0x0001u
#define KM_NOSLEEP 0x0002u
#define KM_NOFS 0x0004u
#define KM_MAYFAIL 0x0008u
/*
* We use a special process flag to avoid recursive callbacks into
* the filesystem during transactions. We will also issue our own
* warnings, so we explicitly skip any generic ones (silly of us).
*/
static inline gfp_t
kmem_flags_convert(unsigned int __nocast flags)
{
gfp_t lflags = __GFP_NOWARN; /* we'll report problems, if need be */
gfp_t lflags;
#ifdef DEBUG
if (unlikely(flags & ~(KM_SLEEP|KM_NOSLEEP|KM_NOFS|KM_MAYFAIL))) {
printk(KERN_WARNING
"XFS: memory allocation with wrong flags (%x)\n", flags);
BUG();
}
#endif
BUG_ON(flags & ~(KM_SLEEP|KM_NOSLEEP|KM_NOFS|KM_MAYFAIL));
if (flags & KM_NOSLEEP) {
lflags |= GFP_ATOMIC;
lflags = GFP_ATOMIC | __GFP_NOWARN;
} else {
lflags |= GFP_KERNEL;
/* avoid recursive callbacks to filesystem during transactions */
lflags = GFP_KERNEL | __GFP_NOWARN;
if (PFLAGS_TEST_FSTRANS() || (flags & KM_NOFS))
lflags &= ~__GFP_FS;
}
return lflags;
}
static __inline kmem_zone_t *
extern void *kmem_alloc(size_t, unsigned int __nocast);
extern void *kmem_realloc(void *, size_t, size_t, unsigned int __nocast);
extern void *kmem_zalloc(size_t, unsigned int __nocast);
extern void kmem_free(void *, size_t);
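A quick sketch of how the KM_* flags are expected to flow through kmem_flags_convert() and the allocation interfaces just declared; the sizes and pointer names are purely illustrative:

static void demo_kmem_usage(void)
{
	/* blocking allocation: GFP_KERNEL | __GFP_NOWARN */
	void	*buf = kmem_zalloc(512, KM_SLEEP);

	/*
	 * Inside a transaction (PF_FSTRANS set) or with KM_NOFS the
	 * __GFP_FS bit is cleared, so memory reclaim cannot recurse
	 * back into the filesystem.
	 */
	void	*log = kmem_alloc(256, KM_SLEEP | KM_NOFS);

	/* atomic-context allocation: GFP_ATOMIC | __GFP_NOWARN */
	void	*hdr = kmem_alloc(64, KM_NOSLEEP);

	kmem_free(hdr, 64);
	kmem_free(log, 256);
	kmem_free(buf, 512);
}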
/*
* Zone interfaces
*/
#define KM_ZONE_HWALIGN SLAB_HWCACHE_ALIGN
#define KM_ZONE_RECLAIM SLAB_RECLAIM_ACCOUNT
#define KM_ZONE_SPREAD 0
#define kmem_zone kmem_cache
#define kmem_zone_t struct kmem_cache
static inline kmem_zone_t *
kmem_zone_init(int size, char *zone_name)
{
return kmem_cache_create(zone_name, size, 0, 0, NULL, NULL);
}
static __inline void
static inline kmem_zone_t *
kmem_zone_init_flags(int size, char *zone_name, unsigned long flags,
void (*construct)(void *, kmem_zone_t *, unsigned long))
{
return kmem_cache_create(zone_name, size, 0, flags, construct, NULL);
}
static inline void
kmem_zone_free(kmem_zone_t *zone, void *ptr)
{
kmem_cache_free(zone, ptr);
}
static __inline void
static inline void
kmem_zone_destroy(kmem_zone_t *zone)
{
if (zone && kmem_cache_destroy(zone))
BUG();
}
extern void *kmem_zone_zalloc(kmem_zone_t *, unsigned int __nocast);
extern void *kmem_zone_alloc(kmem_zone_t *, unsigned int __nocast);
extern void *kmem_zone_zalloc(kmem_zone_t *, unsigned int __nocast);
extern void *kmem_alloc(size_t, unsigned int __nocast);
extern void *kmem_realloc(void *, size_t, size_t, unsigned int __nocast);
extern void *kmem_zalloc(size_t, unsigned int __nocast);
extern void kmem_free(void *, size_t);
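The zone wrappers above follow the usual init/alloc/free/destroy lifecycle; a rough sketch, with the demo_item type and zone name invented for illustration (xfs_buf_init() further down does the real thing for xfs_buf_zone):

typedef struct demo_item {
	int	id;
	char	tag[16];
} demo_item_t;

static kmem_zone_t *demo_zone;

static int demo_zone_setup(void)
{
	/* KM_ZONE_HWALIGN gives cache-line aligned objects */
	demo_zone = kmem_zone_init_flags(sizeof(demo_item_t), "demo_item",
					 KM_ZONE_HWALIGN, NULL);
	return demo_zone ? 0 : -ENOMEM;
}

static void demo_zone_use(void)
{
	demo_item_t	*item = kmem_zone_zalloc(demo_zone, KM_SLEEP);

	item->id = 1;
	kmem_zone_free(demo_zone, item);
}

static void demo_zone_teardown(void)
{
	kmem_zone_destroy(demo_zone);
}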
/*
* Low memory cache shrinkers
*/
typedef struct shrinker *kmem_shaker_t;
typedef int (*kmem_shake_func_t)(int, gfp_t);
static __inline kmem_shaker_t
static inline kmem_shaker_t
kmem_shake_register(kmem_shake_func_t sfunc)
{
return set_shrinker(DEFAULT_SEEKS, sfunc);
}
static __inline void
static inline void
kmem_shake_deregister(kmem_shaker_t shrinker)
{
remove_shrinker(shrinker);
}
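For completeness, a hedged sketch of how a cache shrinker would be wired up through these wrappers; the demo_shake callback and the cache it would trim are invented:

static kmem_shaker_t demo_shaker;

/*
 * Called by the VM under memory pressure; trims up to nr_to_scan
 * cached objects and returns how many remain.
 */
static int demo_shake(int nr_to_scan, gfp_t gfp_mask)
{
	if (!kmem_shake_allow(gfp_mask))
		return 0;
	/* ... walk a private cache and release entries here ... */
	return 0;
}

static void demo_shaker_init(void)
{
	demo_shaker = kmem_shake_register(demo_shake);
}

static void demo_shaker_exit(void)
{
	kmem_shake_deregister(demo_shaker);
}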
static __inline int
static inline int
kmem_shake_allow(gfp_t gfp_mask)
{
return (gfp_mask & __GFP_WAIT);
......
......@@ -40,7 +40,7 @@ typedef struct xfs_ioend {
struct work_struct io_work; /* xfsdatad work queue */
} xfs_ioend_t;
extern struct address_space_operations linvfs_aops;
extern int linvfs_get_block(struct inode *, sector_t, struct buffer_head *, int);
extern struct address_space_operations xfs_address_space_operations;
extern int xfs_get_block(struct inode *, sector_t, struct buffer_head *, int);
#endif /* __XFS_IOPS_H__ */
......@@ -1806,13 +1806,12 @@ xfs_flush_buftarg(
int __init
xfs_buf_init(void)
{
int error = -ENOMEM;
#ifdef XFS_BUF_TRACE
xfs_buf_trace_buf = ktrace_alloc(XFS_BUF_TRACE_SIZE, KM_SLEEP);
#endif
xfs_buf_zone = kmem_zone_init(sizeof(xfs_buf_t), "xfs_buf");
xfs_buf_zone = kmem_zone_init_flags(sizeof(xfs_buf_t), "xfs_buf",
KM_ZONE_HWALIGN, NULL);
if (!xfs_buf_zone)
goto out_free_trace_buf;
......@@ -1840,7 +1839,7 @@ xfs_buf_init(void)
#ifdef XFS_BUF_TRACE
ktrace_free(xfs_buf_trace_buf);
#endif
return error;
return -ENOMEM;
}
void
......
......@@ -25,6 +25,8 @@
#include "xfs_mount.h"
#include "xfs_export.h"
STATIC struct dentry dotdot = { .d_name.name = "..", .d_name.len = 2, };
/*
* XFS encodes and decodes the fileid portion of NFS filehandles
* itself instead of letting the generic NFS code do it. This
......@@ -37,7 +39,7 @@
*/
STATIC struct dentry *
linvfs_decode_fh(
xfs_fs_decode_fh(
struct super_block *sb,
__u32 *fh,
int fh_len,
......@@ -78,12 +80,12 @@ linvfs_decode_fh(
}
fh = (__u32 *)&ifid;
return find_exported_dentry(sb, fh, parent, acceptable, context);
return sb->s_export_op->find_exported_dentry(sb, fh, parent, acceptable, context);
}
STATIC int
linvfs_encode_fh(
xfs_fs_encode_fh(
struct dentry *dentry,
__u32 *fh,
int *max_len,
......@@ -95,7 +97,7 @@ linvfs_encode_fh(
int len;
int is64 = 0;
#if XFS_BIG_INUMS
vfs_t *vfs = LINVFS_GET_VFS(inode->i_sb);
vfs_t *vfs = vfs_from_sb(inode->i_sb);
if (!(vfs->vfs_flag & VFS_32BITINODES)) {
/* filesystem may contain 64bit inode numbers */
......@@ -130,21 +132,21 @@ linvfs_encode_fh(
}
STATIC struct dentry *
linvfs_get_dentry(
xfs_fs_get_dentry(
struct super_block *sb,
void *data)
{
vnode_t *vp;
struct inode *inode;
struct dentry *result;
vfs_t *vfsp = LINVFS_GET_VFS(sb);
vfs_t *vfsp = vfs_from_sb(sb);
int error;
VFS_VGET(vfsp, &vp, (fid_t *)data, error);
if (error || vp == NULL)
return ERR_PTR(-ESTALE) ;
inode = LINVFS_GET_IP(vp);
inode = vn_to_inode(vp);
result = d_alloc_anon(inode);
if (!result) {
iput(inode);
......@@ -154,25 +156,20 @@ linvfs_get_dentry(
}
STATIC struct dentry *
linvfs_get_parent(
xfs_fs_get_parent(
struct dentry *child)
{
int error;
vnode_t *vp, *cvp;
struct dentry *parent;
struct dentry dotdot;
dotdot.d_name.name = "..";
dotdot.d_name.len = 2;
dotdot.d_inode = NULL;
cvp = NULL;
vp = LINVFS_GET_VP(child->d_inode);
vp = vn_from_inode(child->d_inode);
VOP_LOOKUP(vp, &dotdot, &cvp, 0, NULL, NULL, error);
if (unlikely(error))
return ERR_PTR(-error);
parent = d_alloc_anon(LINVFS_GET_IP(cvp));
parent = d_alloc_anon(vn_to_inode(cvp));
if (unlikely(!parent)) {
VN_RELE(cvp);
return ERR_PTR(-ENOMEM);
......@@ -180,9 +177,9 @@ linvfs_get_parent(
return parent;
}
struct export_operations linvfs_export_ops = {
.decode_fh = linvfs_decode_fh,
.encode_fh = linvfs_encode_fh,
.get_parent = linvfs_get_parent,
.get_dentry = linvfs_get_dentry,
struct export_operations xfs_export_operations = {
.decode_fh = xfs_fs_decode_fh,
.encode_fh = xfs_fs_encode_fh,
.get_parent = xfs_fs_get_parent,
.get_dentry = xfs_fs_get_dentry,
};
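The renamed table gets hooked up through the super block's s_export_op pointer; a minimal sketch of that wiring (demo_fill_super stands in for the real fill_super path, which is not part of this hunk):

static int demo_fill_super(struct super_block *sb)
{
	/* NFS filehandle encode/decode is routed through the renamed table */
	sb->s_export_op = &xfs_export_operations;
	return 0;
}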
......@@ -57,7 +57,7 @@ fs_tosspages(
int fiopt)
{
vnode_t *vp = BHV_TO_VNODE(bdp);
struct inode *ip = LINVFS_GET_IP(vp);
struct inode *ip = vn_to_inode(vp);
if (VN_CACHED(vp))
truncate_inode_pages(ip->i_mapping, first);
......@@ -76,7 +76,7 @@ fs_flushinval_pages(
int fiopt)
{
vnode_t *vp = BHV_TO_VNODE(bdp);
struct inode *ip = LINVFS_GET_IP(vp);
struct inode *ip = vn_to_inode(vp);
if (VN_CACHED(vp)) {
filemap_write_and_wait(ip->i_mapping);
......@@ -98,7 +98,7 @@ fs_flush_pages(
int fiopt)
{
vnode_t *vp = BHV_TO_VNODE(bdp);
struct inode *ip = LINVFS_GET_IP(vp);
struct inode *ip = vn_to_inode(vp);
if (VN_CACHED(vp)) {
filemap_fdatawrite(ip->i_mapping);
......
......@@ -138,7 +138,7 @@ xfs_find_handle(
}
/* we need the vnode */
vp = LINVFS_GET_VP(inode);
vp = vn_from_inode(inode);
/* now we can grab the fsid */
memcpy(&handle.ha_fsid, vp->v_vfsp->vfs_altfsid, sizeof(xfs_fsid_t));
......@@ -256,7 +256,7 @@ xfs_vget_fsop_handlereq(
}
vpp = XFS_ITOV(ip);
inodep = LINVFS_GET_IP(vpp);
inodep = vn_to_inode(vpp);
xfs_iunlock(ip, XFS_ILOCK_SHARED);
*vp = vpp;
......@@ -344,7 +344,7 @@ xfs_open_by_handle(
return -XFS_ERROR(-PTR_ERR(filp));
}
if (inode->i_mode & S_IFREG)
filp->f_op = &linvfs_invis_file_operations;
filp->f_op = &xfs_invis_file_operations;
fd_install(new_fd, filp);
return new_fd;
......@@ -715,7 +715,7 @@ xfs_ioctl(
xfs_inode_t *ip;
xfs_mount_t *mp;
vp = LINVFS_GET_VP(inode);
vp = vn_from_inode(inode);
vn_trace_entry(vp, "xfs_ioctl", (inst_t *)__return_address);
......@@ -1160,105 +1160,129 @@ xfs_ioc_xattr(
void __user *arg)
{
struct fsxattr fa;
vattr_t va;
int error;
struct vattr *vattr;
int error = 0;
int attr_flags;
unsigned int flags;
vattr = kmalloc(sizeof(*vattr), GFP_KERNEL);
if (unlikely(!vattr))
return -ENOMEM;
switch (cmd) {
case XFS_IOC_FSGETXATTR: {
va.va_mask = XFS_AT_XFLAGS | XFS_AT_EXTSIZE | \
vattr->va_mask = XFS_AT_XFLAGS | XFS_AT_EXTSIZE | \
XFS_AT_NEXTENTS | XFS_AT_PROJID;
VOP_GETATTR(vp, &va, 0, NULL, error);
if (error)
return -error;
VOP_GETATTR(vp, vattr, 0, NULL, error);
if (unlikely(error)) {
error = -error;
break;
}
fa.fsx_xflags = va.va_xflags;
fa.fsx_extsize = va.va_extsize;
fa.fsx_nextents = va.va_nextents;
fa.fsx_projid = va.va_projid;
fa.fsx_xflags = vattr->va_xflags;
fa.fsx_extsize = vattr->va_extsize;
fa.fsx_nextents = vattr->va_nextents;
fa.fsx_projid = vattr->va_projid;
if (copy_to_user(arg, &fa, sizeof(fa)))
return -XFS_ERROR(EFAULT);
return 0;
if (copy_to_user(arg, &fa, sizeof(fa))) {
error = -EFAULT;
break;
}
break;
}
case XFS_IOC_FSSETXATTR: {
if (copy_from_user(&fa, arg, sizeof(fa)))
return -XFS_ERROR(EFAULT);
if (copy_from_user(&fa, arg, sizeof(fa))) {
error = -EFAULT;
break;
}
attr_flags = 0;
if (filp->f_flags & (O_NDELAY|O_NONBLOCK))
attr_flags |= ATTR_NONBLOCK;
va.va_mask = XFS_AT_XFLAGS | XFS_AT_EXTSIZE | XFS_AT_PROJID;
va.va_xflags = fa.fsx_xflags;
va.va_extsize = fa.fsx_extsize;
va.va_projid = fa.fsx_projid;
vattr->va_mask = XFS_AT_XFLAGS | XFS_AT_EXTSIZE | XFS_AT_PROJID;
vattr->va_xflags = fa.fsx_xflags;
vattr->va_extsize = fa.fsx_extsize;
vattr->va_projid = fa.fsx_projid;
VOP_SETATTR(vp, &va, attr_flags, NULL, error);
if (!error)
vn_revalidate(vp); /* update Linux inode flags */
return -error;
VOP_SETATTR(vp, vattr, attr_flags, NULL, error);
if (likely(!error))
__vn_revalidate(vp, vattr); /* update flags */
error = -error;
break;
}
case XFS_IOC_FSGETXATTRA: {
va.va_mask = XFS_AT_XFLAGS | XFS_AT_EXTSIZE | \
vattr->va_mask = XFS_AT_XFLAGS | XFS_AT_EXTSIZE | \
XFS_AT_ANEXTENTS | XFS_AT_PROJID;
VOP_GETATTR(vp, &va, 0, NULL, error);
if (error)
return -error;
VOP_GETATTR(vp, vattr, 0, NULL, error);
if (unlikely(error)) {
error = -error;
break;
}
fa.fsx_xflags = va.va_xflags;
fa.fsx_extsize = va.va_extsize;
fa.fsx_nextents = va.va_anextents;
fa.fsx_projid = va.va_projid;
fa.fsx_xflags = vattr->va_xflags;
fa.fsx_extsize = vattr->va_extsize;
fa.fsx_nextents = vattr->va_anextents;
fa.fsx_projid = vattr->va_projid;
if (copy_to_user(arg, &fa, sizeof(fa)))
return -XFS_ERROR(EFAULT);
return 0;
if (copy_to_user(arg, &fa, sizeof(fa))) {
error = -EFAULT;
break;
}
break;
}
case XFS_IOC_GETXFLAGS: {
flags = xfs_di2lxflags(ip->i_d.di_flags);
if (copy_to_user(arg, &flags, sizeof(flags)))
return -XFS_ERROR(EFAULT);
return 0;
error = -EFAULT;
break;
}
case XFS_IOC_SETXFLAGS: {
if (copy_from_user(&flags, arg, sizeof(flags)))
return -XFS_ERROR(EFAULT);
if (copy_from_user(&flags, arg, sizeof(flags))) {
error = -EFAULT;
break;
}
if (flags & ~(LINUX_XFLAG_IMMUTABLE | LINUX_XFLAG_APPEND | \
LINUX_XFLAG_NOATIME | LINUX_XFLAG_NODUMP | \
LINUX_XFLAG_SYNC))
return -XFS_ERROR(EOPNOTSUPP);
LINUX_XFLAG_SYNC)) {
error = -EOPNOTSUPP;
break;
}
attr_flags = 0;
if (filp->f_flags & (O_NDELAY|O_NONBLOCK))
attr_flags |= ATTR_NONBLOCK;
va.va_mask = XFS_AT_XFLAGS;
va.va_xflags = xfs_merge_ioc_xflags(flags,
vattr->va_mask = XFS_AT_XFLAGS;
vattr->va_xflags = xfs_merge_ioc_xflags(flags,
xfs_ip2xflags(ip));
VOP_SETATTR(vp, &va, attr_flags, NULL, error);
if (!error)
vn_revalidate(vp); /* update Linux inode flags */
return -error;
VOP_SETATTR(vp, vattr, attr_flags, NULL, error);
if (likely(!error))
__vn_revalidate(vp, vattr); /* update flags */
error = -error;
break;
}
case XFS_IOC_GETVERSION: {
flags = LINVFS_GET_IP(vp)->i_generation;
flags = vn_to_inode(vp)->i_generation;
if (copy_to_user(arg, &flags, sizeof(flags)))
return -XFS_ERROR(EFAULT);
return 0;
error = -EFAULT;
break;
}
default:
return -ENOTTY;
error = -ENOTTY;
break;
}
kfree(vattr);
return error;
}
STATIC int
......
......@@ -107,11 +107,11 @@ xfs_ioctl32_bulkstat(
#endif
STATIC long
__linvfs_compat_ioctl(int mode, struct file *f, unsigned cmd, unsigned long arg)
xfs_compat_ioctl(int mode, struct file *f, unsigned cmd, unsigned long arg)
{
int error;
struct inode *inode = f->f_dentry->d_inode;
vnode_t *vp = LINVFS_GET_VP(inode);
vnode_t *vp = vn_to_inode(inode);
switch (cmd) {
case XFS_IOC_DIOINFO:
......@@ -196,19 +196,19 @@ __linvfs_compat_ioctl(int mode, struct file *f, unsigned cmd, unsigned long arg)
}
long
linvfs_compat_ioctl(
xfs_file_compat_ioctl(
struct file *f,
unsigned cmd,
unsigned long arg)
{
return __linvfs_compat_ioctl(0, f, cmd, arg);
return xfs_compat_ioctl(0, f, cmd, arg);
}
long
linvfs_compat_invis_ioctl(
xfs_file_compat_invis_ioctl(
struct file *f,
unsigned cmd,
unsigned long arg)
{
return __linvfs_compat_ioctl(IO_INVIS, f, cmd, arg);
return xfs_compat_ioctl(IO_INVIS, f, cmd, arg);
}
......@@ -18,7 +18,7 @@
#ifndef __XFS_IOCTL32_H__
#define __XFS_IOCTL32_H__
extern long linvfs_compat_ioctl(struct file *, unsigned, unsigned long);
extern long linvfs_compat_invis_ioctl(struct file *f, unsigned, unsigned long);
extern long xfs_file_compat_ioctl(struct file *, unsigned, unsigned long);
extern long xfs_file_compat_invis_ioctl(struct file *, unsigned, unsigned long);
#endif /* __XFS_IOCTL32_H__ */
......@@ -18,13 +18,13 @@
#ifndef __XFS_IOPS_H__
#define __XFS_IOPS_H__
extern struct inode_operations linvfs_file_inode_operations;
extern struct inode_operations linvfs_dir_inode_operations;
extern struct inode_operations linvfs_symlink_inode_operations;
extern struct inode_operations xfs_inode_operations;
extern struct inode_operations xfs_dir_inode_operations;
extern struct inode_operations xfs_symlink_inode_operations;
extern struct file_operations linvfs_file_operations;
extern struct file_operations linvfs_invis_file_operations;
extern struct file_operations linvfs_dir_operations;
extern struct file_operations xfs_file_operations;
extern struct file_operations xfs_dir_file_operations;
extern struct file_operations xfs_invis_file_operations;
extern int xfs_ioctl(struct bhv_desc *, struct inode *, struct file *,
int, unsigned int, void __user *);
......
......@@ -73,6 +73,9 @@
#include <linux/list.h>
#include <linux/proc_fs.h>
#include <linux/sort.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/delay.h>
#include <asm/page.h>
#include <asm/div64.h>
......@@ -100,6 +103,11 @@
*/
#undef HAVE_REFCACHE /* reference cache not needed for NFS in 2.6 */
#define HAVE_SENDFILE /* sendfile(2) exists in 2.6, but not in 2.4 */
#ifdef CONFIG_SMP
#define HAVE_PERCPU_SB /* per cpu superblock counters are a 2.6 feature */
#else
#undef HAVE_PERCPU_SB /* per cpu superblock counters are a 2.6 feature */
#endif
/*
* State flag for unwritten extent buffers.
......@@ -226,7 +234,7 @@ BUFFER_FNS(PrivateStart, unwritten);
#define xfs_sort(a,n,s,fn) sort(a,n,s,fn,NULL)
#define xfs_stack_trace() dump_stack()
#define xfs_itruncate_data(ip, off) \
(-vmtruncate(LINVFS_GET_IP(XFS_ITOV(ip)), (off)))
(-vmtruncate(vn_to_inode(XFS_ITOV(ip)), (off)))
#define xfs_statvfs_fsid(statp, mp) \
({ u64 id = huge_encode_dev((mp)->m_ddev_targp->bt_dev); \
__kernel_fsid_t *fsid = &(statp)->f_fsid; \
......
......@@ -83,7 +83,7 @@ xfs_rw_enter_trace(
(void *)((unsigned long)ioflags),
(void *)((unsigned long)((io->io_new_size >> 32) & 0xffffffff)),
(void *)((unsigned long)(io->io_new_size & 0xffffffff)),
(void *)NULL,
(void *)((unsigned long)current_pid()),
(void *)NULL,
(void *)NULL,
(void *)NULL,
......@@ -113,7 +113,7 @@ xfs_inval_cached_trace(
(void *)((unsigned long)(first & 0xffffffff)),
(void *)((unsigned long)((last >> 32) & 0xffffffff)),
(void *)((unsigned long)(last & 0xffffffff)),
(void *)NULL,
(void *)((unsigned long)current_pid()),
(void *)NULL,
(void *)NULL,
(void *)NULL,
......@@ -249,9 +249,8 @@ xfs_read(
if (n < size)
size = n;
if (XFS_FORCED_SHUTDOWN(mp)) {
if (XFS_FORCED_SHUTDOWN(mp))
return -EIO;
}
if (unlikely(ioflags & IO_ISDIRECT))
mutex_lock(&inode->i_mutex);
......@@ -267,10 +266,14 @@ xfs_read(
dmflags, &locktype);
if (ret) {
xfs_iunlock(ip, XFS_IOLOCK_SHARED);
goto unlock_isem;
goto unlock_mutex;
}
}
if (unlikely((ioflags & IO_ISDIRECT) && VN_CACHED(vp)))
VOP_FLUSHINVAL_PAGES(vp, ctooff(offtoct(*offset)),
-1, FI_REMAPF_LOCKED);
xfs_rw_enter_trace(XFS_READ_ENTER, &ip->i_iocore,
(void *)iovp, segs, *offset, ioflags);
ret = __generic_file_aio_read(iocb, iovp, segs, offset);
......@@ -281,7 +284,7 @@ xfs_read(
xfs_iunlock(ip, XFS_IOLOCK_SHARED);
unlock_isem:
unlock_mutex:
if (unlikely(ioflags & IO_ISDIRECT))
mutex_unlock(&inode->i_mutex);
return ret;
......@@ -432,7 +435,7 @@ xfs_zero_eof(
xfs_fsize_t isize, /* current inode size */
xfs_fsize_t end_size) /* terminal inode size */
{
struct inode *ip = LINVFS_GET_IP(vp);
struct inode *ip = vn_to_inode(vp);
xfs_fileoff_t start_zero_fsb;
xfs_fileoff_t end_zero_fsb;
xfs_fileoff_t zero_count_fsb;
......@@ -573,7 +576,7 @@ xfs_write(
vrwlock_t locktype;
size_t ocount = 0, count;
loff_t pos;
int need_isem = 1, need_flush = 0;
int need_i_mutex = 1, need_flush = 0;
XFS_STATS_INC(xs_write_calls);
......@@ -622,14 +625,14 @@ xfs_write(
return XFS_ERROR(-EINVAL);
if (!VN_CACHED(vp) && pos < i_size_read(inode))
need_isem = 0;
need_i_mutex = 0;
if (VN_CACHED(vp))
need_flush = 1;
}
relock:
if (need_isem) {
if (need_i_mutex) {
iolock = XFS_IOLOCK_EXCL;
locktype = VRWLOCK_WRITE;
......@@ -651,7 +654,7 @@ xfs_write(
S_ISBLK(inode->i_mode));
if (error) {
xfs_iunlock(xip, XFS_ILOCK_EXCL|iolock);
goto out_unlock_isem;
goto out_unlock_mutex;
}
new_size = pos + count;
......@@ -663,7 +666,7 @@ xfs_write(
loff_t savedsize = pos;
int dmflags = FILP_DELAY_FLAG(file);
if (need_isem)
if (need_i_mutex)
dmflags |= DM_FLAGS_IMUX;
xfs_iunlock(xip, XFS_ILOCK_EXCL);
......@@ -672,7 +675,7 @@ xfs_write(
dmflags, &locktype);
if (error) {
xfs_iunlock(xip, iolock);
goto out_unlock_isem;
goto out_unlock_mutex;
}
xfs_ilock(xip, XFS_ILOCK_EXCL);
eventsent = 1;
......@@ -710,7 +713,7 @@ xfs_write(
isize, pos + count);
if (error) {
xfs_iunlock(xip, XFS_ILOCK_EXCL|iolock);
goto out_unlock_isem;
goto out_unlock_mutex;
}
}
xfs_iunlock(xip, XFS_ILOCK_EXCL);
......@@ -731,7 +734,7 @@ xfs_write(
error = -remove_suid(file->f_dentry);
if (unlikely(error)) {
xfs_iunlock(xip, iolock);
goto out_unlock_isem;
goto out_unlock_mutex;
}
}
......@@ -747,14 +750,14 @@ xfs_write(
-1, FI_REMAPF_LOCKED);
}
if (need_isem) {
if (need_i_mutex) {
/* demote the lock now the cached pages are gone */
XFS_ILOCK_DEMOTE(mp, io, XFS_IOLOCK_EXCL);
mutex_unlock(&inode->i_mutex);
iolock = XFS_IOLOCK_SHARED;
locktype = VRWLOCK_WRITE_DIRECT;
need_isem = 0;
need_i_mutex = 0;
}
xfs_rw_enter_trace(XFS_DIOWR_ENTER, io, (void *)iovp, segs,
......@@ -772,7 +775,7 @@ xfs_write(
pos += ret;
count -= ret;
need_isem = 1;
need_i_mutex = 1;
ioflags &= ~IO_ISDIRECT;
xfs_iunlock(xip, iolock);
goto relock;
......@@ -794,14 +797,14 @@ xfs_write(
!(ioflags & IO_INVIS)) {
xfs_rwunlock(bdp, locktype);
if (need_isem)
if (need_i_mutex)
mutex_unlock(&inode->i_mutex);
error = XFS_SEND_NAMESP(xip->i_mount, DM_EVENT_NOSPACE, vp,
DM_RIGHT_NULL, vp, DM_RIGHT_NULL, NULL, NULL,
0, 0, 0); /* Delay flag intentionally unused */
if (error)
goto out_nounlocks;
if (need_isem)
if (need_i_mutex)
mutex_lock(&inode->i_mutex);
xfs_rwlock(bdp, locktype);
pos = xip->i_d.di_size;
......@@ -907,7 +910,7 @@ xfs_write(
}
xfs_rwunlock(bdp, locktype);
if (need_isem)
if (need_i_mutex)
mutex_unlock(&inode->i_mutex);
error = sync_page_range(inode, mapping, pos, ret);
......@@ -918,8 +921,8 @@ xfs_write(
out_unlock_internal:
xfs_rwunlock(bdp, locktype);
out_unlock_isem:
if (need_isem)
out_unlock_mutex:
if (need_i_mutex)
mutex_unlock(&inode->i_mutex);
out_nounlocks:
return -error;
......
......@@ -98,11 +98,6 @@ extern void xfs_qm_exit(void);
XFS_DMAPI_STRING \
XFS_DBG_STRING /* DBG must be last */
#define LINVFS_GET_VFS(s) \
(vfs_t *)((s)->s_fs_info)
#define LINVFS_SET_VFS(s, vfsp) \
((s)->s_fs_info = vfsp)
struct xfs_inode;
struct xfs_mount;
struct xfs_buftarg;
......@@ -120,6 +115,6 @@ extern int xfs_blkdev_get(struct xfs_mount *, const char *,
extern void xfs_blkdev_put(struct block_device *);
extern void xfs_blkdev_issue_flush(struct xfs_buftarg *);
extern struct export_operations linvfs_export_ops;
extern struct export_operations xfs_export_operations;
#endif /* __XFS_SUPER_H__ */
......@@ -227,7 +227,8 @@ vfs_freeze(
}
vfs_t *
vfs_allocate( void )
vfs_allocate(
struct super_block *sb)
{
struct vfs *vfsp;
......@@ -236,9 +237,23 @@ vfs_allocate( void )
INIT_LIST_HEAD(&vfsp->vfs_sync_list);
spin_lock_init(&vfsp->vfs_sync_lock);
init_waitqueue_head(&vfsp->vfs_wait_single_sync_task);
vfsp->vfs_super = sb;
sb->s_fs_info = vfsp;
if (sb->s_flags & MS_RDONLY)
vfsp->vfs_flag |= VFS_RDONLY;
return vfsp;
}
vfs_t *
vfs_from_sb(
struct super_block *sb)
{
return (vfs_t *)sb->s_fs_info;
}
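The rest of the series replaces the old LINVFS_GET_VFS()/LINVFS_SET_VFS() macros with this pairing; a small sketch of how the two helpers relate (demo_vfs_mapping is illustrative only):

static void demo_vfs_mapping(struct super_block *sb)
{
	vfs_t	*vfsp = vfs_allocate(sb);	/* also sets sb->s_fs_info */
	vfs_t	*same = vfs_from_sb(sb);	/* replaces LINVFS_GET_VFS(sb) */

	ASSERT(same == vfsp);
	vfs_deallocate(vfsp);
}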
void
vfs_deallocate(
struct vfs *vfsp)
......@@ -295,7 +310,7 @@ bhv_remove_all_vfsops(
bhv_remove_vfsops(vfsp, VFS_POSITION_DM);
if (!freebase)
return;
mp = XFS_BHVTOM(bhv_lookup(VFS_BHVHEAD(vfsp), &xfs_vfsops));
mp = XFS_VFSTOM(vfsp);
VFS_REMOVEBHV(vfsp, &mp->m_bhv);
xfs_mount_free(mp, 0);
}
......
......@@ -193,7 +193,8 @@ typedef struct bhv_vfsops {
#define vfs_bhv_set_custom(b,o) ( (b)->bhv_custom = (void *)(o))
#define vfs_bhv_clr_custom(b) ( (b)->bhv_custom = NULL )
extern vfs_t *vfs_allocate(void);
extern vfs_t *vfs_allocate(struct super_block *);
extern vfs_t *vfs_from_sb(struct super_block *);
extern void vfs_deallocate(vfs_t *);
extern void vfs_insertops(vfs_t *, bhv_vfsops_t *);
extern void vfs_insertbhv(vfs_t *, bhv_desc_t *, vfsops_t *, void *);
......
......@@ -58,7 +58,7 @@ struct vnode *
vn_initialize(
struct inode *inode)
{
struct vnode *vp = LINVFS_GET_VP(inode);
struct vnode *vp = vn_from_inode(inode);
XFS_STATS_INC(vn_active);
XFS_STATS_INC(vn_alloc);
......@@ -83,7 +83,7 @@ vn_initialize(
vp->v_trace = ktrace_alloc(VNODE_TRACE_SIZE, KM_SLEEP);
#endif /* XFS_VNODE_TRACE */
vn_trace_exit(vp, "vn_initialize", (inst_t *)__return_address);
vn_trace_exit(vp, __FUNCTION__, (inst_t *)__return_address);
return vp;
}
......@@ -97,7 +97,7 @@ vn_revalidate_core(
struct vnode *vp,
vattr_t *vap)
{
struct inode *inode = LINVFS_GET_IP(vp);
struct inode *inode = vn_to_inode(vp);
inode->i_mode = vap->va_mode;
inode->i_nlink = vap->va_nlink;
......@@ -129,24 +129,31 @@ vn_revalidate_core(
* Revalidate the Linux inode from the vnode.
*/
int
vn_revalidate(
struct vnode *vp)
__vn_revalidate(
struct vnode *vp,
struct vattr *vattr)
{
vattr_t va;
int error;
vn_trace_entry(vp, "vn_revalidate", (inst_t *)__return_address);
ASSERT(vp->v_fbhv != NULL);
va.va_mask = XFS_AT_STAT|XFS_AT_XFLAGS;
VOP_GETATTR(vp, &va, 0, NULL, error);
if (!error) {
vn_revalidate_core(vp, &va);
vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address);
vattr->va_mask = XFS_AT_STAT | XFS_AT_XFLAGS;
VOP_GETATTR(vp, vattr, 0, NULL, error);
if (likely(!error)) {
vn_revalidate_core(vp, vattr);
VUNMODIFY(vp);
}
return -error;
}
int
vn_revalidate(
struct vnode *vp)
{
vattr_t vattr;
return __vn_revalidate(vp, &vattr);
}
/*
* Add a reference to a referenced vnode.
*/
......@@ -159,7 +166,7 @@ vn_hold(
XFS_STATS_INC(vn_hold);
VN_LOCK(vp);
inode = igrab(LINVFS_GET_IP(vp));
inode = igrab(vn_to_inode(vp));
ASSERT(inode);
VN_UNLOCK(vp, 0);
......
......@@ -116,8 +116,14 @@ typedef enum {
/*
* Vnode to Linux inode mapping.
*/
#define LINVFS_GET_VP(inode) ((vnode_t *)list_entry(inode, vnode_t, v_inode))
#define LINVFS_GET_IP(vp) (&(vp)->v_inode)
static inline struct vnode *vn_from_inode(struct inode *inode)
{
return (vnode_t *)list_entry(inode, vnode_t, v_inode);
}
static inline struct inode *vn_to_inode(struct vnode *vnode)
{
return &vnode->v_inode;
}
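These inlines replace the LINVFS_GET_VP()/LINVFS_GET_IP() macros throughout the tree; a trivial round-trip sketch (demo_vnode_mapping is illustrative only):

static void demo_vnode_mapping(struct inode *inode)
{
	vnode_t	*vp = vn_from_inode(inode);	/* was LINVFS_GET_VP(inode) */

	/* the vnode embeds its inode, so the mapping costs nothing */
	ASSERT(vn_to_inode(vp) == inode);	/* was LINVFS_GET_IP(vp) */
}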
/*
* Vnode flags.
......@@ -490,6 +496,7 @@ typedef struct vnode_map {
(vmap).v_ino = (vp)->v_inode.i_ino; }
extern int vn_revalidate(struct vnode *);
extern int __vn_revalidate(struct vnode *, vattr_t *);
extern void vn_revalidate_core(struct vnode *, vattr_t *);
extern void vn_iowait(struct vnode *vp);
......@@ -497,7 +504,7 @@ extern void vn_iowake(struct vnode *vp);
static inline int vn_count(struct vnode *vp)
{
return atomic_read(&LINVFS_GET_IP(vp)->i_count);
return atomic_read(&vn_to_inode(vp)->i_count);
}
/*
......@@ -511,16 +518,16 @@ extern vnode_t *vn_hold(struct vnode *);
vn_trace_hold(vp, __FILE__, __LINE__, (inst_t *)__return_address))
#define VN_RELE(vp) \
(vn_trace_rele(vp, __FILE__, __LINE__, (inst_t *)__return_address), \
iput(LINVFS_GET_IP(vp)))
iput(vn_to_inode(vp)))
#else
#define VN_HOLD(vp) ((void)vn_hold(vp))
#define VN_RELE(vp) (iput(LINVFS_GET_IP(vp)))
#define VN_RELE(vp) (iput(vn_to_inode(vp)))
#endif
static inline struct vnode *vn_grab(struct vnode *vp)
{
struct inode *inode = igrab(LINVFS_GET_IP(vp));
return inode ? LINVFS_GET_VP(inode) : NULL;
struct inode *inode = igrab(vn_to_inode(vp));
return inode ? vn_from_inode(inode) : NULL;
}
/*
......@@ -528,7 +535,7 @@ static inline struct vnode *vn_grab(struct vnode *vp)
*/
#define VNAME(dentry) ((char *) (dentry)->d_name.name)
#define VNAMELEN(dentry) ((dentry)->d_name.len)
#define VNAME_TO_VNODE(dentry) (LINVFS_GET_VP((dentry)->d_inode))
#define VNAME_TO_VNODE(dentry) (vn_from_inode((dentry)->d_inode))
/*
* Vnode spinlock manipulation.
......@@ -557,12 +564,12 @@ static __inline__ void vn_flagclr(struct vnode *vp, uint flag)
*/
static inline void vn_mark_bad(struct vnode *vp)
{
make_bad_inode(LINVFS_GET_IP(vp));
make_bad_inode(vn_to_inode(vp));
}
static inline int VN_BAD(struct vnode *vp)
{
return is_bad_inode(LINVFS_GET_IP(vp));
return is_bad_inode(vn_to_inode(vp));
}
/*
......@@ -587,9 +594,9 @@ static inline void vn_atime_to_time_t(struct vnode *vp, time_t *tt)
/*
* Some useful predicates.
*/
#define VN_MAPPED(vp) mapping_mapped(LINVFS_GET_IP(vp)->i_mapping)
#define VN_CACHED(vp) (LINVFS_GET_IP(vp)->i_mapping->nrpages)
#define VN_DIRTY(vp) mapping_tagged(LINVFS_GET_IP(vp)->i_mapping, \
#define VN_MAPPED(vp) mapping_mapped(vn_to_inode(vp)->i_mapping)
#define VN_CACHED(vp) (vn_to_inode(vp)->i_mapping->nrpages)
#define VN_DIRTY(vp) mapping_tagged(vn_to_inode(vp)->i_mapping, \
PAGECACHE_TAG_DIRTY)
#define VMODIFY(vp) VN_FLAGSET(vp, VMODIFIED)
#define VUNMODIFY(vp) VN_FLAGCLR(vp, VMODIFIED)
......
......@@ -79,9 +79,11 @@ xfs_qm_dquot_logitem_format(
logvec->i_addr = (xfs_caddr_t)&logitem->qli_format;
logvec->i_len = sizeof(xfs_dq_logformat_t);
XLOG_VEC_SET_TYPE(logvec, XLOG_REG_TYPE_QFORMAT);
logvec++;
logvec->i_addr = (xfs_caddr_t)&logitem->qli_dquot->q_core;
logvec->i_len = sizeof(xfs_disk_dquot_t);
XLOG_VEC_SET_TYPE(logvec, XLOG_REG_TYPE_DQUOT);
ASSERT(2 == logitem->qli_item.li_desc->lid_size);
logitem->qli_format.qlf_size = 2;
......
......@@ -1704,9 +1704,9 @@ xfs_qm_get_rtblks(
xfs_qcnt_t *O_rtblks)
{
xfs_filblks_t rtblks; /* total rt blks */
xfs_extnum_t idx; /* extent record index */
xfs_ifork_t *ifp; /* inode fork pointer */
xfs_extnum_t nextents; /* number of extent entries */
xfs_bmbt_rec_t *base; /* base of extent array */
xfs_bmbt_rec_t *ep; /* pointer to an extent entry */
int error;
......@@ -1717,10 +1717,11 @@ xfs_qm_get_rtblks(
return error;
}
rtblks = 0;
nextents = ifp->if_bytes / sizeof(xfs_bmbt_rec_t);
base = &ifp->if_u1.if_extents[0];
for (ep = base; ep < &base[nextents]; ep++)
nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
for (idx = 0; idx < nextents; idx++) {
ep = xfs_iext_get_ext(ifp, idx);
rtblks += xfs_bmbt_get_blockcount(ep);
}
*O_rtblks = (xfs_qcnt_t)rtblks;
return 0;
}
......@@ -2788,9 +2789,7 @@ xfs_qm_freelist_destroy(xfs_frlist_t *ql)
xfs_qm_dqdestroy(dqp);
dqp = nextdqp;
}
/*
* Don't bother about unlocking.
*/
mutex_unlock(&ql->qh_lock);
mutex_destroy(&ql->qh_lock);
ASSERT(ql->qh_nelems == 0);
......
......@@ -374,7 +374,7 @@ xfs_qm_exit(void)
vfs_bhv_clr_custom(&xfs_qmops);
xfs_qm_cleanup_procfs();
if (qm_dqzone)
kmem_cache_destroy(qm_dqzone);
kmem_zone_destroy(qm_dqzone);
if (qm_dqtrxzone)
kmem_cache_destroy(qm_dqtrxzone);
kmem_zone_destroy(qm_dqtrxzone);
}
......@@ -39,8 +39,8 @@ ktrace_init(int zentries)
void
ktrace_uninit(void)
{
kmem_cache_destroy(ktrace_hdr_zone);
kmem_cache_destroy(ktrace_ent_zone);
kmem_zone_destroy(ktrace_hdr_zone);
kmem_zone_destroy(ktrace_ent_zone);
}
/*
......
......@@ -21,13 +21,6 @@ static mutex_t uuid_monitor;
static int uuid_table_size;
static uuid_t *uuid_table;
void
uuid_init(void)
{
mutex_init(&uuid_monitor);
}
/* IRIX interpretation of an uuid_t */
typedef struct {
__be32 uu_timelow;
......@@ -50,7 +43,7 @@ uuid_getnodeuniq(uuid_t *uuid, int fsid [2])
fsid[0] = (be16_to_cpu(uup->uu_clockseq) << 16) |
be16_to_cpu(uup->uu_timemid);
fsid[1] = be16_to_cpu(uup->uu_timelow);
fsid[1] = be32_to_cpu(uup->uu_timelow);
}
void
......@@ -139,3 +132,9 @@ uuid_table_remove(uuid_t *uuid)
ASSERT(i < uuid_table_size);
mutex_unlock(&uuid_monitor);
}
void
uuid_init(void)
{
mutex_init(&uuid_monitor);
}
......@@ -55,8 +55,8 @@ struct xfs_inode;
extern struct kmem_zone *xfs_acl_zone;
#define xfs_acl_zone_init(zone, name) \
(zone) = kmem_zone_init(sizeof(xfs_acl_t), name)
#define xfs_acl_zone_destroy(zone) kmem_cache_destroy(zone)
(zone) = kmem_zone_init(sizeof(xfs_acl_t), (name))
#define xfs_acl_zone_destroy(zone) kmem_zone_destroy(zone)
extern int xfs_acl_inherit(struct vnode *, struct vattr *, xfs_acl_t *);
extern int xfs_acl_iaccess(struct xfs_inode *, mode_t, cred_t *);
......
......@@ -1127,8 +1127,7 @@ xfs_attr_leaf_list(xfs_attr_list_context_t *context)
return(error);
ASSERT(bp != NULL);
leaf = bp->data;
if (unlikely(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT)
!= XFS_ATTR_LEAF_MAGIC)) {
if (unlikely(be16_to_cpu(leaf->hdr.info.magic) != XFS_ATTR_LEAF_MAGIC)) {
XFS_CORRUPTION_ERROR("xfs_attr_leaf_list", XFS_ERRLEVEL_LOW,
context->dp->i_mount, leaf);
xfs_da_brelse(NULL, bp);
......@@ -1541,8 +1540,8 @@ xfs_attr_node_removename(xfs_da_args_t *args)
XFS_ATTR_FORK);
if (error)
goto out;
ASSERT(INT_GET(((xfs_attr_leafblock_t *)
bp->data)->hdr.info.magic, ARCH_CONVERT)
ASSERT(be16_to_cpu(((xfs_attr_leafblock_t *)
bp->data)->hdr.info.magic)
== XFS_ATTR_LEAF_MAGIC);
if ((forkoff = xfs_attr_shortform_allfit(bp, dp))) {
......@@ -1763,7 +1762,7 @@ xfs_attr_node_list(xfs_attr_list_context_t *context)
return(error);
if (bp) {
node = bp->data;
switch (INT_GET(node->hdr.info.magic, ARCH_CONVERT)) {
switch (be16_to_cpu(node->hdr.info.magic)) {
case XFS_DA_NODE_MAGIC:
xfs_attr_trace_l_cn("wrong blk", context, node);
xfs_da_brelse(NULL, bp);
......@@ -1771,18 +1770,14 @@ xfs_attr_node_list(xfs_attr_list_context_t *context)
break;
case XFS_ATTR_LEAF_MAGIC:
leaf = bp->data;
if (cursor->hashval >
INT_GET(leaf->entries[
INT_GET(leaf->hdr.count,
ARCH_CONVERT)-1].hashval,
ARCH_CONVERT)) {
if (cursor->hashval > be32_to_cpu(leaf->entries[
be16_to_cpu(leaf->hdr.count)-1].hashval)) {
xfs_attr_trace_l_cl("wrong blk",
context, leaf);
xfs_da_brelse(NULL, bp);
bp = NULL;
} else if (cursor->hashval <=
INT_GET(leaf->entries[0].hashval,
ARCH_CONVERT)) {
be32_to_cpu(leaf->entries[0].hashval)) {
xfs_attr_trace_l_cl("maybe wrong blk",
context, leaf);
xfs_da_brelse(NULL, bp);
......@@ -1817,10 +1812,10 @@ xfs_attr_node_list(xfs_attr_list_context_t *context)
return(XFS_ERROR(EFSCORRUPTED));
}
node = bp->data;
if (INT_GET(node->hdr.info.magic, ARCH_CONVERT)
if (be16_to_cpu(node->hdr.info.magic)
== XFS_ATTR_LEAF_MAGIC)
break;
if (unlikely(INT_GET(node->hdr.info.magic, ARCH_CONVERT)
if (unlikely(be16_to_cpu(node->hdr.info.magic)
!= XFS_DA_NODE_MAGIC)) {
XFS_CORRUPTION_ERROR("xfs_attr_node_list(3)",
XFS_ERRLEVEL_LOW,
......@@ -1830,19 +1825,17 @@ xfs_attr_node_list(xfs_attr_list_context_t *context)
return(XFS_ERROR(EFSCORRUPTED));
}
btree = node->btree;
for (i = 0;
i < INT_GET(node->hdr.count, ARCH_CONVERT);
for (i = 0; i < be16_to_cpu(node->hdr.count);
btree++, i++) {
if (cursor->hashval
<= INT_GET(btree->hashval,
ARCH_CONVERT)) {
cursor->blkno = INT_GET(btree->before, ARCH_CONVERT);
<= be32_to_cpu(btree->hashval)) {
cursor->blkno = be32_to_cpu(btree->before);
xfs_attr_trace_l_cb("descending",
context, btree);
break;
}
}
if (i == INT_GET(node->hdr.count, ARCH_CONVERT)) {
if (i == be16_to_cpu(node->hdr.count)) {
xfs_da_brelse(NULL, bp);
return(0);
}
......@@ -1858,7 +1851,7 @@ xfs_attr_node_list(xfs_attr_list_context_t *context)
*/
for (;;) {
leaf = bp->data;
if (unlikely(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT)
if (unlikely(be16_to_cpu(leaf->hdr.info.magic)
!= XFS_ATTR_LEAF_MAGIC)) {
XFS_CORRUPTION_ERROR("xfs_attr_node_list(4)",
XFS_ERRLEVEL_LOW,
......@@ -1869,7 +1862,7 @@ xfs_attr_node_list(xfs_attr_list_context_t *context)
error = xfs_attr_leaf_list_int(bp, context);
if (error || !leaf->hdr.info.forw)
break; /* not really an error, buffer full or EOF */
cursor->blkno = INT_GET(leaf->hdr.info.forw, ARCH_CONVERT);
cursor->blkno = be32_to_cpu(leaf->hdr.info.forw);
xfs_da_brelse(NULL, bp);
error = xfs_da_read_buf(NULL, context->dp, cursor->blkno, -1,
&bp, XFS_ATTR_FORK);
......@@ -2232,9 +2225,10 @@ xfs_attr_trace_l_cn(char *where, struct xfs_attr_list_context *context,
: 0,
(__psunsigned_t)context->dupcnt,
(__psunsigned_t)context->flags,
(__psunsigned_t)INT_GET(node->hdr.count, ARCH_CONVERT),
(__psunsigned_t)INT_GET(node->btree[0].hashval, ARCH_CONVERT),
(__psunsigned_t)INT_GET(node->btree[INT_GET(node->hdr.count, ARCH_CONVERT)-1].hashval, ARCH_CONVERT));
(__psunsigned_t)be16_to_cpu(node->hdr.count),
(__psunsigned_t)be32_to_cpu(node->btree[0].hashval),
(__psunsigned_t)be32_to_cpu(node->btree[
be16_to_cpu(node->hdr.count)-1].hashval));
}
/*
......@@ -2261,8 +2255,8 @@ xfs_attr_trace_l_cb(char *where, struct xfs_attr_list_context *context,
: 0,
(__psunsigned_t)context->dupcnt,
(__psunsigned_t)context->flags,
(__psunsigned_t)INT_GET(btree->hashval, ARCH_CONVERT),
(__psunsigned_t)INT_GET(btree->before, ARCH_CONVERT),
(__psunsigned_t)be32_to_cpu(btree->hashval),
(__psunsigned_t)be32_to_cpu(btree->before),
(__psunsigned_t)NULL);
}
......@@ -2290,9 +2284,10 @@ xfs_attr_trace_l_cl(char *where, struct xfs_attr_list_context *context,
: 0,
(__psunsigned_t)context->dupcnt,
(__psunsigned_t)context->flags,
(__psunsigned_t)INT_GET(leaf->hdr.count, ARCH_CONVERT),
(__psunsigned_t)INT_GET(leaf->entries[0].hashval, ARCH_CONVERT),
(__psunsigned_t)INT_GET(leaf->entries[INT_GET(leaf->hdr.count, ARCH_CONVERT)-1].hashval, ARCH_CONVERT));
(__psunsigned_t)be16_to_cpu(leaf->hdr.count),
(__psunsigned_t)be32_to_cpu(leaf->entries[0].hashval),
(__psunsigned_t)be32_to_cpu(leaf->entries[
be16_to_cpu(leaf->hdr.count)-1].hashval));
}
/*
......@@ -2522,7 +2517,7 @@ attr_user_capable(
struct vnode *vp,
cred_t *cred)
{
struct inode *inode = LINVFS_GET_IP(vp);
struct inode *inode = vn_to_inode(vp);
if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
return -EPERM;
......@@ -2540,7 +2535,7 @@ attr_trusted_capable(
struct vnode *vp,
cred_t *cred)
{
struct inode *inode = LINVFS_GET_IP(vp);
struct inode *inode = vn_to_inode(vp);
if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
return -EPERM;
......
......@@ -73,39 +73,39 @@ struct xfs_trans;
#define XFS_ATTR_LEAF_MAPSIZE 3 /* how many freespace slots */
typedef struct xfs_attr_leaf_map { /* RLE map of free bytes */
__uint16_t base; /* base of free region */
__uint16_t size; /* length of free region */
__be16 base; /* base of free region */
__be16 size; /* length of free region */
} xfs_attr_leaf_map_t;
typedef struct xfs_attr_leaf_hdr { /* constant-structure header block */
xfs_da_blkinfo_t info; /* block type, links, etc. */
__uint16_t count; /* count of active leaf_entry's */
__uint16_t usedbytes; /* num bytes of names/values stored */
__uint16_t firstused; /* first used byte in name area */
__uint8_t holes; /* != 0 if blk needs compaction */
__uint8_t pad1;
__be16 count; /* count of active leaf_entry's */
__be16 usedbytes; /* num bytes of names/values stored */
__be16 firstused; /* first used byte in name area */
__u8 holes; /* != 0 if blk needs compaction */
__u8 pad1;
xfs_attr_leaf_map_t freemap[XFS_ATTR_LEAF_MAPSIZE];
/* N largest free regions */
} xfs_attr_leaf_hdr_t;
typedef struct xfs_attr_leaf_entry { /* sorted on key, not name */
xfs_dahash_t hashval; /* hash value of name */
__uint16_t nameidx; /* index into buffer of name/value */
__uint8_t flags; /* LOCAL/ROOT/SECURE/INCOMPLETE flag */
__uint8_t pad2; /* unused pad byte */
__be32 hashval; /* hash value of name */
__be16 nameidx; /* index into buffer of name/value */
__u8 flags; /* LOCAL/ROOT/SECURE/INCOMPLETE flag */
__u8 pad2; /* unused pad byte */
} xfs_attr_leaf_entry_t;
typedef struct xfs_attr_leaf_name_local {
__uint16_t valuelen; /* number of bytes in value */
__uint8_t namelen; /* length of name bytes */
__uint8_t nameval[1]; /* name/value bytes */
__be16 valuelen; /* number of bytes in value */
__u8 namelen; /* length of name bytes */
__u8 nameval[1]; /* name/value bytes */
} xfs_attr_leaf_name_local_t;
typedef struct xfs_attr_leaf_name_remote {
xfs_dablk_t valueblk; /* block number of value bytes */
__uint32_t valuelen; /* number of bytes in value */
__uint8_t namelen; /* length of name bytes */
__uint8_t name[1]; /* name bytes */
__be32 valueblk; /* block number of value bytes */
__be32 valuelen; /* number of bytes in value */
__u8 namelen; /* length of name bytes */
__u8 name[1]; /* name bytes */
} xfs_attr_leaf_name_remote_t;
typedef struct xfs_attr_leafblock {
......@@ -143,8 +143,8 @@ typedef struct xfs_attr_leafblock {
static inline xfs_attr_leaf_name_remote_t *
xfs_attr_leaf_name_remote(xfs_attr_leafblock_t *leafp, int idx)
{
return (xfs_attr_leaf_name_remote_t *) &((char *)
(leafp))[INT_GET((leafp)->entries[idx].nameidx, ARCH_CONVERT)];
return (xfs_attr_leaf_name_remote_t *)
&((char *)leafp)[be16_to_cpu(leafp->entries[idx].nameidx)];
}
#define XFS_ATTR_LEAF_NAME_LOCAL(leafp,idx) \
......@@ -152,16 +152,15 @@ xfs_attr_leaf_name_remote(xfs_attr_leafblock_t *leafp, int idx)
static inline xfs_attr_leaf_name_local_t *
xfs_attr_leaf_name_local(xfs_attr_leafblock_t *leafp, int idx)
{
return (xfs_attr_leaf_name_local_t *) &((char *)
(leafp))[INT_GET((leafp)->entries[idx].nameidx, ARCH_CONVERT)];
return (xfs_attr_leaf_name_local_t *)
&((char *)leafp)[be16_to_cpu(leafp->entries[idx].nameidx)];
}
#define XFS_ATTR_LEAF_NAME(leafp,idx) \
xfs_attr_leaf_name(leafp,idx)
static inline char *xfs_attr_leaf_name(xfs_attr_leafblock_t *leafp, int idx)
{
return (&((char *)
(leafp))[INT_GET((leafp)->entries[idx].nameidx, ARCH_CONVERT)]);
return &((char *)leafp)[be16_to_cpu(leafp->entries[idx].nameidx)];
}
/*
......
......@@ -32,8 +32,8 @@ struct xfs_inode;
*/
typedef struct xfs_attr_shortform {
struct xfs_attr_sf_hdr { /* constant-structure header block */
__uint16_t totsize; /* total bytes in shortform list */
__uint8_t count; /* count of active entries */
__be16 totsize; /* total bytes in shortform list */
__u8 count; /* count of active entries */
} hdr;
struct xfs_attr_sf_entry {
__uint8_t namelen; /* actual length of name (no NULL) */
......@@ -66,8 +66,8 @@ typedef struct xfs_attr_sf_sort {
#define XFS_ATTR_SF_NEXTENTRY(sfep) /* next entry in struct */ \
((xfs_attr_sf_entry_t *)((char *)(sfep) + XFS_ATTR_SF_ENTSIZE(sfep)))
#define XFS_ATTR_SF_TOTSIZE(dp) /* total space in use */ \
(INT_GET(((xfs_attr_shortform_t *) \
((dp)->i_afp->if_u1.if_data))->hdr.totsize, ARCH_CONVERT))
(be16_to_cpu(((xfs_attr_shortform_t *) \
((dp)->i_afp->if_u1.if_data))->hdr.totsize))
#if defined(XFS_ATTR_TRACE)
/*
......
......@@ -20,6 +20,7 @@
struct getbmap;
struct xfs_bmbt_irec;
struct xfs_ifork;
struct xfs_inode;
struct xfs_mount;
struct xfs_trans;
......@@ -347,9 +348,28 @@ xfs_bmap_count_blocks(
*/
int
xfs_check_nostate_extents(
xfs_bmbt_rec_t *ep,
struct xfs_ifork *ifp,
xfs_extnum_t idx,
xfs_extnum_t num);
/*
* Call xfs_bmap_do_search_extents() to search for the extent
* record containing block bno. If in multi-level in-core extent
* allocation mode, find and extract the target extent buffer,
* otherwise just use the direct extent list.
*/
xfs_bmbt_rec_t *
xfs_bmap_search_multi_extents(struct xfs_ifork *, xfs_fileoff_t, int *,
xfs_extnum_t *, xfs_bmbt_irec_t *, xfs_bmbt_irec_t *);
/*
* Search an extent list for the extent which includes block
* bno.
*/
xfs_bmbt_rec_t *xfs_bmap_do_search_extents(xfs_bmbt_rec_t *,
xfs_extnum_t, xfs_extnum_t, xfs_fileoff_t, int *,
xfs_extnum_t *, xfs_bmbt_irec_t *, xfs_bmbt_irec_t *);
#endif /* __KERNEL__ */
#endif /* __XFS_BMAP_H__ */
......@@ -2754,7 +2754,7 @@ xfs_bmbt_update(
}
/*
* Check an extent list, which has just been read, for
* Check extent records, which have just been read, for
* any bit in the extent flag field. ASSERT on debug
* kernels, as this condition should not occur.
* Return an error condition (1) if any flags found,
......@@ -2763,10 +2763,14 @@ xfs_bmbt_update(
int
xfs_check_nostate_extents(
xfs_bmbt_rec_t *ep,
xfs_ifork_t *ifp,
xfs_extnum_t idx,
xfs_extnum_t num)
{
for (; num > 0; num--, ep++) {
xfs_bmbt_rec_t *ep;
for (; num > 0; num--, idx++) {
ep = xfs_iext_get_ext(ifp, idx);
if ((ep->l0 >>
(64 - BMBT_EXNTFLAG_BITLEN)) != 0) {
ASSERT(0);
......
......@@ -372,14 +372,6 @@ extern int xfs_bmbt_get_rec(struct xfs_btree_cur *, xfs_fileoff_t *,
xfs_exntst_t *, int *);
#endif
/*
* Search an extent list for the extent which includes block
* bno.
*/
xfs_bmbt_rec_t *xfs_bmap_do_search_extents(xfs_bmbt_rec_t *,
xfs_extnum_t, xfs_extnum_t, xfs_fileoff_t, int *,
xfs_extnum_t *, xfs_bmbt_irec_t *, xfs_bmbt_irec_t *);
#endif /* __KERNEL__ */
#endif /* __XFS_BMAP_BTREE_H__ */
......@@ -68,8 +68,6 @@ struct xfs_mount_args {
* enforcement */
#define XFSMNT_PQUOTAENF 0x00000040 /* IRIX project quota limit
* enforcement */
#define XFSMNT_NOATIME 0x00000100 /* don't modify access
* times on reads */
#define XFSMNT_NOALIGN 0x00000200 /* don't allocate at
* stripe boundaries*/
#define XFSMNT_RETERR 0x00000400 /* return error to user */
......
......@@ -45,10 +45,10 @@ struct zone;
(XFS_DIR_IS_V1(mp) ? XFS_DIR_LEAF_MAGIC : XFS_DIR2_LEAFN_MAGIC)
typedef struct xfs_da_blkinfo {
xfs_dablk_t forw; /* previous block in list */
xfs_dablk_t back; /* following block in list */
__uint16_t magic; /* validity check on block */
__uint16_t pad; /* unused */
__be32 forw; /* previous block in list */
__be32 back; /* following block in list */
__be16 magic; /* validity check on block */
__be16 pad; /* unused */
} xfs_da_blkinfo_t;
/*
......@@ -65,12 +65,12 @@ typedef struct xfs_da_blkinfo {
typedef struct xfs_da_intnode {
struct xfs_da_node_hdr { /* constant-structure header block */
xfs_da_blkinfo_t info; /* block type, links, etc. */
__uint16_t count; /* count of active entries */
__uint16_t level; /* level above leaves (leaf == 0) */
__be16 count; /* count of active entries */
__be16 level; /* level above leaves (leaf == 0) */
} hdr;
struct xfs_da_node_entry {
xfs_dahash_t hashval; /* hash value for this descendant */
xfs_dablk_t before; /* Btree block before this key */
__be32 hashval; /* hash value for this descendant */
__be32 before; /* Btree block before this key */
} btree[1]; /* variable sized array of keys */
} xfs_da_intnode_t;
typedef struct xfs_da_node_hdr xfs_da_node_hdr_t;
......
......@@ -83,7 +83,7 @@ xfs_swapext(
/* Pull information for the target fd */
if (((fp = fget((int)sxp->sx_fdtarget)) == NULL) ||
((vp = LINVFS_GET_VP(fp->f_dentry->d_inode)) == NULL)) {
((vp = vn_from_inode(fp->f_dentry->d_inode)) == NULL)) {
error = XFS_ERROR(EINVAL);
goto error0;
}
......@@ -95,7 +95,7 @@ xfs_swapext(
}
if (((tfp = fget((int)sxp->sx_fdtmp)) == NULL) ||
((tvp = LINVFS_GET_VP(tfp->f_dentry->d_inode)) == NULL)) {
((tvp = vn_from_inode(tfp->f_dentry->d_inode)) == NULL)) {
error = XFS_ERROR(EINVAL);
goto error0;
}
......
......@@ -634,7 +634,7 @@ xfs_dir_leaf_removename(xfs_da_args_t *args, int *count, int *totallen)
return(retval);
ASSERT(bp != NULL);
leaf = bp->data;
ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC);
ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR_LEAF_MAGIC);
retval = xfs_dir_leaf_lookup_int(bp, args, &index);
if (retval == EEXIST) {
(void)xfs_dir_leaf_remove(args->trans, bp, index);
......@@ -912,7 +912,7 @@ xfs_dir_node_getdents(xfs_trans_t *trans, xfs_inode_t *dp, uio_t *uio,
return(error);
if (bp)
leaf = bp->data;
if (bp && INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) != XFS_DIR_LEAF_MAGIC) {
if (bp && be16_to_cpu(leaf->hdr.info.magic) != XFS_DIR_LEAF_MAGIC) {
xfs_dir_trace_g_dub("node: block not a leaf",
dp, uio, bno);
xfs_da_brelse(trans, bp);
......@@ -949,17 +949,17 @@ xfs_dir_node_getdents(xfs_trans_t *trans, xfs_inode_t *dp, uio_t *uio,
if (bp == NULL)
return(XFS_ERROR(EFSCORRUPTED));
node = bp->data;
if (INT_GET(node->hdr.info.magic, ARCH_CONVERT) != XFS_DA_NODE_MAGIC)
if (be16_to_cpu(node->hdr.info.magic) != XFS_DA_NODE_MAGIC)
break;
btree = &node->btree[0];
xfs_dir_trace_g_dun("node: node detail", dp, uio, node);
for (i = 0; i < INT_GET(node->hdr.count, ARCH_CONVERT); btree++, i++) {
if (INT_GET(btree->hashval, ARCH_CONVERT) >= cookhash) {
bno = INT_GET(btree->before, ARCH_CONVERT);
for (i = 0; i < be16_to_cpu(node->hdr.count); btree++, i++) {
if (be32_to_cpu(btree->hashval) >= cookhash) {
bno = be32_to_cpu(btree->before);
break;
}
}
if (i == INT_GET(node->hdr.count, ARCH_CONVERT)) {
if (i == be16_to_cpu(node->hdr.count)) {
xfs_da_brelse(trans, bp);
xfs_dir_trace_g_du("node: hash beyond EOF",
dp, uio);
......@@ -982,7 +982,7 @@ xfs_dir_node_getdents(xfs_trans_t *trans, xfs_inode_t *dp, uio_t *uio,
*/
for (;;) {
leaf = bp->data;
if (unlikely(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) != XFS_DIR_LEAF_MAGIC)) {
if (unlikely(be16_to_cpu(leaf->hdr.info.magic) != XFS_DIR_LEAF_MAGIC)) {
xfs_dir_trace_g_dul("node: not a leaf", dp, uio, leaf);
xfs_da_brelse(trans, bp);
XFS_CORRUPTION_ERROR("xfs_dir_node_getdents(1)",
......@@ -990,7 +990,7 @@ xfs_dir_node_getdents(xfs_trans_t *trans, xfs_inode_t *dp, uio_t *uio,
return XFS_ERROR(EFSCORRUPTED);
}
xfs_dir_trace_g_dul("node: leaf detail", dp, uio, leaf);
if ((nextbno = INT_GET(leaf->hdr.info.forw, ARCH_CONVERT))) {
if ((nextbno = be32_to_cpu(leaf->hdr.info.forw))) {
nextda = xfs_da_reada_buf(trans, dp, nextbno,
XFS_DATA_FORK);
} else
......@@ -1118,21 +1118,20 @@ void
xfs_dir_trace_g_dun(char *where, xfs_inode_t *dp, uio_t *uio,
xfs_da_intnode_t *node)
{
int last = INT_GET(node->hdr.count, ARCH_CONVERT) - 1;
int last = be16_to_cpu(node->hdr.count) - 1;
xfs_dir_trace_enter(XFS_DIR_KTRACE_G_DUN, where,
(void *)dp, (void *)dp->i_mount,
(void *)((unsigned long)(uio->uio_offset >> 32)),
(void *)((unsigned long)(uio->uio_offset & 0xFFFFFFFF)),
(void *)(unsigned long)uio->uio_resid,
(void *)(unsigned long)be32_to_cpu(node->hdr.info.forw),
(void *)(unsigned long)
INT_GET(node->hdr.info.forw, ARCH_CONVERT),
be16_to_cpu(node->hdr.count),
(void *)(unsigned long)
INT_GET(node->hdr.count, ARCH_CONVERT),
be32_to_cpu(node->btree[0].hashval),
(void *)(unsigned long)
INT_GET(node->btree[0].hashval, ARCH_CONVERT),
(void *)(unsigned long)
INT_GET(node->btree[last].hashval, ARCH_CONVERT),
be32_to_cpu(node->btree[last].hashval),
NULL, NULL, NULL);
}
......@@ -1150,8 +1149,7 @@ xfs_dir_trace_g_dul(char *where, xfs_inode_t *dp, uio_t *uio,
(void *)((unsigned long)(uio->uio_offset >> 32)),
(void *)((unsigned long)(uio->uio_offset & 0xFFFFFFFF)),
(void *)(unsigned long)uio->uio_resid,
(void *)(unsigned long)
INT_GET(leaf->hdr.info.forw, ARCH_CONVERT),
(void *)(unsigned long)be32_to_cpu(leaf->hdr.info.forw),
(void *)(unsigned long)
INT_GET(leaf->hdr.count, ARCH_CONVERT),
(void *)(unsigned long)
......
......@@ -64,7 +64,7 @@ typedef int (*xfs_dir2_put_t)(struct xfs_dir2_put_args *pa);
typedef struct xfs_dir2_put_args {
xfs_off_t cook; /* cookie of (next) entry */
xfs_intino_t ino; /* inode number */
struct xfs_dirent *dbp; /* buffer pointer */
xfs_dirent_t *dbp; /* buffer pointer */
char *name; /* directory entry name */
int namelen; /* length of name */
int done; /* output: set if value was stored */
......@@ -75,18 +75,13 @@ typedef struct xfs_dir2_put_args {
/*
* Other interfaces used by the rest of the dir v2 code.
*/
extern int
xfs_dir2_grow_inode(struct xfs_da_args *args, int space,
extern int xfs_dir2_grow_inode(struct xfs_da_args *args, int space,
xfs_dir2_db_t *dbp);
extern int
xfs_dir2_isblock(struct xfs_trans *tp, struct xfs_inode *dp, int *vp);
extern int
xfs_dir2_isleaf(struct xfs_trans *tp, struct xfs_inode *dp, int *vp);
extern int
xfs_dir2_shrink_inode(struct xfs_da_args *args, xfs_dir2_db_t db,
extern int xfs_dir2_isblock(struct xfs_trans *tp, struct xfs_inode *dp,
int *vp);
extern int xfs_dir2_isleaf(struct xfs_trans *tp, struct xfs_inode *dp,
int *vp);
extern int xfs_dir2_shrink_inode(struct xfs_da_args *args, xfs_dir2_db_t db,
struct xfs_dabuf *bp);
#endif /* __XFS_DIR2_H__ */
......@@ -43,8 +43,8 @@ struct xfs_trans;
#define XFS_DIR2_BLOCK_MAGIC 0x58443242 /* XD2B: for one block dirs */
typedef struct xfs_dir2_block_tail {
__uint32_t count; /* count of leaf entries */
__uint32_t stale; /* count of stale lf entries */
__be32 count; /* count of leaf entries */
__be32 stale; /* count of stale lf entries */
} xfs_dir2_block_tail_t;
/*
......@@ -75,8 +75,7 @@ xfs_dir2_block_tail_p(struct xfs_mount *mp, xfs_dir2_block_t *block)
static inline struct xfs_dir2_leaf_entry *
xfs_dir2_block_leaf_p(xfs_dir2_block_tail_t *btp)
{
return (((struct xfs_dir2_leaf_entry *)
(btp)) - INT_GET((btp)->count, ARCH_CONVERT));
return ((struct xfs_dir2_leaf_entry *)btp) - be32_to_cpu(btp->count);
}
/*
......
......@@ -65,8 +65,8 @@ struct xfs_trans;
* The freespace will be formatted as a xfs_dir2_data_unused_t.
*/
typedef struct xfs_dir2_data_free {
xfs_dir2_data_off_t offset; /* start of freespace */
xfs_dir2_data_off_t length; /* length of freespace */
__be16 offset; /* start of freespace */
__be16 length; /* length of freespace */
} xfs_dir2_data_free_t;
/*
......@@ -75,7 +75,7 @@ typedef struct xfs_dir2_data_free {
* The code knows that XFS_DIR2_DATA_FD_COUNT is 3.
*/
typedef struct xfs_dir2_data_hdr {
__uint32_t magic; /* XFS_DIR2_DATA_MAGIC */
__be32 magic; /* XFS_DIR2_DATA_MAGIC */
/* or XFS_DIR2_BLOCK_MAGIC */
xfs_dir2_data_free_t bestfree[XFS_DIR2_DATA_FD_COUNT];
} xfs_dir2_data_hdr_t;
......@@ -97,10 +97,10 @@ typedef struct xfs_dir2_data_entry {
* Tag appears as the last 2 bytes.
*/
typedef struct xfs_dir2_data_unused {
__uint16_t freetag; /* XFS_DIR2_DATA_FREE_TAG */
xfs_dir2_data_off_t length; /* total free length */
__be16 freetag; /* XFS_DIR2_DATA_FREE_TAG */
__be16 length; /* total free length */
/* variable offset */
xfs_dir2_data_off_t tag; /* starting offset of us */
__be16 tag; /* starting offset of us */
} xfs_dir2_data_unused_t;
typedef union {
......@@ -134,12 +134,11 @@ static inline int xfs_dir2_data_entsize(int n)
* Pointer to an entry's tag word.
*/
#define XFS_DIR2_DATA_ENTRY_TAG_P(dep) xfs_dir2_data_entry_tag_p(dep)
static inline xfs_dir2_data_off_t *
static inline __be16 *
xfs_dir2_data_entry_tag_p(xfs_dir2_data_entry_t *dep)
{
return (xfs_dir2_data_off_t *) \
((char *)(dep) + XFS_DIR2_DATA_ENTSIZE((dep)->namelen) - \
(uint)sizeof(xfs_dir2_data_off_t));
return (__be16 *)((char *)dep +
XFS_DIR2_DATA_ENTSIZE(dep->namelen) - sizeof(__be16));
}
/*
......@@ -147,12 +146,11 @@ xfs_dir2_data_entry_tag_p(xfs_dir2_data_entry_t *dep)
*/
#define XFS_DIR2_DATA_UNUSED_TAG_P(dup) \
xfs_dir2_data_unused_tag_p(dup)
static inline xfs_dir2_data_off_t *
static inline __be16 *
xfs_dir2_data_unused_tag_p(xfs_dir2_data_unused_t *dup)
{
return (xfs_dir2_data_off_t *) \
((char *)(dup) + INT_GET((dup)->length, ARCH_CONVERT) \
- (uint)sizeof(xfs_dir2_data_off_t));
return (__be16 *)((char *)dup +
be16_to_cpu(dup->length) - sizeof(__be16));
}
/*
......
......@@ -46,23 +46,23 @@ typedef __uint32_t xfs_dir2_dataptr_t;
*/
typedef struct xfs_dir2_leaf_hdr {
xfs_da_blkinfo_t info; /* header for da routines */
__uint16_t count; /* count of entries */
__uint16_t stale; /* count of stale entries */
__be16 count; /* count of entries */
__be16 stale; /* count of stale entries */
} xfs_dir2_leaf_hdr_t;
/*
* Leaf block entry.
*/
typedef struct xfs_dir2_leaf_entry {
xfs_dahash_t hashval; /* hash value of name */
xfs_dir2_dataptr_t address; /* address of data entry */
__be32 hashval; /* hash value of name */
__be32 address; /* address of data entry */
} xfs_dir2_leaf_entry_t;
/*
* Leaf block tail.
*/
typedef struct xfs_dir2_leaf_tail {
__uint32_t bestcount;
__be32 bestcount;
} xfs_dir2_leaf_tail_t;
/*
......@@ -105,11 +105,10 @@ xfs_dir2_leaf_tail_p(struct xfs_mount *mp, xfs_dir2_leaf_t *lp)
* Get address of the bests array in the single-leaf block.
*/
#define XFS_DIR2_LEAF_BESTS_P(ltp) xfs_dir2_leaf_bests_p(ltp)
static inline xfs_dir2_data_off_t *
static inline __be16 *
xfs_dir2_leaf_bests_p(xfs_dir2_leaf_tail_t *ltp)
{
return (xfs_dir2_data_off_t *)
(ltp) - INT_GET((ltp)->bestcount, ARCH_CONVERT);
return (__be16 *)ltp - be32_to_cpu(ltp->bestcount);
}
/*
......