Commit a0c30610 authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/btrfs-unstable

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/btrfs-unstable: (58 commits)
  Btrfs: use the device_list_mutex during write_dev_supers
  Btrfs: setup free ino caching in a more asynchronous way
  btrfs scrub: don't coalesce pages that are logically discontiguous
  Btrfs: return -ENOMEM in clear_extent_bit
  Btrfs: add mount -o auto_defrag
  Btrfs: using rcu lock in the reader side of devices list
  Btrfs: drop unnecessary device lock
  Btrfs: fix the race between remove dev and alloc chunk
  Btrfs: fix the race between reading and updating devices
  Btrfs: fix bh leak on __btrfs_open_devices path
  Btrfs: fix unsafe usage of merge_state
  Btrfs: allocate extent state and check the result properly
  fs/btrfs: Add missing btrfs_free_path
  Btrfs: check return value of btrfs_inc_extent_ref()
  Btrfs: return error to caller if read_one_inode() fails
  Btrfs: BUG_ON is deleted from the caller of btrfs_truncate_item & btrfs_extend_item
  Btrfs: return error code to caller when btrfs_del_item fails
  Btrfs: return error code to caller when btrfs_previous_item fails
  btrfs: fix typo 'testeing' -> 'testing'
  btrfs: typo: 'btrfS' -> 'btrfs'
  ...
parents 10799db6 174ba509
......@@ -7,4 +7,4 @@ btrfs-y += super.o ctree.o extent-tree.o print-tree.o root-tree.o dir-item.o \
extent_map.o sysfs.o struct-funcs.o xattr.o ordered-data.o \
extent_io.o volumes.o async-thread.o ioctl.o locking.o orphan.o \
export.o tree-log.o acl.o free-space-cache.o zlib.o lzo.o \
compression.o delayed-ref.o relocation.o
compression.o delayed-ref.o relocation.o delayed-inode.o scrub.o
......@@ -288,7 +288,7 @@ int btrfs_acl_chmod(struct inode *inode)
return 0;
acl = btrfs_get_acl(inode, ACL_TYPE_ACCESS);
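	/* IS_ERR_OR_NULL() folds the two tests below into one; note that
	 * PTR_ERR(NULL) is 0, so an absent ACL still returns success. */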
if (IS_ERR(acl) || !acl)
if (IS_ERR_OR_NULL(acl))
return PTR_ERR(acl);
clone = posix_acl_clone(acl, GFP_KERNEL);
......
......@@ -22,6 +22,7 @@
#include "extent_map.h"
#include "extent_io.h"
#include "ordered-data.h"
#include "delayed-inode.h"
/* in memory btrfs inode */
struct btrfs_inode {
......@@ -152,20 +153,34 @@ struct btrfs_inode {
unsigned ordered_data_close:1;
unsigned orphan_meta_reserved:1;
unsigned dummy_inode:1;
unsigned in_defrag:1;
/*
* always compress this one file
*/
unsigned force_compress:4;
struct btrfs_delayed_node *delayed_node;
struct inode vfs_inode;
};
extern unsigned char btrfs_filetype_table[];
static inline struct btrfs_inode *BTRFS_I(struct inode *inode)
{
return container_of(inode, struct btrfs_inode, vfs_inode);
}
static inline u64 btrfs_ino(struct inode *inode)
{
u64 ino = BTRFS_I(inode)->location.objectid;
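	/*
	 * Normal inodes keep their inode number in location.objectid;
	 * subvolume roots keep a root id there instead, which sorts at or
	 * below BTRFS_FIRST_FREE_OBJECTID, so fall back to the VFS i_ino.
	 */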
if (ino <= BTRFS_FIRST_FREE_OBJECTID)
ino = inode->i_ino;
return ino;
}
static inline void btrfs_i_size_write(struct inode *inode, u64 size)
{
i_size_write(inode, size);
......
......@@ -125,9 +125,10 @@ static int check_compressed_csum(struct inode *inode,
kunmap_atomic(kaddr, KM_USER0);
if (csum != *cb_sum) {
printk(KERN_INFO "btrfs csum failed ino %lu "
printk(KERN_INFO "btrfs csum failed ino %llu "
"extent %llu csum %u "
"wanted %u mirror %d\n", inode->i_ino,
"wanted %u mirror %d\n",
(unsigned long long)btrfs_ino(inode),
(unsigned long long)disk_start,
csum, *cb_sum, cb->mirror_num);
ret = -EIO;
......@@ -332,7 +333,7 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
struct compressed_bio *cb;
unsigned long bytes_left;
struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
int page_index = 0;
int pg_index = 0;
struct page *page;
u64 first_byte = disk_start;
struct block_device *bdev;
......@@ -366,8 +367,8 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
/* create and submit bios for the compressed pages */
bytes_left = compressed_len;
for (page_index = 0; page_index < cb->nr_pages; page_index++) {
page = compressed_pages[page_index];
for (pg_index = 0; pg_index < cb->nr_pages; pg_index++) {
page = compressed_pages[pg_index];
page->mapping = inode->i_mapping;
if (bio->bi_size)
ret = io_tree->ops->merge_bio_hook(page, 0,
......@@ -432,7 +433,7 @@ static noinline int add_ra_bio_pages(struct inode *inode,
struct compressed_bio *cb)
{
unsigned long end_index;
unsigned long page_index;
unsigned long pg_index;
u64 last_offset;
u64 isize = i_size_read(inode);
int ret;
......@@ -456,13 +457,13 @@ static noinline int add_ra_bio_pages(struct inode *inode,
end_index = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT;
while (last_offset < compressed_end) {
page_index = last_offset >> PAGE_CACHE_SHIFT;
pg_index = last_offset >> PAGE_CACHE_SHIFT;
if (page_index > end_index)
if (pg_index > end_index)
break;
rcu_read_lock();
page = radix_tree_lookup(&mapping->page_tree, page_index);
page = radix_tree_lookup(&mapping->page_tree, pg_index);
rcu_read_unlock();
if (page) {
misses++;
......@@ -476,7 +477,7 @@ static noinline int add_ra_bio_pages(struct inode *inode,
if (!page)
break;
if (add_to_page_cache_lru(page, mapping, page_index,
if (add_to_page_cache_lru(page, mapping, pg_index,
GFP_NOFS)) {
page_cache_release(page);
goto next;
......@@ -560,7 +561,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
unsigned long uncompressed_len = bio->bi_vcnt * PAGE_CACHE_SIZE;
unsigned long compressed_len;
unsigned long nr_pages;
unsigned long page_index;
unsigned long pg_index;
struct page *page;
struct block_device *bdev;
struct bio *comp_bio;
......@@ -613,10 +614,10 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
for (page_index = 0; page_index < nr_pages; page_index++) {
cb->compressed_pages[page_index] = alloc_page(GFP_NOFS |
for (pg_index = 0; pg_index < nr_pages; pg_index++) {
cb->compressed_pages[pg_index] = alloc_page(GFP_NOFS |
__GFP_HIGHMEM);
if (!cb->compressed_pages[page_index])
if (!cb->compressed_pages[pg_index])
goto fail2;
}
cb->nr_pages = nr_pages;
......@@ -634,8 +635,8 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
comp_bio->bi_end_io = end_compressed_bio_read;
atomic_inc(&cb->pending_bios);
for (page_index = 0; page_index < nr_pages; page_index++) {
page = cb->compressed_pages[page_index];
for (pg_index = 0; pg_index < nr_pages; pg_index++) {
page = cb->compressed_pages[pg_index];
page->mapping = inode->i_mapping;
page->index = em_start >> PAGE_CACHE_SHIFT;
......@@ -702,8 +703,8 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
return 0;
fail2:
for (page_index = 0; page_index < nr_pages; page_index++)
free_page((unsigned long)cb->compressed_pages[page_index]);
for (pg_index = 0; pg_index < nr_pages; pg_index++)
free_page((unsigned long)cb->compressed_pages[pg_index]);
kfree(cb->compressed_pages);
fail1:
......@@ -945,7 +946,7 @@ void btrfs_exit_compress(void)
int btrfs_decompress_buf2page(char *buf, unsigned long buf_start,
unsigned long total_out, u64 disk_start,
struct bio_vec *bvec, int vcnt,
unsigned long *page_index,
unsigned long *pg_index,
unsigned long *pg_offset)
{
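	/*
	 * *pg_index and *pg_offset form a cursor into bvec that persists
	 * across calls, so successive decompressed chunks land back to
	 * back in the destination pages.
	 */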
unsigned long buf_offset;
......@@ -954,7 +955,7 @@ int btrfs_decompress_buf2page(char *buf, unsigned long buf_start,
unsigned long working_bytes = total_out - buf_start;
unsigned long bytes;
char *kaddr;
struct page *page_out = bvec[*page_index].bv_page;
struct page *page_out = bvec[*pg_index].bv_page;
/*
* start byte is the first byte of the page we're currently
......@@ -995,11 +996,11 @@ int btrfs_decompress_buf2page(char *buf, unsigned long buf_start,
/* check if we need to pick another page */
if (*pg_offset == PAGE_CACHE_SIZE) {
(*page_index)++;
if (*page_index >= vcnt)
(*pg_index)++;
if (*pg_index >= vcnt)
return 0;
page_out = bvec[*page_index].bv_page;
page_out = bvec[*pg_index].bv_page;
*pg_offset = 0;
start_byte = page_offset(page_out) - disk_start;
......
......@@ -37,7 +37,7 @@ int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page,
int btrfs_decompress_buf2page(char *buf, unsigned long buf_start,
unsigned long total_out, u64 disk_start,
struct bio_vec *bvec, int vcnt,
unsigned long *page_index,
unsigned long *pg_index,
unsigned long *pg_offset);
int btrfs_submit_compressed_write(struct inode *inode, u64 start,
......
......@@ -38,11 +38,6 @@ static int balance_node_right(struct btrfs_trans_handle *trans,
struct extent_buffer *src_buf);
static int del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
struct btrfs_path *path, int level, int slot);
static int setup_items_for_insert(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct btrfs_path *path,
struct btrfs_key *cpu_key, u32 *data_size,
u32 total_data, u32 total_size, int nr);
struct btrfs_path *btrfs_alloc_path(void)
{
......@@ -107,7 +102,7 @@ void btrfs_free_path(struct btrfs_path *p)
{
if (!p)
return;
btrfs_release_path(NULL, p);
btrfs_release_path(p);
kmem_cache_free(btrfs_path_cachep, p);
}
......@@ -117,7 +112,7 @@ void btrfs_free_path(struct btrfs_path *p)
*
* It is safe to call this on paths that have no locks or extent buffers held.
*/
noinline void btrfs_release_path(struct btrfs_root *root, struct btrfs_path *p)
noinline void btrfs_release_path(struct btrfs_path *p)
{
int i;
......@@ -1328,7 +1323,7 @@ static noinline int reada_for_balance(struct btrfs_root *root,
ret = -EAGAIN;
/* release the whole path */
btrfs_release_path(root, path);
btrfs_release_path(path);
/* read the blocks */
if (block1)
......@@ -1475,7 +1470,7 @@ read_block_for_search(struct btrfs_trans_handle *trans,
return 0;
}
free_extent_buffer(tmp);
btrfs_release_path(NULL, p);
btrfs_release_path(p);
return -EIO;
}
}
......@@ -1494,7 +1489,7 @@ read_block_for_search(struct btrfs_trans_handle *trans,
if (p->reada)
reada_for_search(root, p, level, slot, key->objectid);
btrfs_release_path(NULL, p);
btrfs_release_path(p);
ret = -EAGAIN;
tmp = read_tree_block(root, blocknr, blocksize, 0);
......@@ -1563,7 +1558,7 @@ setup_nodes_for_search(struct btrfs_trans_handle *trans,
}
b = p->nodes[level];
if (!b) {
btrfs_release_path(NULL, p);
btrfs_release_path(p);
goto again;
}
BUG_ON(btrfs_header_nritems(b) == 1);
......@@ -1753,7 +1748,7 @@ int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
if (!p->leave_spinning)
btrfs_set_path_blocking(p);
if (ret < 0)
btrfs_release_path(root, p);
btrfs_release_path(p);
return ret;
}
......@@ -3026,7 +3021,7 @@ static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
struct btrfs_file_extent_item);
extent_len = btrfs_file_extent_num_bytes(leaf, fi);
}
btrfs_release_path(root, path);
btrfs_release_path(path);
path->keep_locks = 1;
path->search_for_split = 1;
......@@ -3216,7 +3211,6 @@ int btrfs_truncate_item(struct btrfs_trans_handle *trans,
struct btrfs_path *path,
u32 new_size, int from_end)
{
int ret = 0;
int slot;
struct extent_buffer *leaf;
struct btrfs_item *item;
......@@ -3314,12 +3308,11 @@ int btrfs_truncate_item(struct btrfs_trans_handle *trans,
btrfs_set_item_size(leaf, item, new_size);
btrfs_mark_buffer_dirty(leaf);
ret = 0;
if (btrfs_leaf_free_space(root, leaf) < 0) {
btrfs_print_leaf(root, leaf);
BUG();
}
return ret;
return 0;
}
/*
......@@ -3329,7 +3322,6 @@ int btrfs_extend_item(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct btrfs_path *path,
u32 data_size)
{
int ret = 0;
int slot;
struct extent_buffer *leaf;
struct btrfs_item *item;
......@@ -3394,12 +3386,11 @@ int btrfs_extend_item(struct btrfs_trans_handle *trans,
btrfs_set_item_size(leaf, item, old_size + data_size);
btrfs_mark_buffer_dirty(leaf);
ret = 0;
if (btrfs_leaf_free_space(root, leaf) < 0) {
btrfs_print_leaf(root, leaf);
BUG();
}
return ret;
return 0;
}
/*
......@@ -3559,8 +3550,7 @@ int btrfs_insert_some_items(struct btrfs_trans_handle *trans,
* to save stack depth by doing the bulk of the work in a function
* that doesn't call btrfs_search_slot
*/
static noinline_for_stack int
setup_items_for_insert(struct btrfs_trans_handle *trans,
int setup_items_for_insert(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct btrfs_path *path,
struct btrfs_key *cpu_key, u32 *data_size,
u32 total_data, u32 total_size, int nr)
......@@ -3647,7 +3637,6 @@ setup_items_for_insert(struct btrfs_trans_handle *trans,
ret = 0;
if (slot == 0) {
struct btrfs_disk_key disk_key;
btrfs_cpu_key_to_disk(&disk_key, cpu_key);
ret = fixup_low_keys(trans, root, path, &disk_key, 1);
}
......@@ -3949,7 +3938,7 @@ int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
else
return 1;
btrfs_release_path(root, path);
btrfs_release_path(path);
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
if (ret < 0)
return ret;
......@@ -4073,7 +4062,7 @@ int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
sret = btrfs_find_next_key(root, path, min_key, level,
cache_only, min_trans);
if (sret == 0) {
btrfs_release_path(root, path);
btrfs_release_path(path);
goto again;
} else {
goto out;
......@@ -4152,7 +4141,7 @@ int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
btrfs_node_key_to_cpu(c, &cur_key, slot);
orig_lowest = path->lowest_level;
btrfs_release_path(root, path);
btrfs_release_path(path);
path->lowest_level = level;
ret = btrfs_search_slot(NULL, root, &cur_key, path,
0, 0);
......@@ -4229,7 +4218,7 @@ int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
again:
level = 1;
next = NULL;
btrfs_release_path(root, path);
btrfs_release_path(path);
path->keep_locks = 1;
......@@ -4285,7 +4274,7 @@ int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
goto again;
if (ret < 0) {
btrfs_release_path(root, path);
btrfs_release_path(path);
goto done;
}
......@@ -4324,7 +4313,7 @@ int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
goto again;
if (ret < 0) {
btrfs_release_path(root, path);
btrfs_release_path(path);
goto done;
}
......
This diff is collapsed.
This diff is collapsed.
/*
* Copyright (C) 2011 Fujitsu. All rights reserved.
* Written by Miao Xie <miaox@cn.fujitsu.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License v2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this program; if not, write to the
* Free Software Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 02111-1307, USA.
*/
#ifndef __DELAYED_TREE_OPERATION_H
#define __DELAYED_TREE_OPERATION_H
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <asm/atomic.h>
#include "ctree.h"
/* types of the delayed item */
#define BTRFS_DELAYED_INSERTION_ITEM 1
#define BTRFS_DELAYED_DELETION_ITEM 2
struct btrfs_delayed_root {
spinlock_t lock;
struct list_head node_list;
/*
* Used for delayed nodes that are waiting to be dealt with by the
* worker. If a delayed node is inserted into the work queue, we
* drop it from this list.
*/
struct list_head prepare_list;
atomic_t items; /* for delayed items */
int nodes; /* for delayed nodes */
wait_queue_head_t wait;
};
struct btrfs_delayed_node {
u64 inode_id;
u64 bytes_reserved;
struct btrfs_root *root;
/* Used to add the node into the delayed root's node list. */
struct list_head n_list;
/*
* Used to add the node into the prepare list; the nodes in this list
* are waiting to be dealt with by the async worker.
*/
struct list_head p_list;
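	/* pending insertion and deletion items, each kept in its own rbtree */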
struct rb_root ins_root;
struct rb_root del_root;
struct mutex mutex;
struct btrfs_inode_item inode_item;
atomic_t refs;
u64 index_cnt;
bool in_list;
bool inode_dirty;
int count;
};
struct btrfs_delayed_item {
struct rb_node rb_node;
struct btrfs_key key;
struct list_head tree_list; /* used for batch insert/delete items */
struct list_head readdir_list; /* used for readdir items */
u64 bytes_reserved;
struct btrfs_block_rsv *block_rsv;
struct btrfs_delayed_node *delayed_node;
atomic_t refs;
int ins_or_del;
u32 data_len;
char data[0];
};
static inline void btrfs_init_delayed_root(
struct btrfs_delayed_root *delayed_root)
{
atomic_set(&delayed_root->items, 0);
delayed_root->nodes = 0;
spin_lock_init(&delayed_root->lock);
init_waitqueue_head(&delayed_root->wait);
INIT_LIST_HEAD(&delayed_root->node_list);
INIT_LIST_HEAD(&delayed_root->prepare_list);
}
int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
struct btrfs_root *root, const char *name,
int name_len, struct inode *dir,
struct btrfs_disk_key *disk_key, u8 type,
u64 index);
int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct inode *dir,
u64 index);
int btrfs_inode_delayed_dir_index_count(struct inode *inode);
int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
void btrfs_balance_delayed_items(struct btrfs_root *root);
int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
struct inode *inode);
/* Used for evicting the inode. */
void btrfs_remove_delayed_node(struct inode *inode);
void btrfs_kill_delayed_inode_items(struct inode *inode);
int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct inode *inode);
/* Used for dropping dead roots */
void btrfs_kill_all_delayed_nodes(struct btrfs_root *root);
/* Used for readdir() */
void btrfs_get_delayed_items(struct inode *inode, struct list_head *ins_list,
struct list_head *del_list);
void btrfs_put_delayed_items(struct list_head *ins_list,
struct list_head *del_list);
int btrfs_should_delete_dir_index(struct list_head *del_list,
u64 index);
int btrfs_readdir_delayed_dir_index(struct file *filp, void *dirent,
filldir_t filldir,
struct list_head *ins_list);
/* for init */
int __init btrfs_delayed_inode_init(void);
void btrfs_delayed_inode_exit(void);
#endif
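The readdir helpers above imply a particular call pattern. A minimal sketch of a hypothetical caller (the real user of these hooks lives in inode.c and may differ in detail):

static int example_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	LIST_HEAD(ins_list);	/* delayed insertions, not yet in the btree */
	LIST_HEAD(del_list);	/* delayed deletions, still in the btree */
	int ret;

	/* snapshot the delayed items for this directory */
	btrfs_get_delayed_items(inode, &ins_list, &del_list);

	/*
	 * ... walk the on-disk DIR_INDEX items here, skipping any index
	 * that has a deletion queued:
	 *
	 *	if (btrfs_should_delete_dir_index(&del_list, found_index))
	 *		continue;
	 */

	/* emit the entries that so far exist only as delayed insertions */
	ret = btrfs_readdir_delayed_dir_index(filp, dirent, filldir, &ins_list);

	btrfs_put_delayed_items(&ins_list, &del_list);
	return ret;
}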
......@@ -280,44 +280,6 @@ int btrfs_find_ref_cluster(struct btrfs_trans_handle *trans,
return 1;
}
/*
* This checks to see if there are any delayed refs in the
* btree for a given bytenr. It returns one if it finds any
* and zero otherwise.
*
* If it only finds a head node, it returns 0.
*
* The idea is to use this when deciding if you can safely delete an
* extent from the extent allocation tree. There may be a pending
* ref in the rbtree that adds or removes references, so as long as this
* returns one you need to leave the BTRFS_EXTENT_ITEM in the extent
* allocation tree.
*/
int btrfs_delayed_ref_pending(struct btrfs_trans_handle *trans, u64 bytenr)
{
struct btrfs_delayed_ref_node *ref;
struct btrfs_delayed_ref_root *delayed_refs;
struct rb_node *prev_node;
int ret = 0;
delayed_refs = &trans->transaction->delayed_refs;
spin_lock(&delayed_refs->lock);
ref = find_ref_head(&delayed_refs->root, bytenr, NULL);
if (ref) {
prev_node = rb_prev(&ref->rb_node);
if (!prev_node)
goto out;
ref = rb_entry(prev_node, struct btrfs_delayed_ref_node,
rb_node);
if (ref->bytenr == bytenr)
ret = 1;
}
out:
spin_unlock(&delayed_refs->lock);
return ret;
}
/*
* helper function to update an extent delayed ref in the
* rbtree. existing and update must both have the same
......@@ -747,79 +709,3 @@ btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr)
return btrfs_delayed_node_to_head(ref);
return NULL;
}
/*
* add a delayed ref to the tree. This does all of the accounting required
* to make sure the delayed ref is eventually processed before this
* transaction commits.
*
* The main point of this call is to add and remove a backreference in a single
* shot, taking the lock only once, and only searching for the head node once.
*
* It is the same as doing a ref add and delete in two separate calls.
*/
#if 0
int btrfs_update_delayed_ref(struct btrfs_trans_handle *trans,
u64 bytenr, u64 num_bytes, u64 orig_parent,
u64 parent, u64 orig_ref_root, u64 ref_root,
u64 orig_ref_generation, u64 ref_generation,
u64 owner_objectid, int pin)
{
struct btrfs_delayed_ref *ref;
struct btrfs_delayed_ref *old_ref;
struct btrfs_delayed_ref_head *head_ref;
struct btrfs_delayed_ref_root *delayed_refs;
int ret;
ref = kmalloc(sizeof(*ref), GFP_NOFS);
if (!ref)
return -ENOMEM;
old_ref = kmalloc(sizeof(*old_ref), GFP_NOFS);
if (!old_ref) {
kfree(ref);
return -ENOMEM;
}
/*
* the parent = 0 case comes from cases where we don't actually
* know the parent yet. It will get updated later via an add/drop
* pair.
*/
if (parent == 0)
parent = bytenr;
if (orig_parent == 0)
orig_parent = bytenr;
head_ref = kmalloc(sizeof(*head_ref), GFP_NOFS);
if (!head_ref) {
kfree(ref);
kfree(old_ref);
return -ENOMEM;
}
delayed_refs = &trans->transaction->delayed_refs;
spin_lock(&delayed_refs->lock);
/*
* insert both the head node and the new ref without dropping
* the spin lock
*/
ret = __btrfs_add_delayed_ref(trans, &head_ref->node, bytenr, num_bytes,
(u64)-1, 0, 0, 0,
BTRFS_UPDATE_DELAYED_HEAD, 0);
BUG_ON(ret);
ret = __btrfs_add_delayed_ref(trans, &ref->node, bytenr, num_bytes,
parent, ref_root, ref_generation,
owner_objectid, BTRFS_ADD_DELAYED_REF, 0);
BUG_ON(ret);
ret = __btrfs_add_delayed_ref(trans, &old_ref->node, bytenr, num_bytes,
orig_parent, orig_ref_root,
orig_ref_generation, owner_objectid,
BTRFS_DROP_DELAYED_REF, pin);
BUG_ON(ret);
spin_unlock(&delayed_refs->lock);
return 0;
}
#endif
......@@ -166,12 +166,6 @@ int btrfs_add_delayed_extent_op(struct btrfs_trans_handle *trans,
struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr);
int btrfs_delayed_ref_pending(struct btrfs_trans_handle *trans, u64 bytenr);
int btrfs_update_delayed_ref(struct btrfs_trans_handle *trans,
u64 bytenr, u64 num_bytes, u64 orig_parent,
u64 parent, u64 orig_ref_root, u64 ref_root,
u64 orig_ref_generation, u64 ref_generation,
u64 owner_objectid, int pin);
int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
struct btrfs_delayed_ref_head *head);
int btrfs_find_ref_cluster(struct btrfs_trans_handle *trans,
......
......@@ -50,7 +50,6 @@ static struct btrfs_dir_item *insert_with_overflow(struct btrfs_trans_handle
if (di)
return ERR_PTR(-EEXIST);
ret = btrfs_extend_item(trans, root, path, data_size);
WARN_ON(ret > 0);
}
if (ret < 0)
return ERR_PTR(ret);
......@@ -124,8 +123,9 @@ int btrfs_insert_xattr_item(struct btrfs_trans_handle *trans,
* to use for the second index (if one is created).
*/
int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, struct btrfs_root
*root, const char *name, int name_len, u64 dir,
struct btrfs_key *location, u8 type, u64 index)
*root, const char *name, int name_len,
struct inode *dir, struct btrfs_key *location,
u8 type, u64 index)
{
int ret = 0;
int ret2 = 0;
......@@ -137,13 +137,17 @@ int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, struct btrfs_root
struct btrfs_disk_key disk_key;
u32 data_size;
key.objectid = dir;
key.objectid = btrfs_ino(dir);
btrfs_set_key_type(&key, BTRFS_DIR_ITEM_KEY);
key.offset = btrfs_name_hash(name, name_len);
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
path->leave_spinning = 1;
btrfs_cpu_key_to_disk(&disk_key, location);
data_size = sizeof(*dir_item) + name_len;
dir_item = insert_with_overflow(trans, root, path, &key, data_size,
name, name_len);
......@@ -155,7 +159,6 @@ int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, struct btrfs_root
}
leaf = path->nodes[0];
btrfs_cpu_key_to_disk(&disk_key, location);
btrfs_set_dir_item_key(leaf, dir_item, &disk_key);
btrfs_set_dir_type(leaf, dir_item, type);
btrfs_set_dir_data_len(leaf, dir_item, 0);
......@@ -172,29 +175,11 @@ int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, struct btrfs_root
ret = 0;
goto out_free;
}
btrfs_release_path(root, path);
btrfs_set_key_type(&key, BTRFS_DIR_INDEX_KEY);
key.offset = index;
dir_item = insert_with_overflow(trans, root, path, &key, data_size,
name, name_len);
if (IS_ERR(dir_item)) {
ret2 = PTR_ERR(dir_item);
goto out_free;
}
leaf = path->nodes[0];
btrfs_cpu_key_to_disk(&disk_key, location);
btrfs_set_dir_item_key(leaf, dir_item, &disk_key);
btrfs_set_dir_type(leaf, dir_item, type);
btrfs_set_dir_data_len(leaf, dir_item, 0);
btrfs_set_dir_name_len(leaf, dir_item, name_len);
btrfs_set_dir_transid(leaf, dir_item, trans->transid);
name_ptr = (unsigned long)(dir_item + 1);
write_extent_buffer(leaf, name, name_ptr, name_len);
btrfs_mark_buffer_dirty(leaf);
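	/* the DIR_INDEX item is now queued as a delayed item rather than
	 * inserted into the btree right here */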
btrfs_release_path(path);
ret2 = btrfs_insert_delayed_dir_index(trans, root, name, name_len, dir,
&disk_key, type, index);
out_free:
btrfs_free_path(path);
if (ret)
return ret;
......@@ -452,7 +437,7 @@ int verify_dir_item(struct btrfs_root *root,
namelen = XATTR_NAME_MAX;
if (btrfs_dir_name_len(leaf, dir_item) > namelen) {
printk(KERN_CRIT "btrfS: invalid dir item name len: %u\n",
printk(KERN_CRIT "btrfs: invalid dir item name len: %u\n",
(unsigned)btrfs_dir_data_len(leaf, dir_item));
return 1;
}
......
This diff is collapsed.
......@@ -55,35 +55,20 @@ int btrfs_commit_super(struct btrfs_root *root);
int btrfs_error_commit_super(struct btrfs_root *root);
struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root,
u64 bytenr, u32 blocksize);
struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
u64 root_objectid);
struct btrfs_root *btrfs_read_fs_root(struct btrfs_fs_info *fs_info,
struct btrfs_key *location,
const char *name, int namelen);
struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_root *tree_root,
struct btrfs_key *location);
struct btrfs_root *btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info,
struct btrfs_key *location);
int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info);
int btrfs_insert_dev_radix(struct btrfs_root *root,
struct block_device *bdev,
u64 device_id,
u64 block_start,
u64 num_blocks);
void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr);
void __btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr);
int btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root);
void btrfs_mark_buffer_dirty(struct extent_buffer *buf);
void btrfs_mark_buffer_dirty_nonblocking(struct extent_buffer *buf);
int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid);
int btrfs_set_buffer_uptodate(struct extent_buffer *buf);
int wait_on_tree_block_writeback(struct btrfs_root *root,
struct extent_buffer *buf);
int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid);
u32 btrfs_csum_data(struct btrfs_root *root, char *data, u32 seed, size_t len);
void btrfs_csum_final(u32 crc, char *result);
int btrfs_open_device(struct btrfs_device *dev);
int btrfs_verify_block_csum(struct btrfs_root *root,
struct extent_buffer *buf);
int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
int metadata);
int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
......@@ -91,8 +76,6 @@ int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
unsigned long bio_flags, u64 bio_offset,
extent_submit_bio_hook_t *submit_bio_start,
extent_submit_bio_hook_t *submit_bio_done);
int btrfs_congested_async(struct btrfs_fs_info *info, int iodone);
unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info);
int btrfs_write_tree_block(struct extent_buffer *buf);
int btrfs_wait_tree_block_writeback(struct extent_buffer *buf);
......
......@@ -32,7 +32,7 @@ static int btrfs_encode_fh(struct dentry *dentry, u32 *fh, int *max_len,
len = BTRFS_FID_SIZE_NON_CONNECTABLE;
type = FILEID_BTRFS_WITHOUT_PARENT;
fid->objectid = inode->i_ino;
fid->objectid = btrfs_ino(inode);
fid->root_objectid = BTRFS_I(inode)->root->objectid;
fid->gen = inode->i_generation;
......@@ -178,13 +178,13 @@ static struct dentry *btrfs_get_parent(struct dentry *child)
if (!path)
return ERR_PTR(-ENOMEM);
if (dir->i_ino == BTRFS_FIRST_FREE_OBJECTID) {
if (btrfs_ino(dir) == BTRFS_FIRST_FREE_OBJECTID) {
key.objectid = root->root_key.objectid;
key.type = BTRFS_ROOT_BACKREF_KEY;
key.offset = (u64)-1;
root = root->fs_info->tree_root;
} else {
key.objectid = dir->i_ino;
key.objectid = btrfs_ino(dir);
key.type = BTRFS_INODE_REF_KEY;
key.offset = (u64)-1;
}
......@@ -244,6 +244,7 @@ static int btrfs_get_name(struct dentry *parent, char *name,
struct btrfs_key key;
int name_len;
int ret;
u64 ino;
if (!dir || !inode)
return -EINVAL;
......@@ -251,19 +252,21 @@ static int btrfs_get_name(struct dentry *parent, char *name,
if (!S_ISDIR(dir->i_mode))
return -EINVAL;
ino = btrfs_ino(inode);
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
path->leave_spinning = 1;
if (inode->i_ino == BTRFS_FIRST_FREE_OBJECTID) {
if (ino == BTRFS_FIRST_FREE_OBJECTID) {
key.objectid = BTRFS_I(inode)->root->root_key.objectid;
key.type = BTRFS_ROOT_BACKREF_KEY;
key.offset = (u64)-1;
root = root->fs_info->tree_root;
} else {
key.objectid = inode->i_ino;
key.offset = dir->i_ino;
key.objectid = ino;
key.offset = btrfs_ino(dir);
key.type = BTRFS_INODE_REF_KEY;
}
......@@ -272,7 +275,7 @@ static int btrfs_get_name(struct dentry *parent, char *name,
btrfs_free_path(path);
return ret;
} else if (ret > 0) {
if (inode->i_ino == BTRFS_FIRST_FREE_OBJECTID) {
if (ino == BTRFS_FIRST_FREE_OBJECTID) {
path->slots[0]--;
} else {
btrfs_free_path(path);
......@@ -281,7 +284,7 @@ static int btrfs_get_name(struct dentry *parent, char *name,
}
leaf = path->nodes[0];
if (inode->i_ino == BTRFS_FIRST_FREE_OBJECTID) {
if (ino == BTRFS_FIRST_FREE_OBJECTID) {
rref = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_root_ref);
name_ptr = (unsigned long)(rref + 1);
......
This diff is collapsed.
This diff is collapsed.
......@@ -153,23 +153,14 @@ static inline int extent_compress_type(unsigned long bio_flags)
struct extent_map_tree;
static inline struct extent_state *extent_state_next(struct extent_state *state)
{
struct rb_node *node;
node = rb_next(&state->rb_node);
if (!node)
return NULL;
return rb_entry(node, struct extent_state, rb_node);
}
typedef struct extent_map *(get_extent_t)(struct inode *inode,
struct page *page,
size_t page_offset,
size_t pg_offset,
u64 start, u64 len,
int create);
void extent_io_tree_init(struct extent_io_tree *tree,
struct address_space *mapping, gfp_t mask);
struct address_space *mapping);
int try_release_extent_mapping(struct extent_map_tree *map,
struct extent_io_tree *tree, struct page *page,
gfp_t mask);
......@@ -215,14 +206,8 @@ int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
gfp_t mask);
int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
gfp_t mask);
int clear_extent_ordered(struct extent_io_tree *tree, u64 start, u64 end,
gfp_t mask);
int clear_extent_ordered_metadata(struct extent_io_tree *tree, u64 start,
u64 end, gfp_t mask);
int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
struct extent_state **cached_state, gfp_t mask);
int set_extent_ordered(struct extent_io_tree *tree, u64 start, u64 end,
gfp_t mask);
int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
u64 *start_ret, u64 *end_ret, int bits);
struct extent_state *find_first_extent_bit_state(struct extent_io_tree *tree,
......@@ -243,28 +228,17 @@ int extent_readpages(struct extent_io_tree *tree,
struct address_space *mapping,
struct list_head *pages, unsigned nr_pages,
get_extent_t get_extent);
int extent_prepare_write(struct extent_io_tree *tree,
struct inode *inode, struct page *page,
unsigned from, unsigned to, get_extent_t *get_extent);
int extent_commit_write(struct extent_io_tree *tree,
struct inode *inode, struct page *page,
unsigned from, unsigned to);
sector_t extent_bmap(struct address_space *mapping, sector_t iblock,
get_extent_t *get_extent);
int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
__u64 start, __u64 len, get_extent_t *get_extent);
int set_range_dirty(struct extent_io_tree *tree, u64 start, u64 end);
int set_state_private(struct extent_io_tree *tree, u64 start, u64 private);
int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private);
void set_page_extent_mapped(struct page *page);
struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
u64 start, unsigned long len,
struct page *page0,
gfp_t mask);
struct page *page0);
struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
u64 start, unsigned long len,
gfp_t mask);
u64 start, unsigned long len);
void free_extent_buffer(struct extent_buffer *eb);
int read_extent_buffer_pages(struct extent_io_tree *tree,
struct extent_buffer *eb, u64 start, int wait,
......@@ -292,16 +266,11 @@ void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
unsigned long src_offset, unsigned long len);
void memset_extent_buffer(struct extent_buffer *eb, char c,
unsigned long start, unsigned long len);
int wait_on_extent_buffer_writeback(struct extent_io_tree *tree,
struct extent_buffer *eb);
int wait_on_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end);
int wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits);
int clear_extent_buffer_dirty(struct extent_io_tree *tree,
struct extent_buffer *eb);
int set_extent_buffer_dirty(struct extent_io_tree *tree,
struct extent_buffer *eb);
int test_extent_buffer_dirty(struct extent_io_tree *tree,
struct extent_buffer *eb);
int set_extent_buffer_uptodate(struct extent_io_tree *tree,
struct extent_buffer *eb);
int clear_extent_buffer_uptodate(struct extent_io_tree *tree,
......@@ -319,7 +288,6 @@ int map_private_extent_buffer(struct extent_buffer *eb, unsigned long offset,
unsigned long *map_start,
unsigned long *map_len, int km);
void unmap_extent_buffer(struct extent_buffer *eb, char *token, int km);
int release_extent_buffer_tail_pages(struct extent_buffer *eb);
int extent_range_uptodate(struct extent_io_tree *tree,
u64 start, u64 end);
int extent_clear_unlock_delalloc(struct inode *inode,
......
......@@ -28,12 +28,11 @@ void extent_map_exit(void)
/**
* extent_map_tree_init - initialize extent map tree
* @tree: tree to initialize
* @mask: flags for memory allocations during tree operations
*
* Initialize the extent tree @tree. Should be called for each new inode
* or other user of the extent_map interface.
*/
void extent_map_tree_init(struct extent_map_tree *tree, gfp_t mask)
void extent_map_tree_init(struct extent_map_tree *tree)
{
tree->map = RB_ROOT;
rwlock_init(&tree->lock);
......@@ -41,16 +40,15 @@ void extent_map_tree_init(struct extent_map_tree *tree, gfp_t mask)
/**
* alloc_extent_map - allocate new extent map structure
* @mask: memory allocation flags
*
* Allocate a new extent_map structure. The new structure is
* returned with a reference count of one and needs to be
* freed using free_extent_map()
*/
struct extent_map *alloc_extent_map(gfp_t mask)
struct extent_map *alloc_extent_map(void)
{
struct extent_map *em;
em = kmem_cache_alloc(extent_map_cache, mask);
em = kmem_cache_alloc(extent_map_cache, GFP_NOFS);
if (!em)
return NULL;
em->in_tree = 0;
......
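With the gfp_t argument gone, GFP_NOFS is implied and call sites shrink accordingly; a sketch:

	struct extent_map *em = alloc_extent_map();
	if (!em)
		return -ENOMEM;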
......@@ -49,14 +49,14 @@ static inline u64 extent_map_block_end(struct extent_map *em)
return em->block_start + em->block_len;
}
void extent_map_tree_init(struct extent_map_tree *tree, gfp_t mask);
void extent_map_tree_init(struct extent_map_tree *tree);
struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
u64 start, u64 len);
int add_extent_mapping(struct extent_map_tree *tree,
struct extent_map *em);
int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em);
struct extent_map *alloc_extent_map(gfp_t mask);
struct extent_map *alloc_extent_map(void);
void free_extent_map(struct extent_map *em);
int __init extent_map_init(void);
void extent_map_exit(void);
......
......@@ -193,7 +193,7 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
u32 item_size;
if (item)
btrfs_release_path(root, path);
btrfs_release_path(path);
item = btrfs_lookup_csum(NULL, root->fs_info->csum_root,
path, disk_bytenr, 0);
if (IS_ERR(item)) {
......@@ -208,12 +208,13 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
EXTENT_NODATASUM, GFP_NOFS);
} else {
printk(KERN_INFO "btrfs no csum found "
"for inode %lu start %llu\n",
inode->i_ino,
"for inode %llu start %llu\n",
(unsigned long long)
btrfs_ino(inode),
(unsigned long long)offset);
}
item = NULL;
btrfs_release_path(root, path);
btrfs_release_path(path);
goto found;
}
btrfs_item_key_to_cpu(path->nodes[0], &found_key,
......@@ -266,7 +267,7 @@ int btrfs_lookup_bio_sums_dio(struct btrfs_root *root, struct inode *inode,
}
int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
struct list_head *list)
struct list_head *list, int search_commit)
{
struct btrfs_key key;
struct btrfs_path *path;
......@@ -283,6 +284,12 @@ int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
path = btrfs_alloc_path();
BUG_ON(!path);
if (search_commit) {
path->skip_locking = 1;
path->reada = 2;
path->search_commit_root = 1;
}
key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
key.offset = start;
key.type = BTRFS_EXTENT_CSUM_KEY;
......@@ -495,7 +502,6 @@ static noinline int truncate_one_csum(struct btrfs_trans_handle *trans,
u32 new_size = (bytenr - key->offset) >> blocksize_bits;
new_size *= csum_size;
ret = btrfs_truncate_item(trans, root, path, new_size, 1);
BUG_ON(ret);
} else if (key->offset >= bytenr && csum_end > end_byte &&
end_byte > key->offset) {
/*
......@@ -508,7 +514,6 @@ static noinline int truncate_one_csum(struct btrfs_trans_handle *trans,
new_size *= csum_size;
ret = btrfs_truncate_item(trans, root, path, new_size, 0);
BUG_ON(ret);
key->offset = end_byte;
ret = btrfs_set_item_key_safe(trans, root, path, key);
......@@ -551,10 +556,10 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
if (ret > 0) {
if (path->slots[0] == 0)
goto out;
break;
path->slots[0]--;
} else if (ret < 0) {
goto out;
break;
}
leaf = path->nodes[0];
......@@ -579,7 +584,8 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
/* delete the entire item, it is inside our range */
if (key.offset >= bytenr && csum_end <= end_byte) {
ret = btrfs_del_item(trans, root, path);
BUG_ON(ret);
if (ret)
goto out;
if (key.offset == bytenr)
break;
} else if (key.offset < bytenr && csum_end > end_byte) {
......@@ -631,11 +637,12 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
if (key.offset < bytenr)
break;
}
btrfs_release_path(root, path);
btrfs_release_path(path);
}
ret = 0;
out:
btrfs_free_path(path);
return 0;
return ret;
}
int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
......@@ -722,7 +729,7 @@ int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
* at this point, we know the tree has an item, but it isn't big
* enough yet to put our csum in. Grow it
*/
btrfs_release_path(root, path);
btrfs_release_path(path);
ret = btrfs_search_slot(trans, root, &file_key, path,
csum_size, 1);
if (ret < 0)
......@@ -761,12 +768,11 @@ int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
goto insert;
ret = btrfs_extend_item(trans, root, path, diff);
BUG_ON(ret);
goto csum;
}
insert:
btrfs_release_path(root, path);
btrfs_release_path(path);
csum_offset = 0;
if (found_next) {
u64 tmp = total_bytes + root->sectorsize;
......@@ -850,7 +856,7 @@ int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
}
btrfs_mark_buffer_dirty(path->nodes[0]);
if (total_bytes < sums->len) {
btrfs_release_path(root, path);
btrfs_release_path(path);
cond_resched();
goto again;
}
......
This diff is collapsed.
This diff is collapsed.
......@@ -27,6 +27,25 @@ struct btrfs_free_space {
struct list_head list;
};
struct btrfs_free_space_ctl {
spinlock_t tree_lock;
struct rb_root free_space_offset;
u64 free_space;
int extents_thresh;
int free_extents;
int total_bitmaps;
int unit;
u64 start;
struct btrfs_free_space_op *op;
void *private;
};
struct btrfs_free_space_op {
void (*recalc_thresholds)(struct btrfs_free_space_ctl *ctl);
bool (*use_bitmap)(struct btrfs_free_space_ctl *ctl,
struct btrfs_free_space *info);
};
struct inode *lookup_free_space_inode(struct btrfs_root *root,
struct btrfs_block_group_cache
*block_group, struct btrfs_path *path);
......@@ -45,17 +64,38 @@ int btrfs_write_out_cache(struct btrfs_root *root,
struct btrfs_trans_handle *trans,
struct btrfs_block_group_cache *block_group,
struct btrfs_path *path);
int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
struct inode *lookup_free_ino_inode(struct btrfs_root *root,
struct btrfs_path *path);
int create_free_ino_inode(struct btrfs_root *root,
struct btrfs_trans_handle *trans,
struct btrfs_path *path);
int load_free_ino_cache(struct btrfs_fs_info *fs_info,
struct btrfs_root *root);
int btrfs_write_out_ino_cache(struct btrfs_root *root,
struct btrfs_trans_handle *trans,
struct btrfs_path *path);
void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group);
int __btrfs_add_free_space(struct btrfs_free_space_ctl *ctl,
u64 bytenr, u64 size);
static inline int
btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
u64 bytenr, u64 size)
{
return __btrfs_add_free_space(block_group->free_space_ctl,
bytenr, size);
}
int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
u64 bytenr, u64 size);
void __btrfs_remove_free_space_cache(struct btrfs_free_space_ctl *ctl);
void btrfs_remove_free_space_cache(struct btrfs_block_group_cache
*block_group);
u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
u64 offset, u64 bytes, u64 empty_size);
u64 btrfs_find_ino_for_alloc(struct btrfs_root *fs_root);
void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
u64 bytes);
u64 btrfs_block_group_free_space(struct btrfs_block_group_cache *block_group);
int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_block_group_cache *block_group,
......
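The btrfs_free_space_op table introduced above is the extension point that lets the block-group cache and the new inode cache share this code. A hedged sketch of what a user might supply (all names below are hypothetical; the real tables live in free-space-cache.c and inode-map.c):

static void example_recalc_thresholds(struct btrfs_free_space_ctl *ctl)
{
	/* e.g. allow more plain extents before switching to bitmaps */
	ctl->extents_thresh = 1024;
}

static bool example_use_bitmap(struct btrfs_free_space_ctl *ctl,
			       struct btrfs_free_space *info)
{
	/* prefer a bitmap once the extent count crosses the threshold */
	return ctl->free_extents >= ctl->extents_thresh;
}

static struct btrfs_free_space_op example_free_space_op = {
	.recalc_thresholds	= example_recalc_thresholds,
	.use_bitmap		= example_use_bitmap,
};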
......@@ -130,7 +130,6 @@ int btrfs_del_inode_ref(struct btrfs_trans_handle *trans,
item_size - (ptr + sub_item_len - item_start));
ret = btrfs_truncate_item(trans, root, path,
item_size - sub_item_len, 1);
BUG_ON(ret);
out:
btrfs_free_path(path);
return ret;
......@@ -167,7 +166,6 @@ int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans,
old_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);
ret = btrfs_extend_item(trans, root, path, ins_len);
BUG_ON(ret);
ref = btrfs_item_ptr(path->nodes[0], path->slots[0],
struct btrfs_inode_ref);
ref = (struct btrfs_inode_ref *)((unsigned long)ref + old_size);
......
This diff is collapsed.
#ifndef __BTRFS_INODE_MAP
#define __BTRFS_INODE_MAP
void btrfs_init_free_ino_ctl(struct btrfs_root *root);
void btrfs_unpin_free_ino(struct btrfs_root *root);
void btrfs_return_ino(struct btrfs_root *root, u64 objectid);
int btrfs_find_free_ino(struct btrfs_root *root, u64 *objectid);
int btrfs_save_ino_cache(struct btrfs_root *root,
struct btrfs_trans_handle *trans);
int btrfs_find_free_objectid(struct btrfs_root *root, u64 *objectid);
#endif
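A sketch of how these helpers pair up (hypothetical caller, error handling trimmed; example_make_inode is a stand-in for the real inode creation path):

static int example_create(struct btrfs_root *root)
{
	u64 objectid;
	int ret;

	ret = btrfs_find_free_ino(root, &objectid);
	if (ret)
		return ret;

	ret = example_make_inode(root, objectid);	/* hypothetical */
	if (ret)
		/* hand the number back so the cache can reuse it */
		btrfs_return_ino(root, objectid);
	return ret;
}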
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
......@@ -185,31 +185,6 @@ int btrfs_tree_lock(struct extent_buffer *eb)
return 0;
}
/*
* Very quick trylock; this does not spin or schedule. It returns
* 1 with the spinlock held if it was able to take the lock, or it
* returns zero if it was unable to take the lock.
*
* After this call, scheduling is not safe without first calling
* btrfs_set_lock_blocking()
*/
int btrfs_try_tree_lock(struct extent_buffer *eb)
{
if (spin_trylock(&eb->lock)) {
if (test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags)) {
/*
* we've got the spinlock, but the real owner is
* blocking. Drop the spinlock and return failure
*/
spin_unlock(&eb->lock);
return 0;
}
return 1;
}
/* someone else has the spinlock; give up */
return 0;
}
int btrfs_tree_unlock(struct extent_buffer *eb)
{
/*
......
......@@ -21,8 +21,6 @@
int btrfs_tree_lock(struct extent_buffer *eb);
int btrfs_tree_unlock(struct extent_buffer *eb);
int btrfs_try_tree_lock(struct extent_buffer *eb);
int btrfs_try_spin_lock(struct extent_buffer *eb);
void btrfs_set_lock_blocking(struct extent_buffer *eb);
......
This diff is collapsed.
......@@ -49,28 +49,4 @@ static inline size_t btrfs_leaf_ref_size(int nr_extents)
return sizeof(struct btrfs_leaf_ref) +
sizeof(struct btrfs_extent_info) * nr_extents;
}
static inline void btrfs_leaf_ref_tree_init(struct btrfs_leaf_ref_tree *tree)
{
tree->root = RB_ROOT;
INIT_LIST_HEAD(&tree->list);
spin_lock_init(&tree->lock);
}
static inline int btrfs_leaf_ref_tree_empty(struct btrfs_leaf_ref_tree *tree)
{
return RB_EMPTY_ROOT(&tree->root);
}
void btrfs_leaf_ref_tree_init(struct btrfs_leaf_ref_tree *tree);
struct btrfs_leaf_ref *btrfs_alloc_leaf_ref(struct btrfs_root *root,
int nr_extents);
void btrfs_free_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref);
struct btrfs_leaf_ref *btrfs_lookup_leaf_ref(struct btrfs_root *root,
u64 bytenr);
int btrfs_add_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref,
int shared);
int btrfs_remove_leaf_refs(struct btrfs_root *root, u64 max_root_gen,
int shared);
int btrfs_remove_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref);
#endif
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
......@@ -101,11 +101,8 @@ struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *r,
int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid);
int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
int btrfs_commit_tree_roots(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
int btrfs_add_dead_root(struct btrfs_root *root);
int btrfs_drop_dead_root(struct btrfs_root *root);
int btrfs_defrag_root(struct btrfs_root *root, int cacheonly);
int btrfs_clean_old_snapshots(struct btrfs_root *root);
int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
......@@ -115,6 +112,8 @@ int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
int wait_for_unblock);
int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
int btrfs_end_transaction_dmeta(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
int btrfs_should_end_transaction(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
void btrfs_throttle(struct btrfs_root *root);
......
......@@ -97,7 +97,7 @@ int btrfs_defrag_leaves(struct btrfs_trans_handle *trans,
ret = 0;
goto out;
}
btrfs_release_path(root, path);
btrfs_release_path(path);
wret = btrfs_search_slot(trans, root, &key, path, 0, 1);
if (wret < 0) {
......
This diff is collapsed.
......@@ -38,7 +38,6 @@ int btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
const char *name, int name_len,
struct inode *inode, u64 dirid);
int btrfs_join_running_log_trans(struct btrfs_root *root);
int btrfs_end_log_trans(struct btrfs_root *root);
int btrfs_pin_log_trans(struct btrfs_root *root);
int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
......
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.