Commit 44148a66 authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-2.6-block

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-2.6-block:
  ide: always ensure that blk_delay_queue() is called if we have pending IO
  block: fix request sorting at unplug
  dm: improve block integrity support
  fs: export empty_aops
  ide: ide_requeue_and_plug() reinstate "always plug" behaviour
  blk-throttle: don't call xchg on bool
  ufs: remove unessecary blk_flush_plug
  block: make the flush insertion use the tail of the dispatch list
  block: get rid of elv_insert() interface
  block: dump request state on seeing a corrupted request completion
parents d0de4dc5 782b86e2
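
One of the one-line fixes above ("block: fix request sorting at unplug") changes the comparator passed to list_sort() from !(rqa->q == rqb->q) to !(rqa->q <= rqb->q), as shown in the blk-core.c hunk below. The following is an editor's illustration, not part of the commit: a minimal userspace sketch of the reasoning, in which struct req and its q field are stand-ins for struct request and rq->q. list_sort() treats a positive return value as "a sorts after b", so the comparator must describe a consistent ordering of the queue pointers.

/* Editor's sketch, not kernel code. */
#include <stdio.h>

struct req { unsigned long q; };	/* stand-in for rq->q */

/* contract: return non-zero if a should sort after b */
static int cmp_eq(const struct req *a, const struct req *b) { return !(a->q == b->q); }
static int cmp_le(const struct req *a, const struct req *b) { return !(a->q <= b->q); }

int main(void)
{
	struct req a = { .q = 1 }, b = { .q = 2 };

	/* With "==", both directions claim "sorts after": not a valid
	 * ordering, so same-queue requests are not reliably grouped. */
	printf("eq: cmp(a,b)=%d cmp(b,a)=%d\n", cmp_eq(&a, &b), cmp_eq(&b, &a));

	/* With "<=", exactly one direction sorts after the other, so the
	 * plug list ends up ordered by queue with same-queue requests adjacent. */
	printf("le: cmp(a,b)=%d cmp(b,a)=%d\n", cmp_le(&a, &b), cmp_le(&b, &a));
	return 0;
}
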
@@ -2163,7 +2163,7 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
 	 * size, something has gone terribly wrong.
 	 */
 	if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) {
-		printk(KERN_ERR "blk: request botched\n");
+		blk_dump_rq_flags(req, "request botched");
 		req->__data_len = blk_rq_cur_bytes(req);
 	}
@@ -2665,7 +2665,7 @@ static int plug_rq_cmp(void *priv, struct list_head *a, struct list_head *b)
 	struct request *rqa = container_of(a, struct request, queuelist);
 	struct request *rqb = container_of(b, struct request, queuelist);
 
-	return !(rqa->q == rqb->q);
+	return !(rqa->q <= rqb->q);
 }
 
 static void flush_plug_list(struct blk_plug *plug)
......
@@ -261,7 +261,7 @@ static bool blk_kick_flush(struct request_queue *q)
 	q->flush_rq.end_io = flush_end_io;
 
 	q->flush_pending_idx ^= 1;
-	elv_insert(q, &q->flush_rq, ELEVATOR_INSERT_REQUEUE);
+	list_add_tail(&q->flush_rq.queuelist, &q->queue_head);
 	return true;
 }
@@ -281,7 +281,7 @@ static void flush_data_end_io(struct request *rq, int error)
  * blk_insert_flush - insert a new FLUSH/FUA request
  * @rq: request to insert
  *
- * To be called from elv_insert() for %ELEVATOR_INSERT_FLUSH insertions.
+ * To be called from __elv_add_request() for %ELEVATOR_INSERT_FLUSH insertions.
  * @rq is being submitted. Analyze what needs to be done and put it on the
 * right queue.
 *
@@ -312,7 +312,7 @@ void blk_insert_flush(struct request *rq)
 	 */
 	if ((policy & REQ_FSEQ_DATA) &&
 	    !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
-		list_add(&rq->queuelist, &q->queue_head);
+		list_add_tail(&rq->queuelist, &q->queue_head);
 		return;
 	}
......
@@ -30,6 +30,8 @@
 
 static struct kmem_cache *integrity_cachep;
 
+static const char *bi_unsupported_name = "unsupported";
+
 /**
  * blk_rq_count_integrity_sg - Count number of integrity scatterlist elements
  * @q: request queue
@@ -358,6 +360,14 @@ static struct kobj_type integrity_ktype = {
 	.release = blk_integrity_release,
 };
 
+bool blk_integrity_is_initialized(struct gendisk *disk)
+{
+	struct blk_integrity *bi = blk_get_integrity(disk);
+
+	return (bi && bi->name && strcmp(bi->name, bi_unsupported_name) != 0);
+}
+EXPORT_SYMBOL(blk_integrity_is_initialized);
+
 /**
  * blk_integrity_register - Register a gendisk as being integrity-capable
  * @disk: struct gendisk pointer to make integrity-aware
@@ -407,7 +417,7 @@ int blk_integrity_register(struct gendisk *disk, struct blk_integrity *template)
 		bi->get_tag_fn = template->get_tag_fn;
 		bi->tag_size = template->tag_size;
 	} else
-		bi->name = "unsupported";
+		bi->name = bi_unsupported_name;
 
 	return 0;
 }
......
@@ -77,7 +77,7 @@ struct throtl_grp {
 	unsigned long slice_end[2];
 
 	/* Some throttle limits got updated for the group */
-	bool limits_changed;
+	int limits_changed;
 };
 
 struct throtl_data
@@ -102,7 +102,7 @@ struct throtl_data
 	/* Work for dispatching throttled bios */
 	struct delayed_work throtl_work;
 
-	bool limits_changed;
+	int limits_changed;
 };
 
 enum tg_state_flags {
......
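
The blk-throttle hunk above ("blk-throttle: don't call xchg on bool") widens limits_changed from bool to int because an xchg() on a single-byte operand is not supported on every architecture. As an editor's illustration only, here is a rough userspace analogue of the read-and-clear pattern the throttle code performs on td->limits_changed, sketched with a GCC/Clang __atomic builtin rather than the kernel's xchg() macro:

#include <stdio.h>

int main(void)
{
	/* int rather than bool: a word-sized operand is safe for exchange
	 * operations everywhere, which is the point of the change above. */
	int limits_changed = 1;

	/* atomically fetch the old value and clear the flag in one step */
	int was_changed = __atomic_exchange_n(&limits_changed, 0, __ATOMIC_SEQ_CST);

	printf("was_changed=%d, now=%d\n", was_changed, limits_changed);
	return 0;
}
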
@@ -610,7 +610,7 @@ void elv_requeue_request(struct request_queue *q, struct request *rq)
 
 	rq->cmd_flags &= ~REQ_STARTED;
 
-	elv_insert(q, rq, ELEVATOR_INSERT_REQUEUE);
+	__elv_add_request(q, rq, ELEVATOR_INSERT_REQUEUE);
 }
 
 void elv_drain_elevator(struct request_queue *q)
@@ -655,12 +655,25 @@ void elv_quiesce_end(struct request_queue *q)
 	queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q);
 }
 
-void elv_insert(struct request_queue *q, struct request *rq, int where)
+void __elv_add_request(struct request_queue *q, struct request *rq, int where)
 {
 	trace_block_rq_insert(q, rq);
 
 	rq->q = q;
 
+	BUG_ON(rq->cmd_flags & REQ_ON_PLUG);
+
+	if (rq->cmd_flags & REQ_SOFTBARRIER) {
+		/* barriers are scheduling boundary, update end_sector */
+		if (rq->cmd_type == REQ_TYPE_FS ||
+		    (rq->cmd_flags & REQ_DISCARD)) {
+			q->end_sector = rq_end_sector(rq);
+			q->boundary_rq = rq;
+		}
+	} else if (!(rq->cmd_flags & REQ_ELVPRIV) &&
+		    where == ELEVATOR_INSERT_SORT)
+		where = ELEVATOR_INSERT_BACK;
+
 	switch (where) {
 	case ELEVATOR_INSERT_REQUEUE:
 	case ELEVATOR_INSERT_FRONT:
@@ -722,24 +735,6 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
 		BUG();
 	}
 }
-
-void __elv_add_request(struct request_queue *q, struct request *rq, int where)
-{
-	BUG_ON(rq->cmd_flags & REQ_ON_PLUG);
-
-	if (rq->cmd_flags & REQ_SOFTBARRIER) {
-		/* barriers are scheduling boundary, update end_sector */
-		if (rq->cmd_type == REQ_TYPE_FS ||
-		    (rq->cmd_flags & REQ_DISCARD)) {
-			q->end_sector = rq_end_sector(rq);
-			q->boundary_rq = rq;
-		}
-	} else if (!(rq->cmd_flags & REQ_ELVPRIV) &&
-		    where == ELEVATOR_INSERT_SORT)
-		where = ELEVATOR_INSERT_BACK;
-
-	elv_insert(q, rq, where);
-}
 EXPORT_SYMBOL(__elv_add_request);
 
 void elv_add_request(struct request_queue *q, struct request *rq, int where)
......
@@ -430,6 +430,26 @@ static inline void ide_unlock_host(struct ide_host *host)
 	}
 }
 
+static void __ide_requeue_and_plug(struct request_queue *q, struct request *rq)
+{
+	if (rq)
+		blk_requeue_request(q, rq);
+	if (rq || blk_peek_request(q)) {
+		/* Use 3ms as that was the old plug delay */
+		blk_delay_queue(q, 3);
+	}
+}
+
+void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq)
+{
+	struct request_queue *q = drive->queue;
+	unsigned long flags;
+
+	spin_lock_irqsave(q->queue_lock, flags);
+	__ide_requeue_and_plug(q, rq);
+	spin_unlock_irqrestore(q->queue_lock, flags);
+}
+
 /*
  * Issue a new request to a device.
 */
@@ -550,28 +570,7 @@ void do_ide_request(struct request_queue *q)
 	ide_unlock_host(host);
 plug_device_2:
 	spin_lock_irq(q->queue_lock);
-
-	if (rq) {
-		blk_requeue_request(q, rq);
-		blk_delay_queue(q, queue_run_ms);
-	}
-}
-
-void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq)
-{
-	struct request_queue *q = drive->queue;
-	unsigned long flags;
-
-	spin_lock_irqsave(q->queue_lock, flags);
-	if (rq)
-		blk_requeue_request(q, rq);
-	spin_unlock_irqrestore(q->queue_lock, flags);
-
-	/* Use 3ms as that was the old plug delay */
-	if (rq)
-		blk_delay_queue(q, 3);
+	__ide_requeue_and_plug(q, rq);
 }
 
 static int drive_is_ready(ide_drive_t *drive)
......
@@ -926,21 +926,81 @@ static int dm_table_build_index(struct dm_table *t)
 	return r;
 }
 
+/*
+ * Get a disk whose integrity profile reflects the table's profile.
+ * If %match_all is true, all devices' profiles must match.
+ * If %match_all is false, all devices must at least have an
+ * allocated integrity profile; but uninitialized is ok.
+ * Returns NULL if integrity support was inconsistent or unavailable.
+ */
+static struct gendisk * dm_table_get_integrity_disk(struct dm_table *t,
+						    bool match_all)
+{
+	struct list_head *devices = dm_table_get_devices(t);
+	struct dm_dev_internal *dd = NULL;
+	struct gendisk *prev_disk = NULL, *template_disk = NULL;
+
+	list_for_each_entry(dd, devices, list) {
+		template_disk = dd->dm_dev.bdev->bd_disk;
+		if (!blk_get_integrity(template_disk))
+			goto no_integrity;
+		if (!match_all && !blk_integrity_is_initialized(template_disk))
+			continue; /* skip uninitialized profiles */
+		else if (prev_disk &&
+			 blk_integrity_compare(prev_disk, template_disk) < 0)
+			goto no_integrity;
+		prev_disk = template_disk;
+	}
+
+	return template_disk;
+
+no_integrity:
+	if (prev_disk)
+		DMWARN("%s: integrity not set: %s and %s profile mismatch",
+		       dm_device_name(t->md),
+		       prev_disk->disk_name,
+		       template_disk->disk_name);
+	return NULL;
+}
+
 /*
  * Register the mapped device for blk_integrity support if
- * the underlying devices support it.
+ * the underlying devices have an integrity profile. But all devices
+ * may not have matching profiles (checking all devices isn't reliable
+ * during table load because this table may use other DM device(s) which
+ * must be resumed before they will have an initialized integity profile).
+ * Stacked DM devices force a 2 stage integrity profile validation:
+ * 1 - during load, validate all initialized integrity profiles match
+ * 2 - during resume, validate all integrity profiles match
 */
 static int dm_table_prealloc_integrity(struct dm_table *t, struct mapped_device *md)
 {
-	struct list_head *devices = dm_table_get_devices(t);
-	struct dm_dev_internal *dd;
+	struct gendisk *template_disk = NULL;
 
-	list_for_each_entry(dd, devices, list)
-		if (bdev_get_integrity(dd->dm_dev.bdev)) {
-			t->integrity_supported = 1;
-			return blk_integrity_register(dm_disk(md), NULL);
-		}
+	template_disk = dm_table_get_integrity_disk(t, false);
+	if (!template_disk)
+		return 0;
 
+	if (!blk_integrity_is_initialized(dm_disk(md))) {
+		t->integrity_supported = 1;
+		return blk_integrity_register(dm_disk(md), NULL);
+	}
+
+	/*
+	 * If DM device already has an initalized integrity
+	 * profile the new profile should not conflict.
+	 */
+	if (blk_integrity_is_initialized(template_disk) &&
+	    blk_integrity_compare(dm_disk(md), template_disk) < 0) {
+		DMWARN("%s: conflict with existing integrity profile: "
+		       "%s profile mismatch",
+		       dm_device_name(t->md),
+		       template_disk->disk_name);
+		return 1;
+	}
+
+	/* Preserve existing initialized integrity profile */
+	t->integrity_supported = 1;
 	return 0;
 }
@@ -1094,41 +1154,27 @@ int dm_calculate_queue_limits(struct dm_table *table,
 /*
  * Set the integrity profile for this device if all devices used have
- * matching profiles.
+ * matching profiles. We're quite deep in the resume path but still
+ * don't know if all devices (particularly DM devices this device
+ * may be stacked on) have matching profiles. Even if the profiles
+ * don't match we have no way to fail (to resume) at this point.
 */
 static void dm_table_set_integrity(struct dm_table *t)
 {
-	struct list_head *devices = dm_table_get_devices(t);
-	struct dm_dev_internal *prev = NULL, *dd = NULL;
+	struct gendisk *template_disk = NULL;
 
 	if (!blk_get_integrity(dm_disk(t->md)))
 		return;
 
-	list_for_each_entry(dd, devices, list) {
-		if (prev &&
-		    blk_integrity_compare(prev->dm_dev.bdev->bd_disk,
-					  dd->dm_dev.bdev->bd_disk) < 0) {
-			DMWARN("%s: integrity not set: %s and %s mismatch",
-			       dm_device_name(t->md),
-			       prev->dm_dev.bdev->bd_disk->disk_name,
-			       dd->dm_dev.bdev->bd_disk->disk_name);
-			goto no_integrity;
-		}
-		prev = dd;
-	}
-
-	if (!prev || !bdev_get_integrity(prev->dm_dev.bdev))
-		goto no_integrity;
-
+	template_disk = dm_table_get_integrity_disk(t, true);
+	if (!template_disk &&
+	    blk_integrity_is_initialized(dm_disk(t->md))) {
+		DMWARN("%s: device no longer has a valid integrity profile",
+		       dm_device_name(t->md));
+		return;
+	}
 	blk_integrity_register(dm_disk(t->md),
-			       bdev_get_integrity(prev->dm_dev.bdev));
-
-	return;
-
-no_integrity:
-	blk_integrity_register(dm_disk(t->md), NULL);
-	return;
+			       blk_get_integrity(template_disk));
 }
 
 void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
......
@@ -124,6 +124,14 @@ __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_wb_list_lock);
 */
 static DECLARE_RWSEM(iprune_sem);
 
+/*
+ * Empty aops. Can be used for the cases where the user does not
+ * define any of the address_space operations.
+ */
+const struct address_space_operations empty_aops = {
+};
+EXPORT_SYMBOL(empty_aops);
+
 /*
 * Statistics gathering..
 */
@@ -176,7 +184,6 @@ int proc_nr_inodes(ctl_table *table, int write,
 */
 int inode_init_always(struct super_block *sb, struct inode *inode)
 {
-	static const struct address_space_operations empty_aops;
 	static const struct inode_operations empty_iops;
 	static const struct file_operations empty_fops;
 	struct address_space *const mapping = &inode->i_data;
......
@@ -495,8 +495,6 @@ unsigned nilfs_page_count_clean_buffers(struct page *page,
 void nilfs_mapping_init(struct address_space *mapping,
 			struct backing_dev_info *bdi)
 {
-	static const struct address_space_operations empty_aops;
-
 	mapping->host = NULL;
 	mapping->flags = 0;
 	mapping_set_gfp_mask(mapping, GFP_NOFS);
......
@@ -56,6 +56,7 @@
 */
 
 #include "ubifs.h"
+#include <linux/fs.h>
 #include <linux/slab.h>
 #include <linux/xattr.h>
 #include <linux/posix_acl_xattr.h>
@@ -80,7 +81,6 @@ enum {
 };
 
 static const struct inode_operations none_inode_operations;
-static const struct address_space_operations none_address_operations;
 static const struct file_operations none_file_operations;
 
 /**
@@ -130,7 +130,7 @@ static int create_xattr(struct ubifs_info *c, struct inode *host,
 	}
 
 	/* Re-define all operations to be "nothing" */
-	inode->i_mapping->a_ops = &none_address_operations;
+	inode->i_mapping->a_ops = &empty_aops;
 	inode->i_op = &none_inode_operations;
 	inode->i_fop = &none_file_operations;
......
@@ -479,7 +479,6 @@ int ufs_truncate(struct inode *inode, loff_t old_i_size)
 			break;
 		if (IS_SYNC(inode) && (inode->i_state & I_DIRTY))
 			ufs_sync_inode (inode);
-		blk_flush_plug(current);
 		yield();
 	}
......
@@ -1206,6 +1206,7 @@ struct blk_integrity {
 	struct kobject kobj;
 };
 
+extern bool blk_integrity_is_initialized(struct gendisk *);
 extern int blk_integrity_register(struct gendisk *, struct blk_integrity *);
 extern void blk_integrity_unregister(struct gendisk *);
 extern int blk_integrity_compare(struct gendisk *, struct gendisk *);
@@ -1262,6 +1263,7 @@ queue_max_integrity_segments(struct request_queue *q)
 #define queue_max_integrity_segments(a)	(0)
 #define blk_integrity_merge_rq(a, b, c)	(0)
 #define blk_integrity_merge_bio(a, b, c)	(0)
+#define blk_integrity_is_initialized(a)	(0)
 
 #endif	/* CONFIG_BLK_DEV_INTEGRITY */
......
@@ -101,7 +101,6 @@ extern void elv_dispatch_sort(struct request_queue *, struct request *);
 extern void elv_dispatch_add_tail(struct request_queue *, struct request *);
 extern void elv_add_request(struct request_queue *, struct request *, int);
 extern void __elv_add_request(struct request_queue *, struct request *, int);
-extern void elv_insert(struct request_queue *, struct request *, int);
 extern int elv_merge(struct request_queue *, struct request **, struct bio *);
 extern int elv_try_merge(struct request *, struct bio *);
 extern void elv_merge_requests(struct request_queue *, struct request *,
......
@@ -613,6 +613,8 @@ struct address_space_operations {
 	int (*error_remove_page)(struct address_space *, struct page *);
 };
 
+extern const struct address_space_operations empty_aops;
+
 /*
 * pagecache_write_begin/pagecache_write_end must be used by general code
 * to write into the pagecache.
......