Commit 8d01eddf authored by Linus Torvalds

Merge branch 'for-2.6.25' of git://git.kernel.dk/linux-2.6-block

* 'for-2.6.25' of git://git.kernel.dk/linux-2.6-block:
  block: implement drain buffers
  __bio_clone: don't calculate hw/phys segment counts
  block: allow queue dma_alignment of zero
  blktrace: Add blktrace ioctls to SCSI generic devices
parents f0f00520 fa0ccd83
@@ -235,7 +235,7 @@ static void blk_trace_cleanup(struct blk_trace *bt)
 	kfree(bt);
 }
 
-static int blk_trace_remove(struct request_queue *q)
+int blk_trace_remove(struct request_queue *q)
 {
 	struct blk_trace *bt;
@@ -249,6 +249,7 @@ static int blk_trace_remove(struct request_queue *q)
 	return 0;
 }
+EXPORT_SYMBOL_GPL(blk_trace_remove);
 
 static int blk_dropped_open(struct inode *inode, struct file *filp)
 {
@@ -316,18 +317,17 @@ static struct rchan_callbacks blk_relay_callbacks = {
 /*
  * Setup everything required to start tracing
  */
-int do_blk_trace_setup(struct request_queue *q, struct block_device *bdev,
+int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
 			struct blk_user_trace_setup *buts)
 {
 	struct blk_trace *old_bt, *bt = NULL;
 	struct dentry *dir = NULL;
-	char b[BDEVNAME_SIZE];
 	int ret, i;
 
 	if (!buts->buf_size || !buts->buf_nr)
 		return -EINVAL;
 
-	strcpy(buts->name, bdevname(bdev, b));
+	strcpy(buts->name, name);
 
 	/*
 	 * some device names have larger paths - convert the slashes
@@ -352,7 +352,7 @@ int do_blk_trace_setup(struct request_queue *q, struct block_device *bdev,
 		goto err;
 
 	bt->dir = dir;
-	bt->dev = bdev->bd_dev;
+	bt->dev = dev;
 	atomic_set(&bt->dropped, 0);
 
 	ret = -EIO;
@@ -399,7 +399,7 @@ int do_blk_trace_setup(struct request_queue *q, struct block_device *bdev,
 	return ret;
 }
 
-static int blk_trace_setup(struct request_queue *q, struct block_device *bdev,
+int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
 			   char __user *arg)
 {
 	struct blk_user_trace_setup buts;
@@ -409,7 +409,7 @@ static int blk_trace_setup(struct request_queue *q, struct block_device *bdev,
 	if (ret)
 		return -EFAULT;
 
-	ret = do_blk_trace_setup(q, bdev, &buts);
+	ret = do_blk_trace_setup(q, name, dev, &buts);
 	if (ret)
 		return ret;
@@ -418,8 +418,9 @@ static int blk_trace_setup(struct request_queue *q, struct block_device *bdev,
 	return 0;
 }
+EXPORT_SYMBOL_GPL(blk_trace_setup);
 
-static int blk_trace_startstop(struct request_queue *q, int start)
+int blk_trace_startstop(struct request_queue *q, int start)
 {
 	struct blk_trace *bt;
 	int ret;
@@ -452,6 +453,7 @@ static int blk_trace_startstop(struct request_queue *q, int start)
 	return ret;
 }
+EXPORT_SYMBOL_GPL(blk_trace_startstop);
 
 /**
  * blk_trace_ioctl: - handle the ioctls associated with tracing
@@ -464,6 +466,7 @@ int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
 {
 	struct request_queue *q;
 	int ret, start = 0;
+	char b[BDEVNAME_SIZE];
 
 	q = bdev_get_queue(bdev);
 	if (!q)
@@ -473,7 +476,8 @@ int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
 	switch (cmd) {
 	case BLKTRACESETUP:
-		ret = blk_trace_setup(q, bdev, arg);
+		strcpy(b, bdevname(bdev, b));
+		ret = blk_trace_setup(q, b, bdev->bd_dev, arg);
 		break;
 	case BLKTRACESTART:
 		start = 1;
...
@@ -545,6 +545,7 @@ static int compat_blk_trace_setup(struct block_device *bdev, char __user *arg)
 	struct blk_user_trace_setup buts;
 	struct compat_blk_user_trace_setup cbuts;
 	struct request_queue *q;
+	char b[BDEVNAME_SIZE];
 	int ret;
 
 	q = bdev_get_queue(bdev);
@@ -554,6 +555,8 @@ static int compat_blk_trace_setup(struct block_device *bdev, char __user *arg)
 	if (copy_from_user(&cbuts, arg, sizeof(cbuts)))
 		return -EFAULT;
 
+	strcpy(b, bdevname(bdev, b));
+
 	buts = (struct blk_user_trace_setup) {
 		.act_mask = cbuts.act_mask,
 		.buf_size = cbuts.buf_size,
@@ -565,7 +568,7 @@ static int compat_blk_trace_setup(struct block_device *bdev, char __user *arg)
 	memcpy(&buts.name, &cbuts.name, 32);
 
 	mutex_lock(&bdev->bd_mutex);
-	ret = do_blk_trace_setup(q, bdev, &buts);
+	ret = do_blk_trace_setup(q, b, bdev->bd_dev, &buts);
 	mutex_unlock(&bdev->bd_mutex);
 	if (ret)
 		return ret;
...
@@ -741,7 +741,21 @@ struct request *elv_next_request(struct request_queue *q)
 			q->boundary_rq = NULL;
 		}
 
-		if ((rq->cmd_flags & REQ_DONTPREP) || !q->prep_rq_fn)
+		if (rq->cmd_flags & REQ_DONTPREP)
+			break;
+
+		if (q->dma_drain_size && rq->data_len) {
+			/*
+			 * make sure space for the drain appears; we
+			 * know we can do this because max_hw_segments
+			 * has been adjusted to be one fewer than the
+			 * device can handle
+			 */
+			rq->nr_phys_segments++;
+			rq->nr_hw_segments++;
+		}
+
+		if (!q->prep_rq_fn)
 			break;
 
 		ret = q->prep_rq_fn(q, rq);
@@ -754,6 +768,16 @@ struct request *elv_next_request(struct request_queue *q)
 			 * avoid resource deadlock. REQ_STARTED will
 			 * prevent other fs requests from passing this one.
 			 */
+			if (q->dma_drain_size && rq->data_len &&
+			    !(rq->cmd_flags & REQ_DONTPREP)) {
+				/*
+				 * remove the space for the drain we added
+				 * so that we don't add it again
+				 */
+				--rq->nr_phys_segments;
+				--rq->nr_hw_segments;
+			}
+
 			rq = NULL;
 			break;
 		} else if (ret == BLKPREP_KILL) {
...
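The elevator now reserves the extra drain segment while prepping a request and backs it out again if the prep handler defers. A minimal sketch of a prep_rq_fn that exercises that deferral path (my_resource_available() is a hypothetical driver helper; the callback signature and BLKPREP_* returns are the real kernel interface):

#include <linux/blkdev.h>

static int my_resource_available(void);	/* hypothetical driver-specific check */

static int my_prep_rq_fn(struct request_queue *q, struct request *rq)
{
	/*
	 * Returning BLKPREP_DEFER makes elv_next_request() back out the
	 * drain segment it just reserved, so the counts stay balanced
	 * when this request is re-prepped later.
	 */
	if (!my_resource_available())
		return BLKPREP_DEFER;

	rq->cmd_flags |= REQ_DONTPREP;	/* one-time prep done, skip it next time */
	return BLKPREP_OK;
}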
@@ -720,6 +720,45 @@ void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
 
 EXPORT_SYMBOL(blk_queue_stack_limits);
 
+/**
+ * blk_queue_dma_drain - Set up a drain buffer for excess dma.
+ *
+ * @q:  the request queue for the device
+ * @buf:	physically contiguous buffer
+ * @size:	size of the buffer in bytes
+ *
+ * Some devices have excess DMA problems and can't simply discard (or
+ * zero fill) the unwanted piece of the transfer.  They have to have a
+ * real area of memory to transfer it into.  The use case for this is
+ * ATAPI devices in DMA mode.  If the packet command causes a transfer
+ * bigger than the transfer size, some HBAs will lock up if there
+ * aren't DMA elements to contain the excess transfer.  What this API
+ * does is adjust the queue so that buf is always appended
+ * silently to the scatterlist.
+ *
+ * Note: This routine adjusts max_hw_segments to make room for
+ * appending the drain buffer.  If you call
+ * blk_queue_max_hw_segments() or blk_queue_max_phys_segments() after
+ * calling this routine, you must set the limit to one fewer than your
+ * device can support, otherwise there won't be room for the drain
+ * buffer.
+ */
+int blk_queue_dma_drain(struct request_queue *q, void *buf,
+		unsigned int size)
+{
+	if (q->max_hw_segments < 2 || q->max_phys_segments < 2)
+		return -EINVAL;
+	/* make room for appending the drain */
+	--q->max_hw_segments;
+	--q->max_phys_segments;
+	q->dma_drain_buffer = buf;
+	q->dma_drain_size = size;
+
+	return 0;
+}
+
+EXPORT_SYMBOL_GPL(blk_queue_dma_drain);
+
 /**
  * blk_queue_segment_boundary - set boundary rules for segment merging
  * @q:  the request queue for the device
@@ -1374,6 +1413,16 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
 		bvprv = bvec;
 	} /* segments in rq */
 
+	if (q->dma_drain_size) {
+		sg->page_link &= ~0x02;
+		sg = sg_next(sg);
+		sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
+			    q->dma_drain_size,
+			    ((unsigned long)q->dma_drain_buffer) &
+			    (PAGE_SIZE - 1));
+		nsegs++;
+	}
+
 	if (sg)
 		sg_mark_end(sg);
...
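For illustration, a minimal sketch of how a low-level driver might wire up the new drain API at queue-init time. MY_MAX_SEGMENTS, MY_DRAIN_SIZE, my_drain_buf and my_init_queue() are hypothetical names; only the blk_queue_* calls are the real interface. Setting the true hardware limits first lets blk_queue_dma_drain() carve out the extra segment itself, per the note in the kernel-doc above:

#include <linux/blkdev.h>
#include <linux/slab.h>

#define MY_MAX_SEGMENTS	64	/* hypothetical HBA scatter/gather limit */
#define MY_DRAIN_SIZE	512	/* enough to soak up ATAPI over-transfer */

static void *my_drain_buf;

static int my_init_queue(struct request_queue *q)
{
	my_drain_buf = kmalloc(MY_DRAIN_SIZE, GFP_KERNEL);
	if (!my_drain_buf)
		return -ENOMEM;

	/* set the real hardware limits first ... */
	blk_queue_max_hw_segments(q, MY_MAX_SEGMENTS);
	blk_queue_max_phys_segments(q, MY_MAX_SEGMENTS);

	/* ... then let blk_queue_dma_drain() reserve one segment of each */
	return blk_queue_dma_drain(q, my_drain_buf, MY_DRAIN_SIZE);
}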
@@ -48,6 +48,7 @@ static int sg_version_num = 30534;	/* 2 digits for each component */
 #include <linux/blkdev.h>
 #include <linux/delay.h>
 #include <linux/scatterlist.h>
+#include <linux/blktrace_api.h>
 
 #include "scsi.h"
 #include <scsi/scsi_dbg.h>
@@ -1067,6 +1068,17 @@ sg_ioctl(struct inode *inode, struct file *filp,
 	case BLKSECTGET:
 		return put_user(sdp->device->request_queue->max_sectors * 512,
 				ip);
+	case BLKTRACESETUP:
+		return blk_trace_setup(sdp->device->request_queue,
+				       sdp->disk->disk_name,
+				       sdp->device->sdev_gendev.devt,
+				       (char *)arg);
+	case BLKTRACESTART:
+		return blk_trace_startstop(sdp->device->request_queue, 1);
+	case BLKTRACESTOP:
+		return blk_trace_startstop(sdp->device->request_queue, 0);
+	case BLKTRACETEARDOWN:
+		return blk_trace_remove(sdp->device->request_queue);
 	default:
 		if (read_only)
 			return -EPERM; /* don't know so take safe approach */
...
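With these hooks in place, the same blktrace ioctls that work on block device nodes now work on /dev/sg* nodes. A minimal user-space sketch, assuming a /dev/sg0 node and a kernel built with CONFIG_BLK_DEV_IO_TRACE:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/fs.h>			/* BLKTRACE* ioctl numbers */
#include <linux/blktrace_api.h>		/* struct blk_user_trace_setup */

int main(void)
{
	struct blk_user_trace_setup buts;
	int fd = open("/dev/sg0", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&buts, 0, sizeof(buts));
	buts.buf_size = 512 * 1024;	/* relay sub-buffer size */
	buts.buf_nr   = 4;		/* number of sub-buffers */
	buts.act_mask = 0xffff;		/* trace all action types */

	if (ioctl(fd, BLKTRACESETUP, &buts) < 0 ||
	    ioctl(fd, BLKTRACESTART) < 0) {
		perror("blktrace ioctl");
		return 1;
	}

	/* ... collect events from debugfs (e.g. with blktrace/blkparse) ... */

	ioctl(fd, BLKTRACESTOP);
	ioctl(fd, BLKTRACETEARDOWN);
	return 0;
}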
@@ -248,11 +248,13 @@ inline int bio_hw_segments(struct request_queue *q, struct bio *bio)
  */
 void __bio_clone(struct bio *bio, struct bio *bio_src)
 {
-	struct request_queue *q = bdev_get_queue(bio_src->bi_bdev);
-
 	memcpy(bio->bi_io_vec, bio_src->bi_io_vec,
 		bio_src->bi_max_vecs * sizeof(struct bio_vec));
 
+	/*
+	 * most users will be overriding ->bi_bdev with a new target,
+	 * so we don't set nor calculate new physical/hw segment counts here
+	 */
 	bio->bi_sector = bio_src->bi_sector;
 	bio->bi_bdev = bio_src->bi_bdev;
 	bio->bi_flags |= 1 << BIO_CLONED;
@@ -260,8 +262,6 @@ void __bio_clone(struct bio *bio, struct bio *bio_src)
 	bio->bi_vcnt = bio_src->bi_vcnt;
 	bio->bi_size = bio_src->bi_size;
 	bio->bi_idx = bio_src->bi_idx;
-	bio_phys_segments(q, bio);
-	bio_hw_segments(q, bio);
 }
 
 /**
...
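The dropped recount makes sense given how clones are typically used: a stacking driver clones a bio and immediately points it at a different device, so segment counts computed against the source queue would be thrown away. A sketch of that pattern (my_clone_and_remap() and its target are hypothetical; bio_clone() is the real in-tree helper):

#include <linux/bio.h>

static struct bio *my_clone_and_remap(struct bio *bio,
				      struct block_device *target_bdev,
				      sector_t target_sector)
{
	struct bio *clone = bio_clone(bio, GFP_NOIO);

	if (!clone)
		return NULL;

	/*
	 * retarget the clone; segment counts get recalculated lazily
	 * against the *new* queue when they are next needed
	 */
	clone->bi_bdev = target_bdev;
	clone->bi_sector = target_sector;
	return clone;
}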
@@ -356,6 +356,8 @@ struct request_queue
 	unsigned int		max_segment_size;
 
 	unsigned long		seg_boundary_mask;
+	void			*dma_drain_buffer;
+	unsigned int		dma_drain_size;
 	unsigned int		dma_alignment;
 
 	struct blk_queue_tag	*queue_tags;
@@ -692,6 +694,8 @@ extern void blk_queue_max_hw_segments(struct request_queue *, unsigned short);
 extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
 extern void blk_queue_hardsect_size(struct request_queue *, unsigned short);
 extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
+extern int blk_queue_dma_drain(struct request_queue *q, void *buf,
+			       unsigned int size);
 extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
 extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
 extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *);
@@ -768,12 +772,7 @@ static inline int bdev_hardsect_size(struct block_device *bdev)
 
 static inline int queue_dma_alignment(struct request_queue *q)
 {
-	int retval = 511;
-
-	if (q && q->dma_alignment)
-		retval = q->dma_alignment;
-
-	return retval;
+	return q ? q->dma_alignment : 511;
 }
 
 /* assumes size > 256 */
...
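The queue_dma_alignment() rewrite is what makes a dma_alignment of zero meaningful: previously an explicit 0 fell through to the 511 default, whereas now it is returned as-is, i.e. "no alignment restriction". A hypothetical init helper for hardware with unrestricted DMA (my_set_unrestricted_dma() is an illustrative name; blk_queue_dma_alignment() is the real setter):

#include <linux/blkdev.h>

static void my_set_unrestricted_dma(struct request_queue *q)
{
	/* a mask of 0 now means user buffers of any alignment are acceptable */
	blk_queue_dma_alignment(q, 0);
}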
@@ -148,7 +148,7 @@ extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
 extern void blk_trace_shutdown(struct request_queue *);
 extern void __blk_add_trace(struct blk_trace *, sector_t, int, int, u32, int, int, void *);
 extern int do_blk_trace_setup(struct request_queue *q,
-	struct block_device *bdev, struct blk_user_trace_setup *buts);
+	char *name, dev_t dev, struct blk_user_trace_setup *buts);
 
 /**
@@ -282,6 +282,11 @@ static inline void blk_add_trace_remap(struct request_queue *q, struct bio *bio,
 	__blk_add_trace(bt, from, bio->bi_size, bio->bi_rw, BLK_TA_REMAP, !bio_flagged(bio, BIO_UPTODATE), sizeof(r), &r);
 }
 
+extern int blk_trace_setup(request_queue_t *q, char *name, dev_t dev,
+			   char __user *arg);
+extern int blk_trace_startstop(request_queue_t *q, int start);
+extern int blk_trace_remove(request_queue_t *q);
+
 #else /* !CONFIG_BLK_DEV_IO_TRACE */
 #define blk_trace_ioctl(bdev, cmd, arg)		(-ENOTTY)
 #define blk_trace_shutdown(q)			do { } while (0)
@@ -290,7 +295,10 @@ static inline void blk_add_trace_remap(struct request_queue *q, struct bio *bio,
 #define blk_add_trace_generic(q, rq, rw, what)	do { } while (0)
 #define blk_add_trace_pdu_int(q, what, bio, pdu)	do { } while (0)
 #define blk_add_trace_remap(q, bio, dev, f, t)	do {} while (0)
-#define do_blk_trace_setup(q, bdev, buts)	(-ENOTTY)
+#define do_blk_trace_setup(q, name, dev, buts)	(-ENOTTY)
+#define blk_trace_setup(q, name, dev, arg)	(-ENOTTY)
+#define blk_trace_startstop(q, start)		(-ENOTTY)
+#define blk_trace_remove(q)			(-ENOTTY)
 #endif /* CONFIG_BLK_DEV_IO_TRACE */
 #endif /* __KERNEL__ */
 #endif