Commit ad519c69 authored by Jens Axboe

[PATCH] queue dma alignment

Make it possible for a device to specify the dma alignment restrictions
it has. This will be used by future infrastructure when mapping in user
pages, and allows us to do dma on ATAPI even though the user address and
length are not sector-size aligned.
parent 0717c0a9
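
The default mask installed by this patch is 511 (sector alignment). A driver whose hardware tolerates looser alignment would call the new helper when setting up its queue; the driver function and mask value in the sketch below are invented for illustration and are not part of this commit:

/*
 * Hypothetical example only: relax the default 511 (sector) mask so that
 * direct-io user buffers merely need to be 4-byte aligned.
 */
static void example_driver_init_queue(request_queue_t *q)
{
	blk_queue_dma_alignment(q, 0x03);
}
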
@@ -242,6 +242,7 @@ void blk_queue_make_request(request_queue_t * q, make_request_fn * mfn)
 	q->backing_dev_info.state = 0;
 	blk_queue_max_sectors(q, MAX_SECTORS);
 	blk_queue_hardsect_size(q, 512);
+	blk_queue_dma_alignment(q, 511);
 
 	/*
 	 * by default assume old behaviour and bounce for any highmem page
@@ -408,6 +409,21 @@ void blk_queue_segment_boundary(request_queue_t *q, unsigned long mask)
 	q->seg_boundary_mask = mask;
 }
 
+/**
+ * blk_queue_dma_alignment - set dma length and memory alignment
+ * @q:  the request queue for the device
+ * @mask:  alignment mask
+ *
+ * Description:
+ *    Set the required memory and length alignment for direct dma transactions.
+ *    This is used when building direct io requests for the queue.
+ *
+ **/
+void blk_queue_dma_alignment(request_queue_t *q, int mask)
+{
+	q->dma_alignment = mask;
+}
+
 void blk_queue_assign_lock(request_queue_t *q, spinlock_t *lock)
 {
 	spin_lock_init(lock);
@@ -2124,6 +2140,7 @@ EXPORT_SYMBOL(blk_queue_max_hw_segments);
 EXPORT_SYMBOL(blk_queue_max_segment_size);
 EXPORT_SYMBOL(blk_queue_hardsect_size);
 EXPORT_SYMBOL(blk_queue_segment_boundary);
+EXPORT_SYMBOL(blk_queue_dma_alignment);
 EXPORT_SYMBOL(blk_rq_map_sg);
 EXPORT_SYMBOL(blk_nohighio);
 EXPORT_SYMBOL(blk_dump_rq_flags);
...
@@ -215,6 +215,7 @@ struct request_queue
 	unsigned int max_segment_size;
 	unsigned long seg_boundary_mask;
+	unsigned int dma_alignment;
 
 	wait_queue_head_t queue_wait;
@@ -346,6 +347,7 @@ extern void blk_queue_segment_boundary(request_queue_t *, unsigned long);
 extern void blk_queue_assign_lock(request_queue_t *, spinlock_t *);
 extern void blk_queue_prep_rq(request_queue_t *, prep_rq_fn *pfn);
 extern void blk_queue_merge_bvec(request_queue_t *, merge_bvec_fn *);
+extern void blk_queue_dma_alignment(request_queue_t *, int);
 extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
 extern int blk_rq_map_sg(request_queue_t *, struct request *, struct scatterlist *);
@@ -392,6 +394,21 @@ static inline int bdev_hardsect_size(struct block_device *bdev)
 	return queue_hardsect_size(bdev_get_queue(bdev));
 }
 
+static inline int queue_dma_alignment(request_queue_t *q)
+{
+	int retval = 511;
+
+	if (q && q->dma_alignment)
+		retval = q->dma_alignment;
+
+	return retval;
+}
+
+static inline int bdev_dma_aligment(struct block_device *bdev)
+{
+	return queue_dma_alignment(bdev_get_queue(bdev));
+}
+
 #define blk_finished_io(nsects)	do { } while (0)
 #define blk_started_io(nsects)	do { } while (0)
...
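
For the "future infrastructure" mentioned in the commit message, mapping code that builds a request directly from a user buffer could consult the new inline helper before committing to a zero-copy path. A rough sketch of such a check follows; the function name is hypothetical and not part of this patch:

/*
 * Hypothetical sketch: reject user buffers whose address or length does not
 * satisfy the queue's dma alignment mask, so the caller can fall back to a
 * bounced/copied path instead.
 */
static int example_check_user_buffer(request_queue_t *q, void *ubuf,
				     unsigned int len)
{
	int mask = queue_dma_alignment(q);

	/* both the start address and the length must satisfy the mask */
	if (((unsigned long) ubuf | len) & mask)
		return -EINVAL;

	return 0;
}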