Commit 230b619e authored by Jens Axboe's avatar Jens Axboe Committed by Linus Torvalds

[PATCH] shared block queue tag map

This makes it possible to share a tag map between queues.
Some (most?) SCSI host adapters need this, and SATA TCQ will need it
for some cases, too.
parent 05c1339f
......@@ -458,6 +458,7 @@ void blk_queue_free_tags(request_queue_t *q)
if (!bqt)
return;
if (atomic_dec_and_test(&bqt->refcnt)) {
BUG_ON(bqt->busy);
BUG_ON(!list_empty(&bqt->busy_list));
......@@ -468,6 +469,8 @@ void blk_queue_free_tags(request_queue_t *q)
bqt->tag_map = NULL;
kfree(bqt);
}
q->queue_tags = NULL;
q->queue_flags &= ~(1 << QUEUE_FLAG_QUEUED);
}
......@@ -503,6 +506,9 @@ init_tag_map(request_queue_t *q, struct blk_queue_tag *tags, int depth)
for (i = depth; i < bits * BLK_TAGS_PER_LONG; i++)
__set_bit(i, tags->tag_map);
INIT_LIST_HEAD(&tags->busy_list);
tags->busy = 0;
atomic_set(&tags->refcnt, 1);
return 0;
fail:
kfree(tags->tag_index);
......@@ -514,19 +520,18 @@ init_tag_map(request_queue_t *q, struct blk_queue_tag *tags, int depth)
* @q: the request queue for the device
* @depth: the maximum queue depth supported
**/
int blk_queue_init_tags(request_queue_t *q, int depth)
int blk_queue_init_tags(request_queue_t *q, int depth,
struct blk_queue_tag *tags)
{
struct blk_queue_tag *tags;
tags = kmalloc(sizeof(struct blk_queue_tag),GFP_ATOMIC);
if (!tags) {
tags = kmalloc(sizeof(struct blk_queue_tag), GFP_ATOMIC);
if (!tags)
goto fail;
if (init_tag_map(q, tags, depth))
goto fail;
INIT_LIST_HEAD(&tags->busy_list);
tags->busy = 0;
} else
atomic_inc(&tags->refcnt);
/*
* assign it, all done
......
......@@ -596,7 +596,7 @@ static int ide_enable_queued(ide_drive_t *drive, int on)
* enable block tagging
*/
if (!blk_queue_tagged(drive->queue))
blk_queue_init_tags(drive->queue, IDE_MAX_TAG);
blk_queue_init_tags(drive->queue, IDE_MAX_TAG, NULL);
/*
* check auto-poll support
......
......@@ -262,6 +262,7 @@ struct blk_queue_tag {
int busy; /* current depth */
int max_depth; /* what we will send to device */
int real_max_depth; /* what the array can hold */
atomic_t refcnt; /* map can be shared */
};
struct request_queue
......@@ -579,7 +580,7 @@ request_queue_t *blk_alloc_queue(int);
extern int blk_queue_start_tag(request_queue_t *, struct request *);
extern struct request *blk_queue_find_tag(request_queue_t *, int);
extern void blk_queue_end_tag(request_queue_t *, struct request *);
extern int blk_queue_init_tags(request_queue_t *, int);
extern int blk_queue_init_tags(request_queue_t *, int, struct blk_queue_tag *);
extern void blk_queue_free_tags(request_queue_t *);
extern int blk_queue_resize_tags(request_queue_t *, int);
extern void blk_queue_invalidate_tags(request_queue_t *);
......
......@@ -27,7 +27,7 @@ static inline void scsi_activate_tcq(struct scsi_device *sdev, int depth)
{
if (sdev->tagged_supported) {
if (!blk_queue_tagged(sdev->request_queue))
blk_queue_init_tags(sdev->request_queue, depth);
blk_queue_init_tags(sdev->request_queue, depth, NULL);
scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, depth);
}
}
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment