Commit fb4b10ab authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.dk/linux-2.6-block

* 'for-linus' of git://git.kernel.dk/linux-2.6-block:
  block: kill loop_mutex
  blktrace: Remove blk_fill_rwbs_rq.
  block: blk-flush shouldn't call directly into q->request_fn() __blk_run_queue()
  block: add @force_kblockd to __blk_run_queue()
  block: fix kernel-doc format for blkdev_issue_zeroout
  blk-throttle: Do not use kblockd workqueue for throtl work
Parents: 83360269, fd51469f
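The thread running through this pull is the new second argument to __blk_run_queue(): callers that hold the queue lock in a context where recursing into the driver is safe pass false, while request-completion paths pass true so the queue is always kicked from the kblockd workqueue. The sketch below condenses the resulting function from the blk-core.c hunks that follow; the else branch is reconstructed from the surrounding 2.6.38-era code and is illustrative rather than part of this diff.

void __blk_run_queue(struct request_queue *q, bool force_kblockd)
{
        blk_remove_plug(q);

        if (unlikely(blk_queue_stopped(q)))
                return;

        /*
         * Only recurse once to avoid overrunning the stack; otherwise, or
         * when the caller asked for kblockd, punt to the worker thread.
         */
        if (!force_kblockd && !queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
                q->request_fn(q);
                queue_flag_clear(QUEUE_FLAG_REENTER, q);
        } else {
                queue_flag_set(QUEUE_FLAG_PLUGGED, q);
                kblockd_schedule_work(q, &q->unplug_work);
        }
}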
...@@ -352,7 +352,7 @@ void blk_start_queue(struct request_queue *q) ...@@ -352,7 +352,7 @@ void blk_start_queue(struct request_queue *q)
WARN_ON(!irqs_disabled()); WARN_ON(!irqs_disabled());
queue_flag_clear(QUEUE_FLAG_STOPPED, q); queue_flag_clear(QUEUE_FLAG_STOPPED, q);
__blk_run_queue(q); __blk_run_queue(q, false);
} }
EXPORT_SYMBOL(blk_start_queue); EXPORT_SYMBOL(blk_start_queue);
...@@ -403,13 +403,14 @@ EXPORT_SYMBOL(blk_sync_queue); ...@@ -403,13 +403,14 @@ EXPORT_SYMBOL(blk_sync_queue);
/** /**
* __blk_run_queue - run a single device queue * __blk_run_queue - run a single device queue
* @q: The queue to run * @q: The queue to run
* @force_kblockd: Don't run @q->request_fn directly. Use kblockd.
* *
* Description: * Description:
* See @blk_run_queue. This variant must be called with the queue lock * See @blk_run_queue. This variant must be called with the queue lock
* held and interrupts disabled. * held and interrupts disabled.
* *
*/ */
void __blk_run_queue(struct request_queue *q) void __blk_run_queue(struct request_queue *q, bool force_kblockd)
{ {
blk_remove_plug(q); blk_remove_plug(q);
...@@ -423,7 +424,7 @@ void __blk_run_queue(struct request_queue *q) ...@@ -423,7 +424,7 @@ void __blk_run_queue(struct request_queue *q)
* Only recurse once to avoid overrunning the stack, let the unplug * Only recurse once to avoid overrunning the stack, let the unplug
* handling reinvoke the handler shortly if we already got there. * handling reinvoke the handler shortly if we already got there.
*/ */
if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) { if (!force_kblockd && !queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
q->request_fn(q); q->request_fn(q);
queue_flag_clear(QUEUE_FLAG_REENTER, q); queue_flag_clear(QUEUE_FLAG_REENTER, q);
} else { } else {
...@@ -446,7 +447,7 @@ void blk_run_queue(struct request_queue *q) ...@@ -446,7 +447,7 @@ void blk_run_queue(struct request_queue *q)
unsigned long flags; unsigned long flags;
spin_lock_irqsave(q->queue_lock, flags); spin_lock_irqsave(q->queue_lock, flags);
__blk_run_queue(q); __blk_run_queue(q, false);
spin_unlock_irqrestore(q->queue_lock, flags); spin_unlock_irqrestore(q->queue_lock, flags);
} }
EXPORT_SYMBOL(blk_run_queue); EXPORT_SYMBOL(blk_run_queue);
...@@ -1053,7 +1054,7 @@ void blk_insert_request(struct request_queue *q, struct request *rq, ...@@ -1053,7 +1054,7 @@ void blk_insert_request(struct request_queue *q, struct request *rq,
drive_stat_acct(rq, 1); drive_stat_acct(rq, 1);
__elv_add_request(q, rq, where, 0); __elv_add_request(q, rq, where, 0);
__blk_run_queue(q); __blk_run_queue(q, false);
spin_unlock_irqrestore(q->queue_lock, flags); spin_unlock_irqrestore(q->queue_lock, flags);
} }
EXPORT_SYMBOL(blk_insert_request); EXPORT_SYMBOL(blk_insert_request);
...@@ -2610,13 +2611,6 @@ int kblockd_schedule_work(struct request_queue *q, struct work_struct *work) ...@@ -2610,13 +2611,6 @@ int kblockd_schedule_work(struct request_queue *q, struct work_struct *work)
} }
EXPORT_SYMBOL(kblockd_schedule_work); EXPORT_SYMBOL(kblockd_schedule_work);
int kblockd_schedule_delayed_work(struct request_queue *q,
struct delayed_work *dwork, unsigned long delay)
{
return queue_delayed_work(kblockd_workqueue, dwork, delay);
}
EXPORT_SYMBOL(kblockd_schedule_delayed_work);
int __init blk_dev_init(void) int __init blk_dev_init(void)
{ {
BUILD_BUG_ON(__REQ_NR_BITS > 8 * BUILD_BUG_ON(__REQ_NR_BITS > 8 *
......
...@@ -66,10 +66,12 @@ static void blk_flush_complete_seq_end_io(struct request_queue *q, ...@@ -66,10 +66,12 @@ static void blk_flush_complete_seq_end_io(struct request_queue *q,
/* /*
* Moving a request silently to empty queue_head may stall the * Moving a request silently to empty queue_head may stall the
* queue. Kick the queue in those cases. * queue. Kick the queue in those cases. This function is called
* from request completion path and calling directly into
* request_fn may confuse the driver. Always use kblockd.
*/ */
if (was_empty && next_rq) if (was_empty && next_rq)
__blk_run_queue(q); __blk_run_queue(q, true);
} }
static void pre_flush_end_io(struct request *rq, int error) static void pre_flush_end_io(struct request *rq, int error)
...@@ -130,7 +132,7 @@ static struct request *queue_next_fseq(struct request_queue *q) ...@@ -130,7 +132,7 @@ static struct request *queue_next_fseq(struct request_queue *q)
BUG(); BUG();
} }
elv_insert(q, rq, ELEVATOR_INSERT_FRONT); elv_insert(q, rq, ELEVATOR_INSERT_REQUEUE);
return rq; return rq;
} }
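The new comment in blk_flush_complete_seq_end_io() states the rule the flush machinery now follows: a request-completion handler must never call back into ->request_fn() directly, because the driver may not be re-entrant from its own completion path. A minimal sketch of that pattern, assuming the queue lock is already held as in the real completion path; my_complete_and_kick() is a hypothetical name, not from the patch:

/* Hypothetical completion hook following the rule documented above:
 * if the queue was empty and there is more work, kick it, but only
 * through kblockd (force_kblockd == true), never via request_fn. */
static void my_complete_and_kick(struct request_queue *q, bool was_empty,
                                 struct request *next_rq)
{
        if (was_empty && next_rq)
                __blk_run_queue(q, true);
}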
......
...@@ -132,7 +132,7 @@ static void bio_batch_end_io(struct bio *bio, int err) ...@@ -132,7 +132,7 @@ static void bio_batch_end_io(struct bio *bio, int err)
} }
/** /**
* blkdev_issue_zeroout generate number of zero filed write bios * blkdev_issue_zeroout - generate number of zero filed write bios
* @bdev: blockdev to issue * @bdev: blockdev to issue
* @sector: start sector * @sector: start sector
* @nr_sects: number of sectors to write * @nr_sects: number of sectors to write
......
...@@ -20,6 +20,11 @@ static int throtl_quantum = 32; ...@@ -20,6 +20,11 @@ static int throtl_quantum = 32;
/* Throttling is performed over 100ms slice and after that slice is renewed */ /* Throttling is performed over 100ms slice and after that slice is renewed */
static unsigned long throtl_slice = HZ/10; /* 100 ms */ static unsigned long throtl_slice = HZ/10; /* 100 ms */
/* A workqueue to queue throttle related work */
static struct workqueue_struct *kthrotld_workqueue;
static void throtl_schedule_delayed_work(struct throtl_data *td,
unsigned long delay);
struct throtl_rb_root { struct throtl_rb_root {
struct rb_root rb; struct rb_root rb;
struct rb_node *left; struct rb_node *left;
...@@ -345,10 +350,9 @@ static void throtl_schedule_next_dispatch(struct throtl_data *td) ...@@ -345,10 +350,9 @@ static void throtl_schedule_next_dispatch(struct throtl_data *td)
update_min_dispatch_time(st); update_min_dispatch_time(st);
if (time_before_eq(st->min_disptime, jiffies)) if (time_before_eq(st->min_disptime, jiffies))
throtl_schedule_delayed_work(td->queue, 0); throtl_schedule_delayed_work(td, 0);
else else
throtl_schedule_delayed_work(td->queue, throtl_schedule_delayed_work(td, (st->min_disptime - jiffies));
(st->min_disptime - jiffies));
} }
static inline void static inline void
...@@ -815,10 +819,10 @@ void blk_throtl_work(struct work_struct *work) ...@@ -815,10 +819,10 @@ void blk_throtl_work(struct work_struct *work)
} }
/* Call with queue lock held */ /* Call with queue lock held */
void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay) static void
throtl_schedule_delayed_work(struct throtl_data *td, unsigned long delay)
{ {
struct throtl_data *td = q->td;
struct delayed_work *dwork = &td->throtl_work; struct delayed_work *dwork = &td->throtl_work;
if (total_nr_queued(td) > 0) { if (total_nr_queued(td) > 0) {
...@@ -827,12 +831,11 @@ void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay) ...@@ -827,12 +831,11 @@ void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay)
* Cancel that and schedule a new one. * Cancel that and schedule a new one.
*/ */
__cancel_delayed_work(dwork); __cancel_delayed_work(dwork);
kblockd_schedule_delayed_work(q, dwork, delay); queue_delayed_work(kthrotld_workqueue, dwork, delay);
throtl_log(td, "schedule work. delay=%lu jiffies=%lu", throtl_log(td, "schedule work. delay=%lu jiffies=%lu",
delay, jiffies); delay, jiffies);
} }
} }
EXPORT_SYMBOL(throtl_schedule_delayed_work);
static void static void
throtl_destroy_tg(struct throtl_data *td, struct throtl_grp *tg) throtl_destroy_tg(struct throtl_data *td, struct throtl_grp *tg)
...@@ -920,7 +923,7 @@ static void throtl_update_blkio_group_read_bps(void *key, ...@@ -920,7 +923,7 @@ static void throtl_update_blkio_group_read_bps(void *key,
smp_mb__after_atomic_inc(); smp_mb__after_atomic_inc();
/* Schedule a work now to process the limit change */ /* Schedule a work now to process the limit change */
throtl_schedule_delayed_work(td->queue, 0); throtl_schedule_delayed_work(td, 0);
} }
static void throtl_update_blkio_group_write_bps(void *key, static void throtl_update_blkio_group_write_bps(void *key,
...@@ -934,7 +937,7 @@ static void throtl_update_blkio_group_write_bps(void *key, ...@@ -934,7 +937,7 @@ static void throtl_update_blkio_group_write_bps(void *key,
smp_mb__before_atomic_inc(); smp_mb__before_atomic_inc();
atomic_inc(&td->limits_changed); atomic_inc(&td->limits_changed);
smp_mb__after_atomic_inc(); smp_mb__after_atomic_inc();
throtl_schedule_delayed_work(td->queue, 0); throtl_schedule_delayed_work(td, 0);
} }
static void throtl_update_blkio_group_read_iops(void *key, static void throtl_update_blkio_group_read_iops(void *key,
...@@ -948,7 +951,7 @@ static void throtl_update_blkio_group_read_iops(void *key, ...@@ -948,7 +951,7 @@ static void throtl_update_blkio_group_read_iops(void *key,
smp_mb__before_atomic_inc(); smp_mb__before_atomic_inc();
atomic_inc(&td->limits_changed); atomic_inc(&td->limits_changed);
smp_mb__after_atomic_inc(); smp_mb__after_atomic_inc();
throtl_schedule_delayed_work(td->queue, 0); throtl_schedule_delayed_work(td, 0);
} }
static void throtl_update_blkio_group_write_iops(void *key, static void throtl_update_blkio_group_write_iops(void *key,
...@@ -962,7 +965,7 @@ static void throtl_update_blkio_group_write_iops(void *key, ...@@ -962,7 +965,7 @@ static void throtl_update_blkio_group_write_iops(void *key,
smp_mb__before_atomic_inc(); smp_mb__before_atomic_inc();
atomic_inc(&td->limits_changed); atomic_inc(&td->limits_changed);
smp_mb__after_atomic_inc(); smp_mb__after_atomic_inc();
throtl_schedule_delayed_work(td->queue, 0); throtl_schedule_delayed_work(td, 0);
} }
void throtl_shutdown_timer_wq(struct request_queue *q) void throtl_shutdown_timer_wq(struct request_queue *q)
...@@ -1135,6 +1138,10 @@ void blk_throtl_exit(struct request_queue *q) ...@@ -1135,6 +1138,10 @@ void blk_throtl_exit(struct request_queue *q)
static int __init throtl_init(void) static int __init throtl_init(void)
{ {
kthrotld_workqueue = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM, 0);
if (!kthrotld_workqueue)
panic("Failed to create kthrotld\n");
blkio_policy_register(&blkio_policy_throtl); blkio_policy_register(&blkio_policy_throtl);
return 0; return 0;
} }
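blk-throttle now allocates its own kthrotld workqueue instead of piggybacking on kblockd, which is why kblockd_schedule_delayed_work() can be deleted from blk-core.c and blkdev.h. The generic lifecycle it relies on is sketched below under stated assumptions: the "kthrotld" name and the WQ_MEM_RECLAIM flag come from the hunks above, while my_dispatch_fn, the delay value, and the standalone delayed_work are illustrative (in the real code the work item is embedded in struct throtl_data, and the workqueue is never destroyed because the code is built in).

#include <linux/workqueue.h>

static struct workqueue_struct *my_throtl_wq;	/* plays the role of kthrotld_workqueue */
static struct delayed_work my_throtl_work;	/* embedded in throtl_data in the real code */

static void my_dispatch_fn(struct work_struct *work)	/* illustrative dispatch handler */
{
        /* dispatch throttled bios here */
}

static int __init my_throtl_init(void)
{
        /* WQ_MEM_RECLAIM gives the workqueue a rescuer thread, so throttled
         * work can still make progress under memory pressure. */
        my_throtl_wq = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM, 0);
        if (!my_throtl_wq)
                return -ENOMEM;

        INIT_DELAYED_WORK(&my_throtl_work, my_dispatch_fn);
        queue_delayed_work(my_throtl_wq, &my_throtl_work, HZ / 10);
        return 0;
}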
......
...@@ -3355,7 +3355,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq, ...@@ -3355,7 +3355,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
cfqd->busy_queues > 1) { cfqd->busy_queues > 1) {
cfq_del_timer(cfqd, cfqq); cfq_del_timer(cfqd, cfqq);
cfq_clear_cfqq_wait_request(cfqq); cfq_clear_cfqq_wait_request(cfqq);
__blk_run_queue(cfqd->queue); __blk_run_queue(cfqd->queue, false);
} else { } else {
cfq_blkiocg_update_idle_time_stats( cfq_blkiocg_update_idle_time_stats(
&cfqq->cfqg->blkg); &cfqq->cfqg->blkg);
...@@ -3370,7 +3370,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq, ...@@ -3370,7 +3370,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
* this new queue is RT and the current one is BE * this new queue is RT and the current one is BE
*/ */
cfq_preempt_queue(cfqd, cfqq); cfq_preempt_queue(cfqd, cfqq);
__blk_run_queue(cfqd->queue); __blk_run_queue(cfqd->queue, false);
} }
} }
...@@ -3731,7 +3731,7 @@ static void cfq_kick_queue(struct work_struct *work) ...@@ -3731,7 +3731,7 @@ static void cfq_kick_queue(struct work_struct *work)
struct request_queue *q = cfqd->queue; struct request_queue *q = cfqd->queue;
spin_lock_irq(q->queue_lock); spin_lock_irq(q->queue_lock);
__blk_run_queue(cfqd->queue); __blk_run_queue(cfqd->queue, false);
spin_unlock_irq(q->queue_lock); spin_unlock_irq(q->queue_lock);
} }
......
...@@ -602,7 +602,7 @@ void elv_quiesce_start(struct request_queue *q) ...@@ -602,7 +602,7 @@ void elv_quiesce_start(struct request_queue *q)
*/ */
elv_drain_elevator(q); elv_drain_elevator(q);
while (q->rq.elvpriv) { while (q->rq.elvpriv) {
__blk_run_queue(q); __blk_run_queue(q, false);
spin_unlock_irq(q->queue_lock); spin_unlock_irq(q->queue_lock);
msleep(10); msleep(10);
spin_lock_irq(q->queue_lock); spin_lock_irq(q->queue_lock);
...@@ -651,7 +651,7 @@ void elv_insert(struct request_queue *q, struct request *rq, int where) ...@@ -651,7 +651,7 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
* with anything. There's no point in delaying queue * with anything. There's no point in delaying queue
* processing. * processing.
*/ */
__blk_run_queue(q); __blk_run_queue(q, false);
break; break;
case ELEVATOR_INSERT_SORT: case ELEVATOR_INSERT_SORT:
......
...@@ -78,7 +78,6 @@ ...@@ -78,7 +78,6 @@
#include <asm/uaccess.h> #include <asm/uaccess.h>
static DEFINE_MUTEX(loop_mutex);
static LIST_HEAD(loop_devices); static LIST_HEAD(loop_devices);
static DEFINE_MUTEX(loop_devices_mutex); static DEFINE_MUTEX(loop_devices_mutex);
...@@ -1501,11 +1500,9 @@ static int lo_open(struct block_device *bdev, fmode_t mode) ...@@ -1501,11 +1500,9 @@ static int lo_open(struct block_device *bdev, fmode_t mode)
{ {
struct loop_device *lo = bdev->bd_disk->private_data; struct loop_device *lo = bdev->bd_disk->private_data;
mutex_lock(&loop_mutex);
mutex_lock(&lo->lo_ctl_mutex); mutex_lock(&lo->lo_ctl_mutex);
lo->lo_refcnt++; lo->lo_refcnt++;
mutex_unlock(&lo->lo_ctl_mutex); mutex_unlock(&lo->lo_ctl_mutex);
mutex_unlock(&loop_mutex);
return 0; return 0;
} }
...@@ -1515,7 +1512,6 @@ static int lo_release(struct gendisk *disk, fmode_t mode) ...@@ -1515,7 +1512,6 @@ static int lo_release(struct gendisk *disk, fmode_t mode)
struct loop_device *lo = disk->private_data; struct loop_device *lo = disk->private_data;
int err; int err;
mutex_lock(&loop_mutex);
mutex_lock(&lo->lo_ctl_mutex); mutex_lock(&lo->lo_ctl_mutex);
if (--lo->lo_refcnt) if (--lo->lo_refcnt)
...@@ -1540,7 +1536,6 @@ static int lo_release(struct gendisk *disk, fmode_t mode) ...@@ -1540,7 +1536,6 @@ static int lo_release(struct gendisk *disk, fmode_t mode)
out: out:
mutex_unlock(&lo->lo_ctl_mutex); mutex_unlock(&lo->lo_ctl_mutex);
out_unlocked: out_unlocked:
mutex_unlock(&loop_mutex);
return 0; return 0;
} }
......
...@@ -443,7 +443,7 @@ static void scsi_run_queue(struct request_queue *q) ...@@ -443,7 +443,7 @@ static void scsi_run_queue(struct request_queue *q)
&sdev->request_queue->queue_flags); &sdev->request_queue->queue_flags);
if (flagset) if (flagset)
queue_flag_set(QUEUE_FLAG_REENTER, sdev->request_queue); queue_flag_set(QUEUE_FLAG_REENTER, sdev->request_queue);
__blk_run_queue(sdev->request_queue); __blk_run_queue(sdev->request_queue, false);
if (flagset) if (flagset)
queue_flag_clear(QUEUE_FLAG_REENTER, sdev->request_queue); queue_flag_clear(QUEUE_FLAG_REENTER, sdev->request_queue);
spin_unlock(sdev->request_queue->queue_lock); spin_unlock(sdev->request_queue->queue_lock);
......
...@@ -3829,7 +3829,7 @@ fc_bsg_goose_queue(struct fc_rport *rport) ...@@ -3829,7 +3829,7 @@ fc_bsg_goose_queue(struct fc_rport *rport)
!test_bit(QUEUE_FLAG_REENTER, &rport->rqst_q->queue_flags); !test_bit(QUEUE_FLAG_REENTER, &rport->rqst_q->queue_flags);
if (flagset) if (flagset)
queue_flag_set(QUEUE_FLAG_REENTER, rport->rqst_q); queue_flag_set(QUEUE_FLAG_REENTER, rport->rqst_q);
__blk_run_queue(rport->rqst_q); __blk_run_queue(rport->rqst_q, false);
if (flagset) if (flagset)
queue_flag_clear(QUEUE_FLAG_REENTER, rport->rqst_q); queue_flag_clear(QUEUE_FLAG_REENTER, rport->rqst_q);
spin_unlock_irqrestore(rport->rqst_q->queue_lock, flags); spin_unlock_irqrestore(rport->rqst_q->queue_lock, flags);
......
...@@ -699,7 +699,7 @@ extern void blk_start_queue(struct request_queue *q); ...@@ -699,7 +699,7 @@ extern void blk_start_queue(struct request_queue *q);
extern void blk_stop_queue(struct request_queue *q); extern void blk_stop_queue(struct request_queue *q);
extern void blk_sync_queue(struct request_queue *q); extern void blk_sync_queue(struct request_queue *q);
extern void __blk_stop_queue(struct request_queue *q); extern void __blk_stop_queue(struct request_queue *q);
extern void __blk_run_queue(struct request_queue *); extern void __blk_run_queue(struct request_queue *q, bool force_kblockd);
extern void blk_run_queue(struct request_queue *); extern void blk_run_queue(struct request_queue *);
extern int blk_rq_map_user(struct request_queue *, struct request *, extern int blk_rq_map_user(struct request_queue *, struct request *,
struct rq_map_data *, void __user *, unsigned long, struct rq_map_data *, void __user *, unsigned long,
...@@ -1088,7 +1088,6 @@ static inline void put_dev_sector(Sector p) ...@@ -1088,7 +1088,6 @@ static inline void put_dev_sector(Sector p)
struct work_struct; struct work_struct;
int kblockd_schedule_work(struct request_queue *q, struct work_struct *work); int kblockd_schedule_work(struct request_queue *q, struct work_struct *work);
int kblockd_schedule_delayed_work(struct request_queue *q, struct delayed_work *dwork, unsigned long delay);
#ifdef CONFIG_BLK_CGROUP #ifdef CONFIG_BLK_CGROUP
/* /*
...@@ -1136,7 +1135,6 @@ static inline uint64_t rq_io_start_time_ns(struct request *req) ...@@ -1136,7 +1135,6 @@ static inline uint64_t rq_io_start_time_ns(struct request *req)
extern int blk_throtl_init(struct request_queue *q); extern int blk_throtl_init(struct request_queue *q);
extern void blk_throtl_exit(struct request_queue *q); extern void blk_throtl_exit(struct request_queue *q);
extern int blk_throtl_bio(struct request_queue *q, struct bio **bio); extern int blk_throtl_bio(struct request_queue *q, struct bio **bio);
extern void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay);
extern void throtl_shutdown_timer_wq(struct request_queue *q); extern void throtl_shutdown_timer_wq(struct request_queue *q);
#else /* CONFIG_BLK_DEV_THROTTLING */ #else /* CONFIG_BLK_DEV_THROTTLING */
static inline int blk_throtl_bio(struct request_queue *q, struct bio **bio) static inline int blk_throtl_bio(struct request_queue *q, struct bio **bio)
...@@ -1146,7 +1144,6 @@ static inline int blk_throtl_bio(struct request_queue *q, struct bio **bio) ...@@ -1146,7 +1144,6 @@ static inline int blk_throtl_bio(struct request_queue *q, struct bio **bio)
static inline int blk_throtl_init(struct request_queue *q) { return 0; } static inline int blk_throtl_init(struct request_queue *q) { return 0; }
static inline int blk_throtl_exit(struct request_queue *q) { return 0; } static inline int blk_throtl_exit(struct request_queue *q) { return 0; }
static inline void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay) {}
static inline void throtl_shutdown_timer_wq(struct request_queue *q) {} static inline void throtl_shutdown_timer_wq(struct request_queue *q) {}
#endif /* CONFIG_BLK_DEV_THROTTLING */ #endif /* CONFIG_BLK_DEV_THROTTLING */
......
...@@ -245,7 +245,6 @@ static inline int blk_cmd_buf_len(struct request *rq) ...@@ -245,7 +245,6 @@ static inline int blk_cmd_buf_len(struct request *rq)
extern void blk_dump_cmd(char *buf, struct request *rq); extern void blk_dump_cmd(char *buf, struct request *rq);
extern void blk_fill_rwbs(char *rwbs, u32 rw, int bytes); extern void blk_fill_rwbs(char *rwbs, u32 rw, int bytes);
extern void blk_fill_rwbs_rq(char *rwbs, struct request *rq);
#endif /* CONFIG_EVENT_TRACING && CONFIG_BLOCK */ #endif /* CONFIG_EVENT_TRACING && CONFIG_BLOCK */
......
...@@ -31,7 +31,7 @@ DECLARE_EVENT_CLASS(block_rq_with_error, ...@@ -31,7 +31,7 @@ DECLARE_EVENT_CLASS(block_rq_with_error,
0 : blk_rq_sectors(rq); 0 : blk_rq_sectors(rq);
__entry->errors = rq->errors; __entry->errors = rq->errors;
blk_fill_rwbs_rq(__entry->rwbs, rq); blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
blk_dump_cmd(__get_str(cmd), rq); blk_dump_cmd(__get_str(cmd), rq);
), ),
...@@ -118,7 +118,7 @@ DECLARE_EVENT_CLASS(block_rq, ...@@ -118,7 +118,7 @@ DECLARE_EVENT_CLASS(block_rq,
__entry->bytes = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ? __entry->bytes = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
blk_rq_bytes(rq) : 0; blk_rq_bytes(rq) : 0;
blk_fill_rwbs_rq(__entry->rwbs, rq); blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
blk_dump_cmd(__get_str(cmd), rq); blk_dump_cmd(__get_str(cmd), rq);
memcpy(__entry->comm, current->comm, TASK_COMM_LEN); memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
), ),
...@@ -563,7 +563,7 @@ TRACE_EVENT(block_rq_remap, ...@@ -563,7 +563,7 @@ TRACE_EVENT(block_rq_remap,
__entry->nr_sector = blk_rq_sectors(rq); __entry->nr_sector = blk_rq_sectors(rq);
__entry->old_dev = dev; __entry->old_dev = dev;
__entry->old_sector = from; __entry->old_sector = from;
blk_fill_rwbs_rq(__entry->rwbs, rq); blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
), ),
TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu", TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu",
......
...@@ -1827,21 +1827,5 @@ void blk_fill_rwbs(char *rwbs, u32 rw, int bytes) ...@@ -1827,21 +1827,5 @@ void blk_fill_rwbs(char *rwbs, u32 rw, int bytes)
rwbs[i] = '\0'; rwbs[i] = '\0';
} }
void blk_fill_rwbs_rq(char *rwbs, struct request *rq)
{
int rw = rq->cmd_flags & 0x03;
int bytes;
if (rq->cmd_flags & REQ_DISCARD)
rw |= REQ_DISCARD;
if (rq->cmd_flags & REQ_SECURE)
rw |= REQ_SECURE;
bytes = blk_rq_bytes(rq);
blk_fill_rwbs(rwbs, rw, bytes);
}
#endif /* CONFIG_EVENT_TRACING */ #endif /* CONFIG_EVENT_TRACING */
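With blk_fill_rwbs_rq() removed, tracepoint users hand the request's flags and byte count straight to blk_fill_rwbs(), as the block.h hunks above now do; the full cmd_flags word already carries the REQ_DISCARD and REQ_SECURE bits the old wrapper OR-ed in by hand. A small illustrative helper, assuming a caller that has a struct request in hand (my_trace_rq and the buffer size are not from the patch):

static void my_trace_rq(struct request *rq)
{
        char rwbs[8];	/* scratch buffer for the flag string */

        /* Equivalent to the removed blk_fill_rwbs_rq(rwbs, rq). */
        blk_fill_rwbs(rwbs, rq->cmd_flags, blk_rq_bytes(rq));
        trace_printk("rq %s, %u bytes\n", rwbs, blk_rq_bytes(rq));
}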