Commit 8fba70b0 authored by Linus Torvalds

Merge tag 'for-linus-20180425' of git://git.kernel.dk/linux-block

Pull block updates from Jens Axboe:
 "I ended up sitting on this about a week longer than I wanted to, since
  we were hashing out details with a timeout change. I've now killed
  that patch, so we can flush the existing queue in due time.

  This contains:

   - Fix for an old regression, where entering the queue can be
     disturbed by a signal to the process. This can cause spurious EIO.
     Fix from Alan Jenkins.

   - cdrom information leak fix from Dan.

   - Trivial helper for testing queue FUA from Dave Chinner, part of his
     O_DIRECT FUA series.

   - Series of swim fixes from Finn that actually makes it work again.

   - Loop O_DIRECT corruption fix, which caused data corruption in
     production for us. From me.

   - BFQ crash fix from me.

   - bcache maintainer update. Michael no longer has the time to do it,
     Coly has stepped up to serve as the new maintainer.

   - blkcg locking fixes from Jiang Biao.

   - Revert of a change from this merge window from Ming, that causes an
     issue on some hardware.

   - Minor clarification doc addition from Linus Walleij"

* tag 'for-linus-20180425' of git://git.kernel.dk/linux-block: (22 commits)
  Revert "blk-mq: remove code for dealing with remapping queue"
  block: mq: Add some minor doc for core structs
  bcache: mark Coly Li as bcache maintainer
  MAINTAINERS: Remove me as maintainer of bcache
  blkcg: init root blkcg_gq under lock
  blkcg: small fix on comment in blkcg_init_queue
  blkcg: don't hold blkcg lock when deactivating policy
  block: add blk_queue_fua() helper function
  cdrom: information leak in cdrom_ioctl_media_changed()
  bfq-iosched: ensure to clear bic/bfqq pointers when preparing request
  blk-mq: start request gstate with gen 1
  block/swim: Select appropriate drive on device open
  block/swim: Fix IO error at end of medium
  block/swim: Check drive type
  block/swim: Rename macros to avoid inconsistent inverted logic
  block/swim: Don't log an error message for an invalid ioctl
  block/swim: Remove extra put_disk() call from error path
  block/swim: Fix array bounds check
  m68k/mac: Don't remap SWIM MMIO region
  loop: handle short DIO reads
  ...
parents c6dc3e71 4412efec
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2617,7 +2617,7 @@ S:	Maintained
 F:	drivers/net/hamradio/baycom*
 
 BCACHE (BLOCK LAYER CACHE)
-M:	Michael Lyle <mlyle@lyle.org>
+M:	Coly Li <colyli@suse.de>
 M:	Kent Overstreet <kent.overstreet@gmail.com>
 L:	linux-bcache@vger.kernel.org
 W:	http://bcache.evilpiepirate.org
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -4934,8 +4934,16 @@ static void bfq_prepare_request(struct request *rq, struct bio *bio)
 	bool new_queue = false;
 	bool bfqq_already_existing = false, split = false;
 
-	if (!rq->elv.icq)
+	/*
+	 * Even if we don't have an icq attached, we should still clear
+	 * the scheduler pointers, as they might point to previously
+	 * allocated bic/bfqq structs.
+	 */
+	if (!rq->elv.icq) {
+		rq->elv.priv[0] = rq->elv.priv[1] = NULL;
 		return;
+	}
 
 	bic = icq_to_bic(rq->elv.icq);
 
 	spin_lock_irq(&bfqd->lock);
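A note for readers of the bfq hunk above: the scheduler reads its per-request state back out of rq->elv.priv at completion time, so stale pointers inherited from a previous occupant of the same tag slot can get dereferenced. A minimal sketch, using the accessor macros as they appear in bfq-iosched.c of this era:

	/* bfq's view of a request's private scheduler data (sketch) */
	#define RQ_BIC(rq)	((struct bfq_io_cq *) (rq)->elv.priv[0])
	#define RQ_BFQQ(rq)	((rq)->elv.priv[1])

	/* Completion paths do "bfqq = RQ_BFQQ(rq); if (bfqq) ...", so a
	 * request that skipped preparation must carry NULLs here, not
	 * whatever the previous request in this tag slot left behind. */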
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -1177,26 +1177,20 @@ int blkcg_init_queue(struct request_queue *q)
 
 	preloaded = !radix_tree_preload(GFP_KERNEL);
 
-	/*
-	 * Make sure the root blkg exists and count the existing blkgs.  As
-	 * @q is bypassing at this point, blkg_lookup_create() can't be
-	 * used.  Open code insertion.
-	 */
+	/* Make sure the root blkg exists. */
 	rcu_read_lock();
 	spin_lock_irq(q->queue_lock);
 	blkg = blkg_create(&blkcg_root, q, new_blkg);
+	if (IS_ERR(blkg))
+		goto err_unlock;
+	q->root_blkg = blkg;
+	q->root_rl.blkg = blkg;
 	spin_unlock_irq(q->queue_lock);
 	rcu_read_unlock();
 
 	if (preloaded)
 		radix_tree_preload_end();
 
-	if (IS_ERR(blkg))
-		return PTR_ERR(blkg);
-	q->root_blkg = blkg;
-	q->root_rl.blkg = blkg;
-
 	ret = blk_throtl_init(q);
 	if (ret) {
 		spin_lock_irq(q->queue_lock);
@@ -1204,6 +1198,13 @@ int blkcg_init_queue(struct request_queue *q)
 		spin_unlock_irq(q->queue_lock);
 	}
 	return ret;
+
+err_unlock:
+	spin_unlock_irq(q->queue_lock);
+	rcu_read_unlock();
+	if (preloaded)
+		radix_tree_preload_end();
+	return PTR_ERR(blkg);
 }
 
 /**
@@ -1410,9 +1411,6 @@ void blkcg_deactivate_policy(struct request_queue *q,
 	__clear_bit(pol->plid, q->blkcg_pols);
 
 	list_for_each_entry(blkg, &q->blkg_list, q_node) {
-		/* grab blkcg lock too while removing @pd from @blkg */
-		spin_lock(&blkg->blkcg->lock);
-
 		if (blkg->pd[pol->plid]) {
 			if (!blkg->pd[pol->plid]->offline &&
 			    pol->pd_offline_fn) {
@@ -1422,8 +1420,6 @@ void blkcg_deactivate_policy(struct request_queue *q,
 			pol->pd_free_fn(blkg->pd[pol->plid]);
 			blkg->pd[pol->plid] = NULL;
 		}
-
-		spin_unlock(&blkg->blkcg->lock);
 	}
 
 	spin_unlock_irq(q->queue_lock);
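The shape of the blkcg_init_queue() fix, reduced to its pattern (this is a condensed restatement of the diff itself, not new code): the root blkg pointers are now published while the locks that made blkg_create() safe are still held, and the error path unwinds in exact reverse order of acquisition:

	rcu_read_lock();
	spin_lock_irq(q->queue_lock);
	blkg = blkg_create(&blkcg_root, q, new_blkg);
	if (IS_ERR(blkg))
		goto err_unlock;	/* unlock, rcu_read_unlock, preload_end */
	q->root_blkg = blkg;		/* published under the queue lock */
	q->root_rl.blkg = blkg;
	spin_unlock_irq(q->queue_lock);
	rcu_read_unlock();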
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -201,6 +201,10 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
 	rq->part = NULL;
 	seqcount_init(&rq->gstate_seq);
 	u64_stats_init(&rq->aborted_gstate_sync);
+	/*
+	 * See comment of blk_mq_init_request
+	 */
+	WRITE_ONCE(rq->gstate, MQ_RQ_GEN_INC);
 }
 EXPORT_SYMBOL(blk_rq_init);
 
@@ -915,7 +919,6 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
 
 	while (true) {
 		bool success = false;
-		int ret;
 
 		rcu_read_lock();
 		if (percpu_ref_tryget_live(&q->q_usage_counter)) {
@@ -947,14 +950,12 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
 		 */
 		smp_rmb();
 
-		ret = wait_event_interruptible(q->mq_freeze_wq,
-				(atomic_read(&q->mq_freeze_depth) == 0 &&
-				 (preempt || !blk_queue_preempt_only(q))) ||
-				blk_queue_dying(q));
+		wait_event(q->mq_freeze_wq,
+			   (atomic_read(&q->mq_freeze_depth) == 0 &&
+			    (preempt || !blk_queue_preempt_only(q))) ||
+			   blk_queue_dying(q));
 		if (blk_queue_dying(q))
 			return -ENODEV;
-		if (ret)
-			return ret;
 	}
 }
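Context for the blk_queue_enter() change (the spurious-EIO regression from Alan Jenkins mentioned in the pull message): the interruptible wait surfaced -ERESTARTSYS to bio submission whenever the task had a signal pending while the queue was merely frozen. A hedged sketch of the failing path, paraphrasing generic_make_request() rather than quoting it:

	if (blk_queue_enter(q, flags) < 0) {
		/* a pending signal used to land here even though the
		 * queue was only frozen, and the bio was failed: */
		bio_io_error(bio);	/* application observes EIO */
	}

Switching to the uninterruptible wait_event() means only a dying queue can fail the entry, which is what -ENODEV is for.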
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2042,6 +2042,13 @@ static int blk_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
 	seqcount_init(&rq->gstate_seq);
 	u64_stats_init(&rq->aborted_gstate_sync);
 
+	/*
+	 * start gstate with gen 1 instead of 0, otherwise it will be equal
+	 * to aborted_gstate, and be identified timed out by
+	 * blk_mq_terminate_expired.
+	 */
+	WRITE_ONCE(rq->gstate, MQ_RQ_GEN_INC);
+
 	return 0;
 }
 
@@ -2329,7 +2336,7 @@ static void blk_mq_free_map_and_requests(struct blk_mq_tag_set *set,
 
 static void blk_mq_map_swqueue(struct request_queue *q)
 {
-	unsigned int i;
+	unsigned int i, hctx_idx;
 	struct blk_mq_hw_ctx *hctx;
 	struct blk_mq_ctx *ctx;
 	struct blk_mq_tag_set *set = q->tag_set;
@@ -2346,8 +2353,23 @@ static void blk_mq_map_swqueue(struct request_queue *q)
 
 	/*
 	 * Map software to hardware queues.
+	 *
+	 * If the cpu isn't present, the cpu is mapped to first hctx.
 	 */
 	for_each_possible_cpu(i) {
+		hctx_idx = q->mq_map[i];
+		/* unmapped hw queue can be remapped after CPU topo changed */
+		if (!set->tags[hctx_idx] &&
+		    !__blk_mq_alloc_rq_map(set, hctx_idx)) {
+			/*
+			 * If tags initialization fail for some hctx,
+			 * that hctx won't be brought online.  In this
+			 * case, remap the current ctx to hctx[0] which
+			 * is guaranteed to always have tags allocated
+			 */
+			q->mq_map[i] = 0;
+		}
+
 		ctx = per_cpu_ptr(q->queue_ctx, i);
 		hctx = blk_mq_map_queue(q, i);
 
@@ -2359,8 +2381,21 @@ static void blk_mq_map_swqueue(struct request_queue *q)
 	mutex_unlock(&q->sysfs_lock);
 
 	queue_for_each_hw_ctx(q, hctx, i) {
-		/* every hctx should get mapped by at least one CPU */
-		WARN_ON(!hctx->nr_ctx);
+		/*
+		 * If no software queues are mapped to this hardware queue,
+		 * disable it and free the request entries.
+		 */
+		if (!hctx->nr_ctx) {
+			/* Never unmap queue 0.  We need it as a
+			 * fallback in case of a new remap fails
+			 * allocation
+			 */
+			if (i && set->tags[i])
+				blk_mq_free_map_and_requests(set, i);
+
+			hctx->tags = NULL;
+			continue;
+		}
 
 		hctx->tags = set->tags[i];
 		WARN_ON(!hctx->tags);
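Why generation 1 does what the "start request gstate with gen 1" fix intends, sketched from this kernel's gstate encoding in block/blk-mq.h (paraphrased, not quoted):

	/* rq->gstate packs the request state in the low bits and the
	 * generation number above them: */
	enum {
		MQ_RQ_STATE_BITS	= 2,
		MQ_RQ_STATE_MASK	= (1 << MQ_RQ_STATE_BITS) - 1,
		MQ_RQ_GEN_INC		= 1 << MQ_RQ_STATE_BITS,
	};

	/* aborted_gstate is initialized to 0; if gstate also started at 0,
	 * a never-started request would satisfy the
	 * "gstate == aborted_gstate" test in blk_mq_terminate_expired()
	 * and be completed as timed out.  Seeding gstate with
	 * MQ_RQ_GEN_INC (generation 1) breaks the tie. */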
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -7,6 +7,9 @@
 
 struct blk_mq_tag_set;
 
+/**
+ * struct blk_mq_ctx - State for a software queue facing the submitting CPUs
+ */
 struct blk_mq_ctx {
 	struct {
 		spinlock_t		lock;
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -451,25 +451,47 @@ static int lo_req_flush(struct loop_device *lo, struct request *rq)
 static void lo_complete_rq(struct request *rq)
 {
 	struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
+	blk_status_t ret = BLK_STS_OK;
 
-	if (unlikely(req_op(cmd->rq) == REQ_OP_READ && cmd->use_aio &&
-		     cmd->ret >= 0 && cmd->ret < blk_rq_bytes(cmd->rq))) {
-		struct bio *bio = cmd->rq->bio;
+	if (!cmd->use_aio || cmd->ret < 0 || cmd->ret == blk_rq_bytes(rq) ||
+	    req_op(rq) != REQ_OP_READ) {
+		if (cmd->ret < 0)
+			ret = BLK_STS_IOERR;
+		goto end_io;
+	}
 
-		bio_advance(bio, cmd->ret);
-		zero_fill_bio(bio);
+	/*
+	 * Short READ - if we got some data, advance our request and
+	 * retry it. If we got no data, end the rest with EIO.
+	 */
+	if (cmd->ret) {
+		blk_update_request(rq, BLK_STS_OK, cmd->ret);
+		cmd->ret = 0;
+		blk_mq_requeue_request(rq, true);
+	} else {
+		if (cmd->use_aio) {
+			struct bio *bio = rq->bio;
+
+			while (bio) {
+				zero_fill_bio(bio);
+				bio = bio->bi_next;
+			}
+		}
+		ret = BLK_STS_IOERR;
+end_io:
+		blk_mq_end_request(rq, ret);
 	}
-
-	blk_mq_end_request(rq, cmd->ret < 0 ? BLK_STS_IOERR : BLK_STS_OK);
 }
 
 static void lo_rw_aio_do_completion(struct loop_cmd *cmd)
 {
+	struct request *rq = blk_mq_rq_from_pdu(cmd);
+
 	if (!atomic_dec_and_test(&cmd->ref))
 		return;
 	kfree(cmd->bvec);
 	cmd->bvec = NULL;
-	blk_mq_complete_request(cmd->rq);
+	blk_mq_complete_request(rq);
 }
 
 static void lo_rw_aio_complete(struct kiocb *iocb, long ret, long ret2)
@@ -487,7 +509,7 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
 {
 	struct iov_iter iter;
 	struct bio_vec *bvec;
-	struct request *rq = cmd->rq;
+	struct request *rq = blk_mq_rq_from_pdu(cmd);
 	struct bio *bio = rq->bio;
 	struct file *file = lo->lo_backing_file;
 	unsigned int offset;
@@ -1702,15 +1724,16 @@ EXPORT_SYMBOL(loop_unregister_transfer);
 static blk_status_t loop_queue_rq(struct blk_mq_hw_ctx *hctx,
 		const struct blk_mq_queue_data *bd)
 {
-	struct loop_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
-	struct loop_device *lo = cmd->rq->q->queuedata;
+	struct request *rq = bd->rq;
+	struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
+	struct loop_device *lo = rq->q->queuedata;
 
-	blk_mq_start_request(bd->rq);
+	blk_mq_start_request(rq);
 
 	if (lo->lo_state != Lo_bound)
 		return BLK_STS_IOERR;
 
-	switch (req_op(cmd->rq)) {
+	switch (req_op(rq)) {
 	case REQ_OP_FLUSH:
 	case REQ_OP_DISCARD:
 	case REQ_OP_WRITE_ZEROES:
@@ -1723,8 +1746,8 @@ static blk_status_t loop_queue_rq(struct blk_mq_hw_ctx *hctx,
 
 	/* always use the first bio's css */
 #ifdef CONFIG_BLK_CGROUP
-	if (cmd->use_aio && cmd->rq->bio && cmd->rq->bio->bi_css) {
-		cmd->css = cmd->rq->bio->bi_css;
+	if (cmd->use_aio && rq->bio && rq->bio->bi_css) {
+		cmd->css = rq->bio->bi_css;
 		css_get(cmd->css);
 	} else
 #endif
@@ -1736,8 +1759,9 @@ static blk_status_t loop_queue_rq(struct blk_mq_hw_ctx *hctx,
 
 static void loop_handle_cmd(struct loop_cmd *cmd)
 {
-	const bool write = op_is_write(req_op(cmd->rq));
-	struct loop_device *lo = cmd->rq->q->queuedata;
+	struct request *rq = blk_mq_rq_from_pdu(cmd);
+	const bool write = op_is_write(req_op(rq));
+	struct loop_device *lo = rq->q->queuedata;
 	int ret = 0;
 
 	if (write && (lo->lo_flags & LO_FLAGS_READ_ONLY)) {
@@ -1745,12 +1769,12 @@ static void loop_handle_cmd(struct loop_cmd *cmd)
 		goto failed;
 	}
 
-	ret = do_req_filebacked(lo, cmd->rq);
+	ret = do_req_filebacked(lo, rq);
 failed:
 	/* complete non-aio request */
 	if (!cmd->use_aio || ret) {
 		cmd->ret = ret ? -EIO : 0;
-		blk_mq_complete_request(cmd->rq);
+		blk_mq_complete_request(rq);
 	}
 }
 
@@ -1767,9 +1791,7 @@ static int loop_init_request(struct blk_mq_tag_set *set, struct request *rq,
 {
 	struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
 
-	cmd->rq = rq;
 	kthread_init_work(&cmd->work, loop_queue_work);
-
 	return 0;
 }
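The core of the short-read handling above is blk-mq's standard partial-completion idiom. A minimal sketch of the two paths, assuming a driver-side byte count 'done' (a hypothetical name, standing in for cmd->ret here):

	if (done > 0 && done < blk_rq_bytes(rq)) {
		blk_update_request(rq, BLK_STS_OK, done);	/* consume 'done' bytes */
		blk_mq_requeue_request(rq, true);		/* re-issue the remainder */
	} else {
		blk_mq_end_request(rq, done ? BLK_STS_OK : BLK_STS_IOERR);
	}

Requeuing after blk_update_request() retries only the unread tail, which is why a partially satisfied O_DIRECT read no longer returns corrupted (stale or zeroed) data for the bytes that were never transferred.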
--- a/drivers/block/loop.h
+++ b/drivers/block/loop.h
@@ -66,7 +66,6 @@ struct loop_device {
 
 struct loop_cmd {
 	struct kthread_work work;
-	struct request *rq;
 	bool use_aio; /* use AIO interface to handle I/O */
 	atomic_t ref; /* only for aio */
 	long ret;
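Dropping the cached cmd->rq pointer works because blk-mq allocates the driver pdu directly behind the request itself, so the two convert back and forth with pointer arithmetic. The helpers in include/linux/blk-mq.h are essentially:

	static inline void *blk_mq_rq_to_pdu(struct request *rq)
	{
		return rq + 1;
	}

	static inline struct request *blk_mq_rq_from_pdu(void *pdu)
	{
		return pdu - sizeof(struct request);
	}

Storing the request pointer in the pdu was therefore redundant state that could go stale; deriving it on demand cannot.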
--- a/drivers/block/swim.c
+++ b/drivers/block/swim.c
@@ -110,7 +110,7 @@ struct iwm {
 /* Select values for swim_select and swim_readbit */
 
 #define READ_DATA_0	0x074
-#define TWOMEG_DRIVE	0x075
+#define ONEMEG_DRIVE	0x075
 #define SINGLE_SIDED	0x076
 #define DRIVE_PRESENT	0x077
 #define DISK_IN		0x170
@@ -118,9 +118,9 @@ struct iwm {
 #define TRACK_ZERO	0x172
 #define TACHO		0x173
 #define READ_DATA_1	0x174
-#define MFM_MODE	0x175
+#define GCR_MODE	0x175
 #define SEEK_COMPLETE	0x176
-#define ONEMEG_MEDIA	0x177
+#define TWOMEG_MEDIA	0x177
 
 /* Bits in handshake register */
 
@@ -612,7 +612,6 @@ static void setup_medium(struct floppy_state *fs)
 		struct floppy_struct *g;
 		fs->disk_in = 1;
 		fs->write_protected = swim_readbit(base, WRITE_PROT);
-		fs->type = swim_readbit(base, ONEMEG_MEDIA);
 
 		if (swim_track00(base))
 			printk(KERN_ERR
@@ -620,6 +619,9 @@ static void setup_medium(struct floppy_state *fs)
 
 		swim_track00(base);
 
+		fs->type = swim_readbit(base, TWOMEG_MEDIA) ?
+			HD_MEDIA : DD_MEDIA;
+		fs->head_number = swim_readbit(base, SINGLE_SIDED) ? 1 : 2;
 		get_floppy_geometry(fs, 0, &g);
 		fs->total_secs = g->size;
 		fs->secpercyl = g->head * g->sect;
@@ -646,7 +648,7 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
 	swim_write(base, setup, S_IBM_DRIVE | S_FCLK_DIV2);
 	udelay(10);
 
-	swim_drive(base, INTERNAL_DRIVE);
+	swim_drive(base, fs->location);
 	swim_motor(base, ON);
 	swim_action(base, SETMFM);
 	if (fs->ejected)
@@ -656,6 +658,8 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
 			goto out;
 	}
 
+	set_capacity(fs->disk, fs->total_secs);
+
 	if (mode & FMODE_NDELAY)
 		return 0;
 
@@ -727,14 +731,9 @@ static int floppy_ioctl(struct block_device *bdev, fmode_t mode,
 		if (copy_to_user((void __user *) param, (void *) &floppy_type,
 				 sizeof(struct floppy_struct)))
 			return -EFAULT;
-		break;
-
-	default:
-		printk(KERN_DEBUG "SWIM floppy_ioctl: unknown cmd %d\n",
-		       cmd);
-		return -ENOSYS;
+		return 0;
 	}
-	return 0;
+	return -ENOTTY;
 }
 
 static int floppy_getgeo(struct block_device *bdev, struct hd_geometry *geo)
@@ -795,7 +794,7 @@ static struct kobject *floppy_find(dev_t dev, int *part, void *data)
 	struct swim_priv *swd = data;
 	int drive = (*part & 3);
 
-	if (drive > swd->floppy_count)
+	if (drive >= swd->floppy_count)
 		return NULL;
 
 	*part = 0;
@@ -813,10 +812,9 @@ static int swim_add_floppy(struct swim_priv *swd, enum drive_location location)
 
 	swim_motor(base, OFF);
 
-	if (swim_readbit(base, SINGLE_SIDED))
-		fs->head_number = 1;
-	else
-		fs->head_number = 2;
+	fs->type = HD_MEDIA;
+	fs->head_number = 2;
+
 	fs->ref_count = 0;
 	fs->ejected = 1;
 
@@ -834,10 +832,12 @@ static int swim_floppy_init(struct swim_priv *swd)
 	/* scan floppy drives */
 
 	swim_drive(base, INTERNAL_DRIVE);
-	if (swim_readbit(base, DRIVE_PRESENT))
+	if (swim_readbit(base, DRIVE_PRESENT) &&
+	    !swim_readbit(base, ONEMEG_DRIVE))
 		swim_add_floppy(swd, INTERNAL_DRIVE);
 	swim_drive(base, EXTERNAL_DRIVE);
-	if (swim_readbit(base, DRIVE_PRESENT))
+	if (swim_readbit(base, DRIVE_PRESENT) &&
+	    !swim_readbit(base, ONEMEG_DRIVE))
 		swim_add_floppy(swd, EXTERNAL_DRIVE);
 
 	/* register floppy drives */
@@ -861,7 +861,6 @@ static int swim_floppy_init(struct swim_priv *swd)
 			&swd->lock);
 		if (!swd->unit[drive].disk->queue) {
 			err = -ENOMEM;
-			put_disk(swd->unit[drive].disk);
 			goto exit_put_disks;
 		}
 		blk_queue_bounce_limit(swd->unit[drive].disk->queue,
@@ -911,7 +910,7 @@ static int swim_probe(struct platform_device *dev)
 		goto out;
 	}
 
-	swim_base = ioremap(res->start, resource_size(res));
+	swim_base = (struct swim __iomem *)res->start;
 	if (!swim_base) {
 		ret = -ENOMEM;
 		goto out_release_io;
@@ -923,7 +922,7 @@ static int swim_probe(struct platform_device *dev)
 	if (!get_swim_mode(swim_base)) {
 		printk(KERN_INFO "SWIM device not found !\n");
 		ret = -ENODEV;
-		goto out_iounmap;
+		goto out_release_io;
 	}
 
 	/* set platform driver data */
@@ -931,7 +930,7 @@ static int swim_probe(struct platform_device *dev)
 	swd = kzalloc(sizeof(struct swim_priv), GFP_KERNEL);
 	if (!swd) {
 		ret = -ENOMEM;
-		goto out_iounmap;
+		goto out_release_io;
 	}
 	platform_set_drvdata(dev, swd);
 
@@ -945,8 +944,6 @@ static int swim_probe(struct platform_device *dev)
 
 out_kfree:
 	kfree(swd);
-out_iounmap:
-	iounmap(swim_base);
 out_release_io:
 	release_mem_region(res->start, resource_size(res));
 out:
@@ -974,8 +971,6 @@ static int swim_remove(struct platform_device *dev)
 	for (drive = 0; drive < swd->floppy_count; drive++)
 		floppy_eject(&swd->unit[drive]);
 
-	iounmap(swd->base);
-
 	res = platform_get_resource(dev, IORESOURCE_MEM, 0);
 	if (res)
 		release_mem_region(res->start, resource_size(res));
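A hedged note on the swim_probe() hunk: per the description in Finn's series, the SWIM register block on these Macs lies inside an address range the m68k kernel already has mapped, so the driver can use the resource's physical address as its __iomem cookie directly:

	/* before: swim_base = ioremap(res->start, resource_size(res));
	 * after:  swim_base = (struct swim __iomem *)res->start;
	 *
	 * valid only because this platform's MMIO region is already
	 * mapped; it also lets the out_iounmap error label and the
	 * iounmap() in swim_remove() disappear. */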
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -148,7 +148,7 @@ struct swim3 {
 #define MOTOR_ON	2
 #define RELAX		3	/* also eject in progress */
 #define READ_DATA_0	4
-#define TWOMEG_DRIVE	5
+#define ONEMEG_DRIVE	5
 #define SINGLE_SIDED	6	/* drive or diskette is 4MB type? */
 #define DRIVE_PRESENT	7
 #define DISK_IN		8
@@ -156,9 +156,9 @@ struct swim3 {
 #define TRACK_ZERO	10
 #define TACHO		11
 #define READ_DATA_1	12
-#define MFM_MODE	13
+#define GCR_MODE	13
 #define SEEK_COMPLETE	14
-#define ONEMEG_MEDIA	15
+#define TWOMEG_MEDIA	15
 
 /* Definitions of values used in writing and formatting */
 #define DATA_ESCAPE	0x99
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -2371,7 +2371,7 @@ static int cdrom_ioctl_media_changed(struct cdrom_device_info *cdi,
 	if (!CDROM_CAN(CDC_SELECT_DISC) || arg == CDSL_CURRENT)
 		return media_changed(cdi, 1);
 
-	if ((unsigned int)arg >= cdi->capacity)
+	if (arg >= cdi->capacity)
 		return -EINVAL;
 
 	info = kmalloc(sizeof(*info), GFP_KERNEL);
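Why the removed cast was an information leak, sketched with the widths involved (arg is an unsigned long in this ioctl path; the concrete value below is an illustration, not from the commit):

	unsigned long arg = 0x100000000UL;	/* attacker-chosen slot number */

	/* old check: (unsigned int)arg truncates to 0 on 64-bit, so the
	 * bounds test passes... */
	if ((unsigned int)arg >= cdi->capacity)
		return -EINVAL;

	/* ...but the full 64-bit value is used as the slot index later,
	 * indexing far past the allocation and copying kernel memory
	 * back to user space.  Comparing the untruncated value closes
	 * the hole. */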
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -9,6 +9,9 @@
 struct blk_mq_tags;
 struct blk_flush_queue;
 
+/**
+ * struct blk_mq_hw_ctx - State for a hardware queue facing the hardware block device
+ */
 struct blk_mq_hw_ctx {
 	struct {
 		spinlock_t		lock;
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -737,6 +737,7 @@ bool blk_queue_flag_test_and_clear(unsigned int flag, struct request_queue *q);
 #define blk_queue_quiesced(q)	test_bit(QUEUE_FLAG_QUIESCED, &(q)->queue_flags)
 #define blk_queue_preempt_only(q)				\
 	test_bit(QUEUE_FLAG_PREEMPT_ONLY, &(q)->queue_flags)
+#define blk_queue_fua(q)	test_bit(QUEUE_FLAG_FUA, &(q)->queue_flags)
 
 extern int blk_set_preempt_only(struct request_queue *q);
 extern void blk_clear_preempt_only(struct request_queue *q);
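Typical use for the new helper, per the O_DIRECT FUA motivation in the pull message: tag a write with REQ_FUA only when the device actually honors it, and otherwise fall back to an explicit cache flush after the write. A minimal sketch; the function name is hypothetical and not from Dave's series:

	static inline unsigned int dio_write_op_flags(struct request_queue *q)
	{
		/* a FUA write persists itself; without FUA support a
		 * separate flush must follow the write */
		return blk_queue_fua(q) ? (REQ_SYNC | REQ_FUA) : REQ_SYNC;
	}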