Commit 020b3023 authored by Linus Torvalds's avatar Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:
 "Three small fixes:

   - A fix for skd, it was using kfree() to free a structure allocated
     with kmem_cache_alloc().

   - Stable fix for nbd, fixing a regression using the normal ioctl
     based tools.

   - Fix for a previous fix in this series, that fixed up
     inconsistencies between buffered and direct IO"

* 'for-linus' of git://git.kernel.dk/linux-block:
  fs: Avoid invalidation in interrupt context in dio_complete()
  nbd: don't set the device size until we're connected
  skd: Use kmem_cache_free
parents 3e0cc09a ffe51f01
...@@ -243,7 +243,6 @@ static void nbd_size_set(struct nbd_device *nbd, loff_t blocksize, ...@@ -243,7 +243,6 @@ static void nbd_size_set(struct nbd_device *nbd, loff_t blocksize,
struct nbd_config *config = nbd->config; struct nbd_config *config = nbd->config;
config->blksize = blocksize; config->blksize = blocksize;
config->bytesize = blocksize * nr_blocks; config->bytesize = blocksize * nr_blocks;
nbd_size_update(nbd);
} }
static void nbd_complete_rq(struct request *req) static void nbd_complete_rq(struct request *req)
...@@ -1094,6 +1093,7 @@ static int nbd_start_device(struct nbd_device *nbd) ...@@ -1094,6 +1093,7 @@ static int nbd_start_device(struct nbd_device *nbd)
args->index = i; args->index = i;
queue_work(recv_workqueue, &args->work); queue_work(recv_workqueue, &args->work);
} }
nbd_size_update(nbd);
return error; return error;
} }
......
...@@ -2604,7 +2604,7 @@ static void *skd_alloc_dma(struct skd_device *skdev, struct kmem_cache *s, ...@@ -2604,7 +2604,7 @@ static void *skd_alloc_dma(struct skd_device *skdev, struct kmem_cache *s,
return NULL; return NULL;
*dma_handle = dma_map_single(dev, buf, s->size, dir); *dma_handle = dma_map_single(dev, buf, s->size, dir);
if (dma_mapping_error(dev, *dma_handle)) { if (dma_mapping_error(dev, *dma_handle)) {
kfree(buf); kmem_cache_free(s, buf);
buf = NULL; buf = NULL;
} }
return buf; return buf;
......
...@@ -44,6 +44,12 @@ ...@@ -44,6 +44,12 @@
*/ */
#define DIO_PAGES 64 #define DIO_PAGES 64
/*
* Flags for dio_complete()
*/
#define DIO_COMPLETE_ASYNC 0x01 /* This is async IO */
#define DIO_COMPLETE_INVALIDATE 0x02 /* Can invalidate pages */
/* /*
* This code generally works in units of "dio_blocks". A dio_block is * This code generally works in units of "dio_blocks". A dio_block is
* somewhere between the hard sector size and the filesystem block size. it * somewhere between the hard sector size and the filesystem block size. it
...@@ -225,7 +231,7 @@ static inline struct page *dio_get_page(struct dio *dio, ...@@ -225,7 +231,7 @@ static inline struct page *dio_get_page(struct dio *dio,
* filesystems can use it to hold additional state between get_block calls and * filesystems can use it to hold additional state between get_block calls and
* dio_complete. * dio_complete.
*/ */
static ssize_t dio_complete(struct dio *dio, ssize_t ret, bool is_async) static ssize_t dio_complete(struct dio *dio, ssize_t ret, unsigned int flags)
{ {
loff_t offset = dio->iocb->ki_pos; loff_t offset = dio->iocb->ki_pos;
ssize_t transferred = 0; ssize_t transferred = 0;
...@@ -266,7 +272,8 @@ static ssize_t dio_complete(struct dio *dio, ssize_t ret, bool is_async) ...@@ -266,7 +272,8 @@ static ssize_t dio_complete(struct dio *dio, ssize_t ret, bool is_async)
* one is a pretty crazy thing to do, so we don't support it 100%. If * one is a pretty crazy thing to do, so we don't support it 100%. If
* this invalidation fails, tough, the write still worked... * this invalidation fails, tough, the write still worked...
*/ */
if (ret > 0 && dio->op == REQ_OP_WRITE && if (flags & DIO_COMPLETE_INVALIDATE &&
ret > 0 && dio->op == REQ_OP_WRITE &&
dio->inode->i_mapping->nrpages) { dio->inode->i_mapping->nrpages) {
err = invalidate_inode_pages2_range(dio->inode->i_mapping, err = invalidate_inode_pages2_range(dio->inode->i_mapping,
offset >> PAGE_SHIFT, offset >> PAGE_SHIFT,
...@@ -285,7 +292,7 @@ static ssize_t dio_complete(struct dio *dio, ssize_t ret, bool is_async) ...@@ -285,7 +292,7 @@ static ssize_t dio_complete(struct dio *dio, ssize_t ret, bool is_async)
if (!(dio->flags & DIO_SKIP_DIO_COUNT)) if (!(dio->flags & DIO_SKIP_DIO_COUNT))
inode_dio_end(dio->inode); inode_dio_end(dio->inode);
if (is_async) { if (flags & DIO_COMPLETE_ASYNC) {
/* /*
* generic_write_sync expects ki_pos to have been updated * generic_write_sync expects ki_pos to have been updated
* already, but the submission path only does this for * already, but the submission path only does this for
...@@ -306,7 +313,7 @@ static void dio_aio_complete_work(struct work_struct *work) ...@@ -306,7 +313,7 @@ static void dio_aio_complete_work(struct work_struct *work)
{ {
struct dio *dio = container_of(work, struct dio, complete_work); struct dio *dio = container_of(work, struct dio, complete_work);
dio_complete(dio, 0, true); dio_complete(dio, 0, DIO_COMPLETE_ASYNC | DIO_COMPLETE_INVALIDATE);
} }
static blk_status_t dio_bio_complete(struct dio *dio, struct bio *bio); static blk_status_t dio_bio_complete(struct dio *dio, struct bio *bio);
...@@ -348,7 +355,7 @@ static void dio_bio_end_aio(struct bio *bio) ...@@ -348,7 +355,7 @@ static void dio_bio_end_aio(struct bio *bio)
queue_work(dio->inode->i_sb->s_dio_done_wq, queue_work(dio->inode->i_sb->s_dio_done_wq,
&dio->complete_work); &dio->complete_work);
} else { } else {
dio_complete(dio, 0, true); dio_complete(dio, 0, DIO_COMPLETE_ASYNC);
} }
} }
} }
...@@ -1360,7 +1367,7 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode, ...@@ -1360,7 +1367,7 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
dio_await_completion(dio); dio_await_completion(dio);
if (drop_refcount(dio) == 0) { if (drop_refcount(dio) == 0) {
retval = dio_complete(dio, retval, false); retval = dio_complete(dio, retval, DIO_COMPLETE_INVALIDATE);
} else } else
BUG_ON(retval != -EIOCBQUEUED); BUG_ON(retval != -EIOCBQUEUED);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment