Commit 02ef12a6 authored by Johannes Thumshirn, committed by Jens Axboe

zonefs: use REQ_OP_ZONE_APPEND for sync DIO

Synchronous direct I/O to a sequential-write-only zone can be issued using
the new REQ_OP_ZONE_APPEND request operation. Because dispatching multiple
BIOs could result in them being reordered, we cannot support asynchronous
I/O via this interface.
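
For illustration only (this helper is not part of the patch; its name,
parameters, and single-page interface are invented for the example), a
minimal sketch of what issuing one synchronous zone-append BIO looks like
at the block layer:

/* Illustrative sketch only -- not from the patch below. */
#include <linux/bio.h>
#include <linux/blkdev.h>

static int zone_append_one_page_sync(struct block_device *bdev,
				     sector_t zone_start_sector,
				     struct page *page, unsigned int len)
{
	struct bio *bio;
	int ret;

	bio = bio_alloc(GFP_NOFS, 1);
	if (!bio)
		return -ENOMEM;

	bio_set_dev(bio, bdev);
	/* A zone-append BIO targets the zone start, not the write pointer. */
	bio->bi_iter.bi_sector = zone_start_sector;
	bio->bi_opf = REQ_OP_ZONE_APPEND | REQ_SYNC;
	if (bio_add_page(bio, page, len, 0) != len) {
		bio_put(bio);
		return -EINVAL;
	}

	/* Synchronous submission: only one BIO in flight, so no reordering. */
	ret = submit_bio_wait(bio);
	/*
	 * On success the device reports the sector where the data actually
	 * landed in bio->bi_iter.bi_sector; a caller would use that to
	 * update its file size / zone write pointer.
	 */
	bio_put(bio);
	return ret;
}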

We can also dispatch at most queue_max_zone_append_sectors() per zone-append
operation, so a short write is returned to user space whenever an I/O larger
than queue_max_zone_append_sectors() is issued.
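
As a user-space illustration (not part of the patch; the helper below is
hypothetical), a direct-I/O writer to a zonefs sequential file should be
prepared to resubmit the remainder after such a short write. The kernel
truncates the I/O to a filesystem-block-aligned length, so a block-aligned
buffer stays aligned across retries:

/* Hypothetical user-space helper, for illustration only. */
#include <errno.h>
#include <unistd.h>

static ssize_t write_full(int fd, const char *buf, size_t len)
{
	size_t done = 0;

	while (done < len) {
		ssize_t ret = write(fd, buf + done, len - done);

		if (ret < 0) {
			if (errno == EINTR)
				continue;
			return -1;	/* caller inspects errno */
		}
		if (ret == 0)		/* should not happen for regular files */
			break;
		/* Short write: the kernel capped the I/O, submit the rest. */
		done += (size_t)ret;
	}
	return (ssize_t)done;
}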
Signed-off-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Acked-by: Damien Le Moal <damien.lemoal@wdc.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 29b2a3aa
@@ -20,6 +20,7 @@
 #include <linux/mman.h>
 #include <linux/sched/mm.h>
 #include <linux/crc32.h>
+#include <linux/task_io_accounting_ops.h>
 
 #include "zonefs.h"
@@ -596,6 +597,61 @@ static const struct iomap_dio_ops zonefs_write_dio_ops = {
         .end_io         = zonefs_file_write_dio_end_io,
 };
 
+static ssize_t zonefs_file_dio_append(struct kiocb *iocb, struct iov_iter *from)
+{
+        struct inode *inode = file_inode(iocb->ki_filp);
+        struct zonefs_inode_info *zi = ZONEFS_I(inode);
+        struct block_device *bdev = inode->i_sb->s_bdev;
+        unsigned int max;
+        struct bio *bio;
+        ssize_t size;
+        int nr_pages;
+        ssize_t ret;
+
+        nr_pages = iov_iter_npages(from, BIO_MAX_PAGES);
+        if (!nr_pages)
+                return 0;
+
+        max = queue_max_zone_append_sectors(bdev_get_queue(bdev));
+        max = ALIGN_DOWN(max << SECTOR_SHIFT, inode->i_sb->s_blocksize);
+        iov_iter_truncate(from, max);
+
+        bio = bio_alloc_bioset(GFP_NOFS, nr_pages, &fs_bio_set);
+        if (!bio)
+                return -ENOMEM;
+
+        bio_set_dev(bio, bdev);
+        bio->bi_iter.bi_sector = zi->i_zsector;
+        bio->bi_write_hint = iocb->ki_hint;
+        bio->bi_ioprio = iocb->ki_ioprio;
+        bio->bi_opf = REQ_OP_ZONE_APPEND | REQ_SYNC | REQ_IDLE;
+        if (iocb->ki_flags & IOCB_DSYNC)
+                bio->bi_opf |= REQ_FUA;
+
+        ret = bio_iov_iter_get_pages(bio, from);
+        if (unlikely(ret)) {
+                bio_io_error(bio);
+                return ret;
+        }
+        size = bio->bi_iter.bi_size;
+        task_io_account_write(ret);
+
+        if (iocb->ki_flags & IOCB_HIPRI)
+                bio_set_polled(bio, iocb);
+
+        ret = submit_bio_wait(bio);
+
+        bio_put(bio);
+
+        zonefs_file_write_dio_end_io(iocb, size, ret, 0);
+        if (ret >= 0) {
+                iocb->ki_pos += size;
+                return size;
+        }
+
+        return ret;
+}
+
 /*
  * Handle direct writes. For sequential zone files, this is the only possible
  * write path. For these files, check that the user is issuing writes
@@ -611,6 +667,8 @@ static ssize_t zonefs_file_dio_write(struct kiocb *iocb, struct iov_iter *from)
         struct inode *inode = file_inode(iocb->ki_filp);
         struct zonefs_inode_info *zi = ZONEFS_I(inode);
         struct super_block *sb = inode->i_sb;
+        bool sync = is_sync_kiocb(iocb);
+        bool append = false;
         size_t count;
         ssize_t ret;
 
@@ -619,7 +677,7 @@ static ssize_t zonefs_file_dio_write(struct kiocb *iocb, struct iov_iter *from)
          * as this can cause write reordering (e.g. the first aio gets EAGAIN
          * on the inode lock but the second goes through but is now unaligned).
          */
-        if (zi->i_ztype == ZONEFS_ZTYPE_SEQ && !is_sync_kiocb(iocb) &&
+        if (zi->i_ztype == ZONEFS_ZTYPE_SEQ && !sync &&
             (iocb->ki_flags & IOCB_NOWAIT))
                 return -EOPNOTSUPP;
 
@@ -643,16 +701,22 @@ static ssize_t zonefs_file_dio_write(struct kiocb *iocb, struct iov_iter *from)
         }
 
         /* Enforce sequential writes (append only) in sequential zones */
-        mutex_lock(&zi->i_truncate_mutex);
-        if (zi->i_ztype == ZONEFS_ZTYPE_SEQ && iocb->ki_pos != zi->i_wpoffset) {
+        if (zi->i_ztype == ZONEFS_ZTYPE_SEQ) {
+                mutex_lock(&zi->i_truncate_mutex);
+                if (iocb->ki_pos != zi->i_wpoffset) {
+                        mutex_unlock(&zi->i_truncate_mutex);
+                        ret = -EINVAL;
+                        goto inode_unlock;
+                }
                 mutex_unlock(&zi->i_truncate_mutex);
-                ret = -EINVAL;
-                goto inode_unlock;
+                append = sync;
         }
-        mutex_unlock(&zi->i_truncate_mutex);
 
-        ret = iomap_dio_rw(iocb, from, &zonefs_iomap_ops,
-                           &zonefs_write_dio_ops, is_sync_kiocb(iocb));
+        if (append)
+                ret = zonefs_file_dio_append(iocb, from);
+        else
+                ret = iomap_dio_rw(iocb, from, &zonefs_iomap_ops,
+                                   &zonefs_write_dio_ops, sync);
         if (zi->i_ztype == ZONEFS_ZTYPE_SEQ &&
             (ret > 0 || ret == -EIOCBQUEUED)) {
                 if (ret > 0)