Commit 90679312 authored by Al Viro

ntfs_copy_from_user_iter(): don't bother with copying iov_iter

Advance the original, let the caller revert if it needs to.
Don't mess with iov_iter_single_seg_count() in the caller -
if we got a (non-zero) short copy, use the amount actually
copied for the next pass; limit it to "up to the end of
the page" if nothing got copied at all.

Originally fault-in only read the first iovec; back then it
made sense to limit the pass after a short copy to just one
iovec.  These days that's no longer true.
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
parent 6efb943b
fs/ntfs/file.c

@@ -1684,20 +1684,19 @@ static size_t ntfs_copy_from_user_iter(struct page **pages, unsigned nr_pages,
 {
 	struct page **last_page = pages + nr_pages;
 	size_t total = 0;
-	struct iov_iter data = *i;
 	unsigned len, copied;
 
 	do {
 		len = PAGE_SIZE - ofs;
 		if (len > bytes)
 			len = bytes;
-		copied = iov_iter_copy_from_user_atomic(*pages, &data, ofs,
+		copied = iov_iter_copy_from_user_atomic(*pages, i, ofs,
 				len);
+		iov_iter_advance(i, copied);
 		total += copied;
 		bytes -= copied;
 		if (!bytes)
 			break;
-		iov_iter_advance(&data, copied);
 		if (copied < len)
 			goto err;
 		ofs = 0;
@@ -1866,34 +1865,24 @@ static ssize_t ntfs_perform_write(struct file *file, struct iov_iter *i,
 		if (likely(copied == bytes)) {
 			status = ntfs_commit_pages_after_write(pages, do_pages,
 					pos, bytes);
-			if (!status)
-				status = bytes;
 		}
 		do {
 			unlock_page(pages[--do_pages]);
 			put_page(pages[do_pages]);
 		} while (do_pages);
-		if (unlikely(status < 0))
+		if (unlikely(status < 0)) {
+			iov_iter_revert(i, copied);
 			break;
-		copied = status;
+		}
 		cond_resched();
-		if (unlikely(!copied)) {
-			size_t sc;
-
-			/*
-			 * We failed to copy anything. Fall back to single
-			 * segment length write.
-			 *
-			 * This is needed to avoid possible livelock in the
-			 * case that all segments in the iov cannot be copied
-			 * at once without a pagefault.
-			 */
-			sc = iov_iter_single_seg_count(i);
-			if (bytes > sc)
-				bytes = sc;
+		if (unlikely(copied < bytes)) {
+			iov_iter_revert(i, copied);
+			if (copied)
+				bytes = copied;
+			else if (bytes > PAGE_SIZE - ofs)
+				bytes = PAGE_SIZE - ofs;
 			goto again;
 		}
-		iov_iter_advance(i, copied);
 		pos += copied;
 		written += copied;
 		balance_dirty_pages_ratelimited(mapping);
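For readers outside the kernel tree, here is a minimal userspace model of the contract the first hunk establishes. This is not from the commit: toy_iter, advance(), revert() and copy_step() are made-up names standing in for the iov_iter machinery. The point it illustrates is that the copy step now consumes the caller's iterator directly and advances it by exactly the number of bytes copied, so a caller that cannot use the result can undo a whole pass with a single revert of the running total.

#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct toy_iter {
	const char *buf;	/* backing data */
	size_t len;		/* bytes remaining */
	size_t off;		/* bytes consumed so far */
};

static void advance(struct toy_iter *it, size_t n)
{
	it->off += n;
	it->len -= n;
}

static void revert(struct toy_iter *it, size_t n)
{
	it->off -= n;
	it->len += n;
}

/* Copy up to len bytes; may fall short, like the atomic kernel copy. */
static size_t copy_step(char *dst, struct toy_iter *it, size_t len)
{
	if (len > it->len)
		len = it->len;
	memcpy(dst, it->buf + it->off, len);
	advance(it, len);	/* advance the original, as in the patch */
	return len;
}

int main(void)
{
	char page[16];
	struct toy_iter it = { "hello, iov_iter", 15, 0 };
	size_t copied = copy_step(page, &it, sizeof(page));

	/* A caller that cannot use the data undoes the whole pass. */
	revert(&it, copied);
	printf("copied %zu, offset back to %zu\n", copied, it.off);
	return 0;
}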
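The second hunk's retry policy can be read as a pure function of (bytes, copied, ofs). The sketch below is illustrative only; next_pass_len and TOY_PAGE_SIZE are hypothetical names, not kernel identifiers. After a short copy the caller reverts the iterator, then retries with the amount that actually copied, or, when nothing copied at all, clamps the attempt to the end of the current page. That clamp is what replaces the old iov_iter_single_seg_count() fallback: retrying a size that fits one page still guarantees forward progress after fault-in, which is all the single-segment limit was ever there for.

#include <assert.h>
#include <stddef.h>

#define TOY_PAGE_SIZE 4096u

static size_t next_pass_len(size_t bytes, size_t copied, size_t ofs)
{
	if (copied)		/* partial progress: retry just that much */
		return copied;
	if (bytes > TOY_PAGE_SIZE - ofs)
		return TOY_PAGE_SIZE - ofs;	/* nothing copied: stop at page end */
	return bytes;
}

int main(void)
{
	/* partial copy: next pass retries the amount that worked */
	assert(next_pass_len(8192, 100, 0) == 100);
	/* nothing copied: next pass is clamped to the page boundary */
	assert(next_pass_len(8192, 0, 1024) == TOY_PAGE_SIZE - 1024);
	/* nothing copied, request already within one page: unchanged */
	assert(next_pass_len(512, 0, 1024) == 512);
	return 0;
}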