Commit 20c2474f authored by Linus Torvalds

Merge tag 'vfs-6.12-rc2.fixes.2' of git://git.kernel.org/pub/scm/linux/kernel/git/vfs/vfs

Pull vfs fixes from Christian Brauner:
 "vfs:

   - Ensure that iter_folioq_get_pages() advances to the next slot
     otherwise it will end up using the same folio with an out-of-bound
     offset.

  iomap:

   - Don't unshare delalloc extents which can't be reflinked, and thus
     can't be shared.

   - Constrain the file range passed to iomap_file_unshare() directly in
     iomap instead of requiring the callers to do it.

  netfs:

   - Use folioq_count instead of folioq_nr_slots to prevent an
     uninitialized value warning in netfs_clear_buffer().

   - Fix missing wakeup after issuing writes by scheduling the write
     collector only if all the subrequest queues are empty and thus no
     writes are pending.

   - Fix two minor documentation bugs"

* tag 'vfs-6.12-rc2.fixes.2' of git://git.kernel.org/pub/scm/linux/kernel/git/vfs/vfs:
  iomap: constrain the file range passed to iomap_file_unshare
  iomap: don't bother unsharing delalloc extents
  netfs: Fix missing wakeup after issuing writes
  Documentation: add missing folio_queue entry
  folio_queue: fix documentation
  netfs: Fix a KMSAN uninit-value error in netfs_clear_buffer
  iov_iter: fix advancing slot in iter_folioq_get_pages()
parents 7ec46210 a311a08a
--- a/Documentation/core-api/index.rst
+++ b/Documentation/core-api/index.rst
@@ -37,6 +37,7 @@ Library functionality that is used throughout the kernel.
    kref
    cleanup
    assoc_array
+   folio_queue
    xarray
    maple_tree
    idr
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -1305,11 +1305,15 @@ int dax_file_unshare(struct inode *inode, loff_t pos, loff_t len,
 	struct iomap_iter iter = {
 		.inode = inode,
 		.pos = pos,
-		.len = len,
 		.flags = IOMAP_WRITE | IOMAP_UNSHARE | IOMAP_DAX,
 	};
+	loff_t size = i_size_read(inode);
 	int ret;
 
+	if (pos < 0 || pos >= size)
+		return 0;
+
+	iter.len = min(len, size - pos);
 	while ((ret = iomap_iter(&iter, ops)) > 0)
 		iter.processed = dax_unshare_iter(&iter);
 	return ret;
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -1321,7 +1321,7 @@ static loff_t iomap_unshare_iter(struct iomap_iter *iter)
 		return length;
 
 	/*
-	 * Don't bother with holes or unwritten extents.
+	 * Don't bother with delalloc reservations, holes or unwritten extents.
 	 *
 	 * Note that we use srcmap directly instead of iomap_iter_srcmap as
 	 * unsharing requires providing a separate source map, and the presence
@@ -1330,6 +1330,7 @@ static loff_t iomap_unshare_iter(struct iomap_iter *iter)
 	 * fork for XFS.
 	 */
 	if (iter->srcmap.type == IOMAP_HOLE ||
+	    iter->srcmap.type == IOMAP_DELALLOC ||
 	    iter->srcmap.type == IOMAP_UNWRITTEN)
 		return length;
@@ -1374,11 +1375,15 @@ iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len,
 	struct iomap_iter iter = {
 		.inode = inode,
 		.pos = pos,
-		.len = len,
 		.flags = IOMAP_WRITE | IOMAP_UNSHARE,
 	};
+	loff_t size = i_size_read(inode);
 	int ret;
 
+	if (pos < 0 || pos >= size)
+		return 0;
+
+	iter.len = min(len, size - pos);
 	while ((ret = iomap_iter(&iter, ops)) > 0)
 		iter.processed = iomap_unshare_iter(&iter);
 	return ret;
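
Both unshare paths above (dax_file_unshare() and iomap_file_unshare()) now apply the same clamp: a request starting at or beyond EOF becomes a no-op, and one crossing EOF is truncated to it. A minimal userspace sketch of that arithmetic (clamp_unshare_range and min_ll are made-up helpers for illustration, not kernel code):

#include <stdio.h>

typedef long long loff_t;	/* simplified stand-in for the kernel type */

static loff_t min_ll(loff_t a, loff_t b)
{
	return a < b ? a : b;
}

/*
 * Model of the clamp both hunks add: a range starting at or past EOF
 * is a no-op, and a range crossing EOF is truncated to it.
 */
static loff_t clamp_unshare_range(loff_t pos, loff_t len, loff_t i_size)
{
	if (pos < 0 || pos >= i_size)
		return 0;			/* nothing to unshare */
	return min_ll(len, i_size - pos);
}

int main(void)
{
	/* With i_size = 100: a range crossing EOF is truncated to 20... */
	printf("%lld\n", clamp_unshare_range(80, 50, 100));
	/* ...and a range entirely past EOF yields 0 without iterating. */
	printf("%lld\n", clamp_unshare_range(120, 10, 100));
	return 0;
}
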
--- a/fs/netfs/misc.c
+++ b/fs/netfs/misc.c
@@ -102,7 +102,7 @@ void netfs_clear_buffer(struct netfs_io_request *rreq)
 	while ((p = rreq->buffer)) {
 		rreq->buffer = p->next;
-		for (int slot = 0; slot < folioq_nr_slots(p); slot++) {
+		for (int slot = 0; slot < folioq_count(p); slot++) {
 			struct folio *folio = folioq_folio(p, slot);
 
 			if (!folio)
 				continue;
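
The KMSAN report came from iterating to the segment's capacity rather than its fill level: folioq_nr_slots() is the fixed number of slots a segment holds, while folioq_count() is how many have actually been filled, so slots past the count were never written. A toy model of the distinction (struct toy_folioq and its helpers are invented for illustration; the capacity of 8 is arbitrary, and unlike the kernel's unzeroed allocation, the designated initializer here zeroes the unused slots):

#include <stdio.h>

#define SLOTS_PER_SEGMENT 8	/* arbitrary capacity for the toy */

struct toy_folioq {
	void *slots[SLOTS_PER_SEGMENT];
	unsigned int nr;	/* slots actually occupied */
};

static unsigned int toy_folioq_nr_slots(const struct toy_folioq *q)
{
	(void)q;
	return SLOTS_PER_SEGMENT;	/* capacity, regardless of contents */
}

static unsigned int toy_folioq_count(const struct toy_folioq *q)
{
	return q->nr;			/* only the filled slots */
}

int main(void)
{
	struct toy_folioq q = { .nr = 3 };

	q.slots[0] = q.slots[1] = q.slots[2] = &q;

	/* Looping to the capacity would touch 5 never-filled slots;
	 * looping to the count stays within initialised data. */
	printf("capacity=%u occupied=%u\n",
	       toy_folioq_nr_slots(&q), toy_folioq_count(&q));
	return 0;
}
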
--- a/fs/netfs/write_issue.c
+++ b/fs/netfs/write_issue.c
@@ -508,6 +508,30 @@ static int netfs_write_folio(struct netfs_io_request *wreq,
 	return 0;
 }
 
+/*
+ * End the issuing of writes, letting the collector know we're done.
+ */
+static void netfs_end_issue_write(struct netfs_io_request *wreq)
+{
+	bool needs_poke = true;
+
+	smp_wmb(); /* Write subreq lists before ALL_QUEUED. */
+	set_bit(NETFS_RREQ_ALL_QUEUED, &wreq->flags);
+
+	for (int s = 0; s < NR_IO_STREAMS; s++) {
+		struct netfs_io_stream *stream = &wreq->io_streams[s];
+
+		if (!stream->active)
+			continue;
+		if (!list_empty(&stream->subrequests))
+			needs_poke = false;
+		netfs_issue_write(wreq, stream);
+	}
+
+	if (needs_poke)
+		netfs_wake_write_collector(wreq, false);
+}
+
 /*
  * Write some of the pending data back to the server
  */
@@ -559,10 +583,7 @@ int netfs_writepages(struct address_space *mapping,
 			break;
 	} while ((folio = writeback_iter(mapping, wbc, folio, &error)));
 
-	for (int s = 0; s < NR_IO_STREAMS; s++)
-		netfs_issue_write(wreq, &wreq->io_streams[s]);
-	smp_wmb(); /* Write lists before ALL_QUEUED. */
-	set_bit(NETFS_RREQ_ALL_QUEUED, &wreq->flags);
+	netfs_end_issue_write(wreq);
 
 	mutex_unlock(&ictx->wb_lock);
@@ -650,10 +671,7 @@ int netfs_end_writethrough(struct netfs_io_request *wreq, struct writeback_contr
 	if (writethrough_cache)
 		netfs_write_folio(wreq, wbc, writethrough_cache);
 
-	netfs_issue_write(wreq, &wreq->io_streams[0]);
-	netfs_issue_write(wreq, &wreq->io_streams[1]);
-	smp_wmb(); /* Write lists before ALL_QUEUED. */
-	set_bit(NETFS_RREQ_ALL_QUEUED, &wreq->flags);
+	netfs_end_issue_write(wreq);
 
 	mutex_unlock(&ictx->wb_lock);
@@ -699,13 +717,7 @@ int netfs_unbuffered_write(struct netfs_io_request *wreq, bool may_wait, size_t
 			break;
 		}
 
-	netfs_issue_write(wreq, upload);
-
-	smp_wmb(); /* Write lists before ALL_QUEUED. */
-	set_bit(NETFS_RREQ_ALL_QUEUED, &wreq->flags);
-	if (list_empty(&upload->subrequests))
-		netfs_wake_write_collector(wreq, false);
+	netfs_end_issue_write(wreq);
 
 	_leave(" = %d", error);
 	return error;
 }
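
The missing-wakeup fix hinges on the needs_poke logic in the new helper: if any active stream still has subrequests queued, their completion path will run the collector anyway, but if every queue is already empty by the time ALL_QUEUED is set, nothing else will ever wake it. A toy model of that decision (struct toy_stream and needs_explicit_wakeup are invented for illustration):

#include <stdbool.h>
#include <stdio.h>

#define NR_STREAMS 2

struct toy_stream {
	bool active;
	int queued;	/* pending subrequests */
};

/*
 * Mirror of the decision in netfs_end_issue_write(): poke the collector
 * explicitly only if no active stream has work left whose completion
 * would wake it.
 */
static bool needs_explicit_wakeup(const struct toy_stream *s, int n)
{
	bool needs_poke = true;

	for (int i = 0; i < n; i++) {
		if (!s[i].active)
			continue;
		if (s[i].queued > 0)
			needs_poke = false;	/* completion will wake it */
	}
	return needs_poke;
}

int main(void)
{
	struct toy_stream busy[NR_STREAMS] = { { true, 2 }, { false, 0 } };
	struct toy_stream idle[NR_STREAMS] = { { true, 0 }, { false, 0 } };

	/* busy: pending subrequests exist, so no poke needed (0) */
	printf("busy: poke=%d\n", needs_explicit_wakeup(busy, NR_STREAMS));
	/* idle: all queues empty, poke now or wait forever (1) */
	printf("idle: poke=%d\n", needs_explicit_wakeup(idle, NR_STREAMS));
	return 0;
}
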
--- a/include/linux/folio_queue.h
+++ b/include/linux/folio_queue.h
@@ -81,7 +81,7 @@ static inline unsigned int folioq_count(struct folio_queue *folioq)
 }
 
 /**
- * folioq_count: Query if a folio queue segment is full
+ * folioq_full: Query if a folio queue segment is full
  * @folioq: The segment to query
  *
  * Query if a folio queue segment is fully occupied. Note that this does not
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -1033,7 +1033,7 @@ static ssize_t iter_folioq_get_pages(struct iov_iter *iter,
 		if (maxpages == 0 || extracted >= maxsize)
 			break;
 
-		if (offset >= fsize) {
+		if (iov_offset >= fsize) {
 			iov_offset = 0;
 			slot++;
 			if (slot == folioq_nr_slots(folioq) && folioq->next) {
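
The one-character fix above matters because offset is a stale snapshot of iov_offset taken before the current step, so the advance check fired one iteration late and the next read reused the old folio with an out-of-bounds offset. A toy walk over 4-byte "folios" showing both behaviours (all names here are invented for illustration):

#include <stdio.h>

#define FOLIO_SIZE 4

/* Walk a queue of 4-byte folios in 2-byte steps, advancing the slot at
 * each folio boundary. With the stale pre-step offset (the bug), the
 * advance fires one step late, after reading slot 0 at offset 4, which
 * is past the folio; with the updated offset (the fix), the walk moves
 * to the next slot exactly at the boundary. */
static void walk(int use_stale_offset)
{
	unsigned int slot = 0;
	unsigned int iov_offset = 0;

	for (int step = 0; step < 4; step++) {
		unsigned int offset = iov_offset;	/* stale snapshot */

		printf("  read slot=%u offset=%u\n", slot, offset);
		iov_offset += 2;			/* consume 2 bytes */

		unsigned int check = use_stale_offset ? offset : iov_offset;
		if (check >= FOLIO_SIZE) {		/* next folio? */
			iov_offset = 0;
			slot++;
		}
	}
}

int main(void)
{
	puts("buggy (stale offset):");	/* reads slot 0 at offset 4 */
	walk(1);
	puts("fixed (current offset):");	/* advances at the boundary */
	walk(0);
	return 0;
}
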