Commit 22de489d authored by David Howells, committed by Christian Brauner

netfs: Use bh-disabling spinlocks for rreq->lock

Use bh-disabling spinlocks when accessing rreq->lock because, in the
future, it may be twiddled from softirq context when cleanup is driven from
cache backend DIO completion.
Signed-off-by: David Howells <dhowells@redhat.com>
cc: Jeff Layton <jlayton@kernel.org>
cc: netfs@lists.linux.dev
cc: linux-fsdevel@vger.kernel.org
Link: https://lore.kernel.org/r/20240814203850.2240469-12-dhowells@redhat.com/ # v2
Signed-off-by: Christian Brauner <brauner@kernel.org>
parent 24c90a79
......@@ -473,7 +473,7 @@ static void netfs_collect_write_results(struct netfs_io_request *wreq)
cancel:
/* Remove if completely consumed. */
spin_lock(&wreq->lock);
spin_lock_bh(&wreq->lock);
remove = front;
list_del_init(&front->rreq_link);
......@@ -489,7 +489,7 @@ static void netfs_collect_write_results(struct netfs_io_request *wreq)
}
}
spin_unlock(&wreq->lock);
spin_unlock_bh(&wreq->lock);
netfs_put_subrequest(remove, false,
notes & SAW_FAILURE ?
netfs_sreq_trace_put_cancel :
......
......@@ -191,7 +191,7 @@ static void netfs_prepare_write(struct netfs_io_request *wreq,
* the list. The collector only goes nextwards and uses the lock to
* remove entries off of the front.
*/
spin_lock(&wreq->lock);
spin_lock_bh(&wreq->lock);
list_add_tail(&subreq->rreq_link, &stream->subrequests);
if (list_is_first(&subreq->rreq_link, &stream->subrequests)) {
stream->front = subreq;
......@@ -202,7 +202,7 @@ static void netfs_prepare_write(struct netfs_io_request *wreq,
}
}
spin_unlock(&wreq->lock);
spin_unlock_bh(&wreq->lock);
stream->construct = subreq;
}
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment