Commit 33c0dd78 authored by Dave Chinner, committed by Darrick J. Wong

xfs: move the CIL workqueue to the CIL

We only use the CIL workqueue in the CIL, so it makes no sense to
hang it off the xfs_mount and have to walk multiple pointers back up
to the mount when we have the CIL structures right there.
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
Signed-off-by: Darrick J. Wong <djwong@kernel.org>
parent 39823d0f
...@@ -1151,7 +1151,7 @@ xlog_cil_push_background( ...@@ -1151,7 +1151,7 @@ xlog_cil_push_background(
spin_lock(&cil->xc_push_lock); spin_lock(&cil->xc_push_lock);
if (cil->xc_push_seq < cil->xc_current_sequence) { if (cil->xc_push_seq < cil->xc_current_sequence) {
cil->xc_push_seq = cil->xc_current_sequence; cil->xc_push_seq = cil->xc_current_sequence;
queue_work(log->l_mp->m_cil_workqueue, &cil->xc_ctx->push_work); queue_work(cil->xc_push_wq, &cil->xc_ctx->push_work);
} }
/* /*
...@@ -1217,7 +1217,7 @@ xlog_cil_push_now( ...@@ -1217,7 +1217,7 @@ xlog_cil_push_now(
/* start on any pending background push to minimise wait time on it */ /* start on any pending background push to minimise wait time on it */
if (!async) if (!async)
flush_workqueue(log->l_mp->m_cil_workqueue); flush_workqueue(cil->xc_push_wq);
/* /*
* If the CIL is empty or we've already pushed the sequence then * If the CIL is empty or we've already pushed the sequence then
...@@ -1231,7 +1231,7 @@ xlog_cil_push_now( ...@@ -1231,7 +1231,7 @@ xlog_cil_push_now(
cil->xc_push_seq = push_seq; cil->xc_push_seq = push_seq;
cil->xc_push_commit_stable = async; cil->xc_push_commit_stable = async;
queue_work(log->l_mp->m_cil_workqueue, &cil->xc_ctx->push_work); queue_work(cil->xc_push_wq, &cil->xc_ctx->push_work);
spin_unlock(&cil->xc_push_lock); spin_unlock(&cil->xc_push_lock);
} }
...@@ -1470,6 +1470,15 @@ xlog_cil_init( ...@@ -1470,6 +1470,15 @@ xlog_cil_init(
cil = kmem_zalloc(sizeof(*cil), KM_MAYFAIL); cil = kmem_zalloc(sizeof(*cil), KM_MAYFAIL);
if (!cil) if (!cil)
return -ENOMEM; return -ENOMEM;
/*
* Limit the CIL pipeline depth to 4 concurrent works to bound the
* concurrency the log spinlocks will be exposed to.
*/
cil->xc_push_wq = alloc_workqueue("xfs-cil/%s",
XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM | WQ_UNBOUND),
4, log->l_mp->m_super->s_id);
if (!cil->xc_push_wq)
goto out_destroy_cil;
INIT_LIST_HEAD(&cil->xc_cil); INIT_LIST_HEAD(&cil->xc_cil);
INIT_LIST_HEAD(&cil->xc_committing); INIT_LIST_HEAD(&cil->xc_committing);
...@@ -1486,6 +1495,10 @@ xlog_cil_init( ...@@ -1486,6 +1495,10 @@ xlog_cil_init(
xlog_cil_ctx_switch(cil, ctx); xlog_cil_ctx_switch(cil, ctx);
return 0; return 0;
out_destroy_cil:
kmem_free(cil);
return -ENOMEM;
} }
void void
...@@ -1499,6 +1512,7 @@ xlog_cil_destroy( ...@@ -1499,6 +1512,7 @@ xlog_cil_destroy(
} }
ASSERT(list_empty(&log->l_cilp->xc_cil)); ASSERT(list_empty(&log->l_cilp->xc_cil));
destroy_workqueue(log->l_cilp->xc_push_wq);
kmem_free(log->l_cilp); kmem_free(log->l_cilp);
} }
...@@ -272,6 +272,7 @@ struct xfs_cil { ...@@ -272,6 +272,7 @@ struct xfs_cil {
struct xlog *xc_log; struct xlog *xc_log;
struct list_head xc_cil; struct list_head xc_cil;
spinlock_t xc_cil_lock; spinlock_t xc_cil_lock;
struct workqueue_struct *xc_push_wq;
struct rw_semaphore xc_ctx_lock ____cacheline_aligned_in_smp; struct rw_semaphore xc_ctx_lock ____cacheline_aligned_in_smp;
struct xfs_cil_ctx *xc_ctx; struct xfs_cil_ctx *xc_ctx;
......
...@@ -107,7 +107,6 @@ typedef struct xfs_mount { ...@@ -107,7 +107,6 @@ typedef struct xfs_mount {
struct xfs_mru_cache *m_filestream; /* per-mount filestream data */ struct xfs_mru_cache *m_filestream; /* per-mount filestream data */
struct workqueue_struct *m_buf_workqueue; struct workqueue_struct *m_buf_workqueue;
struct workqueue_struct *m_unwritten_workqueue; struct workqueue_struct *m_unwritten_workqueue;
struct workqueue_struct *m_cil_workqueue;
struct workqueue_struct *m_reclaim_workqueue; struct workqueue_struct *m_reclaim_workqueue;
struct workqueue_struct *m_sync_workqueue; struct workqueue_struct *m_sync_workqueue;
struct workqueue_struct *m_blockgc_wq; struct workqueue_struct *m_blockgc_wq;
......
...@@ -518,21 +518,11 @@ xfs_init_mount_workqueues( ...@@ -518,21 +518,11 @@ xfs_init_mount_workqueues(
if (!mp->m_unwritten_workqueue) if (!mp->m_unwritten_workqueue)
goto out_destroy_buf; goto out_destroy_buf;
/*
* Limit the CIL pipeline depth to 4 concurrent works to bound the
* concurrency the log spinlocks will be exposed to.
*/
mp->m_cil_workqueue = alloc_workqueue("xfs-cil/%s",
XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM | WQ_UNBOUND),
4, mp->m_super->s_id);
if (!mp->m_cil_workqueue)
goto out_destroy_unwritten;
mp->m_reclaim_workqueue = alloc_workqueue("xfs-reclaim/%s", mp->m_reclaim_workqueue = alloc_workqueue("xfs-reclaim/%s",
XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM), XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
0, mp->m_super->s_id); 0, mp->m_super->s_id);
if (!mp->m_reclaim_workqueue) if (!mp->m_reclaim_workqueue)
goto out_destroy_cil; goto out_destroy_unwritten;
mp->m_blockgc_wq = alloc_workqueue("xfs-blockgc/%s", mp->m_blockgc_wq = alloc_workqueue("xfs-blockgc/%s",
XFS_WQFLAGS(WQ_UNBOUND | WQ_FREEZABLE | WQ_MEM_RECLAIM), XFS_WQFLAGS(WQ_UNBOUND | WQ_FREEZABLE | WQ_MEM_RECLAIM),
...@@ -559,8 +549,6 @@ xfs_init_mount_workqueues( ...@@ -559,8 +549,6 @@ xfs_init_mount_workqueues(
destroy_workqueue(mp->m_blockgc_wq); destroy_workqueue(mp->m_blockgc_wq);
out_destroy_reclaim: out_destroy_reclaim:
destroy_workqueue(mp->m_reclaim_workqueue); destroy_workqueue(mp->m_reclaim_workqueue);
out_destroy_cil:
destroy_workqueue(mp->m_cil_workqueue);
out_destroy_unwritten: out_destroy_unwritten:
destroy_workqueue(mp->m_unwritten_workqueue); destroy_workqueue(mp->m_unwritten_workqueue);
out_destroy_buf: out_destroy_buf:
...@@ -577,7 +565,6 @@ xfs_destroy_mount_workqueues( ...@@ -577,7 +565,6 @@ xfs_destroy_mount_workqueues(
destroy_workqueue(mp->m_blockgc_wq); destroy_workqueue(mp->m_blockgc_wq);
destroy_workqueue(mp->m_inodegc_wq); destroy_workqueue(mp->m_inodegc_wq);
destroy_workqueue(mp->m_reclaim_workqueue); destroy_workqueue(mp->m_reclaim_workqueue);
destroy_workqueue(mp->m_cil_workqueue);
destroy_workqueue(mp->m_unwritten_workqueue); destroy_workqueue(mp->m_unwritten_workqueue);
destroy_workqueue(mp->m_buf_workqueue); destroy_workqueue(mp->m_buf_workqueue);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment