Commit 64748b16 authored by Joe Thornber, committed by Mike Snitzer

dm cache background tracker: limit amount of background work that may be issued at once

On large systems the cache policy can be over-enthusiastic and queue far
too much dirty data to be written back.  This consumes memory.
Signed-off-by: Joe Thornber <ejt@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
parent deb71918
drivers/md/dm-cache-background-tracker.c

@@ -161,8 +161,17 @@ EXPORT_SYMBOL_GPL(btracker_nr_demotions_queued);
 
 static bool max_work_reached(struct background_tracker *b)
 {
-	// FIXME: finish
-	return false;
+	return atomic_read(&b->pending_promotes) +
+		atomic_read(&b->pending_writebacks) +
+		atomic_read(&b->pending_demotes) >= b->max_work;
+}
+
+static struct bt_work *alloc_work(struct background_tracker *b)
+{
+	if (max_work_reached(b))
+		return NULL;
+
+	return kmem_cache_alloc(b->work_cache, GFP_NOWAIT);
 }
 
 int btracker_queue(struct background_tracker *b,
@@ -174,10 +183,7 @@ int btracker_queue(struct background_tracker *b,
 	if (pwork)
 		*pwork = NULL;
 
-	if (max_work_reached(b))
-		return -ENOMEM;
-
-	w = kmem_cache_alloc(b->work_cache, GFP_NOWAIT);
+	w = alloc_work(b);
 	if (!w)
 		return -ENOMEM;
...
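
The pattern in this diff is worth spelling out: the three pending counters are read with atomic_read() and checked before any allocation happens, so once the cap is hit the policy simply stops issuing new work instead of blocking or queueing more. Below is a minimal userspace sketch of the same bounded-work-queue idea, compressed to a single pending counter instead of the kernel's three per-type counters. All names here (tracker, tracker_alloc_work, MAX_WORK) are hypothetical illustrations, not the kernel API.

/* bounded_work.c - hypothetical illustration, not the kernel API. */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_WORK 16	/* cap on in-flight work items (assumed value) */

struct tracker {
	atomic_int pending;	/* items issued but not yet completed */
};

struct work {
	int id;
};

/* Analogue of max_work_reached(): true once the cap is hit. */
static int tracker_max_work_reached(struct tracker *t)
{
	return atomic_load(&t->pending) >= MAX_WORK;
}

/*
 * Analogue of alloc_work(): refuse before allocating, so the caller
 * sees a single failure point (NULL) whether the queue is full or
 * the allocator failed.
 */
static struct work *tracker_alloc_work(struct tracker *t, int id)
{
	struct work *w;

	if (tracker_max_work_reached(t))
		return NULL;

	w = malloc(sizeof(*w));
	if (!w)
		return NULL;

	w->id = id;
	atomic_fetch_add(&t->pending, 1);
	return w;
}

/* Completion path: release the slot so new work may be issued. */
static void tracker_complete_work(struct tracker *t, struct work *w)
{
	atomic_fetch_sub(&t->pending, 1);
	free(w);
}

int main(void)
{
	struct tracker t = { .pending = 0 };
	struct work *issued[MAX_WORK];
	int n = 0;

	/* Try to queue far more work than the cap allows. */
	for (int i = 0; i < 100; i++) {
		struct work *w = tracker_alloc_work(&t, i);
		if (!w)
			break;	/* cap reached: stop issuing, don't stall */
		issued[n++] = w;
	}
	printf("issued %d of 100 items (cap %d)\n", n, MAX_WORK);

	while (n > 0)
		tracker_complete_work(&t, issued[--n]);
	return 0;
}

Note that the check-then-increment is not atomic as a unit: concurrent callers can overshoot the cap by a few items. That is acceptable for throttling of this kind, where the bound only needs to be approximate to keep memory consumption in check.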