Commit 243418e3 authored by Minchan Kim, committed by Linus Torvalds

mm: fs: invalidate bh_lrus for only cold path

The kernel test robot reported a regression in fio.write_iops [1] with
commit 8cc621d2 ("mm: fs: invalidate BH LRU during page migration").

Since lru_add_drain is called frequently, invalidating bh_lrus there could
increase the bh_lrus cache miss ratio, which ends up requiring more IO.

This patch moves the bh_lrus invalidation from the hot path (e.g.,
zap_page_range, pagevec_release) to the cold path (i.e., lru_add_drain_all,
lru_cache_disable).
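
For illustration, a minimal stand-alone C sketch of that hot/cold split (the
helpers here are simplified stand-ins, not the kernel implementation; the real
change is in the diff below, and drain_cpu_pagevecs is a hypothetical name):

#include <stdio.h>

/* hypothetical stand-in for draining the per-cpu pagevecs */
static void drain_cpu_pagevecs(void)     { puts("drain per-cpu pagevecs"); }
/* stand-in for dropping this cpu's buffer_head LRU */
static void invalidate_bh_lrus_cpu(void) { puts("invalidate this cpu's bh_lrus"); }

/* hot path: called frequently (e.g. zap_page_range, pagevec_release) */
static void lru_add_drain(void)
{
	drain_cpu_pagevecs();		/* cheap, leaves bh_lrus warm */
}

/* cold path: called rarely (lru_add_drain_all, lru_cache_disable) */
static void lru_add_and_bh_lrus_drain(void)
{
	drain_cpu_pagevecs();
	invalidate_bh_lrus_cpu();	/* bh_lrus dropped only here */
}

int main(void)
{
	lru_add_drain();		/* frequent, no bh_lrus cost */
	lru_add_and_bh_lrus_drain();	/* rare */
	return 0;
}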

Zhengjun Xing confirmed
 "I test the patch, the regression reduced to -2.9%"

[1] https://lore.kernel.org/lkml/20210520083144.GD14190@xsang-OptiPlex-9020/
[2] 8cc621d2, mm: fs: invalidate BH LRU during page migration

Link: https://lkml.kernel.org/r/20210907212347.1977686-1-minchan@kernel.org
Signed-off-by: Minchan Kim <minchan@kernel.org>
Reported-by: kernel test robot <oliver.sang@intel.com>
Reviewed-by: Chris Goldsworthy <cgoldswo@codeaurora.org>
Tested-by: "Xing, Zhengjun" <zhengjun.xing@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent b7cd9fa5
@@ -1425,12 +1425,16 @@ void invalidate_bh_lrus(void)
 }
 EXPORT_SYMBOL_GPL(invalidate_bh_lrus);
 
-void invalidate_bh_lrus_cpu(int cpu)
+/*
+ * It's called from workqueue context so we need a bh_lru_lock to close
+ * the race with preemption/irq.
+ */
+void invalidate_bh_lrus_cpu(void)
 {
 	struct bh_lru *b;
 
 	bh_lru_lock();
-	b = per_cpu_ptr(&bh_lrus, cpu);
+	b = this_cpu_ptr(&bh_lrus);
 	__invalidate_bh_lrus(b);
 	bh_lru_unlock();
 }
...
@@ -194,7 +194,7 @@ void __breadahead_gfp(struct block_device *, sector_t block, unsigned int size,
 struct buffer_head *__bread_gfp(struct block_device *,
 				sector_t block, unsigned size, gfp_t gfp);
 void invalidate_bh_lrus(void);
-void invalidate_bh_lrus_cpu(int cpu);
+void invalidate_bh_lrus_cpu(void);
 bool has_bh_in_lru(int cpu, void *dummy);
 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags);
 void free_buffer_head(struct buffer_head * bh);
@@ -408,7 +408,7 @@ static inline int inode_has_buffers(struct inode *inode) { return 0; }
 static inline void invalidate_inode_buffers(struct inode *inode) {}
 static inline int remove_inode_buffers(struct inode *inode) { return 1; }
 static inline int sync_mapping_buffers(struct address_space *mapping) { return 0; }
-static inline void invalidate_bh_lrus_cpu(int cpu) {}
+static inline void invalidate_bh_lrus_cpu(void) {}
 static inline bool has_bh_in_lru(int cpu, void *dummy) { return false; }
 #define buffer_heads_over_limit 0
...
@@ -620,7 +620,6 @@ void lru_add_drain_cpu(int cpu)
 		pagevec_lru_move_fn(pvec, lru_lazyfree_fn);
 
 	activate_page_drain(cpu);
-	invalidate_bh_lrus_cpu(cpu);
 }
 
 /**
@@ -703,6 +702,20 @@ void lru_add_drain(void)
 	local_unlock(&lru_pvecs.lock);
 }
 
+/*
+ * It's called from per-cpu workqueue context in SMP case so
+ * lru_add_drain_cpu and invalidate_bh_lrus_cpu should run on
+ * the same cpu. It shouldn't be a problem in !SMP case since
+ * the core is only one and the locks will disable preemption.
+ */
+static void lru_add_and_bh_lrus_drain(void)
+{
+	local_lock(&lru_pvecs.lock);
+	lru_add_drain_cpu(smp_processor_id());
+	local_unlock(&lru_pvecs.lock);
+	invalidate_bh_lrus_cpu();
+}
+
 void lru_add_drain_cpu_zone(struct zone *zone)
 {
 	local_lock(&lru_pvecs.lock);
@@ -717,7 +730,7 @@ static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);
 
 static void lru_add_drain_per_cpu(struct work_struct *dummy)
 {
-	lru_add_drain();
+	lru_add_and_bh_lrus_drain();
 }
 
 /*
@@ -858,7 +871,7 @@ void lru_cache_disable(void)
 	 */
 	__lru_add_drain_all(true);
 #else
-	lru_add_drain();
+	lru_add_and_bh_lrus_drain();
 #endif
 }
...