Commit 30c92ffe authored by Kent Overstreet

bcachefs: Better inlining in bch2_time_stats_update()

Move the actual slowpath off into a new function -
bch2_time_stats_clear_buffer() - and inline
bch2_time_stats_update_one().

Also, use the new inlined update functions from mean_and_variance.
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent c96f108b
@@ -319,7 +319,7 @@ static void bch2_quantiles_update(struct bch2_quantiles *q, u64 v)
 	}
 }
 
-static void bch2_time_stats_update_one(struct bch2_time_stats *stats,
+static inline void bch2_time_stats_update_one(struct bch2_time_stats *stats,
 				       u64 start, u64 end)
 {
 	u64 duration, freq;
@@ -343,6 +343,22 @@ static void bch2_time_stats_update_one(struct bch2_time_stats *stats,
 	}
 }
 
+static noinline void bch2_time_stats_clear_buffer(struct bch2_time_stats *stats,
+						  struct bch2_time_stat_buffer *b)
+{
+	struct bch2_time_stat_buffer_entry *i;
+	unsigned long flags;
+
+	spin_lock_irqsave(&stats->lock, flags);
+	for (i = b->entries;
+	     i < b->entries + ARRAY_SIZE(b->entries);
+	     i++)
+		bch2_time_stats_update_one(stats, i->start, i->end);
+	spin_unlock_irqrestore(&stats->lock, flags);
+
+	b->nr = 0;
+}
+
 void __bch2_time_stats_update(struct bch2_time_stats *stats, u64 start, u64 end)
 {
 	unsigned long flags;
@@ -362,7 +378,6 @@ void __bch2_time_stats_update(struct bch2_time_stats *stats, u64 start, u64 end)
 				GFP_ATOMIC);
 		spin_unlock_irqrestore(&stats->lock, flags);
 	} else {
-		struct bch2_time_stat_buffer_entry *i;
 		struct bch2_time_stat_buffer *b;
 
 		preempt_disable();
@@ -374,17 +389,8 @@ void __bch2_time_stats_update(struct bch2_time_stats *stats, u64 start, u64 end)
 			.end = end
 		};
 
-		if (b->nr == ARRAY_SIZE(b->entries)) {
-			spin_lock_irqsave(&stats->lock, flags);
-			for (i = b->entries;
-			     i < b->entries + ARRAY_SIZE(b->entries);
-			     i++)
-				bch2_time_stats_update_one(stats, i->start, i->end);
-			spin_unlock_irqrestore(&stats->lock, flags);
-			b->nr = 0;
-		}
+		if (unlikely(b->nr == ARRAY_SIZE(b->entries)))
+			bch2_time_stats_clear_buffer(stats, b);
 
 		preempt_enable();
 	}
 }
...
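Taken together, the change leaves the update path as an inlined fast path that appends to a buffer, with the rarely taken buffer flush pushed out of line. Below is a minimal standalone sketch of that pattern: a userspace analogue with hypothetical names, a plain mutex in place of the kernel's spinlock/percpu machinery, and __builtin_expect() standing in for unlikely(). It is not the actual bcachefs code.

#include <pthread.h>
#include <stddef.h>
#include <stdint.h>

#define BUF_ENTRIES 32

struct stat_entry {
	uint64_t start, end;
};

struct time_stats {
	pthread_mutex_t lock;
	uint64_t nr_events;
	uint64_t total_duration;
};

struct stat_buffer {
	size_t nr;
	struct stat_entry entries[BUF_ENTRIES];
};

/* Fast path: cheap enough for the compiler to inline into every caller. */
static inline void stats_update_one(struct time_stats *s, uint64_t start, uint64_t end)
{
	s->nr_events++;
	s->total_duration += end - start;
}

/*
 * Slow path: only runs when the buffer fills, so keep it out of line to
 * avoid bloating every call site of stats_update() with the flush loop.
 */
static __attribute__((noinline)) void
stats_clear_buffer(struct time_stats *s, struct stat_buffer *b)
{
	pthread_mutex_lock(&s->lock);
	for (struct stat_entry *i = b->entries; i < b->entries + b->nr; i++)
		stats_update_one(s, i->start, i->end);
	pthread_mutex_unlock(&s->lock);

	b->nr = 0;
}

void stats_update(struct time_stats *s, struct stat_buffer *b, uint64_t start, uint64_t end)
{
	b->entries[b->nr++] = (struct stat_entry) { .start = start, .end = end };

	/* __builtin_expect(..., 0) plays the role of the kernel's unlikely(). */
	if (__builtin_expect(b->nr == BUF_ENTRIES, 0))
		stats_clear_buffer(s, b);
}

Keeping the flush function noinline (and the branch marked unlikely) keeps the common case small, which is what lets the compiler profitably inline the per-event update into its callers.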