Commit 6d338bb2 authored by Rusty Russell's avatar Rusty Russell Committed by Linus Torvalds

[PATCH] Use Local Percpu Macros for Local Percpu Variables

In general, it is better to use get_cpu_var() and __get_cpu_var()
to access per-cpu variables on this CPU than to use smp_processor_id()
and per_cpu().  In the current default implementation they are equivalent,
but on IA64 the former is already faster, and other archs will follow.
parent 5e49d31e
......@@ -1337,7 +1337,7 @@ static void bh_lru_install(struct buffer_head *bh)
check_irqs_on();
bh_lru_lock();
lru = &per_cpu(bh_lrus, smp_processor_id());
lru = &__get_cpu_var(bh_lrus);
if (lru->bhs[0] != bh) {
struct buffer_head *bhs[BH_LRU_SIZE];
int in;
......@@ -1381,7 +1381,7 @@ lookup_bh_lru(struct block_device *bdev, sector_t block, int size)
check_irqs_on();
bh_lru_lock();
lru = &per_cpu(bh_lrus, smp_processor_id());
lru = &__get_cpu_var(bh_lrus);
for (i = 0; i < BH_LRU_SIZE; i++) {
struct buffer_head *bh = lru->bhs[i];
......@@ -1474,15 +1474,14 @@ EXPORT_SYMBOL(__bread);
*/
static void invalidate_bh_lru(void *arg)
{
const int cpu = get_cpu();
struct bh_lru *b = &per_cpu(bh_lrus, cpu);
struct bh_lru *b = &get_cpu_var(bh_lrus);
int i;
for (i = 0; i < BH_LRU_SIZE; i++) {
brelse(b->bhs[i]);
b->bhs[i] = NULL;
}
put_cpu();
put_cpu_var(bh_lrus);
}
static void invalidate_bh_lrus(void)
......
......@@ -41,7 +41,7 @@ static void __unhash_process(struct task_struct *p)
detach_pid(p, PIDTYPE_PGID);
detach_pid(p, PIDTYPE_SID);
if (p->pid)
per_cpu(process_counts, smp_processor_id())--;
__get_cpu_var(process_counts)--;
}
REMOVE_LINKS(p);
......
......@@ -1006,7 +1006,7 @@ struct task_struct *copy_process(unsigned long clone_flags,
attach_pid(p, PIDTYPE_PGID, p->pgrp);
attach_pid(p, PIDTYPE_SID, p->session);
if (p->pid)
per_cpu(process_counts, smp_processor_id())++;
__get_cpu_var(process_counts)++;
} else
link_pid(p, p->pids + PIDTYPE_TGID, &p->group_leader->pids[PIDTYPE_TGID].pid);
......
......@@ -156,8 +156,7 @@ static void internal_add_timer(tvec_base_t *base, struct timer_list *timer)
*/
void add_timer(struct timer_list *timer)
{
int cpu = get_cpu();
tvec_base_t *base = &per_cpu(tvec_bases, cpu);
tvec_base_t *base = &get_cpu_var(tvec_bases);
unsigned long flags;
BUG_ON(timer_pending(timer) || !timer->function);
......@@ -168,7 +167,7 @@ void add_timer(struct timer_list *timer)
internal_add_timer(base, timer);
timer->base = base;
spin_unlock_irqrestore(&base->lock, flags);
put_cpu();
put_cpu_var(tvec_bases);
}
/***
......@@ -231,7 +230,7 @@ int mod_timer(struct timer_list *timer, unsigned long expires)
return 1;
spin_lock_irqsave(&timer->lock, flags);
new_base = &per_cpu(tvec_bases, smp_processor_id());
new_base = &__get_cpu_var(tvec_bases);
repeat:
old_base = timer->base;
......@@ -789,7 +788,7 @@ seqlock_t xtime_lock __cacheline_aligned_in_smp = SEQLOCK_UNLOCKED;
*/
static void run_timer_softirq(struct softirq_action *h)
{
tvec_base_t *base = &per_cpu(tvec_bases, smp_processor_id());
tvec_base_t *base = &__get_cpu_var(tvec_bases);
if (time_after_eq(jiffies, base->timer_jiffies))
__run_timers(base);
......
......@@ -213,21 +213,19 @@ void balance_dirty_pages(struct address_space *mapping)
void balance_dirty_pages_ratelimited(struct address_space *mapping)
{
static DEFINE_PER_CPU(int, ratelimits) = 0;
int cpu;
long ratelimit;
ratelimit = ratelimit_pages;
if (dirty_exceeded)
ratelimit = 8;
cpu = get_cpu();
if (per_cpu(ratelimits, cpu)++ >= ratelimit) {
per_cpu(ratelimits, cpu) = 0;
put_cpu();
if (get_cpu_var(ratelimits)++ >= ratelimit) {
__get_cpu_var(ratelimits) = 0;
put_cpu_var(ratelimits);
balance_dirty_pages(mapping);
return;
}
put_cpu();
put_cpu_var(ratelimits);
}
/*
......
......@@ -477,16 +477,15 @@ DEFINE_PER_CPU(struct pte_chain *, local_pte_chain) = 0;
*/
void __pte_chain_free(struct pte_chain *pte_chain)
{
int cpu = get_cpu();
struct pte_chain **pte_chainp;
pte_chainp = &get_cpu_var(local_pte_chain);
if (pte_chain->next_and_idx)
pte_chain->next_and_idx = 0;
pte_chainp = &per_cpu(local_pte_chain, cpu);
if (*pte_chainp)
kmem_cache_free(pte_chain_cache, *pte_chainp);
*pte_chainp = pte_chain;
put_cpu();
put_cpu_var(local_pte_chain);
}
/*
......@@ -501,21 +500,19 @@ void __pte_chain_free(struct pte_chain *pte_chain)
*/
struct pte_chain *pte_chain_alloc(int gfp_flags)
{
int cpu;
struct pte_chain *ret;
struct pte_chain **pte_chainp;
if (gfp_flags & __GFP_WAIT)
might_sleep();
cpu = get_cpu();
pte_chainp = &per_cpu(local_pte_chain, cpu);
pte_chainp = &get_cpu_var(local_pte_chain);
if (*pte_chainp) {
ret = *pte_chainp;
*pte_chainp = NULL;
put_cpu();
put_cpu_var(local_pte_chain);
} else {
put_cpu();
put_cpu_var(local_pte_chain);
ret = kmem_cache_alloc(pte_chain_cache, gfp_flags);
}
return ret;
......
......@@ -112,35 +112,34 @@ static DEFINE_PER_CPU(struct pagevec, lru_add_active_pvecs) = { 0, };
void lru_cache_add(struct page *page)
{
struct pagevec *pvec = &per_cpu(lru_add_pvecs, get_cpu());
struct pagevec *pvec = &get_cpu_var(lru_add_pvecs);
page_cache_get(page);
if (!pagevec_add(pvec, page))
__pagevec_lru_add(pvec);
put_cpu();
put_cpu_var(lru_add_pvecs);
}
void lru_cache_add_active(struct page *page)
{
struct pagevec *pvec = &per_cpu(lru_add_active_pvecs, get_cpu());
struct pagevec *pvec = &get_cpu_var(lru_add_active_pvecs);
page_cache_get(page);
if (!pagevec_add(pvec, page))
__pagevec_lru_add_active(pvec);
put_cpu();
put_cpu_var(lru_add_active_pvecs);
}
void lru_add_drain(void)
{
int cpu = get_cpu();
struct pagevec *pvec = &per_cpu(lru_add_pvecs, cpu);
struct pagevec *pvec = &get_cpu_var(lru_add_pvecs);
if (pagevec_count(pvec))
__pagevec_lru_add(pvec);
pvec = &per_cpu(lru_add_active_pvecs, cpu);
pvec = &__get_cpu_var(lru_add_active_pvecs);
if (pagevec_count(pvec))
__pagevec_lru_add_active(pvec);
put_cpu();
put_cpu_var(lru_add_pvecs);
}
/*
......
......@@ -228,7 +228,7 @@ static struct icmp_control icmp_pointers[NR_ICMP_TYPES+1];
* On SMP we have one ICMP socket per-cpu.
*/
static DEFINE_PER_CPU(struct socket *, __icmp_socket) = NULL;
#define icmp_socket per_cpu(__icmp_socket, smp_processor_id())
#define icmp_socket __get_cpu_var(__icmp_socket)
static __inline__ void icmp_xmit_lock(void)
{
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment