Commit 6f6d6a1a authored by Roman Zippel, committed by Linus Torvalds

rename div64_64 to div64_u64

Rename div64_64 to div64_u64 so that it is consistent with the other divide
functions and the name clearly states the operand type of the divide.  Move its
definition to math64.h, as currently no architecture overrides the generic
implementation.  Architectures can still override it, of course, but the
duplicated declarations are avoided.
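
For callers the conversion is mechanical: switch the include from <asm/div64.h>
to <linux/math64.h> and replace div64_64() with div64_u64(); argument and
result types are unchanged.  A minimal sketch of the renamed interface
(avg_bytes_per_pkt() is a hypothetical helper, not part of this patch, modeled
on the xt_connbytes change below):

    #include <linux/types.h>
    #include <linux/math64.h>

    /*
     * Average bytes per packet from two 64-bit counters.  On 64-bit
     * kernels div64_u64() inlines to a plain C division; on 32-bit
     * kernels the generic lib/div64.c fallback reduces an over-wide
     * divisor to 32 bits ("dynamic precision", per the comment there)
     * and finishes with div_u64().
     */
    static inline u64 avg_bytes_per_pkt(u64 bytes, u64 pkts)
    {
            return pkts ? div64_u64(bytes, pkts) : 0;
    }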
Signed-off-by: Roman Zippel <zippel@linux-m68k.org>
Cc: Avi Kivity <avi@qumranet.com>
Cc: Russell King <rmk@arm.linux.org.uk>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: David Howells <dhowells@redhat.com>
Cc: Jeff Dike <jdike@addtoit.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Patrick McHardy <kaber@trash.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 71abb3af
 /*
  * kvm_ia64.c: Basic KVM suppport On Itanium series processors
  *
...
@@ -431,7 +430,7 @@ int kvm_emulate_halt(struct kvm_vcpu *vcpu)
         if (itc_diff < 0)
                 itc_diff = -itc_diff;
-        expires = div64_64(itc_diff, cyc_per_usec);
+        expires = div64_u64(itc_diff, cyc_per_usec);
         kt = ktime_set(0, 1000 * expires);
         vcpu->arch.ht_active = 1;
         hrtimer_start(p_ht, kt, HRTIMER_MODE_ABS);
...
@@ -35,7 +35,7 @@
 #include "i8254.h"
 
 #ifndef CONFIG_X86_64
-#define mod_64(x, y) ((x) - (y) * div64_64(x, y))
+#define mod_64(x, y) ((x) - (y) * div64_u64(x, y))
 #else
 #define mod_64(x, y) ((x) % (y))
 #endif
@@ -60,8 +60,8 @@ static u64 muldiv64(u64 a, u32 b, u32 c)
         rl = (u64)u.l.low * (u64)b;
         rh = (u64)u.l.high * (u64)b;
         rh += (rl >> 32);
-        res.l.high = div64_64(rh, c);
-        res.l.low = div64_64(((mod_64(rh, c) << 32) + (rl & 0xffffffff)), c);
+        res.l.high = div64_u64(rh, c);
+        res.l.low = div64_u64(((mod_64(rh, c) << 32) + (rl & 0xffffffff)), c);
         return res.ll;
 }
...
@@ -25,13 +25,13 @@
 #include <linux/hrtimer.h>
 #include <linux/io.h>
 #include <linux/module.h>
+#include <linux/math64.h>
 #include <asm/processor.h>
 #include <asm/msr.h>
 #include <asm/page.h>
 #include <asm/current.h>
 #include <asm/apicdef.h>
 #include <asm/atomic.h>
-#include <asm/div64.h>
 #include "irq.h"
 
 #define PRId64 "d"
@@ -526,8 +526,8 @@ static u32 apic_get_tmcct(struct kvm_lapic *apic)
         } else
                 passed = ktime_sub(now, apic->timer.last_update);
-        counter_passed = div64_64(ktime_to_ns(passed),
-                                  (APIC_BUS_CYCLE_NS * apic->timer.divide_count));
+        counter_passed = div64_u64(ktime_to_ns(passed),
+                                   (APIC_BUS_CYCLE_NS * apic->timer.divide_count));
         if (counter_passed > tmcct) {
                 if (unlikely(!apic_lvtt_period(apic))) {
...
@@ -224,6 +224,4 @@
 #endif
 
-extern uint64_t div64_64(uint64_t dividend, uint64_t divisor);
-
 #endif
@@ -30,11 +30,6 @@
         __rem;                                                  \
 })
 
-static inline uint64_t div64_64(uint64_t dividend, uint64_t divisor)
-{
-        return dividend / divisor;
-}
-
 #elif BITS_PER_LONG == 32
 
 extern uint32_t __div64_32(uint64_t *dividend, uint32_t divisor);
@@ -54,8 +49,6 @@ extern uint32_t __div64_32(uint64_t *dividend, uint32_t divisor);
         __rem;                                                  \
 })
 
-extern uint64_t div64_64(uint64_t dividend, uint64_t divisor);
-
 #else /* BITS_PER_LONG == ?? */
 
 # error do_div() does not yet support the C64
...
@@ -25,5 +25,4 @@
         __rem;                                                  \
 })
 
-extern uint64_t div64_64(uint64_t dividend, uint64_t divisor);
 #endif /* _M68K_DIV64_H */
@@ -82,7 +82,6 @@
         (n) = __quot;                                           \
         __mod; })
 
-extern uint64_t div64_64(uint64_t dividend, uint64_t divisor);
 
 #endif /* (_MIPS_SZLONG == 32) */
 #if (_MIPS_SZLONG == 64)
@@ -106,11 +105,6 @@ extern uint64_t div64_64(uint64_t dividend, uint64_t divisor);
         (n) = __quot;                                           \
         __mod; })
 
-static inline uint64_t div64_64(uint64_t dividend, uint64_t divisor)
-{
-        return dividend / divisor;
-}
-
 #endif /* (_MIPS_SZLONG == 64) */
 
 #endif /* _ASM_DIV64_H */
@@ -97,7 +97,4 @@ signed __muldiv64s(signed val, signed mult, signed div)
         return result;
 }
 
-extern __attribute__((const))
-uint64_t div64_64(uint64_t dividend, uint64_t divisor);
-
 #endif /* _ASM_DIV64 */
@@ -3,5 +3,4 @@
 
 #include "asm/arch/div64.h"
 
-extern uint64_t div64_64(uint64_t dividend, uint64_t divisor);
 #endif
@@ -71,8 +71,6 @@ static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
 }
 #define div_u64_rem div_u64_rem
 
-extern uint64_t div64_64(uint64_t dividend, uint64_t divisor);
-
 #else
 # include <asm-generic/div64.h>
 #endif /* CONFIG_X86_32 */
...
@@ -27,6 +27,14 @@ static inline s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
         return dividend / divisor;
 }
 
+/**
+ * div64_u64 - unsigned 64bit divide with 64bit divisor
+ */
+static inline u64 div64_u64(u64 dividend, u64 divisor)
+{
+        return dividend / divisor;
+}
+
 #elif BITS_PER_LONG == 32
 
 #ifndef div_u64_rem
@@ -41,6 +49,10 @@ static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
 extern s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder);
 #endif
 
+#ifndef div64_u64
+extern u64 div64_u64(u64 dividend, u64 divisor);
+#endif
+
 #endif /* BITS_PER_LONG */
 
 /**
...
@@ -8025,7 +8025,7 @@ static void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
         se->my_q = cfs_rq;
         se->load.weight = tg->shares;
-        se->load.inv_weight = div64_64(1ULL<<32, se->load.weight);
+        se->load.inv_weight = div64_u64(1ULL<<32, se->load.weight);
         se->parent = parent;
 }
 #endif
@@ -8692,7 +8692,7 @@ static void __set_se_shares(struct sched_entity *se, unsigned long shares)
                 dequeue_entity(cfs_rq, se, 0);
 
         se->load.weight = shares;
-        se->load.inv_weight = div64_64((1ULL<<32), shares);
+        se->load.inv_weight = div64_u64((1ULL<<32), shares);
 
         if (on_rq)
                 enqueue_entity(cfs_rq, se, 0);
@@ -8787,7 +8787,7 @@ static unsigned long to_ratio(u64 period, u64 runtime)
         if (runtime == RUNTIME_INF)
                 return 1ULL << 16;
 
-        return div64_64(runtime << 16, period);
+        return div64_u64(runtime << 16, period);
 }
 
 #ifdef CONFIG_CGROUP_SCHED
...
@@ -357,8 +357,8 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
         avg_per_cpu = p->se.sum_exec_runtime;
         if (p->se.nr_migrations) {
-                avg_per_cpu = div64_64(avg_per_cpu,
-                                       p->se.nr_migrations);
+                avg_per_cpu = div64_u64(avg_per_cpu,
+                                        p->se.nr_migrations);
         } else {
                 avg_per_cpu = -1LL;
         }
...
@@ -78,9 +78,10 @@ EXPORT_SYMBOL(div_s64_rem);
 #endif
 
 /* 64bit divisor, dividend and result. dynamic precision */
-uint64_t div64_64(uint64_t dividend, uint64_t divisor)
+#ifndef div64_u64
+u64 div64_u64(u64 dividend, u64 divisor)
 {
-        uint32_t high, d;
+        u32 high, d;
 
         high = divisor >> 32;
         if (high) {
@@ -91,10 +92,9 @@ uint64_t div64_64(uint64_t dividend, uint64_t divisor)
         } else
                 d = divisor;
 
-        do_div(dividend, d);
-
-        return dividend;
+        return div_u64(dividend, d);
 }
-EXPORT_SYMBOL(div64_64);
+EXPORT_SYMBOL(div64_u64);
+#endif
 
 #endif /* BITS_PER_LONG == 32 */
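
(The collapsed part of the hunk above holds the divisor-reduction step.  As a
user-space sketch of the idea, under the assumption that the reduction shifts
both operands by the position of the divisor's top set bit; div64_u64_approx()
and the test values are illustrative, not from this commit:)

    #include <stdint.h>
    #include <stdio.h>

    /* When the divisor exceeds 32 bits, scale divisor and dividend down
     * by the same shift so a 64/32 divide suffices; the result is then
     * approximate ("dynamic precision"), and exact whenever the divisor
     * already fits in 32 bits. */
    static uint64_t div64_u64_approx(uint64_t dividend, uint64_t divisor)
    {
            uint32_t high = divisor >> 32;

            if (high) {
                    /* fls(high): 1-based position of the top set bit */
                    int shift = 32 - __builtin_clz(high);

                    divisor >>= shift;
                    dividend >>= shift;
            }
            return dividend / (uint32_t)divisor;
    }

    int main(void)
    {
            /* 10^13 / 5*10^9 = 2000; divisor is wider than 32 bits */
            printf("%llu\n", (unsigned long long)
                   div64_u64_approx(10000000000000ULL, 5000000000ULL));
            return 0;
    }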
@@ -15,8 +15,8 @@
 #include <linux/mm.h>
 #include <linux/module.h>
+#include <linux/math64.h>
 #include <net/tcp.h>
-#include <asm/div64.h>
 
 #define BICTCP_BETA_SCALE    1024  /* Scale factor beta calculation
                                     * max_cwnd = snd_cwnd * beta
@@ -128,7 +128,7 @@ static u32 cubic_root(u64 a)
          *      x    = ( 2 * x  +  a / x  ) / 3
          *       k+1          k         k
          */
-        x = (2 * x + (u32)div64_64(a, (u64)x * (u64)(x - 1)));
+        x = (2 * x + (u32)div64_u64(a, (u64)x * (u64)(x - 1)));
         x = ((x * 341) >> 10);
         return x;
 }
...
@@ -4,12 +4,11 @@
 #include <linux/module.h>
 #include <linux/bitops.h>
 #include <linux/skbuff.h>
+#include <linux/math64.h>
 #include <linux/netfilter/x_tables.h>
 #include <linux/netfilter/xt_connbytes.h>
 #include <net/netfilter/nf_conntrack.h>
-#include <asm/div64.h>
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
 MODULE_DESCRIPTION("Xtables: Number of packets/bytes per connection matching");
@@ -82,7 +81,7 @@ connbytes_mt(const struct sk_buff *skb, const struct net_device *in,
                         break;
                 }
 
                 if (pkts != 0)
-                        what = div64_64(bytes, pkts);
+                        what = div64_u64(bytes, pkts);
                 break;
         }
...