Commit e3de671d authored by Linus Torvalds

Merge tag 'asm-generic-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/arnd/asm-generic

Pull asm-generic updates from Arnd Bergmann:
 "The asm-generic tree this time contains one series from Nicolas Pitre
  that makes the optimized do_div() implementation from the ARM
  architecture available to all architectures.

  This also adds stricter type checking for callers of do_div, which has
  uncovered a number of bugs in existing code, and fixes up the ones we
  have found"

* tag 'asm-generic-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/arnd/asm-generic:
  ARM: asm/div64.h: adjust to generic code
  __div64_32(): make it overridable at compile time
  __div64_const32(): abstract out the actual 128-bit cross product code
  do_div(): generic optimization for constant divisor on 32-bit machines
  div64.h: optimize do_div() for power-of-two constant divisors
  mtd/sm_ftl.c: fix wrong do_div() usage
  drm/mgag200/mgag200_mode.c: fix wrong do_div() usage
  hid-sensor-hub.c: fix wrong do_div() usage
  ti/fapll: fix wrong do_div() usage
  ti/clkt_dpll: fix wrong do_div() usage
  tegra/clk-divider: fix wrong do_div() usage
  imx/clk-pllv2: fix wrong do_div() usage
  imx/clk-pllv1: fix wrong do_div() usage
  nouveau/nvkm/subdev/clk/gk20a.c: fix wrong do_div() usage
parents 71e4634e 040b323b
drivers/clk/tegra/clk-divider.c
@@ -32,7 +32,7 @@
 static int get_div(struct tegra_clk_frac_div *divider, unsigned long rate,
 		   unsigned long parent_rate)
 {
-	s64 divider_ux1 = parent_rate;
+	u64 divider_ux1 = parent_rate;
 	u8 flags = divider->flags;
 	int mul;
@@ -54,7 +54,7 @@ static int get_div(struct tegra_clk_frac_div *divider, unsigned long rate,
 	divider_ux1 -= mul;
-	if (divider_ux1 < 0)
+	if ((s64)divider_ux1 < 0)
 		return 0;
 	if (divider_ux1 > get_max_div(divider))
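
The fix pattern above, restated as a standalone hedged sketch (hypothetical function, not from the driver): the dividend stays u64 so the do_div() type check passes, and the underflow test casts to s64 only at the comparison.

	#include <linux/math64.h>

	/* illustrative: divide, subtract a bias, and detect underflow */
	static inline int frac_div_checked(u64 dividend, u32 divisor, u32 bias)
	{
		do_div(dividend, divisor);	/* needs an unsigned 64-bit lvalue */
		dividend -= bias;

		/* unsigned arithmetic wraps; test the sign on a cast copy */
		if ((s64)dividend < 0)
			return -1;
		return 0;
	}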
drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -1564,7 +1564,7 @@ static uint32_t mga_vga_calculate_mode_bandwidth(struct drm_display_mode *mode,
 					    int bits_per_pixel)
 {
 	uint32_t total_area, divisor;
-	int64_t active_area, pixels_per_second, bandwidth;
+	uint64_t active_area, pixels_per_second, bandwidth;
 	uint64_t bytes_per_pixel = (bits_per_pixel + 7) / 8;
 	divisor = 1024;
drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c
@@ -141,9 +141,8 @@ gk20a_pllg_calc_rate(struct gk20a_clk *clk)
 	rate = clk->parent_rate * clk->n;
 	divider = clk->m * pl_to_div[clk->pl];
-	do_div(rate, divider);
-	return rate / 2;
+	return rate / divider / 2;
 }
 
 static int
drivers/hid/hid-sensor-hub.c
@@ -218,7 +218,8 @@ int sensor_hub_set_feature(struct hid_sensor_hub_device *hsdev, u32 report_id,
 		goto done_proc;
 	}
-	remaining_bytes = do_div(buffer_size, sizeof(__s32));
+	remaining_bytes = buffer_size % sizeof(__s32);
+	buffer_size = buffer_size / sizeof(__s32);
 	if (buffer_size) {
 		for (i = 0; i < buffer_size; ++i) {
 			hid_set_field(report->field[field_index], i,
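
do_div() is reserved for 64-bit dividends; buffer_size here is a 32-bit quantity, so plain % and / are both correct and cheaper. A hedged sketch of the before/after (standalone function, names mirror the hunk):

	#include <linux/types.h>

	/* illustrative: split a byte count into 32-bit words plus leftover bytes */
	static inline void split_buffer(u32 buffer_size, u32 *words, u32 *rest)
	{
		/*
		 * Wrong: remaining_bytes = do_div(buffer_size, sizeof(__s32));
		 * do_div() requires a u64 lvalue, not a 32-bit variable.
		 */
		*rest = buffer_size % sizeof(__s32);	/* remainder */
		*words = buffer_size / sizeof(__s32);	/* quotient */
	}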
include/asm-generic/div64.h
@@ -4,6 +4,9 @@
  * Copyright (C) 2003 Bernardo Innocenti <bernie@develer.com>
  * Based on former asm-ppc/div64.h and asm-m68knommu/div64.h
  *
+ * Optimization for constant divisors on 32-bit machines:
+ * Copyright (C) 2006-2015 Nicolas Pitre
+ *
  * The semantics of do_div() are:
  *
  * uint32_t do_div(uint64_t *n, uint32_t base)
@@ -32,7 +35,168 @@
 #elif BITS_PER_LONG == 32
 
+#include <linux/log2.h>
+
+/*
+ * If the divisor happens to be constant, we determine the appropriate
+ * inverse at compile time to turn the division into a few inline
+ * multiplications which ought to be much faster. And yet only if compiling
+ * with a sufficiently recent gcc version to perform proper 64-bit constant
+ * propagation.
+ *
+ * (It is unfortunate that gcc doesn't perform all this internally.)
+ */
+
+#ifndef __div64_const32_is_OK
+#define __div64_const32_is_OK (__GNUC__ >= 4)
+#endif
+
+#define __div64_const32(n, ___b)					\
+({									\
+	/*								\
+	 * Multiplication by reciprocal of b: n / b = n * (p / b) / p	\
+	 *								\
+	 * We rely on the fact that most of this code gets optimized	\
+	 * away at compile time due to constant propagation and only	\
+	 * a few multiplication instructions should remain.		\
+	 * Hence this monstrous macro (static inline doesn't always	\
+	 * do the trick here).						\
+	 */								\
+	uint64_t ___res, ___x, ___t, ___m, ___n = (n);			\
+	uint32_t ___p, ___bias;						\
+									\
+	/* determine MSB of b */					\
+	___p = 1 << ilog2(___b);					\
+									\
+	/* compute m = ((p << 64) + b - 1) / b */			\
+	___m = (~0ULL / ___b) * ___p;					\
+	___m += (((~0ULL % ___b + 1) * ___p) + ___b - 1) / ___b;	\
+									\
+	/* one less than the dividend with highest result */		\
+	___x = ~0ULL / ___b * ___b - 1;					\
+									\
+	/* test our ___m with res = m * x / (p << 64) */		\
+	___res = ((___m & 0xffffffff) * (___x & 0xffffffff)) >> 32;	\
+	___t = ___res += (___m & 0xffffffff) * (___x >> 32);		\
+	___res += (___x & 0xffffffff) * (___m >> 32);			\
+	___t = (___res < ___t) ? (1ULL << 32) : 0;			\
+	___res = (___res >> 32) + ___t;					\
+	___res += (___m >> 32) * (___x >> 32);				\
+	___res /= ___p;							\
+									\
+	/* Now sanitize and optimize what we've got. */			\
+	if (~0ULL % (___b / (___b & -___b)) == 0) {			\
+		/* special case, can be simplified to ... */		\
+		___n /= (___b & -___b);					\
+		___m = ~0ULL / (___b / (___b & -___b));			\
+		___p = 1;						\
+		___bias = 1;						\
+	} else if (___res != ___x / ___b) {				\
+		/*							\
+		 * We can't get away without a bias to compensate	\
+		 * for bit truncation errors. To avoid it we'd need an	\
+		 * additional bit to represent m which would overflow	\
+		 * a 64-bit variable.					\
+		 *							\
+		 * Instead we do m = p / b and n / b = (n * m + m) / p.	\
+		 */							\
+		___bias = 1;						\
+		/* Compute m = (p << 64) / b */				\
+		___m = (~0ULL / ___b) * ___p;				\
+		___m += ((~0ULL % ___b + 1) * ___p) / ___b;		\
+	} else {							\
+		/*							\
+		 * Reduce m / p, and try to clear bit 31 of m when	\
+		 * possible, otherwise that'll need extra overflow	\
+		 * handling later.					\
+		 */							\
+		uint32_t ___bits = -(___m & -___m);			\
+		___bits |= ___m >> 32;					\
+		___bits = (~___bits) << 1;				\
+		/*							\
+		 * If ___bits == 0 then setting bit 31 is unavoidable.	\
+		 * Simply apply the maximum possible reduction in that	\
+		 * case. Otherwise the MSB of ___bits indicates the	\
+		 * best reduction we should apply.			\
+		 */							\
+		if (!___bits) {						\
+			___p /= (___m & -___m);				\
+			___m /= (___m & -___m);				\
+		} else {						\
+			___p >>= ilog2(___bits);			\
+			___m >>= ilog2(___bits);			\
+		}							\
+		/* No bias needed. */					\
+		___bias = 0;						\
+	}								\
+									\
+	/*								\
+	 * Now we have a combination of 2 conditions:			\
+	 *								\
+	 * 1) whether or not we need to apply a bias, and		\
+	 *								\
+	 * 2) whether or not there might be an overflow in the cross	\
+	 *    product determined by (___m & ((1 << 63) | (1 << 31))).	\
+	 *								\
+	 * Select the best way to do (m_bias + m * n) / (1 << 64).	\
+	 * From now on there will be actual runtime code generated.	\
+	 */								\
+	___res = __arch_xprod_64(___m, ___n, ___bias);			\
+									\
+	___res /= ___p;							\
+})
+
+#ifndef __arch_xprod_64
+/*
+ * Default C implementation for __arch_xprod_64()
+ *
+ * Prototype: uint64_t __arch_xprod_64(const uint64_t m, uint64_t n, bool bias)
+ * Semantic:  retval = ((bias ? m : 0) + m * n) >> 64
+ *
+ * The product is a 128-bit value, scaled down to 64 bits.
+ * Assuming constant propagation to optimize away unused conditional code.
+ * Architectures may provide their own optimized assembly implementation.
+ */
+static inline uint64_t __arch_xprod_64(const uint64_t m, uint64_t n, bool bias)
+{
+	uint32_t m_lo = m;
+	uint32_t m_hi = m >> 32;
+	uint32_t n_lo = n;
+	uint32_t n_hi = n >> 32;
+	uint64_t res, tmp;
+
+	if (!bias) {
+		res = ((uint64_t)m_lo * n_lo) >> 32;
+	} else if (!(m & ((1ULL << 63) | (1ULL << 31)))) {
+		/* there can't be any overflow here */
+		res = (m + (uint64_t)m_lo * n_lo) >> 32;
+	} else {
+		res = m + (uint64_t)m_lo * n_lo;
+		tmp = (res < m) ? (1ULL << 32) : 0;
+		res = (res >> 32) + tmp;
+	}
+
+	if (!(m & ((1ULL << 63) | (1ULL << 31)))) {
+		/* there can't be any overflow here */
+		res += (uint64_t)m_lo * n_hi;
+		res += (uint64_t)m_hi * n_lo;
+		res >>= 32;
+	} else {
+		tmp = res += (uint64_t)m_lo * n_hi;
+		res += (uint64_t)m_hi * n_lo;
+		tmp = (res < tmp) ? (1ULL << 32) : 0;
+		res = (res >> 32) + tmp;
+	}
+
+	res += (uint64_t)m_hi * n_hi;
+
+	return res;
+}
+#endif
+
+#ifndef __div64_32
 extern uint32_t __div64_32(uint64_t *dividend, uint32_t divisor);
+#endif
 
 /* The unnecessary pointer compare is there
  * to check for type safety (n must be 64bit)
@@ -41,7 +205,19 @@ extern uint32_t __div64_32(uint64_t *dividend, uint32_t divisor);
 	uint32_t __base = (base);				\
 	uint32_t __rem;						\
 	(void)(((typeof((n)) *)0) == ((uint64_t *)0));		\
-	if (likely(((n) >> 32) == 0)) {				\
+	if (__builtin_constant_p(__base) &&			\
+	    is_power_of_2(__base)) {				\
+		__rem = (n) & (__base - 1);			\
+		(n) >>= ilog2(__base);				\
+	} else if (__div64_const32_is_OK &&			\
+		   __builtin_constant_p(__base) &&		\
+		   __base != 0) {				\
+		uint32_t __res_lo, __n_lo = (n);		\
+		(n) = __div64_const32(n, __base);		\
+		/* the remainder can be computed with 32-bit regs */ \
+		__res_lo = (n);					\
+		__rem = __n_lo - __res_lo * __base;		\
+	} else if (likely(((n) >> 32) == 0)) {			\
 		__rem = (uint32_t)(n) % __base;			\
 		(n) = (uint32_t)(n) / __base;			\
 	} else							\
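
To make the reciprocal scheme concrete, here is a userspace-style sketch (plain C, not kernel code) of the same idea one size down: for a 32-bit n and the constant divisor 10, the well-known constant 0xCCCCCCCD equals ceil(2^35 / 10), so one widening multiply and a shift replace the division, and the remainder is then reconstructed from the low halves exactly as the new do_div() path does with __n_lo and __res_lo:

	#include <assert.h>
	#include <stdint.h>

	/* quotient by reciprocal: n / 10 == (n * ceil(2^35 / 10)) >> 35 */
	static uint32_t div10(uint32_t n)
	{
		return (uint32_t)(((uint64_t)n * 0xCCCCCCCDu) >> 35);
	}

	int main(void)
	{
		uint32_t n = 0xFFFFFFFFu;

		assert(div10(n) == n / 10);
		/* the remainder always fits in 32 bits, so it can be
		 * recovered with 32-bit arithmetic: rem = n - q * b */
		assert(n - div10(n) * 10 == n % 10);
		return 0;
	}

The kernel macro does the analogous 64-bit computation, with __arch_xprod_64() supplying the high half of the 128-bit cross product.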
lib/div64.c
@@ -13,7 +13,8 @@
  *
  * Code generated for this function might be very inefficient
  * for some CPUs. __div64_32() can be overridden by linking arch-specific
- * assembly versions such as arch/ppc/lib/div64.S and arch/sh/lib/div64.S.
+ * assembly versions such as arch/ppc/lib/div64.S and arch/sh/lib/div64.S
+ * or by defining a preprocessor macro in arch/include/asm/div64.h.
  */
 
 #include <linux/export.h>
@@ -23,6 +24,7 @@
 /* Not needed on 64bit architectures */
 #if BITS_PER_LONG == 32
 
+#ifndef __div64_32
 uint32_t __attribute__((weak)) __div64_32(uint64_t *n, uint32_t base)
 {
 	uint64_t rem = *n;
@@ -55,8 +57,8 @@ uint32_t __attribute__((weak)) __div64_32(uint64_t *n, uint32_t base)
 	*n = res;
 	return rem;
 }
 EXPORT_SYMBOL(__div64_32);
+#endif
 
 #ifndef div_s64_rem
 s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
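
The new #ifndef guards are what make the compile-time override work: an architecture provides its own __div64_32() and defines the macro to its own name, so both the generic extern declaration and the weak C fallback above drop out. A hypothetical arch header sketch (path and body illustrative only):

	/* hypothetical arch/xyz/include/asm/div64.h */
	#ifndef __ASM_XYZ_DIV64_H
	#define __ASM_XYZ_DIV64_H

	#include <linux/types.h>

	static inline uint32_t __div64_32(uint64_t *n, uint32_t base)
	{
		/* a real port would use optimized inline assembly here */
		uint32_t rem = *n % base;

		*n /= base;
		return rem;
	}
	/* defining the macro to its own name signals that an override exists */
	#define __div64_32 __div64_32

	#include <asm-generic/div64.h>

	#endif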