Commit d492a29d authored by Ville Syrjälä

drm/i915: Use mul_u32_u32() more

We have a lot of '(u64)foo * bar' everywhere. Replace with
mul_u32_u32() to avoid gcc failing to use a regular 32x32->64
multiply for this.
Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190408152702.4153-1-ville.syrjala@linux.intel.com
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
parent b7ffc4a8
...@@ -71,7 +71,7 @@ static inline u32 mul_round_up_u32_fixed16(u32 val, uint_fixed_16_16_t mul) ...@@ -71,7 +71,7 @@ static inline u32 mul_round_up_u32_fixed16(u32 val, uint_fixed_16_16_t mul)
{ {
u64 tmp; u64 tmp;
tmp = (u64)val * mul.val; tmp = mul_u32_u32(val, mul.val);
tmp = DIV_ROUND_UP_ULL(tmp, 1 << 16); tmp = DIV_ROUND_UP_ULL(tmp, 1 << 16);
WARN_ON(tmp > U32_MAX); WARN_ON(tmp > U32_MAX);
...@@ -83,7 +83,7 @@ static inline uint_fixed_16_16_t mul_fixed16(uint_fixed_16_16_t val, ...@@ -83,7 +83,7 @@ static inline uint_fixed_16_16_t mul_fixed16(uint_fixed_16_16_t val,
{ {
u64 tmp; u64 tmp;
tmp = (u64)val.val * mul.val; tmp = mul_u32_u32(val.val, mul.val);
tmp = tmp >> 16; tmp = tmp >> 16;
return clamp_u64_to_fixed16(tmp); return clamp_u64_to_fixed16(tmp);
...@@ -114,7 +114,7 @@ static inline uint_fixed_16_16_t mul_u32_fixed16(u32 val, uint_fixed_16_16_t mul ...@@ -114,7 +114,7 @@ static inline uint_fixed_16_16_t mul_u32_fixed16(u32 val, uint_fixed_16_16_t mul
{ {
u64 tmp; u64 tmp;
tmp = (u64)val * mul.val; tmp = mul_u32_u32(val, mul.val);
return clamp_u64_to_fixed16(tmp); return clamp_u64_to_fixed16(tmp);
} }
......
...@@ -575,7 +575,7 @@ int chv_calc_dpll_params(int refclk, struct dpll *clock) ...@@ -575,7 +575,7 @@ int chv_calc_dpll_params(int refclk, struct dpll *clock)
clock->p = clock->p1 * clock->p2; clock->p = clock->p1 * clock->p2;
if (WARN_ON(clock->n == 0 || clock->p == 0)) if (WARN_ON(clock->n == 0 || clock->p == 0))
return 0; return 0;
clock->vco = DIV_ROUND_CLOSEST_ULL((u64)refclk * clock->m, clock->vco = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(refclk, clock->m),
clock->n << 22); clock->n << 22);
clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p); clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
...@@ -960,8 +960,8 @@ chv_find_best_dpll(const struct intel_limit *limit, ...@@ -960,8 +960,8 @@ chv_find_best_dpll(const struct intel_limit *limit,
clock.p = clock.p1 * clock.p2; clock.p = clock.p1 * clock.p2;
m2 = DIV_ROUND_CLOSEST_ULL(((u64)target * clock.p * m2 = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(target, clock.p * clock.n) << 22,
clock.n) << 22, refclk * clock.m1); refclk * clock.m1);
if (m2 > INT_MAX/clock.m1) if (m2 > INT_MAX/clock.m1)
continue; continue;
...@@ -6946,7 +6946,7 @@ static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config) ...@@ -6946,7 +6946,7 @@ static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
if (WARN_ON(!pfit_w || !pfit_h)) if (WARN_ON(!pfit_w || !pfit_h))
return pixel_rate; return pixel_rate;
pixel_rate = div_u64((u64)pixel_rate * pipe_w * pipe_h, pixel_rate = div_u64(mul_u32_u32(pixel_rate, pipe_w * pipe_h),
pfit_w * pfit_h); pfit_w * pfit_h);
} }
...@@ -7066,7 +7066,7 @@ static void compute_m_n(unsigned int m, unsigned int n, ...@@ -7066,7 +7066,7 @@ static void compute_m_n(unsigned int m, unsigned int n,
else else
*ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX); *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
*ret_m = div_u64((u64)m * *ret_n, n); *ret_m = div_u64(mul_u32_u32(m, *ret_n), n);
intel_reduce_m_n_ratio(ret_m, ret_n); intel_reduce_m_n_ratio(ret_m, ret_n);
} }
......
...@@ -2743,11 +2743,11 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state) ...@@ -2743,11 +2743,11 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state)
} }
if (use_ssc) { if (use_ssc) {
tmp = (u64)dco_khz * 47 * 32; tmp = mul_u32_u32(dco_khz, 47 * 32);
do_div(tmp, refclk_khz * m1div * 10000); do_div(tmp, refclk_khz * m1div * 10000);
ssc_stepsize = tmp; ssc_stepsize = tmp;
tmp = (u64)dco_khz * 1000; tmp = mul_u32_u32(dco_khz, 1000);
ssc_steplen = DIV_ROUND_UP_ULL(tmp, 32 * 2 * 32); ssc_steplen = DIV_ROUND_UP_ULL(tmp, 32 * 2 * 32);
} else { } else {
ssc_stepsize = 0; ssc_stepsize = 0;
......
...@@ -678,7 +678,7 @@ static unsigned int intel_wm_method1(unsigned int pixel_rate, ...@@ -678,7 +678,7 @@ static unsigned int intel_wm_method1(unsigned int pixel_rate,
{ {
u64 ret; u64 ret;
ret = (u64)pixel_rate * cpp * latency; ret = mul_u32_u32(pixel_rate, cpp * latency);
ret = DIV_ROUND_UP_ULL(ret, 10000); ret = DIV_ROUND_UP_ULL(ret, 10000);
return ret; return ret;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment