Commit 383be5d1 authored by Alex Deucher, committed by Dave Airlie

drm/radeon/kms: update new pll algo

- add support for pre-avivo chips
- add support for fixed post/ref dividers
- add support for non-fractional fb dividers

By default avivo chips use the new algo and
pre-avivo chips use the old algo. Use the "new_pll"
module option to toggle between them.
Signed-off-by: Alex Deucher <alexdeucher@gmail.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
parent 939461d5
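
The hunks below apply this selection in three places (the atombios PLL-adjust path, the LVDS info path, and the legacy CRTC path). As a reading aid only, the sketch below restates the resulting policy for the tri-state "new_pll" parameter; select_pll_algo() is a hypothetical helper, not code from this patch.

#include <stdbool.h>

/* Reading aid only, not part of the patch: how the tri-state new_pll
 * module parameter (-1 = per-chip default, 0 = legacy, 1 = new) maps to
 * a PLL algorithm in the hunks below. */
enum radeon_pll_algo { PLL_ALGO_LEGACY, PLL_ALGO_NEW };

static enum radeon_pll_algo select_pll_algo(bool is_avivo, int new_pll)
{
        if (is_avivo)
                /* AVIVO: default (-1) and 1 pick the new algo; 0 forces legacy */
                return (new_pll == 0) ? PLL_ALGO_LEGACY : PLL_ALGO_NEW;
        /* pre-AVIVO: default (-1) and 0 stay on legacy; 1 opts in to the new algo */
        return (new_pll == 1) ? PLL_ALGO_NEW : PLL_ALGO_LEGACY;
}

In the patch itself the same decision is written out inline at each call site rather than through a shared helper.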
@@ -438,12 +438,16 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
         /* select the PLL algo */
         if (ASIC_IS_AVIVO(rdev)) {
-                if (radeon_new_pll)
-                        pll->algo = PLL_ALGO_AVIVO;
+                if (radeon_new_pll == 0)
+                        pll->algo = PLL_ALGO_LEGACY;
+                else
+                        pll->algo = PLL_ALGO_NEW;
+        } else {
+                if (radeon_new_pll == 1)
+                        pll->algo = PLL_ALGO_NEW;
                 else
                         pll->algo = PLL_ALGO_LEGACY;
-        } else
-                pll->algo = PLL_ALGO_LEGACY;
+        }
 
         if (ASIC_IS_AVIVO(rdev)) {
                 if ((rdev->family == CHIP_RS600) ||
@@ -1191,12 +1191,16 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
                 lvds->ss = radeon_atombios_get_ss_info(encoder, lvds_info->info.ucSS_Id);
 
                 if (ASIC_IS_AVIVO(rdev)) {
-                        if (radeon_new_pll)
-                                lvds->pll_algo = PLL_ALGO_AVIVO;
+                        if (radeon_new_pll == 0)
+                                lvds->pll_algo = PLL_ALGO_LEGACY;
+                        else
+                                lvds->pll_algo = PLL_ALGO_NEW;
+                } else {
+                        if (radeon_new_pll == 1)
+                                lvds->pll_algo = PLL_ALGO_NEW;
                         else
                                 lvds->pll_algo = PLL_ALGO_LEGACY;
-                } else
-                        lvds->pll_algo = PLL_ALGO_LEGACY;
+                }
 
                 /* LVDS quirks */
                 radeon_atom_apply_lvds_quirks(dev, lvds);
@@ -603,95 +603,173 @@ static void radeon_compute_pll_legacy(struct radeon_pll *pll,
         *post_div_p = best_post_div;
 }
 
-static void radeon_compute_pll_avivo(struct radeon_pll *pll,
-                                     uint64_t freq,
-                                     uint32_t *dot_clock_p,
-                                     uint32_t *fb_div_p,
-                                     uint32_t *frac_fb_div_p,
-                                     uint32_t *ref_div_p,
-                                     uint32_t *post_div_p)
+static bool
+calc_fb_div(struct radeon_pll *pll,
+            uint32_t freq,
+            uint32_t post_div,
+            uint32_t ref_div,
+            uint32_t *fb_div,
+            uint32_t *fb_div_frac)
 {
-        fixed20_12 m, n, frac_n, p, f_vco, f_pclk, best_freq;
-        fixed20_12 pll_out_max, pll_out_min;
-        fixed20_12 pll_in_max, pll_in_min;
-        fixed20_12 reference_freq;
-        fixed20_12 error, ffreq, a, b;
-
-        pll_out_max.full = rfixed_const(pll->pll_out_max);
-        pll_out_min.full = rfixed_const(pll->pll_out_min);
-        pll_in_max.full = rfixed_const(pll->pll_in_max);
-        pll_in_min.full = rfixed_const(pll->pll_in_min);
-        reference_freq.full = rfixed_const(pll->reference_freq);
-        do_div(freq, 10);
+        fixed20_12 feedback_divider, a, b;
+        u32 vco_freq;
+
+        vco_freq = freq * post_div;
+        /* feedback_divider = vco_freq * ref_div / pll->reference_freq; */
+        a.full = rfixed_const(pll->reference_freq);
+        feedback_divider.full = rfixed_const(vco_freq);
+        feedback_divider.full = rfixed_div(feedback_divider, a);
+        a.full = rfixed_const(ref_div);
+        feedback_divider.full = rfixed_mul(feedback_divider, a);
+
+        if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) {
+                /* feedback_divider = floor((feedback_divider * 10.0) + 0.5) * 0.1; */
+                a.full = rfixed_const(10);
+                feedback_divider.full = rfixed_mul(feedback_divider, a);
+                feedback_divider.full += rfixed_const_half(0);
+                feedback_divider.full = rfixed_floor(feedback_divider);
+                feedback_divider.full = rfixed_div(feedback_divider, a);
+
+                /* *fb_div = floor(feedback_divider); */
+                a.full = rfixed_floor(feedback_divider);
+                *fb_div = rfixed_trunc(a);
+                /* *fb_div_frac = fmod(feedback_divider, 1.0) * 10.0; */
+                a.full = rfixed_const(10);
+                b.full = rfixed_mul(feedback_divider, a);
+
+                feedback_divider.full = rfixed_floor(feedback_divider);
+                feedback_divider.full = rfixed_mul(feedback_divider, a);
+                feedback_divider.full = b.full - feedback_divider.full;
+                *fb_div_frac = rfixed_trunc(feedback_divider);
+        } else {
+                /* *fb_div = floor(feedback_divider + 0.5); */
+                feedback_divider.full += rfixed_const_half(0);
+                feedback_divider.full = rfixed_floor(feedback_divider);
+
+                *fb_div = rfixed_trunc(feedback_divider);
+                *fb_div_frac = 0;
+        }
+
+        if (((*fb_div) < pll->min_feedback_div) || ((*fb_div) > pll->max_feedback_div))
+                return false;
+        else
+                return true;
+}
+
+static bool
+calc_fb_ref_div(struct radeon_pll *pll,
+                uint32_t freq,
+                uint32_t post_div,
+                uint32_t *fb_div,
+                uint32_t *fb_div_frac,
+                uint32_t *ref_div)
+{
+        fixed20_12 ffreq, max_error, error, pll_out, a;
+        u32 vco;
 
         ffreq.full = rfixed_const(freq);
-        error.full = rfixed_const(100 * 100);
+        /* max_error = ffreq * 0.0025; */
+        a.full = rfixed_const(400);
+        max_error.full = rfixed_div(ffreq, a);
 
-        /* max p */
-        p.full = rfixed_div(pll_out_max, ffreq);
-        p.full = rfixed_floor(p);
-
-        /* min m */
-        m.full = rfixed_div(reference_freq, pll_in_max);
-        m.full = rfixed_ceil(m);
-
-        while (1) {
-                n.full = rfixed_div(ffreq, reference_freq);
-                n.full = rfixed_mul(n, m);
-                n.full = rfixed_mul(n, p);
-
-                f_vco.full = rfixed_div(n, m);
-                f_vco.full = rfixed_mul(f_vco, reference_freq);
-
-                f_pclk.full = rfixed_div(f_vco, p);
-
-                if (f_pclk.full > ffreq.full)
-                        error.full = f_pclk.full - ffreq.full;
-                else
-                        error.full = ffreq.full - f_pclk.full;
-                error.full = rfixed_div(error, f_pclk);
-                a.full = rfixed_const(100 * 100);
-                error.full = rfixed_mul(error, a);
-
-                a.full = rfixed_mul(m, p);
-                a.full = rfixed_div(n, a);
-                best_freq.full = rfixed_mul(reference_freq, a);
-
-                if (rfixed_trunc(error) < 25)
-                        break;
-
-                a.full = rfixed_const(1);
-                m.full = m.full + a.full;
-                a.full = rfixed_div(reference_freq, m);
-                if (a.full >= pll_in_min.full)
-                        continue;
-
-                m.full = rfixed_div(reference_freq, pll_in_max);
-                m.full = rfixed_ceil(m);
-                a.full = rfixed_const(1);
-                p.full = p.full - a.full;
-                a.full = rfixed_mul(p, ffreq);
-                if (a.full >= pll_out_min.full)
-                        continue;
-                else {
-                        DRM_ERROR("Unable to find pll dividers\n");
-                        break;
+        for ((*ref_div) = pll->min_ref_div; (*ref_div) < pll->max_ref_div; ++(*ref_div)) {
+                if (calc_fb_div(pll, freq, post_div, (*ref_div), fb_div, fb_div_frac)) {
+                        vco = pll->reference_freq * (((*fb_div) * 10) + (*fb_div_frac));
+                        vco = vco / ((*ref_div) * 10);
+
+                        if ((vco < pll->pll_out_min) || (vco > pll->pll_out_max))
+                                continue;
+
+                        /* pll_out = vco / post_div; */
+                        a.full = rfixed_const(post_div);
+                        pll_out.full = rfixed_const(vco);
+                        pll_out.full = rfixed_div(pll_out, a);
+
+                        if (pll_out.full >= ffreq.full) {
+                                error.full = pll_out.full - ffreq.full;
+                                if (error.full <= max_error.full)
+                                        return true;
+                        }
                 }
         }
-        a.full = rfixed_const(10);
-        b.full = rfixed_mul(n, a);
-
-        frac_n.full = rfixed_floor(n);
-        frac_n.full = rfixed_mul(frac_n, a);
-        frac_n.full = b.full - frac_n.full;
-
-        *dot_clock_p = rfixed_trunc(best_freq);
-        *fb_div_p = rfixed_trunc(n);
-        *frac_fb_div_p = rfixed_trunc(frac_n);
-        *ref_div_p = rfixed_trunc(m);
-        *post_div_p = rfixed_trunc(p);
-        DRM_DEBUG("%u %d.%d, %d, %d\n", *dot_clock_p * 10, *fb_div_p, *frac_fb_div_p, *ref_div_p, *post_div_p);
+        return false;
+}
+
+static void radeon_compute_pll_new(struct radeon_pll *pll,
+                                   uint64_t freq,
+                                   uint32_t *dot_clock_p,
+                                   uint32_t *fb_div_p,
+                                   uint32_t *frac_fb_div_p,
+                                   uint32_t *ref_div_p,
+                                   uint32_t *post_div_p)
+{
+        u32 fb_div = 0, fb_div_frac = 0, post_div = 0, ref_div = 0;
+        u32 best_freq = 0, vco_frequency;
+
+        /* freq = freq / 10; */
+        do_div(freq, 10);
+
+        if (pll->flags & RADEON_PLL_USE_POST_DIV) {
+                post_div = pll->post_div;
+                if ((post_div < pll->min_post_div) || (post_div > pll->max_post_div))
+                        goto done;
+
+                vco_frequency = freq * post_div;
+                if ((vco_frequency < pll->pll_out_min) || (vco_frequency > pll->pll_out_max))
+                        goto done;
+
+                if (pll->flags & RADEON_PLL_USE_REF_DIV) {
+                        ref_div = pll->reference_div;
+                        if ((ref_div < pll->min_ref_div) || (ref_div > pll->max_ref_div))
+                                goto done;
+                        if (!calc_fb_div(pll, freq, post_div, ref_div, &fb_div, &fb_div_frac))
+                                goto done;
+                }
+        } else {
+                for (post_div = pll->max_post_div; post_div >= pll->min_post_div; --post_div) {
+                        if (pll->flags & RADEON_PLL_LEGACY) {
+                                if ((post_div == 5) ||
+                                    (post_div == 7) ||
+                                    (post_div == 9) ||
+                                    (post_div == 10) ||
+                                    (post_div == 11))
+                                        continue;
+                        }
+
+                        if ((pll->flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1))
+                                continue;
+
+                        vco_frequency = freq * post_div;
+                        if ((vco_frequency < pll->pll_out_min) || (vco_frequency > pll->pll_out_max))
+                                continue;
+
+                        if (pll->flags & RADEON_PLL_USE_REF_DIV) {
+                                ref_div = pll->reference_div;
+                                if ((ref_div < pll->min_ref_div) || (ref_div > pll->max_ref_div))
+                                        goto done;
+                                if (calc_fb_div(pll, freq, post_div, ref_div, &fb_div, &fb_div_frac))
+                                        break;
+                        } else {
+                                if (calc_fb_ref_div(pll, freq, post_div, &fb_div, &fb_div_frac, &ref_div))
+                                        break;
+                        }
+                }
+        }
+
+        best_freq = pll->reference_freq * 10 * fb_div;
+        best_freq += pll->reference_freq * fb_div_frac;
+        best_freq = best_freq / (ref_div * post_div);
+
+done:
+        if (best_freq == 0)
+                DRM_ERROR("Couldn't find valid PLL dividers\n");
+
+        *dot_clock_p = best_freq / 10;
+        *fb_div_p = fb_div;
+        *frac_fb_div_p = fb_div_frac;
+        *ref_div_p = ref_div;
+        *post_div_p = post_div;
+
+        DRM_DEBUG("%u %d.%d, %d, %d\n", *dot_clock_p, *fb_div_p, *frac_fb_div_p, *ref_div_p, *post_div_p);
 }
@@ -703,9 +781,9 @@ void radeon_compute_pll(struct radeon_pll *pll,
                         uint32_t *post_div_p)
 {
         switch (pll->algo) {
-        case PLL_ALGO_AVIVO:
-                radeon_compute_pll_avivo(pll, freq, dot_clock_p, fb_div_p,
-                                         frac_fb_div_p, ref_div_p, post_div_p);
+        case PLL_ALGO_NEW:
+                radeon_compute_pll_new(pll, freq, dot_clock_p, fb_div_p,
+                                       frac_fb_div_p, ref_div_p, post_div_p);
                 break;
         case PLL_ALGO_LEGACY:
         default:
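
For readers unfamiliar with the fixed20_12 helpers, the comments in the new code spell out the underlying arithmetic: calc_fb_div() computes feedback = freq * post_div * ref_div / reference_freq (optionally rounded to one fractional decimal digit when RADEON_PLL_USE_FRAC_FB_DIV is set), calc_fb_ref_div() only accepts a candidate whose output lands at or above the target by at most ffreq / 400, i.e. 0.25%, and radeon_compute_pll_new() rebuilds the dot clock as reference_freq * (fb_div * 10 + fb_div_frac) / (ref_div * post_div * 10). The standalone sketch below mirrors that arithmetic in plain floating point purely as an illustration; the helper name and the numbers in main() are made up, and it is not driver code.

#include <math.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative sketch only (not driver code): the feedback-divider math
 * from calc_fb_div() redone in floating point.  Units are whatever "freq"
 * is in after radeon_compute_pll_new() has divided it by 10. */
static void calc_fb_div_float(uint32_t freq, uint32_t post_div, uint32_t ref_div,
                              uint32_t reference_freq, bool use_frac,
                              uint32_t *fb_div, uint32_t *fb_div_frac)
{
        /* feedback_divider = vco_freq * ref_div / reference_freq */
        double fb = (double)freq * post_div * ref_div / reference_freq;

        if (use_frac) {
                /* keep one fractional decimal digit, rounded (the *10 / *0.1 dance) */
                fb = floor(fb * 10.0 + 0.5) / 10.0;
                *fb_div = (uint32_t)floor(fb);
                *fb_div_frac = (uint32_t)(fmod(fb, 1.0) * 10.0 + 0.5); /* rounded for float safety */
        } else {
                *fb_div = (uint32_t)floor(fb + 0.5);
                *fb_div_frac = 0;
        }
}

int main(void)
{
        uint32_t fb_div, fb_div_frac, best;

        /* made-up example values, not taken from any real clock table */
        calc_fb_div_float(13500, 4, 2, 2700, true, &fb_div, &fb_div_frac);

        /* dot-clock reconstruction mirrors radeon_compute_pll_new():
         * best = reference_freq * (fb_div * 10 + fb_div_frac) / (ref_div * post_div * 10) */
        best = 2700 * (fb_div * 10 + fb_div_frac) / (2 * 4 * 10);

        /* prints "fb 40.0 -> clock 13500", i.e. the target is hit exactly */
        printf("fb %u.%u -> clock %u\n", fb_div, fb_div_frac, best);
        return 0;
}

The in-kernel version performs the same steps with fixed20_12 values so it can run without floating point.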
@@ -86,7 +86,7 @@ int radeon_benchmarking = 0;
 int radeon_testing = 0;
 int radeon_connector_table = 0;
 int radeon_tv = 1;
-int radeon_new_pll = 1;
+int radeon_new_pll = -1;
 int radeon_dynpm = -1;
 int radeon_audio = 1;
 
@@ -123,7 +123,7 @@ module_param_named(connector_table, radeon_connector_table, int, 0444);
 MODULE_PARM_DESC(tv, "TV enable (0 = disable)");
 module_param_named(tv, radeon_tv, int, 0444);
 
-MODULE_PARM_DESC(new_pll, "Select new PLL code for AVIVO chips");
+MODULE_PARM_DESC(new_pll, "Select new PLL code");
 module_param_named(new_pll, radeon_new_pll, int, 0444);
 
 MODULE_PARM_DESC(dynpm, "Disable/Enable dynamic power management (1 = enable)");
@@ -703,7 +703,10 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
                 pll = &rdev->clock.p1pll;
 
         pll->flags = RADEON_PLL_LEGACY;
-        pll->algo = PLL_ALGO_LEGACY;
+        if (radeon_new_pll == 1)
+                pll->algo = PLL_ALGO_NEW;
+        else
+                pll->algo = PLL_ALGO_LEGACY;
 
         if (mode->clock > 200000) /* range limits??? */
                 pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
@@ -133,7 +133,7 @@ struct radeon_tmds_pll {
 /* pll algo */
 enum radeon_pll_algo {
         PLL_ALGO_LEGACY,
-        PLL_ALGO_AVIVO
+        PLL_ALGO_NEW
 };
 
 struct radeon_pll {