Commit 822ad79f authored by Ben Skeggs

drm/nouveau/clk: switch to device pri macros

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
parent 14caba44
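The change is mechanical and repeats across every hunk below: each function looks up the struct nvkm_device from its subdev once, then the old object-based accessors (nv_rd32, nv_wr32, nv_mask) become the device-based pri macros (nvkm_rd32, nvkm_wr32, nvkm_mask). A minimal sketch of the shape of the conversion; the function name here is illustrative only and not part of the patch:

/* illustrative only: the conversion pattern applied throughout the hunks below */
static u32
example_read_coef(struct gf100_clk *clk, u32 pll)
{
	/* old form: u32 coef = nv_rd32(clk, pll + 0x04); */
	struct nvkm_device *device = clk->base.subdev.device;
	u32 coef = nvkm_rd32(device, pll + 0x04);

	return coef;
}

Calls that still take the subdev object itself, such as nv_wait() and nv_error(), are left untouched by this commit.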
@@ -47,7 +47,8 @@ static u32 read_div(struct gf100_clk *, int, u32, u32);
 static u32
 read_vco(struct gf100_clk *clk, u32 dsrc)
 {
-	u32 ssrc = nv_rd32(clk, dsrc);
+	struct nvkm_device *device = clk->base.subdev.device;
+	u32 ssrc = nvkm_rd32(device, dsrc);
 	if (!(ssrc & 0x00000100))
 		return clk->base.read(&clk->base, nv_clk_src_sppll0);
 	return clk->base.read(&clk->base, nv_clk_src_sppll1);
@@ -56,8 +57,9 @@ read_vco(struct gf100_clk *clk, u32 dsrc)
 static u32
 read_pll(struct gf100_clk *clk, u32 pll)
 {
-	u32 ctrl = nv_rd32(clk, pll + 0x00);
-	u32 coef = nv_rd32(clk, pll + 0x04);
+	struct nvkm_device *device = clk->base.subdev.device;
+	u32 ctrl = nvkm_rd32(device, pll + 0x00);
+	u32 coef = nvkm_rd32(device, pll + 0x04);
 	u32 P = (coef & 0x003f0000) >> 16;
 	u32 N = (coef & 0x0000ff00) >> 8;
 	u32 M = (coef & 0x000000ff) >> 0;
@@ -69,7 +71,7 @@ read_pll(struct gf100_clk *clk, u32 pll)
 	switch (pll) {
 	case 0x00e800:
 	case 0x00e820:
-		sclk = nv_device(clk)->crystal;
+		sclk = device->crystal;
 		P = 1;
 		break;
 	case 0x132000:
@@ -94,13 +96,14 @@ read_pll(struct gf100_clk *clk, u32 pll)
 static u32
 read_div(struct gf100_clk *clk, int doff, u32 dsrc, u32 dctl)
 {
-	u32 ssrc = nv_rd32(clk, dsrc + (doff * 4));
-	u32 sctl = nv_rd32(clk, dctl + (doff * 4));
+	struct nvkm_device *device = clk->base.subdev.device;
+	u32 ssrc = nvkm_rd32(device, dsrc + (doff * 4));
+	u32 sctl = nvkm_rd32(device, dctl + (doff * 4));

 	switch (ssrc & 0x00000003) {
 	case 0:
 		if ((ssrc & 0x00030000) != 0x00030000)
-			return nv_device(clk)->crystal;
+			return device->crystal;
 		return 108000;
 	case 2:
 		return 100000;
@@ -120,8 +123,9 @@ read_div(struct gf100_clk *clk, int doff, u32 dsrc, u32 dctl)
 static u32
 read_clk(struct gf100_clk *clk, int idx)
 {
-	u32 sctl = nv_rd32(clk, 0x137250 + (idx * 4));
-	u32 ssel = nv_rd32(clk, 0x137100);
+	struct nvkm_device *device = clk->base.subdev.device;
+	u32 sctl = nvkm_rd32(device, 0x137250 + (idx * 4));
+	u32 ssel = nvkm_rd32(device, 0x137100);
 	u32 sclk, sdiv;

 	if (ssel & (1 << idx)) {
@@ -145,7 +149,7 @@ static int
 gf100_clk_read(struct nvkm_clk *obj, enum nv_clk_src src)
 {
 	struct gf100_clk *clk = container_of(obj, typeof(*clk), base);
-	struct nvkm_device *device = nv_device(clk);
+	struct nvkm_device *device = clk->base.subdev.device;

 	switch (src) {
 	case nv_clk_src_crystal:
@@ -166,7 +170,7 @@ gf100_clk_read(struct nvkm_clk *obj, enum nv_clk_src src)
 	case nv_clk_src_mdiv:
 		return read_div(clk, 0, 0x137300, 0x137310);
 	case nv_clk_src_mem:
-		if (nv_rd32(clk, 0x1373f0) & 0x00000002)
+		if (nvkm_rd32(device, 0x1373f0) & 0x00000002)
 			return clk->base.read(&clk->base, nv_clk_src_mpll);
 		return clk->base.read(&clk->base, nv_clk_src_mdiv);
@@ -329,16 +333,18 @@ static void
 gf100_clk_prog_0(struct gf100_clk *clk, int idx)
 {
 	struct gf100_clk_info *info = &clk->eng[idx];
+	struct nvkm_device *device = clk->base.subdev.device;
 	if (idx < 7 && !info->ssel) {
-		nv_mask(clk, 0x1371d0 + (idx * 0x04), 0x80003f3f, info->ddiv);
-		nv_wr32(clk, 0x137160 + (idx * 0x04), info->dsrc);
+		nvkm_mask(device, 0x1371d0 + (idx * 0x04), 0x80003f3f, info->ddiv);
+		nvkm_wr32(device, 0x137160 + (idx * 0x04), info->dsrc);
 	}
 }

 static void
 gf100_clk_prog_1(struct gf100_clk *clk, int idx)
 {
-	nv_mask(clk, 0x137100, (1 << idx), 0x00000000);
+	struct nvkm_device *device = clk->base.subdev.device;
+	nvkm_mask(device, 0x137100, (1 << idx), 0x00000000);
 	nv_wait(clk, 0x137100, (1 << idx), 0x00000000);
 }
@@ -346,15 +352,16 @@ static void
 gf100_clk_prog_2(struct gf100_clk *clk, int idx)
 {
 	struct gf100_clk_info *info = &clk->eng[idx];
+	struct nvkm_device *device = clk->base.subdev.device;
 	const u32 addr = 0x137000 + (idx * 0x20);
 	if (idx <= 7) {
-		nv_mask(clk, addr + 0x00, 0x00000004, 0x00000000);
-		nv_mask(clk, addr + 0x00, 0x00000001, 0x00000000);
+		nvkm_mask(device, addr + 0x00, 0x00000004, 0x00000000);
+		nvkm_mask(device, addr + 0x00, 0x00000001, 0x00000000);
 		if (info->coef) {
-			nv_wr32(clk, addr + 0x04, info->coef);
-			nv_mask(clk, addr + 0x00, 0x00000001, 0x00000001);
+			nvkm_wr32(device, addr + 0x04, info->coef);
+			nvkm_mask(device, addr + 0x00, 0x00000001, 0x00000001);
 			nv_wait(clk, addr + 0x00, 0x00020000, 0x00020000);
-			nv_mask(clk, addr + 0x00, 0x00020004, 0x00000004);
+			nvkm_mask(device, addr + 0x00, 0x00020004, 0x00000004);
 		}
 	}
 }
@@ -363,8 +370,9 @@ static void
 gf100_clk_prog_3(struct gf100_clk *clk, int idx)
 {
 	struct gf100_clk_info *info = &clk->eng[idx];
+	struct nvkm_device *device = clk->base.subdev.device;
 	if (info->ssel) {
-		nv_mask(clk, 0x137100, (1 << idx), info->ssel);
+		nvkm_mask(device, 0x137100, (1 << idx), info->ssel);
 		nv_wait(clk, 0x137100, (1 << idx), info->ssel);
 	}
 }
@@ -373,7 +381,8 @@ static void
 gf100_clk_prog_4(struct gf100_clk *clk, int idx)
 {
 	struct gf100_clk_info *info = &clk->eng[idx];
-	nv_mask(clk, 0x137250 + (idx * 0x04), 0x00003f3f, info->mdiv);
+	struct nvkm_device *device = clk->base.subdev.device;
+	nvkm_mask(device, 0x137250 + (idx * 0x04), 0x00003f3f, info->mdiv);
 }

 static int
...
@@ -48,7 +48,8 @@ static u32 read_pll(struct gk104_clk *, u32);
 static u32
 read_vco(struct gk104_clk *clk, u32 dsrc)
 {
-	u32 ssrc = nv_rd32(clk, dsrc);
+	struct nvkm_device *device = clk->base.subdev.device;
+	u32 ssrc = nvkm_rd32(device, dsrc);
 	if (!(ssrc & 0x00000100))
 		return read_pll(clk, 0x00e800);
 	return read_pll(clk, 0x00e820);
@@ -57,8 +58,9 @@ read_vco(struct gk104_clk *clk, u32 dsrc)
 static u32
 read_pll(struct gk104_clk *clk, u32 pll)
 {
-	u32 ctrl = nv_rd32(clk, pll + 0x00);
-	u32 coef = nv_rd32(clk, pll + 0x04);
+	struct nvkm_device *device = clk->base.subdev.device;
+	u32 ctrl = nvkm_rd32(device, pll + 0x00);
+	u32 coef = nvkm_rd32(device, pll + 0x04);
 	u32 P = (coef & 0x003f0000) >> 16;
 	u32 N = (coef & 0x0000ff00) >> 8;
 	u32 M = (coef & 0x000000ff) >> 0;
@@ -71,7 +73,7 @@ read_pll(struct gk104_clk *clk, u32 pll)
 	switch (pll) {
 	case 0x00e800:
 	case 0x00e820:
-		sclk = nv_device(clk)->crystal;
+		sclk = device->crystal;
 		P = 1;
 		break;
 	case 0x132000:
@@ -80,7 +82,7 @@ read_pll(struct gk104_clk *clk, u32 pll)
 		break;
 	case 0x132020:
 		sclk = read_div(clk, 0, 0x137320, 0x137330);
-		fN = nv_rd32(clk, pll + 0x10) >> 16;
+		fN = nvkm_rd32(device, pll + 0x10) >> 16;
 		break;
 	case 0x137000:
 	case 0x137020:
@@ -102,13 +104,14 @@ read_pll(struct gk104_clk *clk, u32 pll)
 static u32
 read_div(struct gk104_clk *clk, int doff, u32 dsrc, u32 dctl)
 {
-	u32 ssrc = nv_rd32(clk, dsrc + (doff * 4));
-	u32 sctl = nv_rd32(clk, dctl + (doff * 4));
+	struct nvkm_device *device = clk->base.subdev.device;
+	u32 ssrc = nvkm_rd32(device, dsrc + (doff * 4));
+	u32 sctl = nvkm_rd32(device, dctl + (doff * 4));

 	switch (ssrc & 0x00000003) {
 	case 0:
 		if ((ssrc & 0x00030000) != 0x00030000)
-			return nv_device(clk)->crystal;
+			return device->crystal;
 		return 108000;
 	case 2:
 		return 100000;
@@ -128,7 +131,8 @@ read_div(struct gk104_clk *clk, int doff, u32 dsrc, u32 dctl)
 static u32
 read_mem(struct gk104_clk *clk)
 {
-	switch (nv_rd32(clk, 0x1373f4) & 0x0000000f) {
+	struct nvkm_device *device = clk->base.subdev.device;
+	switch (nvkm_rd32(device, 0x1373f4) & 0x0000000f) {
 	case 1: return read_pll(clk, 0x132020);
 	case 2: return read_pll(clk, 0x132000);
 	default:
@@ -139,11 +143,12 @@ read_mem(struct gk104_clk *clk)
 static u32
 read_clk(struct gk104_clk *clk, int idx)
 {
-	u32 sctl = nv_rd32(clk, 0x137250 + (idx * 4));
+	struct nvkm_device *device = clk->base.subdev.device;
+	u32 sctl = nvkm_rd32(device, 0x137250 + (idx * 4));
 	u32 sclk, sdiv;

 	if (idx < 7) {
-		u32 ssel = nv_rd32(clk, 0x137100);
+		u32 ssel = nvkm_rd32(device, 0x137100);
 		if (ssel & (1 << idx)) {
 			sclk = read_pll(clk, 0x137000 + (idx * 0x20));
 			sdiv = 1;
@@ -152,7 +157,7 @@ read_clk(struct gk104_clk *clk, int idx)
 			sdiv = 0;
 		}
 	} else {
-		u32 ssrc = nv_rd32(clk, 0x137160 + (idx * 0x04));
+		u32 ssrc = nvkm_rd32(device, 0x137160 + (idx * 0x04));
 		if ((ssrc & 0x00000003) == 0x00000003) {
 			sclk = read_div(clk, idx, 0x137160, 0x1371d0);
 			if (ssrc & 0x00000100) {
@@ -183,7 +188,7 @@ static int
 gk104_clk_read(struct nvkm_clk *obj, enum nv_clk_src src)
 {
 	struct gk104_clk *clk = container_of(obj, typeof(*clk), base);
-	struct nvkm_device *device = nv_device(clk);
+	struct nvkm_device *device = clk->base.subdev.device;

 	switch (src) {
 	case nv_clk_src_crystal:
@@ -349,37 +354,41 @@ static void
 gk104_clk_prog_0(struct gk104_clk *clk, int idx)
 {
 	struct gk104_clk_info *info = &clk->eng[idx];
+	struct nvkm_device *device = clk->base.subdev.device;
 	if (!info->ssel) {
-		nv_mask(clk, 0x1371d0 + (idx * 0x04), 0x8000003f, info->ddiv);
-		nv_wr32(clk, 0x137160 + (idx * 0x04), info->dsrc);
+		nvkm_mask(device, 0x1371d0 + (idx * 0x04), 0x8000003f, info->ddiv);
+		nvkm_wr32(device, 0x137160 + (idx * 0x04), info->dsrc);
 	}
 }

 static void
 gk104_clk_prog_1_0(struct gk104_clk *clk, int idx)
 {
-	nv_mask(clk, 0x137100, (1 << idx), 0x00000000);
+	struct nvkm_device *device = clk->base.subdev.device;
+	nvkm_mask(device, 0x137100, (1 << idx), 0x00000000);
 	nv_wait(clk, 0x137100, (1 << idx), 0x00000000);
 }

 static void
 gk104_clk_prog_1_1(struct gk104_clk *clk, int idx)
 {
-	nv_mask(clk, 0x137160 + (idx * 0x04), 0x00000100, 0x00000000);
+	struct nvkm_device *device = clk->base.subdev.device;
+	nvkm_mask(device, 0x137160 + (idx * 0x04), 0x00000100, 0x00000000);
 }

 static void
 gk104_clk_prog_2(struct gk104_clk *clk, int idx)
 {
 	struct gk104_clk_info *info = &clk->eng[idx];
+	struct nvkm_device *device = clk->base.subdev.device;
 	const u32 addr = 0x137000 + (idx * 0x20);
-	nv_mask(clk, addr + 0x00, 0x00000004, 0x00000000);
-	nv_mask(clk, addr + 0x00, 0x00000001, 0x00000000);
+	nvkm_mask(device, addr + 0x00, 0x00000004, 0x00000000);
+	nvkm_mask(device, addr + 0x00, 0x00000001, 0x00000000);
 	if (info->coef) {
-		nv_wr32(clk, addr + 0x04, info->coef);
-		nv_mask(clk, addr + 0x00, 0x00000001, 0x00000001);
+		nvkm_wr32(device, addr + 0x04, info->coef);
+		nvkm_mask(device, addr + 0x00, 0x00000001, 0x00000001);
 		nv_wait(clk, addr + 0x00, 0x00020000, 0x00020000);
-		nv_mask(clk, addr + 0x00, 0x00020004, 0x00000004);
+		nvkm_mask(device, addr + 0x00, 0x00020004, 0x00000004);
 	}
 }
@@ -387,18 +396,20 @@ static void
 gk104_clk_prog_3(struct gk104_clk *clk, int idx)
 {
 	struct gk104_clk_info *info = &clk->eng[idx];
+	struct nvkm_device *device = clk->base.subdev.device;
 	if (info->ssel)
-		nv_mask(clk, 0x137250 + (idx * 0x04), 0x00003f00, info->mdiv);
+		nvkm_mask(device, 0x137250 + (idx * 0x04), 0x00003f00, info->mdiv);
 	else
-		nv_mask(clk, 0x137250 + (idx * 0x04), 0x0000003f, info->mdiv);
+		nvkm_mask(device, 0x137250 + (idx * 0x04), 0x0000003f, info->mdiv);
 }

 static void
 gk104_clk_prog_4_0(struct gk104_clk *clk, int idx)
 {
 	struct gk104_clk_info *info = &clk->eng[idx];
+	struct nvkm_device *device = clk->base.subdev.device;
 	if (info->ssel) {
-		nv_mask(clk, 0x137100, (1 << idx), info->ssel);
+		nvkm_mask(device, 0x137100, (1 << idx), info->ssel);
 		nv_wait(clk, 0x137100, (1 << idx), info->ssel);
 	}
 }
@@ -407,9 +418,10 @@ static void
 gk104_clk_prog_4_1(struct gk104_clk *clk, int idx)
 {
 	struct gk104_clk_info *info = &clk->eng[idx];
+	struct nvkm_device *device = clk->base.subdev.device;
 	if (info->ssel) {
-		nv_mask(clk, 0x137160 + (idx * 0x04), 0x40000000, 0x40000000);
-		nv_mask(clk, 0x137160 + (idx * 0x04), 0x00000100, 0x00000100);
+		nvkm_mask(device, 0x137160 + (idx * 0x04), 0x40000000, 0x40000000);
+		nvkm_mask(device, 0x137160 + (idx * 0x04), 0x00000100, 0x00000100);
 	}
 }
...
@@ -126,9 +126,10 @@ struct gk20a_clk {
 static void
 gk20a_pllg_read_mnp(struct gk20a_clk *clk)
 {
+	struct nvkm_device *device = clk->base.subdev.device;
 	u32 val;

-	val = nv_rd32(clk, GPCPLL_COEFF);
+	val = nvkm_rd32(device, GPCPLL_COEFF);
 	clk->m = (val >> GPCPLL_COEFF_M_SHIFT) & MASK(GPCPLL_COEFF_M_WIDTH);
 	clk->n = (val >> GPCPLL_COEFF_N_SHIFT) & MASK(GPCPLL_COEFF_N_WIDTH);
 	clk->pl = (val >> GPCPLL_COEFF_P_SHIFT) & MASK(GPCPLL_COEFF_P_WIDTH);
@@ -265,51 +266,52 @@ gk20a_pllg_calc_mnp(struct gk20a_clk *clk, unsigned long rate)
 static int
 gk20a_pllg_slide(struct gk20a_clk *clk, u32 n)
 {
+	struct nvkm_device *device = clk->base.subdev.device;
 	u32 val;
 	int ramp_timeout;

 	/* get old coefficients */
-	val = nv_rd32(clk, GPCPLL_COEFF);
+	val = nvkm_rd32(device, GPCPLL_COEFF);
 	/* do nothing if NDIV is the same */
 	if (n == ((val >> GPCPLL_COEFF_N_SHIFT) & MASK(GPCPLL_COEFF_N_WIDTH)))
 		return 0;

 	/* setup */
-	nv_mask(clk, GPCPLL_CFG2, 0xff << GPCPLL_CFG2_PLL_STEPA_SHIFT,
+	nvkm_mask(device, GPCPLL_CFG2, 0xff << GPCPLL_CFG2_PLL_STEPA_SHIFT,
 		0x2b << GPCPLL_CFG2_PLL_STEPA_SHIFT);
-	nv_mask(clk, GPCPLL_CFG3, 0xff << GPCPLL_CFG3_PLL_STEPB_SHIFT,
+	nvkm_mask(device, GPCPLL_CFG3, 0xff << GPCPLL_CFG3_PLL_STEPB_SHIFT,
 		0xb << GPCPLL_CFG3_PLL_STEPB_SHIFT);
 	/* pll slowdown mode */
-	nv_mask(clk, GPCPLL_NDIV_SLOWDOWN,
+	nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN,
 		BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT),
 		BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT));
 	/* new ndiv ready for ramp */
-	val = nv_rd32(clk, GPCPLL_COEFF);
+	val = nvkm_rd32(device, GPCPLL_COEFF);
 	val &= ~(MASK(GPCPLL_COEFF_N_WIDTH) << GPCPLL_COEFF_N_SHIFT);
 	val |= (n & MASK(GPCPLL_COEFF_N_WIDTH)) << GPCPLL_COEFF_N_SHIFT;
 	udelay(1);
-	nv_wr32(clk, GPCPLL_COEFF, val);
+	nvkm_wr32(device, GPCPLL_COEFF, val);
 	/* dynamic ramp to new ndiv */
-	val = nv_rd32(clk, GPCPLL_NDIV_SLOWDOWN);
+	val = nvkm_rd32(device, GPCPLL_NDIV_SLOWDOWN);
 	val |= 0x1 << GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT;
 	udelay(1);
-	nv_wr32(clk, GPCPLL_NDIV_SLOWDOWN, val);
+	nvkm_wr32(device, GPCPLL_NDIV_SLOWDOWN, val);

 	for (ramp_timeout = 500; ramp_timeout > 0; ramp_timeout--) {
 		udelay(1);
-		val = nv_rd32(clk, GPC_BCAST_NDIV_SLOWDOWN_DEBUG);
+		val = nvkm_rd32(device, GPC_BCAST_NDIV_SLOWDOWN_DEBUG);
 		if (val & GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK)
 			break;
 	}

 	/* exit slowdown mode */
-	nv_mask(clk, GPCPLL_NDIV_SLOWDOWN,
+	nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN,
 		BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT) |
 		BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT), 0);
-	nv_rd32(clk, GPCPLL_NDIV_SLOWDOWN);
+	nvkm_rd32(device, GPCPLL_NDIV_SLOWDOWN);

 	if (ramp_timeout <= 0) {
 		nv_error(clk, "gpcpll dynamic ramp timeout\n");
@@ -322,30 +324,33 @@ gk20a_pllg_slide(struct gk20a_clk *clk, u32 n)
 static void
 _gk20a_pllg_enable(struct gk20a_clk *clk)
 {
-	nv_mask(clk, GPCPLL_CFG, GPCPLL_CFG_ENABLE, GPCPLL_CFG_ENABLE);
-	nv_rd32(clk, GPCPLL_CFG);
+	struct nvkm_device *device = clk->base.subdev.device;
+	nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_ENABLE, GPCPLL_CFG_ENABLE);
+	nvkm_rd32(device, GPCPLL_CFG);
 }

 static void
 _gk20a_pllg_disable(struct gk20a_clk *clk)
 {
-	nv_mask(clk, GPCPLL_CFG, GPCPLL_CFG_ENABLE, 0);
-	nv_rd32(clk, GPCPLL_CFG);
+	struct nvkm_device *device = clk->base.subdev.device;
+	nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_ENABLE, 0);
+	nvkm_rd32(device, GPCPLL_CFG);
 }

 static int
 _gk20a_pllg_program_mnp(struct gk20a_clk *clk, bool allow_slide)
 {
+	struct nvkm_device *device = clk->base.subdev.device;
 	u32 val, cfg;
 	u32 m_old, pl_old, n_lo;

 	/* get old coefficients */
-	val = nv_rd32(clk, GPCPLL_COEFF);
+	val = nvkm_rd32(device, GPCPLL_COEFF);
 	m_old = (val >> GPCPLL_COEFF_M_SHIFT) & MASK(GPCPLL_COEFF_M_WIDTH);
 	pl_old = (val >> GPCPLL_COEFF_P_SHIFT) & MASK(GPCPLL_COEFF_P_WIDTH);

 	/* do NDIV slide if there is no change in M and PL */
-	cfg = nv_rd32(clk, GPCPLL_CFG);
+	cfg = nvkm_rd32(device, GPCPLL_CFG);
 	if (allow_slide && clk->m == m_old && clk->pl == pl_old &&
 	    (cfg & GPCPLL_CFG_ENABLE)) {
 		return gk20a_pllg_slide(clk, clk->n);
@@ -362,21 +367,21 @@ _gk20a_pllg_program_mnp(struct gk20a_clk *clk, bool allow_slide)
 	}

 	/* split FO-to-bypass jump in halfs by setting out divider 1:2 */
-	nv_mask(clk, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
+	nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
 		0x2 << GPC2CLK_OUT_VCODIV_SHIFT);

 	/* put PLL in bypass before programming it */
-	val = nv_rd32(clk, SEL_VCO);
+	val = nvkm_rd32(device, SEL_VCO);
 	val &= ~(BIT(SEL_VCO_GPC2CLK_OUT_SHIFT));
 	udelay(2);
-	nv_wr32(clk, SEL_VCO, val);
+	nvkm_wr32(device, SEL_VCO, val);

 	/* get out from IDDQ */
-	val = nv_rd32(clk, GPCPLL_CFG);
+	val = nvkm_rd32(device, GPCPLL_CFG);
 	if (val & GPCPLL_CFG_IDDQ) {
 		val &= ~GPCPLL_CFG_IDDQ;
-		nv_wr32(clk, GPCPLL_CFG, val);
-		nv_rd32(clk, GPCPLL_CFG);
+		nvkm_wr32(device, GPCPLL_CFG, val);
+		nvkm_rd32(device, GPCPLL_CFG);
 		udelay(2);
 	}
@@ -390,14 +395,14 @@ _gk20a_pllg_program_mnp(struct gk20a_clk *clk, bool allow_slide)
 	val = clk->m << GPCPLL_COEFF_M_SHIFT;
 	val |= (allow_slide ? n_lo : clk->n) << GPCPLL_COEFF_N_SHIFT;
 	val |= clk->pl << GPCPLL_COEFF_P_SHIFT;
-	nv_wr32(clk, GPCPLL_COEFF, val);
+	nvkm_wr32(device, GPCPLL_COEFF, val);

 	_gk20a_pllg_enable(clk);

-	val = nv_rd32(clk, GPCPLL_CFG);
+	val = nvkm_rd32(device, GPCPLL_CFG);
 	if (val & GPCPLL_CFG_LOCK_DET_OFF) {
 		val &= ~GPCPLL_CFG_LOCK_DET_OFF;
-		nv_wr32(clk, GPCPLL_CFG, val);
+		nvkm_wr32(device, GPCPLL_CFG, val);
 	}

 	if (!nvkm_timer_wait_eq(clk, 300000, GPCPLL_CFG, GPCPLL_CFG_LOCK,
@@ -407,13 +412,13 @@ _gk20a_pllg_program_mnp(struct gk20a_clk *clk, bool allow_slide)
 	}

 	/* switch to VCO mode */
-	nv_mask(clk, SEL_VCO, 0, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT));
+	nvkm_mask(device, SEL_VCO, 0, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT));

 	/* restore out divider 1:1 */
-	val = nv_rd32(clk, GPC2CLK_OUT);
+	val = nvkm_rd32(device, GPC2CLK_OUT);
 	val &= ~GPC2CLK_OUT_VCODIV_MASK;
 	udelay(2);
-	nv_wr32(clk, GPC2CLK_OUT, val);
+	nvkm_wr32(device, GPC2CLK_OUT, val);

 	/* slide up to new NDIV */
 	return allow_slide ? gk20a_pllg_slide(clk, clk->n) : 0;
@@ -434,14 +439,15 @@ gk20a_pllg_program_mnp(struct gk20a_clk *clk)
 static void
 gk20a_pllg_disable(struct gk20a_clk *clk)
 {
+	struct nvkm_device *device = clk->base.subdev.device;
 	u32 val;

 	/* slide to VCO min */
-	val = nv_rd32(clk, GPCPLL_CFG);
+	val = nvkm_rd32(device, GPCPLL_CFG);
 	if (val & GPCPLL_CFG_ENABLE) {
 		u32 coeff, m, n_lo;

-		coeff = nv_rd32(clk, GPCPLL_COEFF);
+		coeff = nvkm_rd32(device, GPCPLL_COEFF);
 		m = (coeff >> GPCPLL_COEFF_M_SHIFT) & MASK(GPCPLL_COEFF_M_WIDTH);
 		n_lo = DIV_ROUND_UP(m * clk->params->min_vco,
 				    clk->parent_rate / MHZ);
@@ -449,7 +455,7 @@ gk20a_pllg_disable(struct gk20a_clk *clk)
 	}

 	/* put PLL in bypass before disabling it */
-	nv_mask(clk, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT), 0);
+	nvkm_mask(device, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT), 0);

 	_gk20a_pllg_disable(clk);
 }
@@ -561,10 +567,11 @@ static int
 gk20a_clk_read(struct nvkm_clk *obj, enum nv_clk_src src)
 {
 	struct gk20a_clk *clk = container_of(obj, typeof(*clk), base);
+	struct nvkm_device *device = clk->base.subdev.device;

 	switch (src) {
 	case nv_clk_src_crystal:
-		return nv_device(clk)->crystal;
+		return device->crystal;
 	case nv_clk_src_gpc:
 		gk20a_pllg_read_mnp(clk);
 		return gk20a_pllg_calc_rate(clk) / GK20A_CLK_GPC_MDIV;
@@ -613,9 +620,10 @@ static int
 gk20a_clk_init(struct nvkm_object *object)
 {
 	struct gk20a_clk *clk = (void *)object;
+	struct nvkm_device *device = clk->base.subdev.device;
 	int ret;

-	nv_mask(clk, GPC2CLK_OUT, GPC2CLK_OUT_INIT_MASK, GPC2CLK_OUT_INIT_VAL);
+	nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_INIT_MASK, GPC2CLK_OUT_INIT_VAL);

 	ret = nvkm_clk_init(&clk->base);
 	if (ret)
...
@@ -41,11 +41,12 @@ static u32 read_pll(struct gt215_clk *, int, u32);
 static u32
 read_vco(struct gt215_clk *clk, int idx)
 {
-	u32 sctl = nv_rd32(clk, 0x4120 + (idx * 4));
+	struct nvkm_device *device = clk->base.subdev.device;
+	u32 sctl = nvkm_rd32(device, 0x4120 + (idx * 4));

 	switch (sctl & 0x00000030) {
 	case 0x00000000:
-		return nv_device(clk)->crystal;
+		return device->crystal;
 	case 0x00000020:
 		return read_pll(clk, 0x41, 0x00e820);
 	case 0x00000030:
@@ -58,19 +59,20 @@ read_vco(struct gt215_clk *clk, int idx)
 static u32
 read_clk(struct gt215_clk *clk, int idx, bool ignore_en)
 {
+	struct nvkm_device *device = clk->base.subdev.device;
 	u32 sctl, sdiv, sclk;

 	/* refclk for the 0xe8xx plls is a fixed frequency */
 	if (idx >= 0x40) {
-		if (nv_device(clk)->chipset == 0xaf) {
+		if (device->chipset == 0xaf) {
 			/* no joke.. seriously.. sigh.. */
-			return nv_rd32(clk, 0x00471c) * 1000;
+			return nvkm_rd32(device, 0x00471c) * 1000;
 		}
-		return nv_device(clk)->crystal;
+		return device->crystal;
 	}

-	sctl = nv_rd32(clk, 0x4120 + (idx * 4));
+	sctl = nvkm_rd32(device, 0x4120 + (idx * 4));
 	if (!ignore_en && !(sctl & 0x00000100))
 		return 0;
@@ -82,7 +84,7 @@ read_clk(struct gt215_clk *clk, int idx, bool ignore_en)
 	switch (sctl & 0x00003000) {
 	case 0x00000000:
 		if (!(sctl & 0x00000200))
-			return nv_device(clk)->crystal;
+			return device->crystal;
 		return 0;
 	case 0x00002000:
 		if (sctl & 0x00000040)
@@ -104,12 +106,13 @@ read_clk(struct gt215_clk *clk, int idx, bool ignore_en)
 static u32
 read_pll(struct gt215_clk *clk, int idx, u32 pll)
 {
-	u32 ctrl = nv_rd32(clk, pll + 0);
+	struct nvkm_device *device = clk->base.subdev.device;
+	u32 ctrl = nvkm_rd32(device, pll + 0);
 	u32 sclk = 0, P = 1, N = 1, M = 1;

 	if (!(ctrl & 0x00000008)) {
 		if (ctrl & 0x00000001) {
-			u32 coef = nv_rd32(clk, pll + 4);
+			u32 coef = nvkm_rd32(device, pll + 4);
 			M = (coef & 0x000000ff) >> 0;
 			N = (coef & 0x0000ff00) >> 8;
 			P = (coef & 0x003f0000) >> 16;
@@ -136,11 +139,12 @@ static int
 gt215_clk_read(struct nvkm_clk *obj, enum nv_clk_src src)
 {
 	struct gt215_clk *clk = container_of(obj, typeof(*clk), base);
+	struct nvkm_device *device = clk->base.subdev.device;
 	u32 hsrc;

 	switch (src) {
 	case nv_clk_src_crystal:
-		return nv_device(clk)->crystal;
+		return device->crystal;
 	case nv_clk_src_core:
 	case nv_clk_src_core_intm:
 		return read_pll(clk, 0x00, 0x4200);
@@ -155,7 +159,7 @@ gt215_clk_read(struct nvkm_clk *obj, enum nv_clk_src src)
 	case nv_clk_src_daemon:
 		return read_clk(clk, 0x25, false);
 	case nv_clk_src_host:
-		hsrc = (nv_rd32(clk, 0xc040) & 0x30000000) >> 28;
+		hsrc = (nvkm_rd32(device, 0xc040) & 0x30000000) >> 28;
 		switch (hsrc) {
 		case 0:
 			return read_clk(clk, 0x1d, false);
@@ -297,11 +301,12 @@ calc_host(struct gt215_clk *clk, struct nvkm_cstate *cstate)
 int
 gt215_clk_pre(struct nvkm_clk *clk, unsigned long *flags)
 {
+	struct nvkm_device *device = clk->subdev.device;
 	struct nvkm_fifo *fifo = nvkm_fifo(clk);

 	/* halt and idle execution engines */
-	nv_mask(clk, 0x020060, 0x00070000, 0x00000000);
-	nv_mask(clk, 0x002504, 0x00000001, 0x00000001);
+	nvkm_mask(device, 0x020060, 0x00070000, 0x00000000);
+	nvkm_mask(device, 0x002504, 0x00000001, 0x00000001);
 	/* Wait until the interrupt handler is finished */
 	if (!nv_wait(clk, 0x000100, 0xffffffff, 0x00000000))
 		return -EBUSY;
@@ -320,26 +325,29 @@ gt215_clk_pre(struct nvkm_clk *clk, unsigned long *flags)
 void
 gt215_clk_post(struct nvkm_clk *clk, unsigned long *flags)
 {
+	struct nvkm_device *device = clk->subdev.device;
 	struct nvkm_fifo *fifo = nvkm_fifo(clk);

 	if (fifo && flags)
 		fifo->start(fifo, flags);

-	nv_mask(clk, 0x002504, 0x00000001, 0x00000000);
-	nv_mask(clk, 0x020060, 0x00070000, 0x00040000);
+	nvkm_mask(device, 0x002504, 0x00000001, 0x00000000);
+	nvkm_mask(device, 0x020060, 0x00070000, 0x00040000);
 }

 static void
 disable_clk_src(struct gt215_clk *clk, u32 src)
 {
-	nv_mask(clk, src, 0x00000100, 0x00000000);
-	nv_mask(clk, src, 0x00000001, 0x00000000);
+	struct nvkm_device *device = clk->base.subdev.device;
+	nvkm_mask(device, src, 0x00000100, 0x00000000);
+	nvkm_mask(device, src, 0x00000001, 0x00000000);
 }

 static void
 prog_pll(struct gt215_clk *clk, int idx, u32 pll, int dom)
 {
 	struct gt215_clk_info *info = &clk->eng[dom];
+	struct nvkm_device *device = clk->base.subdev.device;
 	const u32 src0 = 0x004120 + (idx * 4);
 	const u32 src1 = 0x004160 + (idx * 4);
 	const u32 ctrl = pll + 0;
@@ -348,30 +356,30 @@ prog_pll(struct gt215_clk *clk, int idx, u32 pll, int dom)
 	if (info->pll) {
 		/* Always start from a non-PLL clock */
-		bypass = nv_rd32(clk, ctrl) & 0x00000008;
+		bypass = nvkm_rd32(device, ctrl) & 0x00000008;
 		if (!bypass) {
-			nv_mask(clk, src1, 0x00000101, 0x00000101);
-			nv_mask(clk, ctrl, 0x00000008, 0x00000008);
+			nvkm_mask(device, src1, 0x00000101, 0x00000101);
+			nvkm_mask(device, ctrl, 0x00000008, 0x00000008);
 			udelay(20);
 		}

-		nv_mask(clk, src0, 0x003f3141, 0x00000101 | info->clk);
-		nv_wr32(clk, coef, info->pll);
-		nv_mask(clk, ctrl, 0x00000015, 0x00000015);
-		nv_mask(clk, ctrl, 0x00000010, 0x00000000);
+		nvkm_mask(device, src0, 0x003f3141, 0x00000101 | info->clk);
+		nvkm_wr32(device, coef, info->pll);
+		nvkm_mask(device, ctrl, 0x00000015, 0x00000015);
+		nvkm_mask(device, ctrl, 0x00000010, 0x00000000);
 		if (!nv_wait(clk, ctrl, 0x00020000, 0x00020000)) {
-			nv_mask(clk, ctrl, 0x00000010, 0x00000010);
-			nv_mask(clk, src0, 0x00000101, 0x00000000);
+			nvkm_mask(device, ctrl, 0x00000010, 0x00000010);
+			nvkm_mask(device, src0, 0x00000101, 0x00000000);
 			return;
 		}
-		nv_mask(clk, ctrl, 0x00000010, 0x00000010);
-		nv_mask(clk, ctrl, 0x00000008, 0x00000000);
+		nvkm_mask(device, ctrl, 0x00000010, 0x00000010);
+		nvkm_mask(device, ctrl, 0x00000008, 0x00000000);
 		disable_clk_src(clk, src1);
 	} else {
-		nv_mask(clk, src1, 0x003f3141, 0x00000101 | info->clk);
-		nv_mask(clk, ctrl, 0x00000018, 0x00000018);
+		nvkm_mask(device, src1, 0x003f3141, 0x00000101 | info->clk);
+		nvkm_mask(device, ctrl, 0x00000018, 0x00000018);
 		udelay(20);
-		nv_mask(clk, ctrl, 0x00000001, 0x00000000);
+		nvkm_mask(device, ctrl, 0x00000001, 0x00000000);
 		disable_clk_src(clk, src0);
 	}
 }
@@ -380,26 +388,28 @@ static void
 prog_clk(struct gt215_clk *clk, int idx, int dom)
 {
 	struct gt215_clk_info *info = &clk->eng[dom];
-	nv_mask(clk, 0x004120 + (idx * 4), 0x003f3141, 0x00000101 | info->clk);
+	struct nvkm_device *device = clk->base.subdev.device;
+	nvkm_mask(device, 0x004120 + (idx * 4), 0x003f3141, 0x00000101 | info->clk);
 }

 static void
 prog_host(struct gt215_clk *clk)
 {
 	struct gt215_clk_info *info = &clk->eng[nv_clk_src_host];
-	u32 hsrc = (nv_rd32(clk, 0xc040));
+	struct nvkm_device *device = clk->base.subdev.device;
+	u32 hsrc = (nvkm_rd32(device, 0xc040));

 	switch (info->host_out) {
 	case NVA3_HOST_277:
 		if ((hsrc & 0x30000000) == 0) {
-			nv_wr32(clk, 0xc040, hsrc | 0x20000000);
+			nvkm_wr32(device, 0xc040, hsrc | 0x20000000);
 			disable_clk_src(clk, 0x4194);
 		}
 		break;
 	case NVA3_HOST_CLK:
 		prog_clk(clk, 0x1d, nv_clk_src_host);
 		if ((hsrc & 0x30000000) >= 0x20000000) {
-			nv_wr32(clk, 0xc040, hsrc & ~0x30000000);
+			nvkm_wr32(device, 0xc040, hsrc & ~0x30000000);
 		}
 		break;
 	default:
@@ -407,22 +417,23 @@ prog_host(struct gt215_clk *clk)
 	}

 	/* This seems to be a clock gating factor on idle, always set to 64 */
-	nv_wr32(clk, 0xc044, 0x3e);
+	nvkm_wr32(device, 0xc044, 0x3e);
 }

 static void
 prog_core(struct gt215_clk *clk, int dom)
 {
 	struct gt215_clk_info *info = &clk->eng[dom];
-	u32 fb_delay = nv_rd32(clk, 0x10002c);
+	struct nvkm_device *device = clk->base.subdev.device;
+	u32 fb_delay = nvkm_rd32(device, 0x10002c);

 	if (fb_delay < info->fb_delay)
-		nv_wr32(clk, 0x10002c, info->fb_delay);
+		nvkm_wr32(device, 0x10002c, info->fb_delay);

 	prog_pll(clk, 0x00, 0x004200, dom);

 	if (fb_delay > info->fb_delay)
-		nv_wr32(clk, 0x10002c, info->fb_delay);
+		nvkm_wr32(device, 0x10002c, info->fb_delay);
 }

 static int
...
@@ -40,14 +40,16 @@ struct mcp77_clk {
 static u32
 read_div(struct mcp77_clk *clk)
 {
-	return nv_rd32(clk, 0x004600);
+	struct nvkm_device *device = clk->base.subdev.device;
+	return nvkm_rd32(device, 0x004600);
 }

 static u32
 read_pll(struct mcp77_clk *clk, u32 base)
 {
-	u32 ctrl = nv_rd32(clk, base + 0);
-	u32 coef = nv_rd32(clk, base + 4);
+	struct nvkm_device *device = clk->base.subdev.device;
+	u32 ctrl = nvkm_rd32(device, base + 0);
+	u32 coef = nvkm_rd32(device, base + 4);
 	u32 ref = clk->base.read(&clk->base, nv_clk_src_href);
 	u32 post_div = 0;
 	u32 clock = 0;
@@ -55,10 +57,10 @@ read_pll(struct mcp77_clk *clk, u32 base)
 	switch (base){
 	case 0x4020:
-		post_div = 1 << ((nv_rd32(clk, 0x4070) & 0x000f0000) >> 16);
+		post_div = 1 << ((nvkm_rd32(device, 0x4070) & 0x000f0000) >> 16);
 		break;
 	case 0x4028:
-		post_div = (nv_rd32(clk, 0x4040) & 0x000f0000) >> 16;
+		post_div = (nvkm_rd32(device, 0x4040) & 0x000f0000) >> 16;
 		break;
 	default:
 		break;
@@ -78,12 +80,13 @@ static int
 mcp77_clk_read(struct nvkm_clk *obj, enum nv_clk_src src)
 {
 	struct mcp77_clk *clk = container_of(obj, typeof(*clk), base);
-	u32 mast = nv_rd32(clk, 0x00c054);
+	struct nvkm_device *device = clk->base.subdev.device;
+	u32 mast = nvkm_rd32(device, 0x00c054);
 	u32 P = 0;

 	switch (src) {
 	case nv_clk_src_crystal:
-		return nv_device(clk)->crystal;
+		return device->crystal;
 	case nv_clk_src_href:
 		return 100000; /* PCIE reference clock */
 	case nv_clk_src_hclkm4:
@@ -99,7 +102,7 @@ mcp77_clk_read(struct nvkm_clk *obj, enum nv_clk_src src)
 		}
 		break;
 	case nv_clk_src_core:
-		P = (nv_rd32(clk, 0x004028) & 0x00070000) >> 16;
+		P = (nvkm_rd32(device, 0x004028) & 0x00070000) >> 16;
 		switch (mast & 0x00000003) {
 		case 0x00000000: return clk->base.read(&clk->base, nv_clk_src_crystal) >> P;
@@ -122,7 +125,7 @@ mcp77_clk_read(struct nvkm_clk *obj, enum nv_clk_src src)
 		default: return 0;
 		}
 	case nv_clk_src_shader:
-		P = (nv_rd32(clk, 0x004020) & 0x00070000) >> 16;
+		P = (nvkm_rd32(device, 0x004020) & 0x00070000) >> 16;
 		switch (mast & 0x00000030) {
 		case 0x00000000:
 			if (mast & 0x00000040)
@@ -293,6 +296,7 @@ static int
 mcp77_clk_prog(struct nvkm_clk *obj)
 {
 	struct mcp77_clk *clk = container_of(obj, typeof(*clk), base);
+	struct nvkm_device *device = clk->base.subdev.device;
 	u32 pllmask = 0, mast;
 	unsigned long flags;
 	unsigned long *f = &flags;
@@ -303,19 +307,19 @@ mcp77_clk_prog(struct nvkm_clk *obj)
 		goto out;

 	/* First switch to safe clocks: href */
-	mast = nv_mask(clk, 0xc054, 0x03400e70, 0x03400640);
+	mast = nvkm_mask(device, 0xc054, 0x03400e70, 0x03400640);
 	mast &= ~0x00400e73;
 	mast |= 0x03000000;

 	switch (clk->csrc) {
 	case nv_clk_src_hclkm4:
-		nv_mask(clk, 0x4028, 0x00070000, clk->cctrl);
+		nvkm_mask(device, 0x4028, 0x00070000, clk->cctrl);
 		mast |= 0x00000002;
 		break;
 	case nv_clk_src_core:
-		nv_wr32(clk, 0x402c, clk->ccoef);
-		nv_wr32(clk, 0x4028, 0x80000000 | clk->cctrl);
-		nv_wr32(clk, 0x4040, clk->cpost);
+		nvkm_wr32(device, 0x402c, clk->ccoef);
+		nvkm_wr32(device, 0x4028, 0x80000000 | clk->cctrl);
+		nvkm_wr32(device, 0x4040, clk->cpost);
 		pllmask |= (0x3 << 8);
 		mast |= 0x00000003;
 		break;
@@ -326,17 +330,17 @@ mcp77_clk_prog(struct nvkm_clk *obj)
 	switch (clk->ssrc) {
 	case nv_clk_src_href:
-		nv_mask(clk, 0x4020, 0x00070000, 0x00000000);
+		nvkm_mask(device, 0x4020, 0x00070000, 0x00000000);
 		/* mast |= 0x00000000; */
 		break;
 	case nv_clk_src_core:
-		nv_mask(clk, 0x4020, 0x00070000, clk->sctrl);
+		nvkm_mask(device, 0x4020, 0x00070000, clk->sctrl);
 		mast |= 0x00000020;
 		break;
 	case nv_clk_src_shader:
-		nv_wr32(clk, 0x4024, clk->scoef);
-		nv_wr32(clk, 0x4020, 0x80000000 | clk->sctrl);
-		nv_wr32(clk, 0x4070, clk->spost);
+		nvkm_wr32(device, 0x4024, clk->scoef);
+		nvkm_wr32(device, 0x4020, 0x80000000 | clk->sctrl);
+		nvkm_wr32(device, 0x4070, clk->spost);
 		pllmask |= (0x3 << 12);
 		mast |= 0x00000030;
 		break;
@@ -354,21 +358,21 @@ mcp77_clk_prog(struct nvkm_clk *obj)
 	case nv_clk_src_cclk:
 		mast |= 0x00400000;
 	default:
-		nv_wr32(clk, 0x4600, clk->vdiv);
+		nvkm_wr32(device, 0x4600, clk->vdiv);
 	}

-	nv_wr32(clk, 0xc054, mast);
+	nvkm_wr32(device, 0xc054, mast);

 resume:
 	/* Disable some PLLs and dividers when unused */
 	if (clk->csrc != nv_clk_src_core) {
-		nv_wr32(clk, 0x4040, 0x00000000);
-		nv_mask(clk, 0x4028, 0x80000000, 0x00000000);
+		nvkm_wr32(device, 0x4040, 0x00000000);
+		nvkm_mask(device, 0x4028, 0x80000000, 0x00000000);
 	}

 	if (clk->ssrc != nv_clk_src_shader) {
-		nv_wr32(clk, 0x4070, 0x00000000);
-		nv_mask(clk, 0x4020, 0x80000000, 0x00000000);
+		nvkm_wr32(device, 0x4070, 0x00000000);
+		nvkm_mask(device, 0x4020, 0x80000000, 0x00000000);
 	}

 out:
...
@@ -48,7 +48,8 @@ nv40_domain[] = {
 static u32
 read_pll_1(struct nv40_clk *clk, u32 reg)
 {
-	u32 ctrl = nv_rd32(clk, reg + 0x00);
+	struct nvkm_device *device = clk->base.subdev.device;
+	u32 ctrl = nvkm_rd32(device, reg + 0x00);
 	int P = (ctrl & 0x00070000) >> 16;
 	int N = (ctrl & 0x0000ff00) >> 8;
 	int M = (ctrl & 0x000000ff) >> 0;
@@ -63,8 +64,9 @@ read_pll_1(struct nv40_clk *clk, u32 reg)
 static u32
 read_pll_2(struct nv40_clk *clk, u32 reg)
 {
-	u32 ctrl = nv_rd32(clk, reg + 0x00);
-	u32 coef = nv_rd32(clk, reg + 0x04);
+	struct nvkm_device *device = clk->base.subdev.device;
+	u32 ctrl = nvkm_rd32(device, reg + 0x00);
+	u32 coef = nvkm_rd32(device, reg + 0x04);
 	int N2 = (coef & 0xff000000) >> 24;
 	int M2 = (coef & 0x00ff0000) >> 16;
 	int N1 = (coef & 0x0000ff00) >> 8;
@@ -104,11 +106,12 @@ static int
 nv40_clk_read(struct nvkm_clk *obj, enum nv_clk_src src)
 {
 	struct nv40_clk *clk = container_of(obj, typeof(*clk), base);
-	u32 mast = nv_rd32(clk, 0x00c040);
+	struct nvkm_device *device = clk->base.subdev.device;
+	u32 mast = nvkm_rd32(device, 0x00c040);

 	switch (src) {
 	case nv_clk_src_crystal:
-		return nv_device(clk)->crystal;
+		return device->crystal;
 	case nv_clk_src_href:
 		return 100000; /*XXX: PCIE/AGP differ*/
 	case nv_clk_src_core:
@@ -191,12 +194,13 @@ static int
 nv40_clk_prog(struct nvkm_clk *obj)
 {
 	struct nv40_clk *clk = container_of(obj, typeof(*clk), base);
-	nv_mask(clk, 0x00c040, 0x00000333, 0x00000000);
-	nv_wr32(clk, 0x004004, clk->npll_coef);
-	nv_mask(clk, 0x004000, 0xc0070100, clk->npll_ctrl);
-	nv_mask(clk, 0x004008, 0xc007ffff, clk->spll);
+	struct nvkm_device *device = clk->base.subdev.device;
+	nvkm_mask(device, 0x00c040, 0x00000333, 0x00000000);
+	nvkm_wr32(device, 0x004004, clk->npll_coef);
+	nvkm_mask(device, 0x004000, 0xc0070100, clk->npll_ctrl);
+	nvkm_mask(device, 0x004008, 0xc007ffff, clk->spll);
 	mdelay(5);
-	nv_mask(clk, 0x00c040, 0x00000333, clk->ctrl);
+	nvkm_mask(device, 0x00c040, 0x00000333, clk->ctrl);
 	return 0;
 }
...
@@ -31,17 +31,18 @@
 static u32
 read_div(struct nv50_clk *clk)
 {
-	switch (nv_device(clk)->chipset) {
+	struct nvkm_device *device = clk->base.subdev.device;
+	switch (device->chipset) {
 	case 0x50: /* it exists, but only has bit 31, not the dividers.. */
 	case 0x84:
 	case 0x86:
 	case 0x98:
 	case 0xa0:
-		return nv_rd32(clk, 0x004700);
+		return nvkm_rd32(device, 0x004700);
 	case 0x92:
 	case 0x94:
 	case 0x96:
-		return nv_rd32(clk, 0x004800);
+		return nvkm_rd32(device, 0x004800);
 	default:
 		return 0x00000000;
 	}
@@ -50,11 +51,12 @@ read_div(struct nv50_clk *clk)
 static u32
 read_pll_src(struct nv50_clk *clk, u32 base)
 {
+	struct nvkm_device *device = clk->base.subdev.device;
 	u32 coef, ref = clk->base.read(&clk->base, nv_clk_src_crystal);
-	u32 rsel = nv_rd32(clk, 0x00e18c);
+	u32 rsel = nvkm_rd32(device, 0x00e18c);
 	int P, N, M, id;

-	switch (nv_device(clk)->chipset) {
+	switch (device->chipset) {
 	case 0x50:
 	case 0xa0:
 		switch (base) {
@@ -67,7 +69,7 @@ read_pll_src(struct nv50_clk *clk, u32 base)
 			return 0;
 		}

-		coef = nv_rd32(clk, 0x00e81c + (id * 0x0c));
+		coef = nvkm_rd32(device, 0x00e81c + (id * 0x0c));
 		ref *= (coef & 0x01000000) ? 2 : 4;
 		P = (coef & 0x00070000) >> 16;
 		N = ((coef & 0x0000ff00) >> 8) + 1;
@@ -76,7 +78,7 @@ read_pll_src(struct nv50_clk *clk, u32 base)
 	case 0x84:
 	case 0x86:
 	case 0x92:
-		coef = nv_rd32(clk, 0x00e81c);
+		coef = nvkm_rd32(device, 0x00e81c);
 		P = (coef & 0x00070000) >> 16;
 		N = (coef & 0x0000ff00) >> 8;
 		M = (coef & 0x000000ff) >> 0;
@@ -84,7 +86,7 @@ read_pll_src(struct nv50_clk *clk, u32 base)
 	case 0x94:
 	case 0x96:
 	case 0x98:
-		rsel = nv_rd32(clk, 0x00c050);
+		rsel = nvkm_rd32(device, 0x00c050);
 		switch (base) {
 		case 0x4020: rsel = (rsel & 0x00000003) >> 0; break;
 		case 0x4008: rsel = (rsel & 0x0000000c) >> 2; break;
@@ -102,8 +104,8 @@ read_pll_src(struct nv50_clk *clk, u32 base)
 		case 3: id = 0; break;
 		}

-		coef = nv_rd32(clk, 0x00e81c + (id * 0x28));
-		P = (nv_rd32(clk, 0x00e824 + (id * 0x28)) >> 16) & 7;
+		coef = nvkm_rd32(device, 0x00e81c + (id * 0x28));
+		P = (nvkm_rd32(device, 0x00e824 + (id * 0x28)) >> 16) & 7;
 		P += (coef & 0x00070000) >> 16;
 		N = (coef & 0x0000ff00) >> 8;
 		M = (coef & 0x000000ff) >> 0;
@@ -121,7 +123,8 @@ read_pll_src(struct nv50_clk *clk, u32 base)
 static u32
 read_pll_ref(struct nv50_clk *clk, u32 base)
 {
-	u32 src, mast = nv_rd32(clk, 0x00c040);
+	struct nvkm_device *device = clk->base.subdev.device;
+	u32 src, mast = nvkm_rd32(device, 0x00c040);

 	switch (base) {
 	case 0x004028:
@@ -152,16 +155,17 @@ read_pll_ref(struct nv50_clk *clk, u32 base)
 static u32
 read_pll(struct nv50_clk *clk, u32 base)
 {
-	u32 mast = nv_rd32(clk, 0x00c040);
-	u32 ctrl = nv_rd32(clk, base + 0);
-	u32 coef = nv_rd32(clk, base + 4);
+	struct nvkm_device *device = clk->base.subdev.device;
+	u32 mast = nvkm_rd32(device, 0x00c040);
+	u32 ctrl = nvkm_rd32(device, base + 0);
+	u32 coef = nvkm_rd32(device, base + 4);
 	u32 ref = read_pll_ref(clk, base);
 	u32 freq = 0;
 	int N1, N2, M1, M2;

 	if (base == 0x004028 && (mast & 0x00100000)) {
 		/* wtf, appears to only disable post-divider on gt200 */
-		if (nv_device(clk)->chipset != 0xa0)
+		if (device->chipset != 0xa0)
 			return clk->base.read(&clk->base, nv_clk_src_dom6);
 	}
@@ -186,12 +190,13 @@ static int
 nv50_clk_read(struct nvkm_clk *obj, enum nv_clk_src src)
 {
 	struct nv50_clk *clk = container_of(obj, typeof(*clk), base);
-	u32 mast = nv_rd32(clk, 0x00c040);
+	struct nvkm_device *device = clk->base.subdev.device;
+	u32 mast = nvkm_rd32(device, 0x00c040);
 	u32 P = 0;

 	switch (src) {
 	case nv_clk_src_crystal:
-		return nv_device(clk)->crystal;
+		return device->crystal;
 	case nv_clk_src_href:
 		return 100000; /* PCIE reference clock */
 	case nv_clk_src_hclk:
@@ -210,7 +215,7 @@ nv50_clk_read(struct nvkm_clk *obj, enum nv_clk_src src)
 		break;
 	case nv_clk_src_core:
 		if (!(mast & 0x00100000))
-			P = (nv_rd32(clk, 0x004028) & 0x00070000) >> 16;
+			P = (nvkm_rd32(device, 0x004028) & 0x00070000) >> 16;
 		switch (mast & 0x00000003) {
 		case 0x00000000: return clk->base.read(&clk->base, nv_clk_src_crystal) >> P;
 		case 0x00000001: return clk->base.read(&clk->base, nv_clk_src_dom6);
@@ -219,7 +224,7 @@ nv50_clk_read(struct nvkm_clk *obj, enum nv_clk_src src)
 		}
 		break;
 	case nv_clk_src_shader:
-		P = (nv_rd32(clk, 0x004020) & 0x00070000) >> 16;
+		P = (nvkm_rd32(device, 0x004020) & 0x00070000) >> 16;
 		switch (mast & 0x00000030) {
 		case 0x00000000:
 			if (mast & 0x00000080)
@@ -231,8 +236,8 @@ nv50_clk_read(struct nvkm_clk *obj, enum nv_clk_src src)
 		}
 		break;
 	case nv_clk_src_mem:
-		P = (nv_rd32(clk, 0x004008) & 0x00070000) >> 16;
-		if (nv_rd32(clk, 0x004008) & 0x00000200) {
+		P = (nvkm_rd32(device, 0x004008) & 0x00070000) >> 16;
+		if (nvkm_rd32(device, 0x004008) & 0x00000200) {
 			switch (mast & 0x0000c000) {
 			case 0x00000000:
 				return clk->base.read(&clk->base, nv_clk_src_crystal) >> P;
@@ -246,7 +251,7 @@ nv50_clk_read(struct nvkm_clk *obj, enum nv_clk_src src)
 		break;
 	case nv_clk_src_vdec:
 		P = (read_div(clk) & 0x00000700) >> 8;
-		switch (nv_device(clk)->chipset) {
+		switch (device->chipset) {
 		case 0x84:
 		case 0x86:
 		case 0x92:
@@ -255,7 +260,7 @@ nv50_clk_read(struct nvkm_clk *obj, enum nv_clk_src src)
 		case 0xa0:
 			switch (mast & 0x00000c00) {
 			case 0x00000000:
-				if (nv_device(clk)->chipset == 0xa0) /* wtf?? */
+				if (device->chipset == 0xa0) /* wtf?? */
 					return clk->base.read(&clk->base, nv_clk_src_core) >> P;
 				return clk->base.read(&clk->base, nv_clk_src_crystal) >> P;
 			case 0x00000400:
@@ -283,7 +288,7 @@ nv50_clk_read(struct nvkm_clk *obj, enum nv_clk_src src)
 		}
 		break;
 	case nv_clk_src_dom6:
-		switch (nv_device(clk)->chipset) {
+		switch (device->chipset) {
 		case 0x50:
 		case 0xa0:
 			return read_pll(clk, 0x00e810) >> 2;
...