drm/nouveau/clk: cosmetic changes

This is purely preparation for upcoming commits; there should be no code
changes here.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>

parent 01d6b95605
commit 3eca809b3c
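Most of the diff below is a mechanical rename: the per-chipset "priv" structures (gf100_clk_priv, gk104_clk_priv, gk20a_clk_priv, gt215_clk_priv) become gf100_clk and so on, functions that used to cast the struct nvkm_clk pointer to the private type now recover their container with container_of(), and loop/index parameters formerly named clk become idx so that clk can name the derived structure. A minimal, self-contained sketch of that embedding pattern follows; the member list and the read() signature are illustrative, not the full nouveau definitions.

#include <stddef.h>

/* container_of(): recover the outer structure from a pointer to one of
 * its members, the same trick the kernel macro uses. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct nvkm_clk {
	int (*read)(struct nvkm_clk *, int src);	/* base-class method */
};

struct gf100_clk {
	struct nvkm_clk base;		/* embedded base object */
	unsigned int eng[16];		/* illustrative per-engine state */
};

/* A chipset method receives the base pointer and recovers its container,
 * instead of blindly casting it to a "priv" type. */
static int
gf100_clk_read(struct nvkm_clk *obj, int src)
{
	struct gf100_clk *clk = container_of(obj, typeof(*clk), base);

	(void)src;
	return (int)clk->eng[0];	/* placeholder body */
}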
@@ -71,7 +71,7 @@ struct nvkm_domain {
 };
 
 struct nvkm_clk {
-	struct nvkm_subdev base;
+	struct nvkm_subdev subdev;
 
 	struct nvkm_domain *domains;
 	struct nvkm_pstate bstate;
@@ -117,16 +117,16 @@ nvkm_clk(void *obj)
 	nvkm_clk_create_((p), (e), (o), (i), (r), (s), (n), sizeof(**d), \
 			 (void **)d)
 #define nvkm_clk_destroy(p) ({ \
-	struct nvkm_clk *clk = (p); \
-	_nvkm_clk_dtor(nv_object(clk)); \
+	struct nvkm_clk *_clk = (p); \
+	_nvkm_clk_dtor(nv_object(_clk)); \
 })
 #define nvkm_clk_init(p) ({ \
-	struct nvkm_clk *clk = (p); \
-	_nvkm_clk_init(nv_object(clk)); \
+	struct nvkm_clk *_clk = (p); \
+	_nvkm_clk_init(nv_object(_clk)); \
 })
 #define nvkm_clk_fini(p,s) ({ \
-	struct nvkm_clk *clk = (p); \
-	_nvkm_clk_fini(nv_object(clk), (s)); \
+	struct nvkm_clk *_clk = (p); \
+	_nvkm_clk_fini(nv_object(_clk), (s)); \
 })
 
 int nvkm_clk_create_(struct nvkm_object *, struct nvkm_object *,
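The second hunk above renames the macro-locals from clk to _clk. The commit message does not say why, so treat this as an assumption: a plausible reason is that a local named clk shadows any clk variable a caller passes in, turning the initializer into a self-reference. A small, hypothetical sketch of the hazard, using GNU C statement expressions like the macros above (clk_destroy_old/new and do_dtor are illustrative stand-ins, not nouveau symbols):

struct nvkm_clk { int dummy; };

static void do_dtor(struct nvkm_clk *c) { (void)c; }

/* old style: the local "clk" shadows the caller's "clk", so the
 * initializer (p) names the freshly declared local, not the caller's
 * pointer, and the local is self-initialized with an indeterminate value */
#define clk_destroy_old(p) ({ struct nvkm_clk *clk = (p); do_dtor(clk); })

/* new style: an underscore-prefixed local cannot collide with "clk" */
#define clk_destroy_new(p) ({ struct nvkm_clk *_clk = (p); do_dtor(_clk); })

static void
example(struct nvkm_clk *clk)
{
	clk_destroy_new(clk);	/* captures the caller's pointer as intended */
	/* clk_destroy_old(clk) would self-initialize the shadowing local */
}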
@@ -121,7 +121,7 @@ nvkm_cstate_prog(struct nvkm_clk *clk, struct nvkm_pstate *pstate, int cstatei)
 			nv_error(clk, "failed to lower fan speed: %d\n", ret);
 	}
 
-	return 0;
+	return ret;
 }
 
 static void
@@ -474,7 +474,7 @@ _nvkm_clk_fini(struct nvkm_object *object, bool suspend)
 {
 	struct nvkm_clk *clk = (void *)object;
 	nvkm_notify_put(&clk->pwrsrc_ntfy);
-	return nvkm_subdev_fini(&clk->base, suspend);
+	return nvkm_subdev_fini(&clk->subdev, suspend);
 }
 
 int
@@ -484,7 +484,7 @@ _nvkm_clk_init(struct nvkm_object *object)
 	struct nvkm_domain *clock = clk->domains;
 	int ret;
 
-	ret = nvkm_subdev_init(&clk->base);
+	ret = nvkm_subdev_init(&clk->subdev);
 	if (ret)
 		return ret;
 
@@ -524,7 +524,7 @@ _nvkm_clk_dtor(struct nvkm_object *object)
 		nvkm_pstate_del(pstate);
 	}
 
-	nvkm_subdev_destroy(&clk->base);
+	nvkm_subdev_destroy(&clk->subdev);
 }
 
 int
@@ -37,29 +37,27 @@ struct gf100_clk_info {
 	u32 coef;
 };
 
-struct gf100_clk_priv {
+struct gf100_clk {
 	struct nvkm_clk base;
 	struct gf100_clk_info eng[16];
 };
 
-static u32 read_div(struct gf100_clk_priv *, int, u32, u32);
+static u32 read_div(struct gf100_clk *, int, u32, u32);
 
 static u32
-read_vco(struct gf100_clk_priv *priv, u32 dsrc)
+read_vco(struct gf100_clk *clk, u32 dsrc)
 {
-	struct nvkm_clk *clk = &priv->base;
-	u32 ssrc = nv_rd32(priv, dsrc);
+	u32 ssrc = nv_rd32(clk, dsrc);
 	if (!(ssrc & 0x00000100))
-		return clk->read(clk, nv_clk_src_sppll0);
-	return clk->read(clk, nv_clk_src_sppll1);
+		return clk->base.read(&clk->base, nv_clk_src_sppll0);
+	return clk->base.read(&clk->base, nv_clk_src_sppll1);
 }
 
 static u32
-read_pll(struct gf100_clk_priv *priv, u32 pll)
+read_pll(struct gf100_clk *clk, u32 pll)
 {
-	struct nvkm_clk *clk = &priv->base;
-	u32 ctrl = nv_rd32(priv, pll + 0x00);
-	u32 coef = nv_rd32(priv, pll + 0x04);
+	u32 ctrl = nv_rd32(clk, pll + 0x00);
+	u32 coef = nv_rd32(clk, pll + 0x04);
 	u32 P = (coef & 0x003f0000) >> 16;
 	u32 N = (coef & 0x0000ff00) >> 8;
 	u32 M = (coef & 0x000000ff) >> 0;
@@ -71,20 +69,20 @@ read_pll(struct gf100_clk_priv *priv, u32 pll)
 	switch (pll) {
 	case 0x00e800:
 	case 0x00e820:
-		sclk = nv_device(priv)->crystal;
+		sclk = nv_device(clk)->crystal;
 		P = 1;
 		break;
 	case 0x132000:
-		sclk = clk->read(clk, nv_clk_src_mpllsrc);
+		sclk = clk->base.read(&clk->base, nv_clk_src_mpllsrc);
 		break;
 	case 0x132020:
-		sclk = clk->read(clk, nv_clk_src_mpllsrcref);
+		sclk = clk->base.read(&clk->base, nv_clk_src_mpllsrcref);
 		break;
 	case 0x137000:
 	case 0x137020:
 	case 0x137040:
 	case 0x1370e0:
-		sclk = read_div(priv, (pll & 0xff) / 0x20, 0x137120, 0x137140);
+		sclk = read_div(clk, (pll & 0xff) / 0x20, 0x137120, 0x137140);
 		break;
 	default:
 		return 0;
@@ -94,46 +92,46 @@ read_pll(struct gf100_clk_priv *priv, u32 pll)
 }
 
 static u32
-read_div(struct gf100_clk_priv *priv, int doff, u32 dsrc, u32 dctl)
+read_div(struct gf100_clk *clk, int doff, u32 dsrc, u32 dctl)
 {
-	u32 ssrc = nv_rd32(priv, dsrc + (doff * 4));
-	u32 sctl = nv_rd32(priv, dctl + (doff * 4));
+	u32 ssrc = nv_rd32(clk, dsrc + (doff * 4));
+	u32 sctl = nv_rd32(clk, dctl + (doff * 4));
 
 	switch (ssrc & 0x00000003) {
 	case 0:
 		if ((ssrc & 0x00030000) != 0x00030000)
-			return nv_device(priv)->crystal;
+			return nv_device(clk)->crystal;
 		return 108000;
 	case 2:
 		return 100000;
 	case 3:
 		if (sctl & 0x80000000) {
-			u32 sclk = read_vco(priv, dsrc + (doff * 4));
+			u32 sclk = read_vco(clk, dsrc + (doff * 4));
 			u32 sdiv = (sctl & 0x0000003f) + 2;
 			return (sclk * 2) / sdiv;
 		}
 
-		return read_vco(priv, dsrc + (doff * 4));
+		return read_vco(clk, dsrc + (doff * 4));
 	default:
 		return 0;
 	}
 }
 
 static u32
-read_clk(struct gf100_clk_priv *priv, int clk)
+read_clk(struct gf100_clk *clk, int idx)
 {
-	u32 sctl = nv_rd32(priv, 0x137250 + (clk * 4));
-	u32 ssel = nv_rd32(priv, 0x137100);
+	u32 sctl = nv_rd32(clk, 0x137250 + (idx * 4));
+	u32 ssel = nv_rd32(clk, 0x137100);
 	u32 sclk, sdiv;
 
-	if (ssel & (1 << clk)) {
-		if (clk < 7)
-			sclk = read_pll(priv, 0x137000 + (clk * 0x20));
+	if (ssel & (1 << idx)) {
+		if (idx < 7)
+			sclk = read_pll(clk, 0x137000 + (idx * 0x20));
 		else
-			sclk = read_pll(priv, 0x1370e0);
+			sclk = read_pll(clk, 0x1370e0);
 		sdiv = ((sctl & 0x00003f00) >> 8) + 2;
 	} else {
-		sclk = read_div(priv, clk, 0x137160, 0x1371d0);
+		sclk = read_div(clk, idx, 0x137160, 0x1371d0);
 		sdiv = ((sctl & 0x0000003f) >> 0) + 2;
 	}
 
@@ -144,10 +142,10 @@ read_clk(struct gf100_clk_priv *priv, int clk)
 }
 
 static int
-gf100_clk_read(struct nvkm_clk *clk, enum nv_clk_src src)
+gf100_clk_read(struct nvkm_clk *obj, enum nv_clk_src src)
 {
+	struct gf100_clk *clk = container_of(obj, typeof(*clk), base);
 	struct nvkm_device *device = nv_device(clk);
-	struct gf100_clk_priv *priv = (void *)clk;
 
 	switch (src) {
 	case nv_clk_src_crystal:
@@ -155,39 +153,39 @@ gf100_clk_read(struct nvkm_clk *clk, enum nv_clk_src src)
 	case nv_clk_src_href:
 		return 100000;
 	case nv_clk_src_sppll0:
-		return read_pll(priv, 0x00e800);
+		return read_pll(clk, 0x00e800);
 	case nv_clk_src_sppll1:
-		return read_pll(priv, 0x00e820);
+		return read_pll(clk, 0x00e820);
 
 	case nv_clk_src_mpllsrcref:
-		return read_div(priv, 0, 0x137320, 0x137330);
+		return read_div(clk, 0, 0x137320, 0x137330);
 	case nv_clk_src_mpllsrc:
-		return read_pll(priv, 0x132020);
+		return read_pll(clk, 0x132020);
 	case nv_clk_src_mpll:
-		return read_pll(priv, 0x132000);
+		return read_pll(clk, 0x132000);
 	case nv_clk_src_mdiv:
-		return read_div(priv, 0, 0x137300, 0x137310);
+		return read_div(clk, 0, 0x137300, 0x137310);
 	case nv_clk_src_mem:
-		if (nv_rd32(priv, 0x1373f0) & 0x00000002)
-			return clk->read(clk, nv_clk_src_mpll);
-		return clk->read(clk, nv_clk_src_mdiv);
+		if (nv_rd32(clk, 0x1373f0) & 0x00000002)
+			return clk->base.read(&clk->base, nv_clk_src_mpll);
+		return clk->base.read(&clk->base, nv_clk_src_mdiv);
 
 	case nv_clk_src_gpc:
-		return read_clk(priv, 0x00);
+		return read_clk(clk, 0x00);
 	case nv_clk_src_rop:
-		return read_clk(priv, 0x01);
+		return read_clk(clk, 0x01);
 	case nv_clk_src_hubk07:
-		return read_clk(priv, 0x02);
+		return read_clk(clk, 0x02);
 	case nv_clk_src_hubk06:
-		return read_clk(priv, 0x07);
+		return read_clk(clk, 0x07);
 	case nv_clk_src_hubk01:
-		return read_clk(priv, 0x08);
+		return read_clk(clk, 0x08);
 	case nv_clk_src_copy:
-		return read_clk(priv, 0x09);
+		return read_clk(clk, 0x09);
 	case nv_clk_src_daemon:
-		return read_clk(priv, 0x0c);
+		return read_clk(clk, 0x0c);
 	case nv_clk_src_vdec:
-		return read_clk(priv, 0x0e);
+		return read_clk(clk, 0x0e);
 	default:
 		nv_error(clk, "invalid clock source %d\n", src);
 		return -EINVAL;
@@ -195,7 +193,7 @@ gf100_clk_read(struct nvkm_clk *clk, enum nv_clk_src src)
 }
 
 static u32
-calc_div(struct gf100_clk_priv *priv, int clk, u32 ref, u32 freq, u32 *ddiv)
+calc_div(struct gf100_clk *clk, int idx, u32 ref, u32 freq, u32 *ddiv)
 {
 	u32 div = min((ref * 2) / freq, (u32)65);
 	if (div < 2)
@@ -206,7 +204,7 @@ calc_div(struct gf100_clk_priv *priv, int clk, u32 ref, u32 freq, u32 *ddiv)
 }
 
 static u32
-calc_src(struct gf100_clk_priv *priv, int clk, u32 freq, u32 *dsrc, u32 *ddiv)
+calc_src(struct gf100_clk *clk, int idx, u32 freq, u32 *dsrc, u32 *ddiv)
 {
 	u32 sclk;
 
@@ -228,28 +226,28 @@ calc_src(struct gf100_clk_priv *priv, int clk, u32 freq, u32 *dsrc, u32 *ddiv)
 	}
 
 	/* otherwise, calculate the closest divider */
-	sclk = read_vco(priv, 0x137160 + (clk * 4));
-	if (clk < 7)
-		sclk = calc_div(priv, clk, sclk, freq, ddiv);
+	sclk = read_vco(clk, 0x137160 + (idx * 4));
+	if (idx < 7)
+		sclk = calc_div(clk, idx, sclk, freq, ddiv);
 	return sclk;
 }
 
 static u32
-calc_pll(struct gf100_clk_priv *priv, int clk, u32 freq, u32 *coef)
+calc_pll(struct gf100_clk *clk, int idx, u32 freq, u32 *coef)
 {
-	struct nvkm_bios *bios = nvkm_bios(priv);
+	struct nvkm_bios *bios = nvkm_bios(clk);
 	struct nvbios_pll limits;
 	int N, M, P, ret;
 
-	ret = nvbios_pll_parse(bios, 0x137000 + (clk * 0x20), &limits);
+	ret = nvbios_pll_parse(bios, 0x137000 + (idx * 0x20), &limits);
 	if (ret)
 		return 0;
 
-	limits.refclk = read_div(priv, clk, 0x137120, 0x137140);
+	limits.refclk = read_div(clk, idx, 0x137120, 0x137140);
 	if (!limits.refclk)
 		return 0;
 
-	ret = gt215_pll_calc(nv_subdev(priv), &limits, freq, &N, NULL, &M, &P);
+	ret = gt215_pll_calc(nv_subdev(clk), &limits, freq, &N, NULL, &M, &P);
 	if (ret <= 0)
 		return 0;
 
@@ -258,10 +256,9 @@ calc_pll(struct gf100_clk_priv *priv, int clk, u32 freq, u32 *coef)
 }
 
 static int
-calc_clk(struct gf100_clk_priv *priv,
-	 struct nvkm_cstate *cstate, int clk, int dom)
+calc_clk(struct gf100_clk *clk, struct nvkm_cstate *cstate, int idx, int dom)
 {
-	struct gf100_clk_info *info = &priv->eng[clk];
+	struct gf100_clk_info *info = &clk->eng[idx];
 	u32 freq = cstate->domain[dom];
 	u32 src0, div0, div1D, div1P = 0;
 	u32 clk0, clk1 = 0;
@@ -271,16 +268,16 @@ calc_clk(struct gf100_clk_priv *priv,
 		return 0;
 
 	/* first possible path, using only dividers */
-	clk0 = calc_src(priv, clk, freq, &src0, &div0);
-	clk0 = calc_div(priv, clk, clk0, freq, &div1D);
+	clk0 = calc_src(clk, idx, freq, &src0, &div0);
+	clk0 = calc_div(clk, idx, clk0, freq, &div1D);
 
 	/* see if we can get any closer using PLLs */
-	if (clk0 != freq && (0x00004387 & (1 << clk))) {
-		if (clk <= 7)
-			clk1 = calc_pll(priv, clk, freq, &info->coef);
+	if (clk0 != freq && (0x00004387 & (1 << idx))) {
+		if (idx <= 7)
+			clk1 = calc_pll(clk, idx, freq, &info->coef);
 		else
 			clk1 = cstate->domain[nv_clk_src_hubk06];
-		clk1 = calc_div(priv, clk, clk1, freq, &div1P);
+		clk1 = calc_div(clk, idx, clk1, freq, &div1P);
 	}
 
 	/* select the method which gets closest to target freq */
@@ -302,7 +299,7 @@ calc_clk(struct gf100_clk_priv *priv,
 			info->mdiv |= 0x80000000;
 			info->mdiv |= div1P << 8;
 		}
-		info->ssel = (1 << clk);
+		info->ssel = (1 << idx);
 		info->freq = clk1;
 	}
 
@@ -310,81 +307,81 @@ calc_clk(struct gf100_clk_priv *priv,
 }
 
 static int
-gf100_clk_calc(struct nvkm_clk *clk, struct nvkm_cstate *cstate)
+gf100_clk_calc(struct nvkm_clk *obj, struct nvkm_cstate *cstate)
 {
-	struct gf100_clk_priv *priv = (void *)clk;
+	struct gf100_clk *clk = container_of(obj, typeof(*clk), base);
 	int ret;
 
-	if ((ret = calc_clk(priv, cstate, 0x00, nv_clk_src_gpc)) ||
-	    (ret = calc_clk(priv, cstate, 0x01, nv_clk_src_rop)) ||
-	    (ret = calc_clk(priv, cstate, 0x02, nv_clk_src_hubk07)) ||
-	    (ret = calc_clk(priv, cstate, 0x07, nv_clk_src_hubk06)) ||
-	    (ret = calc_clk(priv, cstate, 0x08, nv_clk_src_hubk01)) ||
-	    (ret = calc_clk(priv, cstate, 0x09, nv_clk_src_copy)) ||
-	    (ret = calc_clk(priv, cstate, 0x0c, nv_clk_src_daemon)) ||
-	    (ret = calc_clk(priv, cstate, 0x0e, nv_clk_src_vdec)))
+	if ((ret = calc_clk(clk, cstate, 0x00, nv_clk_src_gpc)) ||
+	    (ret = calc_clk(clk, cstate, 0x01, nv_clk_src_rop)) ||
+	    (ret = calc_clk(clk, cstate, 0x02, nv_clk_src_hubk07)) ||
+	    (ret = calc_clk(clk, cstate, 0x07, nv_clk_src_hubk06)) ||
+	    (ret = calc_clk(clk, cstate, 0x08, nv_clk_src_hubk01)) ||
+	    (ret = calc_clk(clk, cstate, 0x09, nv_clk_src_copy)) ||
+	    (ret = calc_clk(clk, cstate, 0x0c, nv_clk_src_daemon)) ||
+	    (ret = calc_clk(clk, cstate, 0x0e, nv_clk_src_vdec)))
 		return ret;
 
 	return 0;
 }
 
 static void
-gf100_clk_prog_0(struct gf100_clk_priv *priv, int clk)
+gf100_clk_prog_0(struct gf100_clk *clk, int idx)
 {
-	struct gf100_clk_info *info = &priv->eng[clk];
-	if (clk < 7 && !info->ssel) {
-		nv_mask(priv, 0x1371d0 + (clk * 0x04), 0x80003f3f, info->ddiv);
-		nv_wr32(priv, 0x137160 + (clk * 0x04), info->dsrc);
+	struct gf100_clk_info *info = &clk->eng[idx];
+	if (idx < 7 && !info->ssel) {
+		nv_mask(clk, 0x1371d0 + (idx * 0x04), 0x80003f3f, info->ddiv);
+		nv_wr32(clk, 0x137160 + (idx * 0x04), info->dsrc);
 	}
 }
 
 static void
-gf100_clk_prog_1(struct gf100_clk_priv *priv, int clk)
+gf100_clk_prog_1(struct gf100_clk *clk, int idx)
 {
-	nv_mask(priv, 0x137100, (1 << clk), 0x00000000);
-	nv_wait(priv, 0x137100, (1 << clk), 0x00000000);
+	nv_mask(clk, 0x137100, (1 << idx), 0x00000000);
+	nv_wait(clk, 0x137100, (1 << idx), 0x00000000);
 }
 
 static void
-gf100_clk_prog_2(struct gf100_clk_priv *priv, int clk)
+gf100_clk_prog_2(struct gf100_clk *clk, int idx)
 {
-	struct gf100_clk_info *info = &priv->eng[clk];
-	const u32 addr = 0x137000 + (clk * 0x20);
-	if (clk <= 7) {
-		nv_mask(priv, addr + 0x00, 0x00000004, 0x00000000);
-		nv_mask(priv, addr + 0x00, 0x00000001, 0x00000000);
+	struct gf100_clk_info *info = &clk->eng[idx];
+	const u32 addr = 0x137000 + (idx * 0x20);
+	if (idx <= 7) {
+		nv_mask(clk, addr + 0x00, 0x00000004, 0x00000000);
+		nv_mask(clk, addr + 0x00, 0x00000001, 0x00000000);
 		if (info->coef) {
-			nv_wr32(priv, addr + 0x04, info->coef);
-			nv_mask(priv, addr + 0x00, 0x00000001, 0x00000001);
-			nv_wait(priv, addr + 0x00, 0x00020000, 0x00020000);
-			nv_mask(priv, addr + 0x00, 0x00020004, 0x00000004);
+			nv_wr32(clk, addr + 0x04, info->coef);
+			nv_mask(clk, addr + 0x00, 0x00000001, 0x00000001);
+			nv_wait(clk, addr + 0x00, 0x00020000, 0x00020000);
+			nv_mask(clk, addr + 0x00, 0x00020004, 0x00000004);
 		}
 	}
 }
 
 static void
-gf100_clk_prog_3(struct gf100_clk_priv *priv, int clk)
+gf100_clk_prog_3(struct gf100_clk *clk, int idx)
 {
-	struct gf100_clk_info *info = &priv->eng[clk];
+	struct gf100_clk_info *info = &clk->eng[idx];
 	if (info->ssel) {
-		nv_mask(priv, 0x137100, (1 << clk), info->ssel);
-		nv_wait(priv, 0x137100, (1 << clk), info->ssel);
+		nv_mask(clk, 0x137100, (1 << idx), info->ssel);
+		nv_wait(clk, 0x137100, (1 << idx), info->ssel);
	}
 }
 
 static void
-gf100_clk_prog_4(struct gf100_clk_priv *priv, int clk)
+gf100_clk_prog_4(struct gf100_clk *clk, int idx)
 {
-	struct gf100_clk_info *info = &priv->eng[clk];
-	nv_mask(priv, 0x137250 + (clk * 0x04), 0x00003f3f, info->mdiv);
+	struct gf100_clk_info *info = &clk->eng[idx];
+	nv_mask(clk, 0x137250 + (idx * 0x04), 0x00003f3f, info->mdiv);
 }
 
 static int
-gf100_clk_prog(struct nvkm_clk *clk)
+gf100_clk_prog(struct nvkm_clk *obj)
 {
-	struct gf100_clk_priv *priv = (void *)clk;
+	struct gf100_clk *clk = container_of(obj, typeof(*clk), base);
 	struct {
-		void (*exec)(struct gf100_clk_priv *, int);
+		void (*exec)(struct gf100_clk *, int);
 	} stage[] = {
 		{ gf100_clk_prog_0 }, /* div programming */
 		{ gf100_clk_prog_1 }, /* select div mode */
@@ -395,10 +392,10 @@ gf100_clk_prog(struct nvkm_clk *clk)
 	int i, j;
 
 	for (i = 0; i < ARRAY_SIZE(stage); i++) {
-		for (j = 0; j < ARRAY_SIZE(priv->eng); j++) {
-			if (!priv->eng[j].freq)
+		for (j = 0; j < ARRAY_SIZE(clk->eng); j++) {
+			if (!clk->eng[j].freq)
 				continue;
-			stage[i].exec(priv, j);
+			stage[i].exec(clk, j);
 		}
 	}
 
@@ -406,10 +403,10 @@ gf100_clk_prog(struct nvkm_clk *clk)
 }
 
 static void
-gf100_clk_tidy(struct nvkm_clk *clk)
+gf100_clk_tidy(struct nvkm_clk *obj)
 {
-	struct gf100_clk_priv *priv = (void *)clk;
-	memset(priv->eng, 0x00, sizeof(priv->eng));
+	struct gf100_clk *clk = container_of(obj, typeof(*clk), base);
+	memset(clk->eng, 0x00, sizeof(clk->eng));
 }
 
 static struct nvkm_domain
@@ -433,19 +430,19 @@ gf100_clk_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 	       struct nvkm_oclass *oclass, void *data, u32 size,
 	       struct nvkm_object **pobject)
 {
-	struct gf100_clk_priv *priv;
+	struct gf100_clk *clk;
 	int ret;
 
 	ret = nvkm_clk_create(parent, engine, oclass, gf100_domain,
-			      NULL, 0, false, &priv);
-	*pobject = nv_object(priv);
+			      NULL, 0, false, &clk);
+	*pobject = nv_object(clk);
 	if (ret)
 		return ret;
 
-	priv->base.read = gf100_clk_read;
-	priv->base.calc = gf100_clk_calc;
-	priv->base.prog = gf100_clk_prog;
-	priv->base.tidy = gf100_clk_tidy;
+	clk->base.read = gf100_clk_read;
+	clk->base.calc = gf100_clk_calc;
+	clk->base.prog = gf100_clk_prog;
+	clk->base.tidy = gf100_clk_tidy;
 	return 0;
 }
 
@ -37,28 +37,28 @@ struct gk104_clk_info {
|
||||
u32 coef;
|
||||
};
|
||||
|
||||
struct gk104_clk_priv {
|
||||
struct gk104_clk {
|
||||
struct nvkm_clk base;
|
||||
struct gk104_clk_info eng[16];
|
||||
};
|
||||
|
||||
static u32 read_div(struct gk104_clk_priv *, int, u32, u32);
|
||||
static u32 read_pll(struct gk104_clk_priv *, u32);
|
||||
static u32 read_div(struct gk104_clk *, int, u32, u32);
|
||||
static u32 read_pll(struct gk104_clk *, u32);
|
||||
|
||||
static u32
|
||||
read_vco(struct gk104_clk_priv *priv, u32 dsrc)
|
||||
read_vco(struct gk104_clk *clk, u32 dsrc)
|
||||
{
|
||||
u32 ssrc = nv_rd32(priv, dsrc);
|
||||
u32 ssrc = nv_rd32(clk, dsrc);
|
||||
if (!(ssrc & 0x00000100))
|
||||
return read_pll(priv, 0x00e800);
|
||||
return read_pll(priv, 0x00e820);
|
||||
return read_pll(clk, 0x00e800);
|
||||
return read_pll(clk, 0x00e820);
|
||||
}
|
||||
|
||||
static u32
|
||||
read_pll(struct gk104_clk_priv *priv, u32 pll)
|
||||
read_pll(struct gk104_clk *clk, u32 pll)
|
||||
{
|
||||
u32 ctrl = nv_rd32(priv, pll + 0x00);
|
||||
u32 coef = nv_rd32(priv, pll + 0x04);
|
||||
u32 ctrl = nv_rd32(clk, pll + 0x00);
|
||||
u32 coef = nv_rd32(clk, pll + 0x04);
|
||||
u32 P = (coef & 0x003f0000) >> 16;
|
||||
u32 N = (coef & 0x0000ff00) >> 8;
|
||||
u32 M = (coef & 0x000000ff) >> 0;
|
||||
@ -71,22 +71,22 @@ read_pll(struct gk104_clk_priv *priv, u32 pll)
|
||||
switch (pll) {
|
||||
case 0x00e800:
|
||||
case 0x00e820:
|
||||
sclk = nv_device(priv)->crystal;
|
||||
sclk = nv_device(clk)->crystal;
|
||||
P = 1;
|
||||
break;
|
||||
case 0x132000:
|
||||
sclk = read_pll(priv, 0x132020);
|
||||
sclk = read_pll(clk, 0x132020);
|
||||
P = (coef & 0x10000000) ? 2 : 1;
|
||||
break;
|
||||
case 0x132020:
|
||||
sclk = read_div(priv, 0, 0x137320, 0x137330);
|
||||
fN = nv_rd32(priv, pll + 0x10) >> 16;
|
||||
sclk = read_div(clk, 0, 0x137320, 0x137330);
|
||||
fN = nv_rd32(clk, pll + 0x10) >> 16;
|
||||
break;
|
||||
case 0x137000:
|
||||
case 0x137020:
|
||||
case 0x137040:
|
||||
case 0x1370e0:
|
||||
sclk = read_div(priv, (pll & 0xff) / 0x20, 0x137120, 0x137140);
|
||||
sclk = read_div(clk, (pll & 0xff) / 0x20, 0x137120, 0x137140);
|
||||
break;
|
||||
default:
|
||||
return 0;
|
||||
@ -100,70 +100,70 @@ read_pll(struct gk104_clk_priv *priv, u32 pll)
|
||||
}
|
||||
|
||||
static u32
|
||||
read_div(struct gk104_clk_priv *priv, int doff, u32 dsrc, u32 dctl)
|
||||
read_div(struct gk104_clk *clk, int doff, u32 dsrc, u32 dctl)
|
||||
{
|
||||
u32 ssrc = nv_rd32(priv, dsrc + (doff * 4));
|
||||
u32 sctl = nv_rd32(priv, dctl + (doff * 4));
|
||||
u32 ssrc = nv_rd32(clk, dsrc + (doff * 4));
|
||||
u32 sctl = nv_rd32(clk, dctl + (doff * 4));
|
||||
|
||||
switch (ssrc & 0x00000003) {
|
||||
case 0:
|
||||
if ((ssrc & 0x00030000) != 0x00030000)
|
||||
return nv_device(priv)->crystal;
|
||||
return nv_device(clk)->crystal;
|
||||
return 108000;
|
||||
case 2:
|
||||
return 100000;
|
||||
case 3:
|
||||
if (sctl & 0x80000000) {
|
||||
u32 sclk = read_vco(priv, dsrc + (doff * 4));
|
||||
u32 sclk = read_vco(clk, dsrc + (doff * 4));
|
||||
u32 sdiv = (sctl & 0x0000003f) + 2;
|
||||
return (sclk * 2) / sdiv;
|
||||
}
|
||||
|
||||
return read_vco(priv, dsrc + (doff * 4));
|
||||
return read_vco(clk, dsrc + (doff * 4));
|
||||
default:
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
static u32
|
||||
read_mem(struct gk104_clk_priv *priv)
|
||||
read_mem(struct gk104_clk *clk)
|
||||
{
|
||||
switch (nv_rd32(priv, 0x1373f4) & 0x0000000f) {
|
||||
case 1: return read_pll(priv, 0x132020);
|
||||
case 2: return read_pll(priv, 0x132000);
|
||||
switch (nv_rd32(clk, 0x1373f4) & 0x0000000f) {
|
||||
case 1: return read_pll(clk, 0x132020);
|
||||
case 2: return read_pll(clk, 0x132000);
|
||||
default:
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
static u32
|
||||
read_clk(struct gk104_clk_priv *priv, int clk)
|
||||
read_clk(struct gk104_clk *clk, int idx)
|
||||
{
|
||||
u32 sctl = nv_rd32(priv, 0x137250 + (clk * 4));
|
||||
u32 sctl = nv_rd32(clk, 0x137250 + (idx * 4));
|
||||
u32 sclk, sdiv;
|
||||
|
||||
if (clk < 7) {
|
||||
u32 ssel = nv_rd32(priv, 0x137100);
|
||||
if (ssel & (1 << clk)) {
|
||||
sclk = read_pll(priv, 0x137000 + (clk * 0x20));
|
||||
if (idx < 7) {
|
||||
u32 ssel = nv_rd32(clk, 0x137100);
|
||||
if (ssel & (1 << idx)) {
|
||||
sclk = read_pll(clk, 0x137000 + (idx * 0x20));
|
||||
sdiv = 1;
|
||||
} else {
|
||||
sclk = read_div(priv, clk, 0x137160, 0x1371d0);
|
||||
sclk = read_div(clk, idx, 0x137160, 0x1371d0);
|
||||
sdiv = 0;
|
||||
}
|
||||
} else {
|
||||
u32 ssrc = nv_rd32(priv, 0x137160 + (clk * 0x04));
|
||||
u32 ssrc = nv_rd32(clk, 0x137160 + (idx * 0x04));
|
||||
if ((ssrc & 0x00000003) == 0x00000003) {
|
||||
sclk = read_div(priv, clk, 0x137160, 0x1371d0);
|
||||
sclk = read_div(clk, idx, 0x137160, 0x1371d0);
|
||||
if (ssrc & 0x00000100) {
|
||||
if (ssrc & 0x40000000)
|
||||
sclk = read_pll(priv, 0x1370e0);
|
||||
sclk = read_pll(clk, 0x1370e0);
|
||||
sdiv = 1;
|
||||
} else {
|
||||
sdiv = 0;
|
||||
}
|
||||
} else {
|
||||
sclk = read_div(priv, clk, 0x137160, 0x1371d0);
|
||||
sclk = read_div(clk, idx, 0x137160, 0x1371d0);
|
||||
sdiv = 0;
|
||||
}
|
||||
}
|
||||
@ -180,10 +180,10 @@ read_clk(struct gk104_clk_priv *priv, int clk)
|
||||
}
|
||||
|
||||
static int
|
||||
gk104_clk_read(struct nvkm_clk *clk, enum nv_clk_src src)
|
||||
gk104_clk_read(struct nvkm_clk *obj, enum nv_clk_src src)
|
||||
{
|
||||
struct gk104_clk *clk = container_of(obj, typeof(*clk), base);
|
||||
struct nvkm_device *device = nv_device(clk);
|
||||
struct gk104_clk_priv *priv = (void *)clk;
|
||||
|
||||
switch (src) {
|
||||
case nv_clk_src_crystal:
|
||||
@ -191,21 +191,21 @@ gk104_clk_read(struct nvkm_clk *clk, enum nv_clk_src src)
|
||||
case nv_clk_src_href:
|
||||
return 100000;
|
||||
case nv_clk_src_mem:
|
||||
return read_mem(priv);
|
||||
return read_mem(clk);
|
||||
case nv_clk_src_gpc:
|
||||
return read_clk(priv, 0x00);
|
||||
return read_clk(clk, 0x00);
|
||||
case nv_clk_src_rop:
|
||||
return read_clk(priv, 0x01);
|
||||
return read_clk(clk, 0x01);
|
||||
case nv_clk_src_hubk07:
|
||||
return read_clk(priv, 0x02);
|
||||
return read_clk(clk, 0x02);
|
||||
case nv_clk_src_hubk06:
|
||||
return read_clk(priv, 0x07);
|
||||
return read_clk(clk, 0x07);
|
||||
case nv_clk_src_hubk01:
|
||||
return read_clk(priv, 0x08);
|
||||
return read_clk(clk, 0x08);
|
||||
case nv_clk_src_daemon:
|
||||
return read_clk(priv, 0x0c);
|
||||
return read_clk(clk, 0x0c);
|
||||
case nv_clk_src_vdec:
|
||||
return read_clk(priv, 0x0e);
|
||||
return read_clk(clk, 0x0e);
|
||||
default:
|
||||
nv_error(clk, "invalid clock source %d\n", src);
|
||||
return -EINVAL;
|
||||
@ -213,7 +213,7 @@ gk104_clk_read(struct nvkm_clk *clk, enum nv_clk_src src)
|
||||
}
|
||||
|
||||
static u32
|
||||
calc_div(struct gk104_clk_priv *priv, int clk, u32 ref, u32 freq, u32 *ddiv)
|
||||
calc_div(struct gk104_clk *clk, int idx, u32 ref, u32 freq, u32 *ddiv)
|
||||
{
|
||||
u32 div = min((ref * 2) / freq, (u32)65);
|
||||
if (div < 2)
|
||||
@ -224,7 +224,7 @@ calc_div(struct gk104_clk_priv *priv, int clk, u32 ref, u32 freq, u32 *ddiv)
|
||||
}
|
||||
|
||||
static u32
|
||||
calc_src(struct gk104_clk_priv *priv, int clk, u32 freq, u32 *dsrc, u32 *ddiv)
|
||||
calc_src(struct gk104_clk *clk, int idx, u32 freq, u32 *dsrc, u32 *ddiv)
|
||||
{
|
||||
u32 sclk;
|
||||
|
||||
@ -246,28 +246,28 @@ calc_src(struct gk104_clk_priv *priv, int clk, u32 freq, u32 *dsrc, u32 *ddiv)
|
||||
}
|
||||
|
||||
/* otherwise, calculate the closest divider */
|
||||
sclk = read_vco(priv, 0x137160 + (clk * 4));
|
||||
if (clk < 7)
|
||||
sclk = calc_div(priv, clk, sclk, freq, ddiv);
|
||||
sclk = read_vco(clk, 0x137160 + (idx * 4));
|
||||
if (idx < 7)
|
||||
sclk = calc_div(clk, idx, sclk, freq, ddiv);
|
||||
return sclk;
|
||||
}
|
||||
|
||||
static u32
|
||||
calc_pll(struct gk104_clk_priv *priv, int clk, u32 freq, u32 *coef)
|
||||
calc_pll(struct gk104_clk *clk, int idx, u32 freq, u32 *coef)
|
||||
{
|
||||
struct nvkm_bios *bios = nvkm_bios(priv);
|
||||
struct nvkm_bios *bios = nvkm_bios(clk);
|
||||
struct nvbios_pll limits;
|
||||
int N, M, P, ret;
|
||||
|
||||
ret = nvbios_pll_parse(bios, 0x137000 + (clk * 0x20), &limits);
|
||||
ret = nvbios_pll_parse(bios, 0x137000 + (idx * 0x20), &limits);
|
||||
if (ret)
|
||||
return 0;
|
||||
|
||||
limits.refclk = read_div(priv, clk, 0x137120, 0x137140);
|
||||
limits.refclk = read_div(clk, idx, 0x137120, 0x137140);
|
||||
if (!limits.refclk)
|
||||
return 0;
|
||||
|
||||
ret = gt215_pll_calc(nv_subdev(priv), &limits, freq, &N, NULL, &M, &P);
|
||||
ret = gt215_pll_calc(nv_subdev(clk), &limits, freq, &N, NULL, &M, &P);
|
||||
if (ret <= 0)
|
||||
return 0;
|
||||
|
||||
@ -276,10 +276,10 @@ calc_pll(struct gk104_clk_priv *priv, int clk, u32 freq, u32 *coef)
|
||||
}
|
||||
|
||||
static int
|
||||
calc_clk(struct gk104_clk_priv *priv,
|
||||
struct nvkm_cstate *cstate, int clk, int dom)
|
||||
calc_clk(struct gk104_clk *clk,
|
||||
struct nvkm_cstate *cstate, int idx, int dom)
|
||||
{
|
||||
struct gk104_clk_info *info = &priv->eng[clk];
|
||||
struct gk104_clk_info *info = &clk->eng[idx];
|
||||
u32 freq = cstate->domain[dom];
|
||||
u32 src0, div0, div1D, div1P = 0;
|
||||
u32 clk0, clk1 = 0;
|
||||
@ -289,16 +289,16 @@ calc_clk(struct gk104_clk_priv *priv,
|
||||
return 0;
|
||||
|
||||
/* first possible path, using only dividers */
|
||||
clk0 = calc_src(priv, clk, freq, &src0, &div0);
|
||||
clk0 = calc_div(priv, clk, clk0, freq, &div1D);
|
||||
clk0 = calc_src(clk, idx, freq, &src0, &div0);
|
||||
clk0 = calc_div(clk, idx, clk0, freq, &div1D);
|
||||
|
||||
/* see if we can get any closer using PLLs */
|
||||
if (clk0 != freq && (0x0000ff87 & (1 << clk))) {
|
||||
if (clk <= 7)
|
||||
clk1 = calc_pll(priv, clk, freq, &info->coef);
|
||||
if (clk0 != freq && (0x0000ff87 & (1 << idx))) {
|
||||
if (idx <= 7)
|
||||
clk1 = calc_pll(clk, idx, freq, &info->coef);
|
||||
else
|
||||
clk1 = cstate->domain[nv_clk_src_hubk06];
|
||||
clk1 = calc_div(priv, clk, clk1, freq, &div1P);
|
||||
clk1 = calc_div(clk, idx, clk1, freq, &div1P);
|
||||
}
|
||||
|
||||
/* select the method which gets closest to target freq */
|
||||
@ -319,7 +319,7 @@ calc_clk(struct gk104_clk_priv *priv,
|
||||
info->mdiv |= 0x80000000;
|
||||
info->mdiv |= div1P << 8;
|
||||
}
|
||||
info->ssel = (1 << clk);
|
||||
info->ssel = (1 << idx);
|
||||
info->dsrc = 0x40000100;
|
||||
info->freq = clk1;
|
||||
}
|
||||
@ -328,98 +328,98 @@ calc_clk(struct gk104_clk_priv *priv,
|
||||
}
|
||||
|
||||
static int
|
||||
gk104_clk_calc(struct nvkm_clk *clk, struct nvkm_cstate *cstate)
|
||||
gk104_clk_calc(struct nvkm_clk *obj, struct nvkm_cstate *cstate)
|
||||
{
|
||||
struct gk104_clk_priv *priv = (void *)clk;
|
||||
struct gk104_clk *clk = container_of(obj, typeof(*clk), base);
|
||||
int ret;
|
||||
|
||||
if ((ret = calc_clk(priv, cstate, 0x00, nv_clk_src_gpc)) ||
|
||||
(ret = calc_clk(priv, cstate, 0x01, nv_clk_src_rop)) ||
|
||||
(ret = calc_clk(priv, cstate, 0x02, nv_clk_src_hubk07)) ||
|
||||
(ret = calc_clk(priv, cstate, 0x07, nv_clk_src_hubk06)) ||
|
||||
(ret = calc_clk(priv, cstate, 0x08, nv_clk_src_hubk01)) ||
|
||||
(ret = calc_clk(priv, cstate, 0x0c, nv_clk_src_daemon)) ||
|
||||
(ret = calc_clk(priv, cstate, 0x0e, nv_clk_src_vdec)))
|
||||
if ((ret = calc_clk(clk, cstate, 0x00, nv_clk_src_gpc)) ||
|
||||
(ret = calc_clk(clk, cstate, 0x01, nv_clk_src_rop)) ||
|
||||
(ret = calc_clk(clk, cstate, 0x02, nv_clk_src_hubk07)) ||
|
||||
(ret = calc_clk(clk, cstate, 0x07, nv_clk_src_hubk06)) ||
|
||||
(ret = calc_clk(clk, cstate, 0x08, nv_clk_src_hubk01)) ||
|
||||
(ret = calc_clk(clk, cstate, 0x0c, nv_clk_src_daemon)) ||
|
||||
(ret = calc_clk(clk, cstate, 0x0e, nv_clk_src_vdec)))
|
||||
return ret;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void
|
||||
gk104_clk_prog_0(struct gk104_clk_priv *priv, int clk)
|
||||
gk104_clk_prog_0(struct gk104_clk *clk, int idx)
|
||||
{
|
||||
struct gk104_clk_info *info = &priv->eng[clk];
|
||||
struct gk104_clk_info *info = &clk->eng[idx];
|
||||
if (!info->ssel) {
|
||||
nv_mask(priv, 0x1371d0 + (clk * 0x04), 0x8000003f, info->ddiv);
|
||||
nv_wr32(priv, 0x137160 + (clk * 0x04), info->dsrc);
|
||||
nv_mask(clk, 0x1371d0 + (idx * 0x04), 0x8000003f, info->ddiv);
|
||||
nv_wr32(clk, 0x137160 + (idx * 0x04), info->dsrc);
|
||||
}
|
||||
}
|
||||
|
||||
static void
|
||||
gk104_clk_prog_1_0(struct gk104_clk_priv *priv, int clk)
|
||||
gk104_clk_prog_1_0(struct gk104_clk *clk, int idx)
|
||||
{
|
||||
nv_mask(priv, 0x137100, (1 << clk), 0x00000000);
|
||||
nv_wait(priv, 0x137100, (1 << clk), 0x00000000);
|
||||
nv_mask(clk, 0x137100, (1 << idx), 0x00000000);
|
||||
nv_wait(clk, 0x137100, (1 << idx), 0x00000000);
|
||||
}
|
||||
|
||||
static void
|
||||
gk104_clk_prog_1_1(struct gk104_clk_priv *priv, int clk)
|
||||
gk104_clk_prog_1_1(struct gk104_clk *clk, int idx)
|
||||
{
|
||||
nv_mask(priv, 0x137160 + (clk * 0x04), 0x00000100, 0x00000000);
|
||||
nv_mask(clk, 0x137160 + (idx * 0x04), 0x00000100, 0x00000000);
|
||||
}
|
||||
|
||||
static void
|
||||
gk104_clk_prog_2(struct gk104_clk_priv *priv, int clk)
|
||||
gk104_clk_prog_2(struct gk104_clk *clk, int idx)
|
||||
{
|
||||
struct gk104_clk_info *info = &priv->eng[clk];
|
||||
const u32 addr = 0x137000 + (clk * 0x20);
|
||||
nv_mask(priv, addr + 0x00, 0x00000004, 0x00000000);
|
||||
nv_mask(priv, addr + 0x00, 0x00000001, 0x00000000);
|
||||
struct gk104_clk_info *info = &clk->eng[idx];
|
||||
const u32 addr = 0x137000 + (idx * 0x20);
|
||||
nv_mask(clk, addr + 0x00, 0x00000004, 0x00000000);
|
||||
nv_mask(clk, addr + 0x00, 0x00000001, 0x00000000);
|
||||
if (info->coef) {
|
||||
nv_wr32(priv, addr + 0x04, info->coef);
|
||||
nv_mask(priv, addr + 0x00, 0x00000001, 0x00000001);
|
||||
nv_wait(priv, addr + 0x00, 0x00020000, 0x00020000);
|
||||
nv_mask(priv, addr + 0x00, 0x00020004, 0x00000004);
|
||||
nv_wr32(clk, addr + 0x04, info->coef);
|
||||
nv_mask(clk, addr + 0x00, 0x00000001, 0x00000001);
|
||||
nv_wait(clk, addr + 0x00, 0x00020000, 0x00020000);
|
||||
nv_mask(clk, addr + 0x00, 0x00020004, 0x00000004);
|
||||
}
|
||||
}
|
||||
|
||||
static void
|
||||
gk104_clk_prog_3(struct gk104_clk_priv *priv, int clk)
|
||||
gk104_clk_prog_3(struct gk104_clk *clk, int idx)
|
||||
{
|
||||
struct gk104_clk_info *info = &priv->eng[clk];
|
||||
struct gk104_clk_info *info = &clk->eng[idx];
|
||||
if (info->ssel)
|
||||
nv_mask(priv, 0x137250 + (clk * 0x04), 0x00003f00, info->mdiv);
|
||||
nv_mask(clk, 0x137250 + (idx * 0x04), 0x00003f00, info->mdiv);
|
||||
else
|
||||
nv_mask(priv, 0x137250 + (clk * 0x04), 0x0000003f, info->mdiv);
|
||||
nv_mask(clk, 0x137250 + (idx * 0x04), 0x0000003f, info->mdiv);
|
||||
}
|
||||
|
||||
static void
|
||||
gk104_clk_prog_4_0(struct gk104_clk_priv *priv, int clk)
|
||||
gk104_clk_prog_4_0(struct gk104_clk *clk, int idx)
|
||||
{
|
||||
struct gk104_clk_info *info = &priv->eng[clk];
|
||||
struct gk104_clk_info *info = &clk->eng[idx];
|
||||
if (info->ssel) {
|
||||
nv_mask(priv, 0x137100, (1 << clk), info->ssel);
|
||||
nv_wait(priv, 0x137100, (1 << clk), info->ssel);
|
||||
nv_mask(clk, 0x137100, (1 << idx), info->ssel);
|
||||
nv_wait(clk, 0x137100, (1 << idx), info->ssel);
|
||||
}
|
||||
}
|
||||
|
||||
static void
|
||||
gk104_clk_prog_4_1(struct gk104_clk_priv *priv, int clk)
|
||||
gk104_clk_prog_4_1(struct gk104_clk *clk, int idx)
|
||||
{
|
||||
struct gk104_clk_info *info = &priv->eng[clk];
|
||||
struct gk104_clk_info *info = &clk->eng[idx];
|
||||
if (info->ssel) {
|
||||
nv_mask(priv, 0x137160 + (clk * 0x04), 0x40000000, 0x40000000);
|
||||
nv_mask(priv, 0x137160 + (clk * 0x04), 0x00000100, 0x00000100);
|
||||
nv_mask(clk, 0x137160 + (idx * 0x04), 0x40000000, 0x40000000);
|
||||
nv_mask(clk, 0x137160 + (idx * 0x04), 0x00000100, 0x00000100);
|
||||
}
|
||||
}
|
||||
|
||||
static int
|
||||
gk104_clk_prog(struct nvkm_clk *clk)
|
||||
gk104_clk_prog(struct nvkm_clk *obj)
|
||||
{
|
||||
struct gk104_clk_priv *priv = (void *)clk;
|
||||
struct gk104_clk *clk = container_of(obj, typeof(*clk), base);
|
||||
struct {
|
||||
u32 mask;
|
||||
void (*exec)(struct gk104_clk_priv *, int);
|
||||
void (*exec)(struct gk104_clk *, int);
|
||||
} stage[] = {
|
||||
{ 0x007f, gk104_clk_prog_0 }, /* div programming */
|
||||
{ 0x007f, gk104_clk_prog_1_0 }, /* select div mode */
|
||||
@ -432,12 +432,12 @@ gk104_clk_prog(struct nvkm_clk *clk)
|
||||
int i, j;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(stage); i++) {
|
||||
for (j = 0; j < ARRAY_SIZE(priv->eng); j++) {
|
||||
for (j = 0; j < ARRAY_SIZE(clk->eng); j++) {
|
||||
if (!(stage[i].mask & (1 << j)))
|
||||
continue;
|
||||
if (!priv->eng[j].freq)
|
||||
if (!clk->eng[j].freq)
|
||||
continue;
|
||||
stage[i].exec(priv, j);
|
||||
stage[i].exec(clk, j);
|
||||
}
|
||||
}
|
||||
|
||||
@ -445,10 +445,10 @@ gk104_clk_prog(struct nvkm_clk *clk)
|
||||
}
|
||||
|
||||
static void
|
||||
gk104_clk_tidy(struct nvkm_clk *clk)
|
||||
gk104_clk_tidy(struct nvkm_clk *obj)
|
||||
{
|
||||
struct gk104_clk_priv *priv = (void *)clk;
|
||||
memset(priv->eng, 0x00, sizeof(priv->eng));
|
||||
struct gk104_clk *clk = container_of(obj, typeof(*clk), base);
|
||||
memset(clk->eng, 0x00, sizeof(clk->eng));
|
||||
}
|
||||
|
||||
static struct nvkm_domain
|
||||
@ -471,19 +471,19 @@ gk104_clk_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
|
||||
struct nvkm_oclass *oclass, void *data, u32 size,
|
||||
struct nvkm_object **pobject)
|
||||
{
|
||||
struct gk104_clk_priv *priv;
|
||||
struct gk104_clk *clk;
|
||||
int ret;
|
||||
|
||||
ret = nvkm_clk_create(parent, engine, oclass, gk104_domain,
|
||||
NULL, 0, true, &priv);
|
||||
*pobject = nv_object(priv);
|
||||
NULL, 0, true, &clk);
|
||||
*pobject = nv_object(clk);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
priv->base.read = gk104_clk_read;
|
||||
priv->base.calc = gk104_clk_calc;
|
||||
priv->base.prog = gk104_clk_prog;
|
||||
priv->base.tidy = gk104_clk_tidy;
|
||||
clk->base.read = gk104_clk_read;
|
||||
clk->base.calc = gk104_clk_calc;
|
||||
clk->base.prog = gk104_clk_prog;
|
||||
clk->base.tidy = gk104_clk_tidy;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -115,40 +115,40 @@ static const struct gk20a_clk_pllg_params gk20a_pllg_params = {
|
||||
.min_pl = 1, .max_pl = 32,
|
||||
};
|
||||
|
||||
struct gk20a_clk_priv {
|
||||
struct gk20a_clk {
|
||||
struct nvkm_clk base;
|
||||
const struct gk20a_clk_pllg_params *params;
|
||||
u32 m, n, pl;
|
||||
u32 parent_rate;
|
||||
};
|
||||
#define to_gk20a_clk(base) container_of(base, struct gk20a_clk_priv, base)
|
||||
#define to_gk20a_clk(base) container_of(base, struct gk20a_clk, base)
|
||||
|
||||
static void
|
||||
gk20a_pllg_read_mnp(struct gk20a_clk_priv *priv)
|
||||
gk20a_pllg_read_mnp(struct gk20a_clk *clk)
|
||||
{
|
||||
u32 val;
|
||||
|
||||
val = nv_rd32(priv, GPCPLL_COEFF);
|
||||
priv->m = (val >> GPCPLL_COEFF_M_SHIFT) & MASK(GPCPLL_COEFF_M_WIDTH);
|
||||
priv->n = (val >> GPCPLL_COEFF_N_SHIFT) & MASK(GPCPLL_COEFF_N_WIDTH);
|
||||
priv->pl = (val >> GPCPLL_COEFF_P_SHIFT) & MASK(GPCPLL_COEFF_P_WIDTH);
|
||||
val = nv_rd32(clk, GPCPLL_COEFF);
|
||||
clk->m = (val >> GPCPLL_COEFF_M_SHIFT) & MASK(GPCPLL_COEFF_M_WIDTH);
|
||||
clk->n = (val >> GPCPLL_COEFF_N_SHIFT) & MASK(GPCPLL_COEFF_N_WIDTH);
|
||||
clk->pl = (val >> GPCPLL_COEFF_P_SHIFT) & MASK(GPCPLL_COEFF_P_WIDTH);
|
||||
}
|
||||
|
||||
static u32
|
||||
gk20a_pllg_calc_rate(struct gk20a_clk_priv *priv)
|
||||
gk20a_pllg_calc_rate(struct gk20a_clk *clk)
|
||||
{
|
||||
u32 rate;
|
||||
u32 divider;
|
||||
|
||||
rate = priv->parent_rate * priv->n;
|
||||
divider = priv->m * pl_to_div[priv->pl];
|
||||
rate = clk->parent_rate * clk->n;
|
||||
divider = clk->m * pl_to_div[clk->pl];
|
||||
do_div(rate, divider);
|
||||
|
||||
return rate / 2;
|
||||
}
|
||||
|
||||
static int
|
||||
gk20a_pllg_calc_mnp(struct gk20a_clk_priv *priv, unsigned long rate)
|
||||
gk20a_pllg_calc_mnp(struct gk20a_clk *clk, unsigned long rate)
|
||||
{
|
||||
u32 target_clk_f, ref_clk_f, target_freq;
|
||||
u32 min_vco_f, max_vco_f;
|
||||
@ -161,13 +161,13 @@ gk20a_pllg_calc_mnp(struct gk20a_clk_priv *priv, unsigned long rate)
|
||||
u32 pl;
|
||||
|
||||
target_clk_f = rate * 2 / MHZ;
|
||||
ref_clk_f = priv->parent_rate / MHZ;
|
||||
ref_clk_f = clk->parent_rate / MHZ;
|
||||
|
||||
max_vco_f = priv->params->max_vco;
|
||||
min_vco_f = priv->params->min_vco;
|
||||
best_m = priv->params->max_m;
|
||||
best_n = priv->params->min_n;
|
||||
best_pl = priv->params->min_pl;
|
||||
max_vco_f = clk->params->max_vco;
|
||||
min_vco_f = clk->params->min_vco;
|
||||
best_m = clk->params->max_m;
|
||||
best_n = clk->params->min_n;
|
||||
best_pl = clk->params->min_pl;
|
||||
|
||||
target_vco_f = target_clk_f + target_clk_f / 50;
|
||||
if (max_vco_f < target_vco_f)
|
||||
@ -175,13 +175,13 @@ gk20a_pllg_calc_mnp(struct gk20a_clk_priv *priv, unsigned long rate)
|
||||
|
||||
/* min_pl <= high_pl <= max_pl */
|
||||
high_pl = (max_vco_f + target_vco_f - 1) / target_vco_f;
|
||||
high_pl = min(high_pl, priv->params->max_pl);
|
||||
high_pl = max(high_pl, priv->params->min_pl);
|
||||
high_pl = min(high_pl, clk->params->max_pl);
|
||||
high_pl = max(high_pl, clk->params->min_pl);
|
||||
|
||||
/* min_pl <= low_pl <= max_pl */
|
||||
low_pl = min_vco_f / target_vco_f;
|
||||
low_pl = min(low_pl, priv->params->max_pl);
|
||||
low_pl = max(low_pl, priv->params->min_pl);
|
||||
low_pl = min(low_pl, clk->params->max_pl);
|
||||
low_pl = max(low_pl, clk->params->min_pl);
|
||||
|
||||
/* Find Indices of high_pl and low_pl */
|
||||
for (pl = 0; pl < ARRAY_SIZE(pl_to_div) - 1; pl++) {
|
||||
@ -197,30 +197,30 @@ gk20a_pllg_calc_mnp(struct gk20a_clk_priv *priv, unsigned long rate)
|
||||
}
|
||||
}
|
||||
|
||||
nv_debug(priv, "low_PL %d(div%d), high_PL %d(div%d)", low_pl,
|
||||
nv_debug(clk, "low_PL %d(div%d), high_PL %d(div%d)", low_pl,
|
||||
pl_to_div[low_pl], high_pl, pl_to_div[high_pl]);
|
||||
|
||||
/* Select lowest possible VCO */
|
||||
for (pl = low_pl; pl <= high_pl; pl++) {
|
||||
target_vco_f = target_clk_f * pl_to_div[pl];
|
||||
for (m = priv->params->min_m; m <= priv->params->max_m; m++) {
|
||||
for (m = clk->params->min_m; m <= clk->params->max_m; m++) {
|
||||
u_f = ref_clk_f / m;
|
||||
|
||||
if (u_f < priv->params->min_u)
|
||||
if (u_f < clk->params->min_u)
|
||||
break;
|
||||
if (u_f > priv->params->max_u)
|
||||
if (u_f > clk->params->max_u)
|
||||
continue;
|
||||
|
||||
n = (target_vco_f * m) / ref_clk_f;
|
||||
n2 = ((target_vco_f * m) + (ref_clk_f - 1)) / ref_clk_f;
|
||||
|
||||
if (n > priv->params->max_n)
|
||||
if (n > clk->params->max_n)
|
||||
break;
|
||||
|
||||
for (; n <= n2; n++) {
|
||||
if (n < priv->params->min_n)
|
||||
if (n < clk->params->min_n)
|
||||
continue;
|
||||
if (n > priv->params->max_n)
|
||||
if (n > clk->params->max_n)
|
||||
break;
|
||||
|
||||
vco_f = ref_clk_f * n / m;
|
||||
@ -248,71 +248,71 @@ found_match:
|
||||
WARN_ON(best_delta == ~0);
|
||||
|
||||
if (best_delta != 0)
|
||||
nv_debug(priv, "no best match for target @ %dMHz on gpc_pll",
|
||||
nv_debug(clk, "no best match for target @ %dMHz on gpc_pll",
|
||||
target_clk_f);
|
||||
|
||||
priv->m = best_m;
|
||||
priv->n = best_n;
|
||||
priv->pl = best_pl;
|
||||
clk->m = best_m;
|
||||
clk->n = best_n;
|
||||
clk->pl = best_pl;
|
||||
|
||||
target_freq = gk20a_pllg_calc_rate(priv) / MHZ;
|
||||
target_freq = gk20a_pllg_calc_rate(clk) / MHZ;
|
||||
|
||||
nv_debug(priv, "actual target freq %d MHz, M %d, N %d, PL %d(div%d)\n",
|
||||
target_freq, priv->m, priv->n, priv->pl, pl_to_div[priv->pl]);
|
||||
nv_debug(clk, "actual target freq %d MHz, M %d, N %d, PL %d(div%d)\n",
|
||||
target_freq, clk->m, clk->n, clk->pl, pl_to_div[clk->pl]);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
gk20a_pllg_slide(struct gk20a_clk_priv *priv, u32 n)
|
||||
gk20a_pllg_slide(struct gk20a_clk *clk, u32 n)
|
||||
{
|
||||
u32 val;
|
||||
int ramp_timeout;
|
||||
|
||||
/* get old coefficients */
|
||||
val = nv_rd32(priv, GPCPLL_COEFF);
|
||||
val = nv_rd32(clk, GPCPLL_COEFF);
|
||||
/* do nothing if NDIV is the same */
|
||||
if (n == ((val >> GPCPLL_COEFF_N_SHIFT) & MASK(GPCPLL_COEFF_N_WIDTH)))
|
||||
return 0;
|
||||
|
||||
/* setup */
|
||||
nv_mask(priv, GPCPLL_CFG2, 0xff << GPCPLL_CFG2_PLL_STEPA_SHIFT,
|
||||
nv_mask(clk, GPCPLL_CFG2, 0xff << GPCPLL_CFG2_PLL_STEPA_SHIFT,
|
||||
0x2b << GPCPLL_CFG2_PLL_STEPA_SHIFT);
|
||||
nv_mask(priv, GPCPLL_CFG3, 0xff << GPCPLL_CFG3_PLL_STEPB_SHIFT,
|
||||
nv_mask(clk, GPCPLL_CFG3, 0xff << GPCPLL_CFG3_PLL_STEPB_SHIFT,
|
||||
0xb << GPCPLL_CFG3_PLL_STEPB_SHIFT);
|
||||
|
||||
/* pll slowdown mode */
|
||||
nv_mask(priv, GPCPLL_NDIV_SLOWDOWN,
|
||||
nv_mask(clk, GPCPLL_NDIV_SLOWDOWN,
|
||||
BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT),
|
||||
BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT));
|
||||
|
||||
/* new ndiv ready for ramp */
|
||||
val = nv_rd32(priv, GPCPLL_COEFF);
|
||||
val = nv_rd32(clk, GPCPLL_COEFF);
|
||||
val &= ~(MASK(GPCPLL_COEFF_N_WIDTH) << GPCPLL_COEFF_N_SHIFT);
|
||||
val |= (n & MASK(GPCPLL_COEFF_N_WIDTH)) << GPCPLL_COEFF_N_SHIFT;
|
||||
udelay(1);
|
||||
nv_wr32(priv, GPCPLL_COEFF, val);
|
||||
nv_wr32(clk, GPCPLL_COEFF, val);
|
||||
|
||||
/* dynamic ramp to new ndiv */
|
||||
val = nv_rd32(priv, GPCPLL_NDIV_SLOWDOWN);
|
||||
val = nv_rd32(clk, GPCPLL_NDIV_SLOWDOWN);
|
||||
val |= 0x1 << GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT;
|
||||
udelay(1);
|
||||
nv_wr32(priv, GPCPLL_NDIV_SLOWDOWN, val);
|
||||
nv_wr32(clk, GPCPLL_NDIV_SLOWDOWN, val);
|
||||
|
||||
for (ramp_timeout = 500; ramp_timeout > 0; ramp_timeout--) {
|
||||
udelay(1);
|
||||
val = nv_rd32(priv, GPC_BCAST_NDIV_SLOWDOWN_DEBUG);
|
||||
val = nv_rd32(clk, GPC_BCAST_NDIV_SLOWDOWN_DEBUG);
|
||||
if (val & GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK)
|
||||
break;
|
||||
}
|
||||
|
||||
/* exit slowdown mode */
|
||||
nv_mask(priv, GPCPLL_NDIV_SLOWDOWN,
|
||||
nv_mask(clk, GPCPLL_NDIV_SLOWDOWN,
|
||||
BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT) |
|
||||
BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT), 0);
|
||||
nv_rd32(priv, GPCPLL_NDIV_SLOWDOWN);
|
||||
nv_rd32(clk, GPCPLL_NDIV_SLOWDOWN);
|
||||
|
||||
if (ramp_timeout <= 0) {
|
||||
nv_error(priv, "gpcpll dynamic ramp timeout\n");
|
||||
nv_error(clk, "gpcpll dynamic ramp timeout\n");
|
||||
return -ETIMEDOUT;
|
||||
}
|
||||
|
||||
@ -320,138 +320,138 @@ gk20a_pllg_slide(struct gk20a_clk_priv *priv, u32 n)
|
||||
}
|
||||
|
||||
static void
|
||||
_gk20a_pllg_enable(struct gk20a_clk_priv *priv)
|
||||
_gk20a_pllg_enable(struct gk20a_clk *clk)
|
||||
{
|
||||
nv_mask(priv, GPCPLL_CFG, GPCPLL_CFG_ENABLE, GPCPLL_CFG_ENABLE);
|
||||
nv_rd32(priv, GPCPLL_CFG);
|
||||
nv_mask(clk, GPCPLL_CFG, GPCPLL_CFG_ENABLE, GPCPLL_CFG_ENABLE);
|
||||
nv_rd32(clk, GPCPLL_CFG);
|
||||
}
|
||||
|
||||
static void
|
||||
_gk20a_pllg_disable(struct gk20a_clk_priv *priv)
|
||||
_gk20a_pllg_disable(struct gk20a_clk *clk)
|
||||
{
|
||||
nv_mask(priv, GPCPLL_CFG, GPCPLL_CFG_ENABLE, 0);
|
||||
nv_rd32(priv, GPCPLL_CFG);
|
||||
nv_mask(clk, GPCPLL_CFG, GPCPLL_CFG_ENABLE, 0);
|
||||
nv_rd32(clk, GPCPLL_CFG);
|
||||
}
|
||||
|
||||
static int
|
||||
_gk20a_pllg_program_mnp(struct gk20a_clk_priv *priv, bool allow_slide)
|
||||
_gk20a_pllg_program_mnp(struct gk20a_clk *clk, bool allow_slide)
|
||||
{
|
||||
u32 val, cfg;
|
||||
u32 m_old, pl_old, n_lo;
|
||||
|
||||
/* get old coefficients */
|
||||
val = nv_rd32(priv, GPCPLL_COEFF);
|
||||
val = nv_rd32(clk, GPCPLL_COEFF);
|
||||
m_old = (val >> GPCPLL_COEFF_M_SHIFT) & MASK(GPCPLL_COEFF_M_WIDTH);
|
||||
pl_old = (val >> GPCPLL_COEFF_P_SHIFT) & MASK(GPCPLL_COEFF_P_WIDTH);
|
||||
|
||||
/* do NDIV slide if there is no change in M and PL */
|
||||
cfg = nv_rd32(priv, GPCPLL_CFG);
|
||||
if (allow_slide && priv->m == m_old && priv->pl == pl_old &&
|
||||
cfg = nv_rd32(clk, GPCPLL_CFG);
|
||||
if (allow_slide && clk->m == m_old && clk->pl == pl_old &&
|
||||
(cfg & GPCPLL_CFG_ENABLE)) {
|
||||
return gk20a_pllg_slide(priv, priv->n);
|
||||
return gk20a_pllg_slide(clk, clk->n);
|
||||
}
|
||||
|
||||
/* slide down to NDIV_LO */
|
||||
n_lo = DIV_ROUND_UP(m_old * priv->params->min_vco,
|
||||
priv->parent_rate / MHZ);
|
||||
n_lo = DIV_ROUND_UP(m_old * clk->params->min_vco,
|
||||
clk->parent_rate / MHZ);
|
||||
if (allow_slide && (cfg & GPCPLL_CFG_ENABLE)) {
|
||||
int ret = gk20a_pllg_slide(priv, n_lo);
|
||||
int ret = gk20a_pllg_slide(clk, n_lo);
|
||||
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* split FO-to-bypass jump in halfs by setting out divider 1:2 */
|
||||
nv_mask(priv, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
|
||||
nv_mask(clk, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
|
||||
0x2 << GPC2CLK_OUT_VCODIV_SHIFT);
|
||||
|
||||
/* put PLL in bypass before programming it */
|
||||
val = nv_rd32(priv, SEL_VCO);
|
||||
val = nv_rd32(clk, SEL_VCO);
|
||||
val &= ~(BIT(SEL_VCO_GPC2CLK_OUT_SHIFT));
|
||||
udelay(2);
|
||||
nv_wr32(priv, SEL_VCO, val);
|
||||
nv_wr32(clk, SEL_VCO, val);
|
||||
|
||||
/* get out from IDDQ */
|
||||
val = nv_rd32(priv, GPCPLL_CFG);
|
||||
val = nv_rd32(clk, GPCPLL_CFG);
|
||||
if (val & GPCPLL_CFG_IDDQ) {
|
||||
val &= ~GPCPLL_CFG_IDDQ;
|
||||
nv_wr32(priv, GPCPLL_CFG, val);
|
||||
nv_rd32(priv, GPCPLL_CFG);
|
||||
nv_wr32(clk, GPCPLL_CFG, val);
|
||||
nv_rd32(clk, GPCPLL_CFG);
|
||||
udelay(2);
|
||||
}
|
||||
|
||||
_gk20a_pllg_disable(priv);
|
||||
_gk20a_pllg_disable(clk);
|
||||
|
||||
nv_debug(priv, "%s: m=%d n=%d pl=%d\n", __func__, priv->m, priv->n,
|
||||
priv->pl);
|
||||
nv_debug(clk, "%s: m=%d n=%d pl=%d\n", __func__, clk->m, clk->n,
|
||||
clk->pl);
|
||||
|
||||
n_lo = DIV_ROUND_UP(priv->m * priv->params->min_vco,
|
||||
priv->parent_rate / MHZ);
|
||||
val = priv->m << GPCPLL_COEFF_M_SHIFT;
|
||||
val |= (allow_slide ? n_lo : priv->n) << GPCPLL_COEFF_N_SHIFT;
|
||||
val |= priv->pl << GPCPLL_COEFF_P_SHIFT;
|
||||
nv_wr32(priv, GPCPLL_COEFF, val);
|
||||
n_lo = DIV_ROUND_UP(clk->m * clk->params->min_vco,
|
||||
clk->parent_rate / MHZ);
|
||||
val = clk->m << GPCPLL_COEFF_M_SHIFT;
|
||||
val |= (allow_slide ? n_lo : clk->n) << GPCPLL_COEFF_N_SHIFT;
|
||||
val |= clk->pl << GPCPLL_COEFF_P_SHIFT;
|
||||
nv_wr32(clk, GPCPLL_COEFF, val);
|
||||
|
||||
_gk20a_pllg_enable(priv);
|
||||
_gk20a_pllg_enable(clk);
|
||||
|
||||
val = nv_rd32(priv, GPCPLL_CFG);
|
||||
val = nv_rd32(clk, GPCPLL_CFG);
|
||||
if (val & GPCPLL_CFG_LOCK_DET_OFF) {
|
||||
val &= ~GPCPLL_CFG_LOCK_DET_OFF;
|
||||
nv_wr32(priv, GPCPLL_CFG, val);
|
||||
nv_wr32(clk, GPCPLL_CFG, val);
|
||||
}
|
||||
|
||||
if (!nvkm_timer_wait_eq(priv, 300000, GPCPLL_CFG, GPCPLL_CFG_LOCK,
|
||||
if (!nvkm_timer_wait_eq(clk, 300000, GPCPLL_CFG, GPCPLL_CFG_LOCK,
|
||||
GPCPLL_CFG_LOCK)) {
|
||||
nv_error(priv, "%s: timeout waiting for pllg lock\n", __func__);
|
||||
nv_error(clk, "%s: timeout waiting for pllg lock\n", __func__);
|
||||
return -ETIMEDOUT;
|
||||
}
|
||||
|
||||
/* switch to VCO mode */
|
||||
nv_mask(priv, SEL_VCO, 0, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT));
|
||||
nv_mask(clk, SEL_VCO, 0, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT));
|
||||
|
||||
/* restore out divider 1:1 */
|
||||
val = nv_rd32(priv, GPC2CLK_OUT);
|
||||
val = nv_rd32(clk, GPC2CLK_OUT);
|
||||
val &= ~GPC2CLK_OUT_VCODIV_MASK;
|
||||
udelay(2);
|
||||
nv_wr32(priv, GPC2CLK_OUT, val);
|
||||
nv_wr32(clk, GPC2CLK_OUT, val);
|
||||
|
||||
/* slide up to new NDIV */
|
||||
return allow_slide ? gk20a_pllg_slide(priv, priv->n) : 0;
|
||||
return allow_slide ? gk20a_pllg_slide(clk, clk->n) : 0;
|
||||
}
|
||||
|
||||
static int
|
||||
gk20a_pllg_program_mnp(struct gk20a_clk_priv *priv)
|
||||
gk20a_pllg_program_mnp(struct gk20a_clk *clk)
|
||||
{
|
||||
int err;
|
||||
|
||||
err = _gk20a_pllg_program_mnp(priv, true);
|
||||
err = _gk20a_pllg_program_mnp(clk, true);
|
||||
if (err)
|
||||
err = _gk20a_pllg_program_mnp(priv, false);
|
||||
err = _gk20a_pllg_program_mnp(clk, false);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static void
|
||||
gk20a_pllg_disable(struct gk20a_clk_priv *priv)
|
||||
gk20a_pllg_disable(struct gk20a_clk *clk)
|
||||
{
|
||||
u32 val;
|
||||
|
||||
/* slide to VCO min */
|
||||
val = nv_rd32(priv, GPCPLL_CFG);
|
||||
val = nv_rd32(clk, GPCPLL_CFG);
|
||||
if (val & GPCPLL_CFG_ENABLE) {
|
||||
u32 coeff, m, n_lo;
|
||||
|
||||
coeff = nv_rd32(priv, GPCPLL_COEFF);
|
||||
coeff = nv_rd32(clk, GPCPLL_COEFF);
|
||||
m = (coeff >> GPCPLL_COEFF_M_SHIFT) & MASK(GPCPLL_COEFF_M_WIDTH);
|
||||
n_lo = DIV_ROUND_UP(m * priv->params->min_vco,
|
||||
priv->parent_rate / MHZ);
|
||||
gk20a_pllg_slide(priv, n_lo);
|
||||
n_lo = DIV_ROUND_UP(m * clk->params->min_vco,
|
||||
clk->parent_rate / MHZ);
|
||||
gk20a_pllg_slide(clk, n_lo);
|
||||
}
|
||||
|
||||
/* put PLL in bypass before disabling it */
|
||||
nv_mask(priv, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT), 0);
|
||||
nv_mask(clk, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT), 0);
|
||||
|
||||
_gk20a_pllg_disable(priv);
|
||||
_gk20a_pllg_disable(clk);
|
||||
}
|
||||
|
||||
#define GK20A_CLK_GPC_MDIV 1000
|
||||
@ -558,16 +558,16 @@ gk20a_pstates[] = {
|
||||
};
|
||||
|
||||
static int
|
||||
gk20a_clk_read(struct nvkm_clk *clk, enum nv_clk_src src)
|
||||
gk20a_clk_read(struct nvkm_clk *obj, enum nv_clk_src src)
|
||||
{
|
||||
struct gk20a_clk_priv *priv = (void *)clk;
|
||||
struct gk20a_clk *clk = container_of(obj, typeof(*clk), base);
|
||||
|
||||
switch (src) {
|
||||
case nv_clk_src_crystal:
|
||||
return nv_device(clk)->crystal;
|
||||
case nv_clk_src_gpc:
|
||||
gk20a_pllg_read_mnp(priv);
|
||||
return gk20a_pllg_calc_rate(priv) / GK20A_CLK_GPC_MDIV;
|
||||
gk20a_pllg_read_mnp(clk);
|
||||
return gk20a_pllg_calc_rate(clk) / GK20A_CLK_GPC_MDIV;
|
||||
default:
|
||||
nv_error(clk, "invalid clock source %d\n", src);
|
||||
return -EINVAL;
|
||||
@ -575,36 +575,36 @@ gk20a_clk_read(struct nvkm_clk *clk, enum nv_clk_src src)
|
||||
}
|
||||
|
||||
static int
|
||||
gk20a_clk_calc(struct nvkm_clk *clk, struct nvkm_cstate *cstate)
|
||||
gk20a_clk_calc(struct nvkm_clk *obj, struct nvkm_cstate *cstate)
|
||||
{
|
||||
struct gk20a_clk_priv *priv = (void *)clk;
|
||||
struct gk20a_clk *clk = container_of(obj, typeof(*clk), base);
|
||||
|
||||
return gk20a_pllg_calc_mnp(priv, cstate->domain[nv_clk_src_gpc] *
|
||||
return gk20a_pllg_calc_mnp(clk, cstate->domain[nv_clk_src_gpc] *
|
||||
GK20A_CLK_GPC_MDIV);
|
||||
}
|
||||
|
||||
static int
|
||||
gk20a_clk_prog(struct nvkm_clk *clk)
|
||||
gk20a_clk_prog(struct nvkm_clk *obj)
|
||||
{
|
||||
struct gk20a_clk_priv *priv = (void *)clk;
|
||||
struct gk20a_clk *clk = container_of(obj, typeof(*clk), base);
|
||||
|
||||
return gk20a_pllg_program_mnp(priv);
|
||||
return gk20a_pllg_program_mnp(clk);
|
||||
}
|
||||
|
||||
static void
|
||||
gk20a_clk_tidy(struct nvkm_clk *clk)
|
||||
gk20a_clk_tidy(struct nvkm_clk *obj)
|
||||
{
|
||||
}
|
||||
|
||||
static int
|
||||
gk20a_clk_fini(struct nvkm_object *object, bool suspend)
|
||||
{
|
||||
struct gk20a_clk_priv *priv = (void *)object;
|
||||
struct gk20a_clk *clk = (void *)object;
|
||||
int ret;
|
||||
|
||||
ret = nvkm_clk_fini(&priv->base, false);
|
||||
ret = nvkm_clk_fini(&clk->base, false);
|
||||
|
||||
gk20a_pllg_disable(priv);
|
||||
gk20a_pllg_disable(clk);
|
||||
|
||||
return ret;
|
||||
}
|
||||
@ -612,18 +612,18 @@ gk20a_clk_fini(struct nvkm_object *object, bool suspend)
|
||||
static int
|
||||
gk20a_clk_init(struct nvkm_object *object)
|
||||
{
|
||||
struct gk20a_clk_priv *priv = (void *)object;
|
||||
struct gk20a_clk *clk = (void *)object;
|
||||
int ret;
|
||||
|
||||
nv_mask(priv, GPC2CLK_OUT, GPC2CLK_OUT_INIT_MASK, GPC2CLK_OUT_INIT_VAL);
|
||||
nv_mask(clk, GPC2CLK_OUT, GPC2CLK_OUT_INIT_MASK, GPC2CLK_OUT_INIT_VAL);
|
||||
|
||||
ret = nvkm_clk_init(&priv->base);
|
||||
ret = nvkm_clk_init(&clk->base);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = gk20a_clk_prog(&priv->base);
|
||||
ret = gk20a_clk_prog(&clk->base);
|
||||
if (ret) {
|
||||
nv_error(priv, "cannot initialize clock\n");
|
||||
nv_error(clk, "cannot initialize clock\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -635,7 +635,7 @@ gk20a_clk_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
|
||||
struct nvkm_oclass *oclass, void *data, u32 size,
|
||||
struct nvkm_object **pobject)
|
||||
{
|
||||
struct gk20a_clk_priv *priv;
|
||||
struct gk20a_clk *clk;
|
||||
struct nouveau_platform_device *plat;
|
||||
int ret;
|
||||
int i;
|
||||
@ -648,21 +648,21 @@ gk20a_clk_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
|
||||
|
||||
ret = nvkm_clk_create(parent, engine, oclass, gk20a_domains,
|
||||
gk20a_pstates, ARRAY_SIZE(gk20a_pstates),
|
||||
true, &priv);
|
||||
*pobject = nv_object(priv);
|
||||
true, &clk);
|
||||
*pobject = nv_object(clk);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
priv->params = &gk20a_pllg_params;
|
||||
clk->params = &gk20a_pllg_params;
|
||||
|
||||
plat = nv_device_to_platform(nv_device(parent));
|
||||
priv->parent_rate = clk_get_rate(plat->gpu->clk);
|
||||
nv_info(priv, "parent clock rate: %d Mhz\n", priv->parent_rate / MHZ);
|
||||
clk->parent_rate = clk_get_rate(plat->gpu->clk);
|
||||
nv_info(clk, "parent clock rate: %d Mhz\n", clk->parent_rate / MHZ);
|
||||
|
||||
priv->base.read = gk20a_clk_read;
|
||||
priv->base.calc = gk20a_clk_calc;
|
||||
priv->base.prog = gk20a_clk_prog;
|
||||
priv->base.tidy = gk20a_clk_tidy;
|
||||
clk->base.read = gk20a_clk_read;
|
||||
clk->base.calc = gk20a_clk_calc;
|
||||
clk->base.prog = gk20a_clk_prog;
|
||||
clk->base.tidy = gk20a_clk_tidy;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -30,47 +30,47 @@
|
||||
#include <subdev/bios/pll.h>
|
||||
#include <subdev/timer.h>
|
||||
|
||||
struct gt215_clk_priv {
|
||||
struct gt215_clk {
|
||||
struct nvkm_clk base;
|
||||
struct gt215_clk_info eng[nv_clk_src_max];
|
||||
};
|
||||
|
||||
static u32 read_clk(struct gt215_clk_priv *, int, bool);
|
||||
static u32 read_pll(struct gt215_clk_priv *, int, u32);
|
||||
static u32 read_clk(struct gt215_clk *, int, bool);
|
||||
static u32 read_pll(struct gt215_clk *, int, u32);
|
||||
|
||||
static u32
|
||||
read_vco(struct gt215_clk_priv *priv, int clk)
|
||||
read_vco(struct gt215_clk *clk, int idx)
|
||||
{
|
||||
u32 sctl = nv_rd32(priv, 0x4120 + (clk * 4));
|
||||
u32 sctl = nv_rd32(clk, 0x4120 + (idx * 4));
|
||||
|
||||
switch (sctl & 0x00000030) {
|
||||
case 0x00000000:
|
||||
return nv_device(priv)->crystal;
|
||||
return nv_device(clk)->crystal;
|
||||
case 0x00000020:
|
||||
return read_pll(priv, 0x41, 0x00e820);
|
||||
return read_pll(clk, 0x41, 0x00e820);
|
||||
case 0x00000030:
|
||||
return read_pll(priv, 0x42, 0x00e8a0);
|
||||
return read_pll(clk, 0x42, 0x00e8a0);
|
||||
default:
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
static u32
|
||||
read_clk(struct gt215_clk_priv *priv, int clk, bool ignore_en)
|
||||
read_clk(struct gt215_clk *clk, int idx, bool ignore_en)
|
||||
{
|
||||
u32 sctl, sdiv, sclk;
|
||||
|
||||
/* refclk for the 0xe8xx plls is a fixed frequency */
|
||||
if (clk >= 0x40) {
|
||||
if (nv_device(priv)->chipset == 0xaf) {
|
||||
if (idx >= 0x40) {
|
||||
if (nv_device(clk)->chipset == 0xaf) {
|
||||
/* no joke.. seriously.. sigh.. */
|
||||
return nv_rd32(priv, 0x00471c) * 1000;
|
||||
return nv_rd32(clk, 0x00471c) * 1000;
|
||||
}
|
||||
|
||||
return nv_device(priv)->crystal;
|
||||
return nv_device(clk)->crystal;
|
||||
}
|
||||
|
||||
sctl = nv_rd32(priv, 0x4120 + (clk * 4));
|
||||
sctl = nv_rd32(clk, 0x4120 + (idx * 4));
|
||||
if (!ignore_en && !(sctl & 0x00000100))
|
||||
return 0;
|
||||
|
||||
@ -82,7 +82,7 @@ read_clk(struct gt215_clk_priv *priv, int clk, bool ignore_en)
|
||||
switch (sctl & 0x00003000) {
|
||||
case 0x00000000:
|
||||
if (!(sctl & 0x00000200))
|
||||
return nv_device(priv)->crystal;
|
||||
return nv_device(clk)->crystal;
|
||||
return 0;
|
||||
case 0x00002000:
|
||||
if (sctl & 0x00000040)
|
||||
@ -93,7 +93,7 @@ read_clk(struct gt215_clk_priv *priv, int clk, bool ignore_en)
|
||||
if (!(sctl & 0x00000001))
|
||||
return 0;
|
||||
|
||||
sclk = read_vco(priv, clk);
|
||||
sclk = read_vco(clk, idx);
|
||||
sdiv = ((sctl & 0x003f0000) >> 16) + 2;
|
||||
return (sclk * 2) / sdiv;
|
||||
default:
|
||||
@ -102,14 +102,14 @@ read_clk(struct gt215_clk_priv *priv, int clk, bool ignore_en)
|
||||
}
|
||||
|
||||
static u32
|
||||
read_pll(struct gt215_clk_priv *priv, int clk, u32 pll)
|
||||
read_pll(struct gt215_clk *clk, int idx, u32 pll)
|
||||
{
|
||||
u32 ctrl = nv_rd32(priv, pll + 0);
|
||||
u32 ctrl = nv_rd32(clk, pll + 0);
|
||||
u32 sclk = 0, P = 1, N = 1, M = 1;
|
||||
|
||||
if (!(ctrl & 0x00000008)) {
|
||||
if (ctrl & 0x00000001) {
|
||||
u32 coef = nv_rd32(priv, pll + 4);
|
||||
u32 coef = nv_rd32(clk, pll + 4);
|
||||
M = (coef & 0x000000ff) >> 0;
|
||||
N = (coef & 0x0000ff00) >> 8;
|
||||
P = (coef & 0x003f0000) >> 16;
|
||||
@ -120,10 +120,10 @@ read_pll(struct gt215_clk_priv *priv, int clk, u32 pll)
|
||||
if ((pll & 0x00ff00) == 0x00e800)
|
||||
P = 1;
|
||||
|
||||
sclk = read_clk(priv, 0x00 + clk, false);
|
||||
sclk = read_clk(clk, 0x00 + idx, false);
|
||||
}
|
||||
} else {
|
||||
sclk = read_clk(priv, 0x10 + clk, false);
|
||||
sclk = read_clk(clk, 0x10 + idx, false);
|
||||
}
|
||||
|
||||
if (M * P)
|
||||
@ -133,32 +133,32 @@ read_pll(struct gt215_clk_priv *priv, int clk, u32 pll)
|
||||
}
|
||||
|
||||
static int
|
||||
gt215_clk_read(struct nvkm_clk *clk, enum nv_clk_src src)
|
||||
gt215_clk_read(struct nvkm_clk *obj, enum nv_clk_src src)
|
||||
{
|
||||
struct gt215_clk_priv *priv = (void *)clk;
|
||||
struct gt215_clk *clk = container_of(obj, typeof(*clk), base);
|
||||
u32 hsrc;
|
||||
|
||||
switch (src) {
|
||||
case nv_clk_src_crystal:
|
||||
return nv_device(priv)->crystal;
|
||||
return nv_device(clk)->crystal;
|
||||
case nv_clk_src_core:
|
||||
case nv_clk_src_core_intm:
|
||||
return read_pll(priv, 0x00, 0x4200);
|
||||
return read_pll(clk, 0x00, 0x4200);
|
||||
case nv_clk_src_shader:
|
||||
return read_pll(priv, 0x01, 0x4220);
|
||||
return read_pll(clk, 0x01, 0x4220);
|
||||
case nv_clk_src_mem:
|
||||
return read_pll(priv, 0x02, 0x4000);
|
||||
return read_pll(clk, 0x02, 0x4000);
|
||||
case nv_clk_src_disp:
|
||||
return read_clk(priv, 0x20, false);
|
||||
return read_clk(clk, 0x20, false);
|
||||
case nv_clk_src_vdec:
|
||||
return read_clk(priv, 0x21, false);
|
||||
return read_clk(clk, 0x21, false);
|
||||
case nv_clk_src_daemon:
|
||||
return read_clk(priv, 0x25, false);
|
||||
return read_clk(clk, 0x25, false);
|
||||
case nv_clk_src_host:
|
||||
hsrc = (nv_rd32(priv, 0xc040) & 0x30000000) >> 28;
|
||||
hsrc = (nv_rd32(clk, 0xc040) & 0x30000000) >> 28;
|
||||
switch (hsrc) {
|
||||
case 0:
|
||||
return read_clk(priv, 0x1d, false);
|
||||
return read_clk(clk, 0x1d, false);
|
||||
case 2:
|
||||
case 3:
|
||||
return 277000;
|
||||
@ -175,10 +175,10 @@ gt215_clk_read(struct nvkm_clk *clk, enum nv_clk_src src)
|
||||
}
|
||||
|
||||
int
|
||||
gt215_clk_info(struct nvkm_clk *clock, int clk, u32 khz,
|
||||
gt215_clk_info(struct nvkm_clk *obj, int idx, u32 khz,
|
||||
struct gt215_clk_info *info)
|
||||
{
|
||||
struct gt215_clk_priv *priv = (void *)clock;
|
||||
struct gt215_clk *clk = container_of(obj, typeof(*clk), base);
|
||||
u32 oclk, sclk, sdiv;
|
||||
s32 diff;
|
||||
|
||||
@ -195,7 +195,7 @@ gt215_clk_info(struct nvkm_clk *clock, int clk, u32 khz,
|
||||
info->clk = 0x00002140;
|
||||
return khz;
|
||||
default:
|
||||
sclk = read_vco(priv, clk);
|
||||
sclk = read_vco(clk, idx);
|
||||
sdiv = min((sclk * 2) / khz, (u32)65);
|
||||
oclk = (sclk * 2) / sdiv;
|
||||
diff = ((khz + 3000) - oclk);
|
||||
@ -223,11 +223,11 @@ gt215_clk_info(struct nvkm_clk *clock, int clk, u32 khz,
|
||||
}
|
||||
|
||||
int
|
||||
gt215_pll_info(struct nvkm_clk *clock, int clk, u32 pll, u32 khz,
|
||||
gt215_pll_info(struct nvkm_clk *clock, int idx, u32 pll, u32 khz,
|
||||
struct gt215_clk_info *info)
|
||||
{
|
||||
struct nvkm_bios *bios = nvkm_bios(clock);
|
||||
struct gt215_clk_priv *priv = (void *)clock;
|
||||
struct gt215_clk *clk = (void *)clock;
|
||||
struct nvbios_pll limits;
|
||||
int P, N, M, diff;
|
||||
int ret;
|
||||
@ -236,7 +236,7 @@ gt215_pll_info(struct nvkm_clk *clock, int clk, u32 pll, u32 khz,
|
||||
|
||||
/* If we can get a within [-2, 3) MHz of a divider, we'll disable the
|
||||
* PLL and use the divider instead. */
|
||||
ret = gt215_clk_info(clock, clk, khz, info);
|
||||
ret = gt215_clk_info(clock, idx, khz, info);
|
||||
diff = khz - ret;
|
||||
if (!pll || (diff >= -2000 && diff < 3000)) {
|
||||
goto out;
|
||||
@ -247,11 +247,11 @@ gt215_pll_info(struct nvkm_clk *clock, int clk, u32 pll, u32 khz,
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = gt215_clk_info(clock, clk - 0x10, limits.refclk, info);
|
||||
ret = gt215_clk_info(clock, idx - 0x10, limits.refclk, info);
|
||||
if (ret != limits.refclk)
|
||||
return -EINVAL;
|
||||
|
||||
ret = gt215_pll_calc(nv_subdev(priv), &limits, khz, &N, NULL, &M, &P);
|
||||
ret = gt215_pll_calc(nv_subdev(clk), &limits, khz, &N, NULL, &M, &P);
|
||||
if (ret >= 0) {
|
||||
info->pll = (P << 16) | (N << 8) | M;
|
||||
}
|
||||
@ -262,22 +262,22 @@ out:
|
||||
}
|
||||
|
||||
static int
|
||||
calc_clk(struct gt215_clk_priv *priv, struct nvkm_cstate *cstate,
|
||||
int clk, u32 pll, int idx)
|
||||
calc_clk(struct gt215_clk *clk, struct nvkm_cstate *cstate,
|
||||
int idx, u32 pll, int dom)
|
||||
{
|
||||
int ret = gt215_pll_info(&priv->base, clk, pll, cstate->domain[idx],
|
||||
&priv->eng[idx]);
|
||||
int ret = gt215_pll_info(&clk->base, idx, pll, cstate->domain[dom],
|
||||
&clk->eng[dom]);
|
||||
if (ret >= 0)
|
||||
return 0;
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int
|
||||
calc_host(struct gt215_clk_priv *priv, struct nvkm_cstate *cstate)
|
||||
calc_host(struct gt215_clk *clk, struct nvkm_cstate *cstate)
|
||||
{
|
||||
int ret = 0;
|
||||
u32 kHz = cstate->domain[nv_clk_src_host];
|
||||
struct gt215_clk_info *info = &priv->eng[nv_clk_src_host];
|
||||
struct gt215_clk_info *info = &clk->eng[nv_clk_src_host];
|
||||
|
||||
if (kHz == 277000) {
|
||||
info->clk = 0;
|
||||
@ -287,7 +287,7 @@ calc_host(struct gt215_clk_priv *priv, struct nvkm_cstate *cstate)
|
||||
|
||||
info->host_out = NVA3_HOST_CLK;
|
||||
|
||||
ret = gt215_clk_info(&priv->base, 0x1d, kHz, info);
|
||||
ret = gt215_clk_info(&clk->base, 0x1d, kHz, info);
|
||||
if (ret >= 0)
|
||||
return 0;
|
||||
|
||||
@ -330,76 +330,76 @@ gt215_clk_post(struct nvkm_clk *clk, unsigned long *flags)
|
||||
}
|
||||
|
||||
static void
|
||||
disable_clk_src(struct gt215_clk_priv *priv, u32 src)
|
||||
disable_clk_src(struct gt215_clk *clk, u32 src)
|
||||
{
|
||||
nv_mask(priv, src, 0x00000100, 0x00000000);
|
||||
nv_mask(priv, src, 0x00000001, 0x00000000);
|
||||
nv_mask(clk, src, 0x00000100, 0x00000000);
|
||||
nv_mask(clk, src, 0x00000001, 0x00000000);
|
||||
}
|
||||
|
||||
static void
|
||||
prog_pll(struct gt215_clk_priv *priv, int clk, u32 pll, int idx)
|
||||
prog_pll(struct gt215_clk *clk, int idx, u32 pll, int dom)
|
||||
{
|
||||
struct gt215_clk_info *info = &priv->eng[idx];
|
||||
const u32 src0 = 0x004120 + (clk * 4);
|
||||
const u32 src1 = 0x004160 + (clk * 4);
|
||||
struct gt215_clk_info *info = &clk->eng[dom];
|
||||
const u32 src0 = 0x004120 + (idx * 4);
|
||||
const u32 src1 = 0x004160 + (idx * 4);
|
||||
const u32 ctrl = pll + 0;
|
||||
const u32 coef = pll + 4;
|
||||
u32 bypass;
|
||||
|
||||
if (info->pll) {
|
||||
/* Always start from a non-PLL clock */
|
||||
bypass = nv_rd32(priv, ctrl) & 0x00000008;
|
||||
bypass = nv_rd32(clk, ctrl) & 0x00000008;
|
||||
if (!bypass) {
|
||||
nv_mask(priv, src1, 0x00000101, 0x00000101);
|
||||
nv_mask(priv, ctrl, 0x00000008, 0x00000008);
|
||||
nv_mask(clk, src1, 0x00000101, 0x00000101);
|
||||
nv_mask(clk, ctrl, 0x00000008, 0x00000008);
|
||||
udelay(20);
|
||||
}
|
||||
|
||||
nv_mask(priv, src0, 0x003f3141, 0x00000101 | info->clk);
|
||||
nv_wr32(priv, coef, info->pll);
|
||||
nv_mask(priv, ctrl, 0x00000015, 0x00000015);
|
||||
nv_mask(priv, ctrl, 0x00000010, 0x00000000);
|
||||
if (!nv_wait(priv, ctrl, 0x00020000, 0x00020000)) {
|
||||
nv_mask(priv, ctrl, 0x00000010, 0x00000010);
|
||||
nv_mask(priv, src0, 0x00000101, 0x00000000);
|
||||
nv_mask(clk, src0, 0x003f3141, 0x00000101 | info->clk);
|
||||
nv_wr32(clk, coef, info->pll);
|
||||
nv_mask(clk, ctrl, 0x00000015, 0x00000015);
|
||||
nv_mask(clk, ctrl, 0x00000010, 0x00000000);
|
||||
if (!nv_wait(clk, ctrl, 0x00020000, 0x00020000)) {
|
||||
nv_mask(clk, ctrl, 0x00000010, 0x00000010);
|
||||
nv_mask(clk, src0, 0x00000101, 0x00000000);
|
||||
return;
|
||||
}
|
||||
nv_mask(priv, ctrl, 0x00000010, 0x00000010);
|
||||
nv_mask(priv, ctrl, 0x00000008, 0x00000000);
|
||||
disable_clk_src(priv, src1);
|
||||
nv_mask(clk, ctrl, 0x00000010, 0x00000010);
|
||||
nv_mask(clk, ctrl, 0x00000008, 0x00000000);
|
||||
disable_clk_src(clk, src1);
|
||||
} else {
|
||||
nv_mask(priv, src1, 0x003f3141, 0x00000101 | info->clk);
|
||||
nv_mask(priv, ctrl, 0x00000018, 0x00000018);
|
||||
nv_mask(clk, src1, 0x003f3141, 0x00000101 | info->clk);
|
||||
nv_mask(clk, ctrl, 0x00000018, 0x00000018);
|
||||
udelay(20);
|
||||
nv_mask(priv, ctrl, 0x00000001, 0x00000000);
|
||||
disable_clk_src(priv, src0);
|
||||
nv_mask(clk, ctrl, 0x00000001, 0x00000000);
|
||||
disable_clk_src(clk, src0);
|
||||
}
|
||||
}
|
||||
|
||||
static void
|
||||
prog_clk(struct gt215_clk_priv *priv, int clk, int idx)
|
||||
prog_clk(struct gt215_clk *clk, int idx, int dom)
|
||||
{
|
||||
struct gt215_clk_info *info = &priv->eng[idx];
|
||||
nv_mask(priv, 0x004120 + (clk * 4), 0x003f3141, 0x00000101 | info->clk);
|
||||
struct gt215_clk_info *info = &clk->eng[dom];
|
||||
nv_mask(clk, 0x004120 + (idx * 4), 0x003f3141, 0x00000101 | info->clk);
|
||||
}
|
||||
|
||||
static void
|
||||
prog_host(struct gt215_clk_priv *priv)
|
||||
prog_host(struct gt215_clk *clk)
|
||||
{
|
||||
struct gt215_clk_info *info = &priv->eng[nv_clk_src_host];
|
||||
u32 hsrc = (nv_rd32(priv, 0xc040));
|
||||
struct gt215_clk_info *info = &clk->eng[nv_clk_src_host];
|
||||
u32 hsrc = (nv_rd32(clk, 0xc040));
|
||||
|
||||
switch (info->host_out) {
|
||||
case NVA3_HOST_277:
|
||||
if ((hsrc & 0x30000000) == 0) {
|
||||
nv_wr32(priv, 0xc040, hsrc | 0x20000000);
|
||||
disable_clk_src(priv, 0x4194);
|
||||
nv_wr32(clk, 0xc040, hsrc | 0x20000000);
|
||||
disable_clk_src(clk, 0x4194);
|
||||
}
|
||||
break;
|
||||
case NVA3_HOST_CLK:
|
||||
prog_clk(priv, 0x1d, nv_clk_src_host);
|
||||
prog_clk(clk, 0x1d, nv_clk_src_host);
|
||||
if ((hsrc & 0x30000000) >= 0x20000000) {
|
||||
nv_wr32(priv, 0xc040, hsrc & ~0x30000000);
|
||||
nv_wr32(clk, 0xc040, hsrc & ~0x30000000);
|
||||
}
|
||||
break;
|
||||
default:
|
||||
@ -407,44 +407,44 @@ prog_host(struct gt215_clk_priv *priv)
|
||||
}
|
||||
|
||||
/* This seems to be a clock gating factor on idle, always set to 64 */
|
||||
nv_wr32(priv, 0xc044, 0x3e);
|
||||
nv_wr32(clk, 0xc044, 0x3e);
|
||||
}
|
||||
|
||||
static void
|
||||
prog_core(struct gt215_clk_priv *priv, int idx)
|
||||
prog_core(struct gt215_clk *clk, int dom)
|
||||
{
|
||||
struct gt215_clk_info *info = &priv->eng[idx];
|
||||
u32 fb_delay = nv_rd32(priv, 0x10002c);
|
||||
struct gt215_clk_info *info = &clk->eng[dom];
|
||||
u32 fb_delay = nv_rd32(clk, 0x10002c);
|
||||
|
||||
if (fb_delay < info->fb_delay)
|
||||
nv_wr32(priv, 0x10002c, info->fb_delay);
|
||||
nv_wr32(clk, 0x10002c, info->fb_delay);
|
||||
|
||||
prog_pll(priv, 0x00, 0x004200, idx);
|
||||
prog_pll(clk, 0x00, 0x004200, dom);
|
||||
|
||||
if (fb_delay > info->fb_delay)
|
||||
nv_wr32(priv, 0x10002c, info->fb_delay);
|
||||
nv_wr32(clk, 0x10002c, info->fb_delay);
|
||||
}
|
||||
|
||||
static int
|
||||
gt215_clk_calc(struct nvkm_clk *clk, struct nvkm_cstate *cstate)
|
||||
gt215_clk_calc(struct nvkm_clk *obj, struct nvkm_cstate *cstate)
|
||||
{
|
||||
struct gt215_clk_priv *priv = (void *)clk;
|
||||
struct gt215_clk_info *core = &priv->eng[nv_clk_src_core];
|
||||
struct gt215_clk *clk = container_of(obj, typeof(*clk), base);
|
||||
struct gt215_clk_info *core = &clk->eng[nv_clk_src_core];
|
||||
int ret;
|
||||
|
||||
if ((ret = calc_clk(priv, cstate, 0x10, 0x4200, nv_clk_src_core)) ||
|
||||
(ret = calc_clk(priv, cstate, 0x11, 0x4220, nv_clk_src_shader)) ||
|
||||
(ret = calc_clk(priv, cstate, 0x20, 0x0000, nv_clk_src_disp)) ||
|
||||
(ret = calc_clk(priv, cstate, 0x21, 0x0000, nv_clk_src_vdec)) ||
|
||||
(ret = calc_host(priv, cstate)))
|
||||
if ((ret = calc_clk(clk, cstate, 0x10, 0x4200, nv_clk_src_core)) ||
|
||||
(ret = calc_clk(clk, cstate, 0x11, 0x4220, nv_clk_src_shader)) ||
|
||||
(ret = calc_clk(clk, cstate, 0x20, 0x0000, nv_clk_src_disp)) ||
|
||||
(ret = calc_clk(clk, cstate, 0x21, 0x0000, nv_clk_src_vdec)) ||
|
||||
(ret = calc_host(clk, cstate)))
|
||||
return ret;
|
||||
|
||||
/* XXX: Should be reading the highest bit in the VBIOS clock to decide
|
||||
* whether to use a PLL or not... but using a PLL defeats the purpose */
|
||||
if (core->pll) {
|
||||
ret = gt215_clk_info(clk, 0x10,
|
||||
ret = gt215_clk_info(&clk->base, 0x10,
|
||||
cstate->domain[nv_clk_src_core_intm],
|
||||
&priv->eng[nv_clk_src_core_intm]);
|
||||
&clk->eng[nv_clk_src_core_intm]);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
}
|
||||
@ -453,37 +453,37 @@ gt215_clk_calc(struct nvkm_clk *clk, struct nvkm_cstate *cstate)
|
||||
}
|
||||
|
||||
static int
|
||||
gt215_clk_prog(struct nvkm_clk *clk)
|
||||
gt215_clk_prog(struct nvkm_clk *obj)
|
||||
{
|
||||
struct gt215_clk_priv *priv = (void *)clk;
|
||||
struct gt215_clk_info *core = &priv->eng[nv_clk_src_core];
|
||||
struct gt215_clk *clk = container_of(obj, typeof(*clk), base);
|
||||
struct gt215_clk_info *core = &clk->eng[nv_clk_src_core];
|
||||
int ret = 0;
|
||||
unsigned long flags;
|
||||
unsigned long *f = &flags;
|
||||
|
||||
ret = gt215_clk_pre(clk, f);
|
||||
ret = gt215_clk_pre(&clk->base, f);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
if (core->pll)
|
||||
prog_core(priv, nv_clk_src_core_intm);
|
||||
prog_core(clk, nv_clk_src_core_intm);
|
||||
|
||||
prog_core(priv, nv_clk_src_core);
|
||||
prog_pll(priv, 0x01, 0x004220, nv_clk_src_shader);
|
||||
prog_clk(priv, 0x20, nv_clk_src_disp);
|
||||
prog_clk(priv, 0x21, nv_clk_src_vdec);
|
||||
prog_host(priv);
|
||||
prog_core(clk, nv_clk_src_core);
|
||||
prog_pll(clk, 0x01, 0x004220, nv_clk_src_shader);
|
||||
prog_clk(clk, 0x20, nv_clk_src_disp);
|
||||
prog_clk(clk, 0x21, nv_clk_src_vdec);
|
||||
prog_host(clk);
|
||||
|
||||
out:
|
||||
if (ret == -EBUSY)
|
||||
f = NULL;
|
||||
|
||||
gt215_clk_post(clk, f);
|
||||
gt215_clk_post(&clk->base, f);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void
|
||||
gt215_clk_tidy(struct nvkm_clk *clk)
|
||||
gt215_clk_tidy(struct nvkm_clk *obj)
|
||||
{
|
||||
}
|
||||
|
||||
@ -505,19 +505,19 @@ gt215_clk_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
|
||||
struct nvkm_oclass *oclass, void *data, u32 size,
|
||||
struct nvkm_object **pobject)
|
||||
{
|
||||
struct gt215_clk_priv *priv;
|
||||
struct gt215_clk *clk;
|
||||
int ret;
|
||||
|
||||
ret = nvkm_clk_create(parent, engine, oclass, gt215_domain,
|
||||
NULL, 0, true, &priv);
|
||||
*pobject = nv_object(priv);
|
||||
NULL, 0, true, &clk);
|
||||
*pobject = nv_object(clk);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
priv->base.read = gt215_clk_read;
|
||||
priv->base.calc = gt215_clk_calc;
|
||||
priv->base.prog = gt215_clk_prog;
|
||||
priv->base.tidy = gt215_clk_tidy;
|
||||
clk->base.read = gt215_clk_read;
|
||||
clk->base.calc = gt215_clk_calc;
|
||||
clk->base.prog = gt215_clk_prog;
|
||||
clk->base.tidy = gt215_clk_tidy;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -13,6 +13,6 @@ struct gt215_clk_info {
|
||||
};
|
||||
|
||||
int gt215_pll_info(struct nvkm_clk *, int, u32, u32, struct gt215_clk_info *);
|
||||
int gt215_clk_pre(struct nvkm_clk *clk, unsigned long *flags);
|
||||
void gt215_clk_post(struct nvkm_clk *clk, unsigned long *flags);
|
||||
int gt215_clk_pre(struct nvkm_clk *, unsigned long *flags);
|
||||
void gt215_clk_post(struct nvkm_clk *, unsigned long *flags);
|
||||
#endif
|
||||
|
@ -28,7 +28,7 @@
|
||||
#include <subdev/bios/pll.h>
|
||||
#include <subdev/timer.h>
|
||||
|
||||
struct mcp77_clk_priv {
|
||||
struct mcp77_clk {
|
||||
struct nvkm_clk base;
|
||||
enum nv_clk_src csrc, ssrc, vsrc;
|
||||
u32 cctrl, sctrl;
|
||||
@ -38,17 +38,17 @@ struct mcp77_clk_priv {
|
||||
};
|
||||
|
||||
static u32
|
||||
read_div(struct nvkm_clk *clk)
|
||||
read_div(struct mcp77_clk *clk)
|
||||
{
|
||||
return nv_rd32(clk, 0x004600);
|
||||
}
|
||||
|
||||
static u32
|
||||
read_pll(struct nvkm_clk *clk, u32 base)
|
||||
read_pll(struct mcp77_clk *clk, u32 base)
|
||||
{
|
||||
u32 ctrl = nv_rd32(clk, base + 0);
|
||||
u32 coef = nv_rd32(clk, base + 4);
|
||||
u32 ref = clk->read(clk, nv_clk_src_href);
|
||||
u32 ref = clk->base.read(&clk->base, nv_clk_src_href);
|
||||
u32 post_div = 0;
|
||||
u32 clock = 0;
|
||||
int N1, M1;
|
||||
@ -75,50 +75,50 @@ read_pll(struct nvkm_clk *clk, u32 base)
|
||||
}
|
||||
|
||||
static int
|
||||
mcp77_clk_read(struct nvkm_clk *clk, enum nv_clk_src src)
|
||||
mcp77_clk_read(struct nvkm_clk *obj, enum nv_clk_src src)
|
||||
{
|
||||
struct mcp77_clk_priv *priv = (void *)clk;
|
||||
struct mcp77_clk *clk = container_of(obj, typeof(*clk), base);
|
||||
u32 mast = nv_rd32(clk, 0x00c054);
|
||||
u32 P = 0;
|
||||
|
||||
switch (src) {
|
||||
case nv_clk_src_crystal:
|
||||
return nv_device(priv)->crystal;
|
||||
return nv_device(clk)->crystal;
|
||||
case nv_clk_src_href:
|
||||
return 100000; /* PCIE reference clock */
|
||||
case nv_clk_src_hclkm4:
|
||||
return clk->read(clk, nv_clk_src_href) * 4;
|
||||
return clk->base.read(&clk->base, nv_clk_src_href) * 4;
|
||||
case nv_clk_src_hclkm2d3:
|
||||
return clk->read(clk, nv_clk_src_href) * 2 / 3;
|
||||
return clk->base.read(&clk->base, nv_clk_src_href) * 2 / 3;
|
||||
case nv_clk_src_host:
|
||||
switch (mast & 0x000c0000) {
|
||||
case 0x00000000: return clk->read(clk, nv_clk_src_hclkm2d3);
|
||||
case 0x00000000: return clk->base.read(&clk->base, nv_clk_src_hclkm2d3);
|
||||
case 0x00040000: break;
|
||||
case 0x00080000: return clk->read(clk, nv_clk_src_hclkm4);
|
||||
case 0x000c0000: return clk->read(clk, nv_clk_src_cclk);
|
||||
case 0x00080000: return clk->base.read(&clk->base, nv_clk_src_hclkm4);
|
||||
case 0x000c0000: return clk->base.read(&clk->base, nv_clk_src_cclk);
|
||||
}
|
||||
break;
|
||||
case nv_clk_src_core:
|
||||
P = (nv_rd32(clk, 0x004028) & 0x00070000) >> 16;
|
||||
|
||||
switch (mast & 0x00000003) {
|
||||
case 0x00000000: return clk->read(clk, nv_clk_src_crystal) >> P;
|
||||
case 0x00000000: return clk->base.read(&clk->base, nv_clk_src_crystal) >> P;
|
||||
case 0x00000001: return 0;
|
||||
case 0x00000002: return clk->read(clk, nv_clk_src_hclkm4) >> P;
|
||||
case 0x00000002: return clk->base.read(&clk->base, nv_clk_src_hclkm4) >> P;
|
||||
case 0x00000003: return read_pll(clk, 0x004028) >> P;
|
||||
}
|
||||
break;
|
||||
case nv_clk_src_cclk:
|
||||
if ((mast & 0x03000000) != 0x03000000)
|
||||
return clk->read(clk, nv_clk_src_core);
|
||||
return clk->base.read(&clk->base, nv_clk_src_core);
|
||||
|
||||
if ((mast & 0x00000200) == 0x00000000)
|
||||
return clk->read(clk, nv_clk_src_core);
|
||||
return clk->base.read(&clk->base, nv_clk_src_core);
|
||||
|
||||
switch (mast & 0x00000c00) {
|
||||
case 0x00000000: return clk->read(clk, nv_clk_src_href);
|
||||
case 0x00000400: return clk->read(clk, nv_clk_src_hclkm4);
|
||||
case 0x00000800: return clk->read(clk, nv_clk_src_hclkm2d3);
|
||||
case 0x00000000: return clk->base.read(&clk->base, nv_clk_src_href);
|
||||
case 0x00000400: return clk->base.read(&clk->base, nv_clk_src_hclkm4);
|
||||
case 0x00000800: return clk->base.read(&clk->base, nv_clk_src_hclkm2d3);
|
||||
default: return 0;
|
||||
}
|
||||
case nv_clk_src_shader:
|
||||
@ -126,8 +126,8 @@ mcp77_clk_read(struct nvkm_clk *clk, enum nv_clk_src src)
|
||||
switch (mast & 0x00000030) {
|
||||
case 0x00000000:
|
||||
if (mast & 0x00000040)
|
||||
return clk->read(clk, nv_clk_src_href) >> P;
|
||||
return clk->read(clk, nv_clk_src_crystal) >> P;
|
||||
return clk->base.read(&clk->base, nv_clk_src_href) >> P;
|
||||
return clk->base.read(&clk->base, nv_clk_src_crystal) >> P;
|
||||
case 0x00000010: break;
|
||||
case 0x00000020: return read_pll(clk, 0x004028) >> P;
|
||||
case 0x00000030: return read_pll(clk, 0x004020) >> P;
|
||||
@ -141,7 +141,7 @@ mcp77_clk_read(struct nvkm_clk *clk, enum nv_clk_src src)
|
||||
|
||||
switch (mast & 0x00400000) {
|
||||
case 0x00400000:
|
||||
return clk->read(clk, nv_clk_src_core) >> P;
|
||||
return clk->base.read(&clk->base, nv_clk_src_core) >> P;
|
||||
break;
|
||||
default:
|
||||
return 500000 >> P;
|
||||
@ -152,17 +152,16 @@ mcp77_clk_read(struct nvkm_clk *clk, enum nv_clk_src src)
|
||||
break;
|
||||
}
|
||||
|
||||
nv_debug(priv, "unknown clock source %d 0x%08x\n", src, mast);
|
||||
nv_debug(clk, "unknown clock source %d 0x%08x\n", src, mast);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static u32
|
||||
calc_pll(struct mcp77_clk_priv *priv, u32 reg,
|
||||
calc_pll(struct mcp77_clk *clk, u32 reg,
|
||||
u32 clock, int *N, int *M, int *P)
|
||||
{
|
||||
struct nvkm_bios *bios = nvkm_bios(priv);
|
||||
struct nvkm_bios *bios = nvkm_bios(clk);
|
||||
struct nvbios_pll pll;
|
||||
struct nvkm_clk *clk = &priv->base;
|
||||
int ret;
|
||||
|
||||
ret = nvbios_pll_parse(bios, reg, &pll);
|
||||
@ -170,11 +169,11 @@ calc_pll(struct mcp77_clk_priv *priv, u32 reg,
|
||||
return 0;
|
||||
|
||||
pll.vco2.max_freq = 0;
|
||||
pll.refclk = clk->read(clk, nv_clk_src_href);
|
||||
pll.refclk = clk->base.read(&clk->base, nv_clk_src_href);
|
||||
if (!pll.refclk)
|
||||
return 0;
|
||||
|
||||
return nv04_pll_calc(nv_subdev(priv), &pll, clock, N, M, NULL, NULL, P);
|
||||
return nv04_pll_calc(nv_subdev(clk), &pll, clock, N, M, NULL, NULL, P);
|
||||
}
|
||||
|
||||
static inline u32
|
||||
@ -196,9 +195,9 @@ calc_P(u32 src, u32 target, int *div)
|
||||
}
|
||||
|
||||
static int
|
||||
mcp77_clk_calc(struct nvkm_clk *clk, struct nvkm_cstate *cstate)
|
||||
mcp77_clk_calc(struct nvkm_clk *obj, struct nvkm_cstate *cstate)
|
||||
{
|
||||
struct mcp77_clk_priv *priv = (void *)clk;
|
||||
struct mcp77_clk *clk = container_of(obj, typeof(*clk), base);
|
||||
const int shader = cstate->domain[nv_clk_src_shader];
|
||||
const int core = cstate->domain[nv_clk_src_core];
|
||||
const int vdec = cstate->domain[nv_clk_src_vdec];
|
||||
@ -207,15 +206,15 @@ mcp77_clk_calc(struct nvkm_clk *clk, struct nvkm_cstate *cstate)
|
||||
int divs = 0;
|
||||
|
||||
/* cclk: find suitable source, disable PLL if we can */
|
||||
if (core < clk->read(clk, nv_clk_src_hclkm4))
|
||||
out = calc_P(clk->read(clk, nv_clk_src_hclkm4), core, &divs);
|
||||
if (core < clk->base.read(&clk->base, nv_clk_src_hclkm4))
|
||||
out = calc_P(clk->base.read(&clk->base, nv_clk_src_hclkm4), core, &divs);
|
||||
|
||||
/* Calculate clock * 2, so shader clock can use it too */
|
||||
clock = calc_pll(priv, 0x4028, (core << 1), &N, &M, &P1);
|
||||
clock = calc_pll(clk, 0x4028, (core << 1), &N, &M, &P1);
|
||||
|
||||
if (abs(core - out) <= abs(core - (clock >> 1))) {
|
||||
priv->csrc = nv_clk_src_hclkm4;
|
||||
priv->cctrl = divs << 16;
|
||||
clk->csrc = nv_clk_src_hclkm4;
|
||||
clk->cctrl = divs << 16;
|
||||
} else {
|
||||
/* NVCTRL is actually used _after_ NVPOST, and after what we
|
||||
* call NVPLL. To make matters worse, NVPOST is an integer
|
||||
@ -225,31 +224,31 @@ mcp77_clk_calc(struct nvkm_clk *clk, struct nvkm_cstate *cstate)
|
||||
P1 = 2;
|
||||
}
|
||||
|
||||
priv->csrc = nv_clk_src_core;
|
||||
priv->ccoef = (N << 8) | M;
|
||||
clk->csrc = nv_clk_src_core;
|
||||
clk->ccoef = (N << 8) | M;
|
||||
|
||||
priv->cctrl = (P2 + 1) << 16;
|
||||
priv->cpost = (1 << P1) << 16;
|
||||
clk->cctrl = (P2 + 1) << 16;
|
||||
clk->cpost = (1 << P1) << 16;
|
||||
}
|
||||
|
||||
/* sclk: nvpll + divisor, href or spll */
|
||||
out = 0;
|
||||
if (shader == clk->read(clk, nv_clk_src_href)) {
|
||||
priv->ssrc = nv_clk_src_href;
|
||||
if (shader == clk->base.read(&clk->base, nv_clk_src_href)) {
|
||||
clk->ssrc = nv_clk_src_href;
|
||||
} else {
|
||||
clock = calc_pll(priv, 0x4020, shader, &N, &M, &P1);
|
||||
if (priv->csrc == nv_clk_src_core)
|
||||
clock = calc_pll(clk, 0x4020, shader, &N, &M, &P1);
|
||||
if (clk->csrc == nv_clk_src_core)
|
||||
out = calc_P((core << 1), shader, &divs);
|
||||
|
||||
if (abs(shader - out) <=
|
||||
abs(shader - clock) &&
|
||||
(divs + P2) <= 7) {
|
||||
priv->ssrc = nv_clk_src_core;
|
||||
priv->sctrl = (divs + P2) << 16;
|
||||
clk->ssrc = nv_clk_src_core;
|
||||
clk->sctrl = (divs + P2) << 16;
|
||||
} else {
|
||||
priv->ssrc = nv_clk_src_shader;
|
||||
priv->scoef = (N << 8) | M;
|
||||
priv->sctrl = P1 << 16;
|
||||
clk->ssrc = nv_clk_src_shader;
|
||||
clk->scoef = (N << 8) | M;
|
||||
clk->sctrl = P1 << 16;
|
||||
}
|
||||
}
|
||||
|
||||
@ -257,49 +256,49 @@ mcp77_clk_calc(struct nvkm_clk *clk, struct nvkm_cstate *cstate)
|
||||
out = calc_P(core, vdec, &divs);
|
||||
clock = calc_P(500000, vdec, &P1);
|
||||
if(abs(vdec - out) <= abs(vdec - clock)) {
|
||||
priv->vsrc = nv_clk_src_cclk;
|
||||
priv->vdiv = divs << 16;
|
||||
clk->vsrc = nv_clk_src_cclk;
|
||||
clk->vdiv = divs << 16;
|
||||
} else {
|
||||
priv->vsrc = nv_clk_src_vdec;
|
||||
priv->vdiv = P1 << 16;
|
||||
clk->vsrc = nv_clk_src_vdec;
|
||||
clk->vdiv = P1 << 16;
|
||||
}
|
||||
|
||||
/* Print strategy! */
|
||||
nv_debug(priv, "nvpll: %08x %08x %08x\n",
|
||||
priv->ccoef, priv->cpost, priv->cctrl);
|
||||
nv_debug(priv, " spll: %08x %08x %08x\n",
|
||||
priv->scoef, priv->spost, priv->sctrl);
|
||||
nv_debug(priv, " vdiv: %08x\n", priv->vdiv);
|
||||
if (priv->csrc == nv_clk_src_hclkm4)
|
||||
nv_debug(priv, "core: hrefm4\n");
|
||||
nv_debug(clk, "nvpll: %08x %08x %08x\n",
|
||||
clk->ccoef, clk->cpost, clk->cctrl);
|
||||
nv_debug(clk, " spll: %08x %08x %08x\n",
|
||||
clk->scoef, clk->spost, clk->sctrl);
|
||||
nv_debug(clk, " vdiv: %08x\n", clk->vdiv);
|
||||
if (clk->csrc == nv_clk_src_hclkm4)
|
||||
nv_debug(clk, "core: hrefm4\n");
|
||||
else
|
||||
nv_debug(priv, "core: nvpll\n");
|
||||
nv_debug(clk, "core: nvpll\n");
|
||||
|
||||
if (priv->ssrc == nv_clk_src_hclkm4)
|
||||
nv_debug(priv, "shader: hrefm4\n");
|
||||
else if (priv->ssrc == nv_clk_src_core)
|
||||
nv_debug(priv, "shader: nvpll\n");
|
||||
if (clk->ssrc == nv_clk_src_hclkm4)
|
||||
nv_debug(clk, "shader: hrefm4\n");
|
||||
else if (clk->ssrc == nv_clk_src_core)
|
||||
nv_debug(clk, "shader: nvpll\n");
|
||||
else
|
||||
nv_debug(priv, "shader: spll\n");
|
||||
nv_debug(clk, "shader: spll\n");
|
||||
|
||||
if (priv->vsrc == nv_clk_src_hclkm4)
|
||||
nv_debug(priv, "vdec: 500MHz\n");
|
||||
if (clk->vsrc == nv_clk_src_hclkm4)
|
||||
nv_debug(clk, "vdec: 500MHz\n");
|
||||
else
|
||||
nv_debug(priv, "vdec: core\n");
|
||||
nv_debug(clk, "vdec: core\n");
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
mcp77_clk_prog(struct nvkm_clk *clk)
|
||||
mcp77_clk_prog(struct nvkm_clk *obj)
|
||||
{
|
||||
struct mcp77_clk_priv *priv = (void *)clk;
|
||||
struct mcp77_clk *clk = container_of(obj, typeof(*clk), base);
|
||||
u32 pllmask = 0, mast;
|
||||
unsigned long flags;
|
||||
unsigned long *f = &flags;
|
||||
int ret = 0;
|
||||
|
||||
ret = gt215_clk_pre(clk, f);
|
||||
ret = gt215_clk_pre(&clk->base, f);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
@ -308,66 +307,66 @@ mcp77_clk_prog(struct nvkm_clk *clk)
|
||||
mast &= ~0x00400e73;
|
||||
mast |= 0x03000000;
|
||||
|
||||
switch (priv->csrc) {
|
||||
switch (clk->csrc) {
|
||||
case nv_clk_src_hclkm4:
|
||||
nv_mask(clk, 0x4028, 0x00070000, priv->cctrl);
|
||||
nv_mask(clk, 0x4028, 0x00070000, clk->cctrl);
|
||||
mast |= 0x00000002;
|
||||
break;
|
||||
case nv_clk_src_core:
|
||||
nv_wr32(clk, 0x402c, priv->ccoef);
|
||||
nv_wr32(clk, 0x4028, 0x80000000 | priv->cctrl);
|
||||
nv_wr32(clk, 0x4040, priv->cpost);
|
||||
nv_wr32(clk, 0x402c, clk->ccoef);
|
||||
nv_wr32(clk, 0x4028, 0x80000000 | clk->cctrl);
|
||||
nv_wr32(clk, 0x4040, clk->cpost);
|
||||
pllmask |= (0x3 << 8);
|
||||
mast |= 0x00000003;
|
||||
break;
|
||||
default:
|
||||
nv_warn(priv,"Reclocking failed: unknown core clock\n");
|
||||
nv_warn(clk,"Reclocking failed: unknown core clock\n");
|
||||
goto resume;
|
||||
}
|
||||
|
||||
switch (priv->ssrc) {
|
||||
switch (clk->ssrc) {
|
||||
case nv_clk_src_href:
|
||||
nv_mask(clk, 0x4020, 0x00070000, 0x00000000);
|
||||
/* mast |= 0x00000000; */
|
||||
break;
|
||||
case nv_clk_src_core:
|
||||
nv_mask(clk, 0x4020, 0x00070000, priv->sctrl);
|
||||
nv_mask(clk, 0x4020, 0x00070000, clk->sctrl);
|
||||
mast |= 0x00000020;
|
||||
break;
|
||||
case nv_clk_src_shader:
|
||||
nv_wr32(clk, 0x4024, priv->scoef);
|
||||
nv_wr32(clk, 0x4020, 0x80000000 | priv->sctrl);
|
||||
nv_wr32(clk, 0x4070, priv->spost);
|
||||
nv_wr32(clk, 0x4024, clk->scoef);
|
||||
nv_wr32(clk, 0x4020, 0x80000000 | clk->sctrl);
|
||||
nv_wr32(clk, 0x4070, clk->spost);
|
||||
pllmask |= (0x3 << 12);
|
||||
mast |= 0x00000030;
|
||||
break;
|
||||
default:
|
||||
nv_warn(priv,"Reclocking failed: unknown sclk clock\n");
|
||||
nv_warn(clk,"Reclocking failed: unknown sclk clock\n");
|
||||
goto resume;
|
||||
}
|
||||
|
||||
if (!nv_wait(clk, 0x004080, pllmask, pllmask)) {
|
||||
nv_warn(priv,"Reclocking failed: unstable PLLs\n");
|
||||
nv_warn(clk,"Reclocking failed: unstable PLLs\n");
|
||||
goto resume;
|
||||
}
|
||||
|
||||
switch (priv->vsrc) {
|
||||
switch (clk->vsrc) {
|
||||
case nv_clk_src_cclk:
|
||||
mast |= 0x00400000;
|
||||
default:
|
||||
nv_wr32(clk, 0x4600, priv->vdiv);
|
||||
nv_wr32(clk, 0x4600, clk->vdiv);
|
||||
}
|
||||
|
||||
nv_wr32(clk, 0xc054, mast);
|
||||
|
||||
resume:
|
||||
/* Disable some PLLs and dividers when unused */
|
||||
if (priv->csrc != nv_clk_src_core) {
|
||||
if (clk->csrc != nv_clk_src_core) {
|
||||
nv_wr32(clk, 0x4040, 0x00000000);
|
||||
nv_mask(clk, 0x4028, 0x80000000, 0x00000000);
|
||||
}
|
||||
|
||||
if (priv->ssrc != nv_clk_src_shader) {
|
||||
if (clk->ssrc != nv_clk_src_shader) {
|
||||
nv_wr32(clk, 0x4070, 0x00000000);
|
||||
nv_mask(clk, 0x4020, 0x80000000, 0x00000000);
|
||||
}
|
||||
@ -376,12 +375,12 @@ out:
|
||||
if (ret == -EBUSY)
|
||||
f = NULL;
|
||||
|
||||
gt215_clk_post(clk, f);
|
||||
gt215_clk_post(&clk->base, f);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void
|
||||
mcp77_clk_tidy(struct nvkm_clk *clk)
|
||||
mcp77_clk_tidy(struct nvkm_clk *obj)
|
||||
{
|
||||
}
|
||||
|
||||
@ -400,19 +399,19 @@ mcp77_clk_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
|
||||
struct nvkm_oclass *oclass, void *data, u32 size,
|
||||
struct nvkm_object **pobject)
|
||||
{
|
||||
struct mcp77_clk_priv *priv;
|
||||
struct mcp77_clk *clk;
|
||||
int ret;
|
||||
|
||||
ret = nvkm_clk_create(parent, engine, oclass, mcp77_domains,
|
||||
NULL, 0, true, &priv);
|
||||
*pobject = nv_object(priv);
|
||||
NULL, 0, true, &clk);
|
||||
*pobject = nv_object(clk);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
priv->base.read = mcp77_clk_read;
|
||||
priv->base.calc = mcp77_clk_calc;
|
||||
priv->base.prog = mcp77_clk_prog;
|
||||
priv->base.tidy = mcp77_clk_tidy;
|
||||
clk->base.read = mcp77_clk_read;
|
||||
clk->base.calc = mcp77_clk_calc;
|
||||
clk->base.prog = mcp77_clk_prog;
|
||||
clk->base.tidy = mcp77_clk_tidy;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -28,10 +28,6 @@
|
||||
#include <subdev/bios/pll.h>
|
||||
#include <subdev/devinit/nv04.h>
|
||||
|
||||
struct nv04_clk_priv {
|
||||
struct nvkm_clk base;
|
||||
};
|
||||
|
||||
int
|
||||
nv04_clk_pll_calc(struct nvkm_clk *clock, struct nvbios_pll *info,
|
||||
int clk, struct nvkm_pll_vals *pv)
|
||||
@ -77,17 +73,17 @@ nv04_clk_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
|
||||
struct nvkm_oclass *oclass, void *data, u32 size,
|
||||
struct nvkm_object **pobject)
|
||||
{
|
||||
struct nv04_clk_priv *priv;
|
||||
struct nvkm_clk *clk;
|
||||
int ret;
|
||||
|
||||
ret = nvkm_clk_create(parent, engine, oclass, nv04_domain,
|
||||
NULL, 0, false, &priv);
|
||||
*pobject = nv_object(priv);
|
||||
NULL, 0, false, &clk);
|
||||
*pobject = nv_object(clk);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
priv->base.pll_calc = nv04_clk_pll_calc;
|
||||
priv->base.pll_prog = nv04_clk_pll_prog;
|
||||
clk->pll_calc = nv04_clk_pll_calc;
|
||||
clk->pll_prog = nv04_clk_pll_prog;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -27,7 +27,7 @@
|
||||
#include <subdev/bios.h>
|
||||
#include <subdev/bios/pll.h>
|
||||
|
||||
struct nv40_clk_priv {
|
||||
struct nv40_clk {
|
||||
struct nvkm_clk base;
|
||||
u32 ctrl;
|
||||
u32 npll_ctrl;
|
||||
@ -46,53 +46,53 @@ nv40_domain[] = {
|
||||
};
|
||||
|
||||
static u32
|
||||
read_pll_1(struct nv40_clk_priv *priv, u32 reg)
|
||||
read_pll_1(struct nv40_clk *clk, u32 reg)
|
||||
{
|
||||
u32 ctrl = nv_rd32(priv, reg + 0x00);
|
||||
u32 ctrl = nv_rd32(clk, reg + 0x00);
|
||||
int P = (ctrl & 0x00070000) >> 16;
|
||||
int N = (ctrl & 0x0000ff00) >> 8;
|
||||
int M = (ctrl & 0x000000ff) >> 0;
|
||||
u32 ref = 27000, clk = 0;
|
||||
u32 ref = 27000, khz = 0;
|
||||
|
||||
if (ctrl & 0x80000000)
|
||||
clk = ref * N / M;
|
||||
khz = ref * N / M;
|
||||
|
||||
return clk >> P;
|
||||
return khz >> P;
|
||||
}
|
||||
|
||||
static u32
|
||||
read_pll_2(struct nv40_clk_priv *priv, u32 reg)
|
||||
read_pll_2(struct nv40_clk *clk, u32 reg)
|
||||
{
|
||||
u32 ctrl = nv_rd32(priv, reg + 0x00);
|
||||
u32 coef = nv_rd32(priv, reg + 0x04);
|
||||
u32 ctrl = nv_rd32(clk, reg + 0x00);
|
||||
u32 coef = nv_rd32(clk, reg + 0x04);
|
||||
int N2 = (coef & 0xff000000) >> 24;
|
||||
int M2 = (coef & 0x00ff0000) >> 16;
|
||||
int N1 = (coef & 0x0000ff00) >> 8;
|
||||
int M1 = (coef & 0x000000ff) >> 0;
|
||||
int P = (ctrl & 0x00070000) >> 16;
|
||||
u32 ref = 27000, clk = 0;
|
||||
u32 ref = 27000, khz = 0;
|
||||
|
||||
if ((ctrl & 0x80000000) && M1) {
|
||||
clk = ref * N1 / M1;
|
||||
khz = ref * N1 / M1;
|
||||
if ((ctrl & 0x40000100) == 0x40000000) {
|
||||
if (M2)
|
||||
clk = clk * N2 / M2;
|
||||
khz = khz * N2 / M2;
|
||||
else
|
||||
clk = 0;
|
||||
khz = 0;
|
||||
}
|
||||
}
|
||||
|
||||
return clk >> P;
|
||||
return khz >> P;
|
||||
}
|
||||
|
||||
static u32
|
||||
read_clk(struct nv40_clk_priv *priv, u32 src)
|
||||
read_clk(struct nv40_clk *clk, u32 src)
|
||||
{
|
||||
switch (src) {
|
||||
case 3:
|
||||
return read_pll_2(priv, 0x004000);
|
||||
return read_pll_2(clk, 0x004000);
|
||||
case 2:
|
||||
return read_pll_1(priv, 0x004008);
|
||||
return read_pll_1(clk, 0x004008);
|
||||
default:
|
||||
break;
|
||||
}
|
||||
@ -101,35 +101,35 @@ read_clk(struct nv40_clk_priv *priv, u32 src)
|
||||
}
|
||||
|
||||
static int
|
||||
nv40_clk_read(struct nvkm_clk *clk, enum nv_clk_src src)
|
||||
nv40_clk_read(struct nvkm_clk *obj, enum nv_clk_src src)
|
||||
{
|
||||
struct nv40_clk_priv *priv = (void *)clk;
|
||||
u32 mast = nv_rd32(priv, 0x00c040);
|
||||
struct nv40_clk *clk = container_of(obj, typeof(*clk), base);
|
||||
u32 mast = nv_rd32(clk, 0x00c040);
|
||||
|
||||
switch (src) {
|
||||
case nv_clk_src_crystal:
|
||||
return nv_device(priv)->crystal;
|
||||
return nv_device(clk)->crystal;
|
||||
case nv_clk_src_href:
|
||||
return 100000; /*XXX: PCIE/AGP differ*/
|
||||
case nv_clk_src_core:
|
||||
return read_clk(priv, (mast & 0x00000003) >> 0);
|
||||
return read_clk(clk, (mast & 0x00000003) >> 0);
|
||||
case nv_clk_src_shader:
|
||||
return read_clk(priv, (mast & 0x00000030) >> 4);
|
||||
return read_clk(clk, (mast & 0x00000030) >> 4);
|
||||
case nv_clk_src_mem:
|
||||
return read_pll_2(priv, 0x4020);
|
||||
return read_pll_2(clk, 0x4020);
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
nv_debug(priv, "unknown clock source %d 0x%08x\n", src, mast);
|
||||
nv_debug(clk, "unknown clock source %d 0x%08x\n", src, mast);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
static int
|
||||
nv40_clk_calc_pll(struct nv40_clk_priv *priv, u32 reg, u32 clk,
|
||||
nv40_clk_calc_pll(struct nv40_clk *clk, u32 reg, u32 khz,
|
||||
int *N1, int *M1, int *N2, int *M2, int *log2P)
|
||||
{
|
||||
struct nvkm_bios *bios = nvkm_bios(priv);
|
||||
struct nvkm_bios *bios = nvkm_bios(clk);
|
||||
struct nvbios_pll pll;
|
||||
int ret;
|
||||
|
||||
@ -137,10 +137,10 @@ nv40_clk_calc_pll(struct nv40_clk_priv *priv, u32 reg, u32 clk,
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (clk < pll.vco1.max_freq)
|
||||
if (khz < pll.vco1.max_freq)
|
||||
pll.vco2.max_freq = 0;
|
||||
|
||||
ret = nv04_pll_calc(nv_subdev(priv), &pll, clk, N1, M1, N2, M2, log2P);
|
||||
ret = nv04_pll_calc(nv_subdev(clk), &pll, khz, N1, M1, N2, M2, log2P);
|
||||
if (ret == 0)
|
||||
return -ERANGE;
|
||||
|
||||
@ -148,60 +148,60 @@ nv40_clk_calc_pll(struct nv40_clk_priv *priv, u32 reg, u32 clk,
|
||||
}
|
||||
|
||||
static int
|
||||
nv40_clk_calc(struct nvkm_clk *clk, struct nvkm_cstate *cstate)
|
||||
nv40_clk_calc(struct nvkm_clk *obj, struct nvkm_cstate *cstate)
|
||||
{
|
||||
struct nv40_clk_priv *priv = (void *)clk;
|
||||
struct nv40_clk *clk = container_of(obj, typeof(*clk), base);
|
||||
int gclk = cstate->domain[nv_clk_src_core];
|
||||
int sclk = cstate->domain[nv_clk_src_shader];
|
||||
int N1, M1, N2, M2, log2P;
|
||||
int ret;
|
||||
|
||||
/* core/geometric clock */
|
||||
ret = nv40_clk_calc_pll(priv, 0x004000, gclk,
|
||||
ret = nv40_clk_calc_pll(clk, 0x004000, gclk,
|
||||
&N1, &M1, &N2, &M2, &log2P);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
if (N2 == M2) {
|
||||
priv->npll_ctrl = 0x80000100 | (log2P << 16);
|
||||
priv->npll_coef = (N1 << 8) | M1;
|
||||
clk->npll_ctrl = 0x80000100 | (log2P << 16);
|
||||
clk->npll_coef = (N1 << 8) | M1;
|
||||
} else {
|
||||
priv->npll_ctrl = 0xc0000000 | (log2P << 16);
|
||||
priv->npll_coef = (N2 << 24) | (M2 << 16) | (N1 << 8) | M1;
|
||||
clk->npll_ctrl = 0xc0000000 | (log2P << 16);
|
||||
clk->npll_coef = (N2 << 24) | (M2 << 16) | (N1 << 8) | M1;
|
||||
}
|
||||
|
||||
/* use the second pll for shader/rop clock, if it differs from core */
|
||||
if (sclk && sclk != gclk) {
|
||||
ret = nv40_clk_calc_pll(priv, 0x004008, sclk,
|
||||
ret = nv40_clk_calc_pll(clk, 0x004008, sclk,
|
||||
&N1, &M1, NULL, NULL, &log2P);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
priv->spll = 0xc0000000 | (log2P << 16) | (N1 << 8) | M1;
|
||||
priv->ctrl = 0x00000223;
|
||||
clk->spll = 0xc0000000 | (log2P << 16) | (N1 << 8) | M1;
|
||||
clk->ctrl = 0x00000223;
|
||||
} else {
|
||||
priv->spll = 0x00000000;
|
||||
priv->ctrl = 0x00000333;
|
||||
clk->spll = 0x00000000;
|
||||
clk->ctrl = 0x00000333;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
nv40_clk_prog(struct nvkm_clk *clk)
|
||||
nv40_clk_prog(struct nvkm_clk *obj)
|
||||
{
|
||||
struct nv40_clk_priv *priv = (void *)clk;
|
||||
nv_mask(priv, 0x00c040, 0x00000333, 0x00000000);
|
||||
nv_wr32(priv, 0x004004, priv->npll_coef);
|
||||
nv_mask(priv, 0x004000, 0xc0070100, priv->npll_ctrl);
|
||||
nv_mask(priv, 0x004008, 0xc007ffff, priv->spll);
|
||||
struct nv40_clk *clk = container_of(obj, typeof(*clk), base);
|
||||
nv_mask(clk, 0x00c040, 0x00000333, 0x00000000);
|
||||
nv_wr32(clk, 0x004004, clk->npll_coef);
|
||||
nv_mask(clk, 0x004000, 0xc0070100, clk->npll_ctrl);
|
||||
nv_mask(clk, 0x004008, 0xc007ffff, clk->spll);
|
||||
mdelay(5);
|
||||
nv_mask(priv, 0x00c040, 0x00000333, priv->ctrl);
|
||||
nv_mask(clk, 0x00c040, 0x00000333, clk->ctrl);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void
|
||||
nv40_clk_tidy(struct nvkm_clk *clk)
|
||||
nv40_clk_tidy(struct nvkm_clk *obj)
|
||||
{
|
||||
}
|
||||
|
||||
@ -210,21 +210,21 @@ nv40_clk_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
|
||||
struct nvkm_oclass *oclass, void *data, u32 size,
|
||||
struct nvkm_object **pobject)
|
||||
{
|
||||
struct nv40_clk_priv *priv;
|
||||
struct nv40_clk *clk;
|
||||
int ret;
|
||||
|
||||
ret = nvkm_clk_create(parent, engine, oclass, nv40_domain,
|
||||
NULL, 0, true, &priv);
|
||||
*pobject = nv_object(priv);
|
||||
NULL, 0, true, &clk);
|
||||
*pobject = nv_object(clk);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
priv->base.pll_calc = nv04_clk_pll_calc;
|
||||
priv->base.pll_prog = nv04_clk_pll_prog;
|
||||
priv->base.read = nv40_clk_read;
|
||||
priv->base.calc = nv40_clk_calc;
|
||||
priv->base.prog = nv40_clk_prog;
|
||||
priv->base.tidy = nv40_clk_tidy;
|
||||
clk->base.pll_calc = nv04_clk_pll_calc;
|
||||
clk->base.pll_prog = nv04_clk_pll_prog;
|
||||
clk->base.read = nv40_clk_read;
|
||||
clk->base.calc = nv40_clk_calc;
|
||||
clk->base.prog = nv40_clk_prog;
|
||||
clk->base.tidy = nv40_clk_tidy;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -29,33 +29,32 @@
|
||||
#include <subdev/bios/pll.h>
|
||||
|
||||
static u32
|
||||
read_div(struct nv50_clk_priv *priv)
|
||||
read_div(struct nv50_clk *clk)
|
||||
{
|
||||
switch (nv_device(priv)->chipset) {
|
||||
switch (nv_device(clk)->chipset) {
|
||||
case 0x50: /* it exists, but only has bit 31, not the dividers.. */
|
||||
case 0x84:
|
||||
case 0x86:
|
||||
case 0x98:
|
||||
case 0xa0:
|
||||
return nv_rd32(priv, 0x004700);
|
||||
return nv_rd32(clk, 0x004700);
|
||||
case 0x92:
|
||||
case 0x94:
|
||||
case 0x96:
|
||||
return nv_rd32(priv, 0x004800);
|
||||
return nv_rd32(clk, 0x004800);
|
||||
default:
|
||||
return 0x00000000;
|
||||
}
|
||||
}
|
||||
|
||||
static u32
|
||||
read_pll_src(struct nv50_clk_priv *priv, u32 base)
|
||||
read_pll_src(struct nv50_clk *clk, u32 base)
|
||||
{
|
||||
struct nvkm_clk *clk = &priv->base;
|
||||
u32 coef, ref = clk->read(clk, nv_clk_src_crystal);
|
||||
u32 rsel = nv_rd32(priv, 0x00e18c);
|
||||
u32 coef, ref = clk->base.read(&clk->base, nv_clk_src_crystal);
|
||||
u32 rsel = nv_rd32(clk, 0x00e18c);
|
||||
int P, N, M, id;
|
||||
|
||||
switch (nv_device(priv)->chipset) {
|
||||
switch (nv_device(clk)->chipset) {
|
||||
case 0x50:
|
||||
case 0xa0:
|
||||
switch (base) {
|
||||
@ -64,11 +63,11 @@ read_pll_src(struct nv50_clk_priv *priv, u32 base)
|
||||
case 0x4008: id = !!(rsel & 0x00000008); break;
|
||||
case 0x4030: id = 0; break;
|
||||
default:
|
||||
nv_error(priv, "ref: bad pll 0x%06x\n", base);
|
||||
nv_error(clk, "ref: bad pll 0x%06x\n", base);
|
||||
return 0;
|
||||
}
|
||||
|
||||
coef = nv_rd32(priv, 0x00e81c + (id * 0x0c));
|
||||
coef = nv_rd32(clk, 0x00e81c + (id * 0x0c));
|
||||
ref *= (coef & 0x01000000) ? 2 : 4;
|
||||
P = (coef & 0x00070000) >> 16;
|
||||
N = ((coef & 0x0000ff00) >> 8) + 1;
|
||||
@ -77,7 +76,7 @@ read_pll_src(struct nv50_clk_priv *priv, u32 base)
|
||||
case 0x84:
|
||||
case 0x86:
|
||||
case 0x92:
|
||||
coef = nv_rd32(priv, 0x00e81c);
|
||||
coef = nv_rd32(clk, 0x00e81c);
|
||||
P = (coef & 0x00070000) >> 16;
|
||||
N = (coef & 0x0000ff00) >> 8;
|
||||
M = (coef & 0x000000ff) >> 0;
|
||||
@ -85,26 +84,26 @@ read_pll_src(struct nv50_clk_priv *priv, u32 base)
|
||||
case 0x94:
|
||||
case 0x96:
|
||||
case 0x98:
|
||||
rsel = nv_rd32(priv, 0x00c050);
|
||||
rsel = nv_rd32(clk, 0x00c050);
|
||||
switch (base) {
|
||||
case 0x4020: rsel = (rsel & 0x00000003) >> 0; break;
|
||||
case 0x4008: rsel = (rsel & 0x0000000c) >> 2; break;
|
||||
case 0x4028: rsel = (rsel & 0x00001800) >> 11; break;
|
||||
case 0x4030: rsel = 3; break;
|
||||
default:
|
||||
nv_error(priv, "ref: bad pll 0x%06x\n", base);
|
||||
nv_error(clk, "ref: bad pll 0x%06x\n", base);
|
||||
return 0;
|
||||
}
|
||||
|
||||
switch (rsel) {
|
||||
case 0: id = 1; break;
|
||||
case 1: return clk->read(clk, nv_clk_src_crystal);
|
||||
case 2: return clk->read(clk, nv_clk_src_href);
|
||||
case 1: return clk->base.read(&clk->base, nv_clk_src_crystal);
|
||||
case 2: return clk->base.read(&clk->base, nv_clk_src_href);
|
||||
case 3: id = 0; break;
|
||||
}
|
||||
|
||||
coef = nv_rd32(priv, 0x00e81c + (id * 0x28));
|
||||
P = (nv_rd32(priv, 0x00e824 + (id * 0x28)) >> 16) & 7;
|
||||
coef = nv_rd32(clk, 0x00e81c + (id * 0x28));
|
||||
P = (nv_rd32(clk, 0x00e824 + (id * 0x28)) >> 16) & 7;
|
||||
P += (coef & 0x00070000) >> 16;
|
||||
N = (coef & 0x0000ff00) >> 8;
|
||||
M = (coef & 0x000000ff) >> 0;
|
||||
@ -120,10 +119,9 @@ read_pll_src(struct nv50_clk_priv *priv, u32 base)
|
||||
}
|
||||
|
||||
static u32
|
||||
read_pll_ref(struct nv50_clk_priv *priv, u32 base)
|
||||
read_pll_ref(struct nv50_clk *clk, u32 base)
|
||||
{
|
||||
struct nvkm_clk *clk = &priv->base;
|
||||
u32 src, mast = nv_rd32(priv, 0x00c040);
|
||||
u32 src, mast = nv_rd32(clk, 0x00c040);
|
||||
|
||||
switch (base) {
|
||||
case 0x004028:
|
||||
@ -139,33 +137,32 @@ read_pll_ref(struct nv50_clk_priv *priv, u32 base)
|
||||
src = !!(mast & 0x02000000);
|
||||
break;
|
||||
case 0x00e810:
|
||||
return clk->read(clk, nv_clk_src_crystal);
|
||||
return clk->base.read(&clk->base, nv_clk_src_crystal);
|
||||
default:
|
||||
nv_error(priv, "bad pll 0x%06x\n", base);
|
||||
nv_error(clk, "bad pll 0x%06x\n", base);
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (src)
|
||||
return clk->read(clk, nv_clk_src_href);
|
||||
return clk->base.read(&clk->base, nv_clk_src_href);
|
||||
|
||||
return read_pll_src(priv, base);
|
||||
return read_pll_src(clk, base);
|
||||
}
|
||||
|
||||
static u32
|
||||
read_pll(struct nv50_clk_priv *priv, u32 base)
|
||||
read_pll(struct nv50_clk *clk, u32 base)
|
||||
{
|
||||
struct nvkm_clk *clk = &priv->base;
|
||||
u32 mast = nv_rd32(priv, 0x00c040);
|
||||
u32 ctrl = nv_rd32(priv, base + 0);
|
||||
u32 coef = nv_rd32(priv, base + 4);
|
||||
u32 ref = read_pll_ref(priv, base);
|
||||
u32 mast = nv_rd32(clk, 0x00c040);
|
||||
u32 ctrl = nv_rd32(clk, base + 0);
|
||||
u32 coef = nv_rd32(clk, base + 4);
|
||||
u32 ref = read_pll_ref(clk, base);
|
||||
u32 freq = 0;
|
||||
int N1, N2, M1, M2;
|
||||
|
||||
if (base == 0x004028 && (mast & 0x00100000)) {
|
||||
/* wtf, appears to only disable post-divider on gt200 */
|
||||
if (nv_device(priv)->chipset != 0xa0)
|
||||
return clk->read(clk, nv_clk_src_dom6);
|
||||
if (nv_device(clk)->chipset != 0xa0)
|
||||
return clk->base.read(&clk->base, nv_clk_src_dom6);
|
||||
}
|
||||
|
||||
N2 = (coef & 0xff000000) >> 24;
|
||||
@ -186,70 +183,70 @@ read_pll(struct nv50_clk_priv *priv, u32 base)
|
||||
}
|
||||
|
||||
static int
|
||||
nv50_clk_read(struct nvkm_clk *clk, enum nv_clk_src src)
|
||||
nv50_clk_read(struct nvkm_clk *obj, enum nv_clk_src src)
|
||||
{
|
||||
struct nv50_clk_priv *priv = (void *)clk;
|
||||
u32 mast = nv_rd32(priv, 0x00c040);
|
||||
struct nv50_clk *clk = container_of(obj, typeof(*clk), base);
|
||||
u32 mast = nv_rd32(clk, 0x00c040);
|
||||
u32 P = 0;
|
||||
|
||||
switch (src) {
|
||||
case nv_clk_src_crystal:
|
||||
return nv_device(priv)->crystal;
|
||||
return nv_device(clk)->crystal;
|
||||
case nv_clk_src_href:
|
||||
return 100000; /* PCIE reference clock */
|
||||
case nv_clk_src_hclk:
|
||||
return div_u64((u64)clk->read(clk, nv_clk_src_href) * 27778, 10000);
|
||||
return div_u64((u64)clk->base.read(&clk->base, nv_clk_src_href) * 27778, 10000);
|
||||
case nv_clk_src_hclkm3:
|
||||
return clk->read(clk, nv_clk_src_hclk) * 3;
|
||||
return clk->base.read(&clk->base, nv_clk_src_hclk) * 3;
|
||||
case nv_clk_src_hclkm3d2:
|
||||
return clk->read(clk, nv_clk_src_hclk) * 3 / 2;
|
||||
return clk->base.read(&clk->base, nv_clk_src_hclk) * 3 / 2;
|
||||
case nv_clk_src_host:
|
||||
switch (mast & 0x30000000) {
|
||||
case 0x00000000: return clk->read(clk, nv_clk_src_href);
|
||||
case 0x00000000: return clk->base.read(&clk->base, nv_clk_src_href);
|
||||
case 0x10000000: break;
|
||||
case 0x20000000: /* !0x50 */
|
||||
case 0x30000000: return clk->read(clk, nv_clk_src_hclk);
|
||||
case 0x30000000: return clk->base.read(&clk->base, nv_clk_src_hclk);
|
||||
}
|
||||
break;
|
||||
case nv_clk_src_core:
|
||||
if (!(mast & 0x00100000))
|
||||
P = (nv_rd32(priv, 0x004028) & 0x00070000) >> 16;
|
||||
P = (nv_rd32(clk, 0x004028) & 0x00070000) >> 16;
|
||||
switch (mast & 0x00000003) {
|
||||
case 0x00000000: return clk->read(clk, nv_clk_src_crystal) >> P;
|
||||
case 0x00000001: return clk->read(clk, nv_clk_src_dom6);
|
||||
case 0x00000002: return read_pll(priv, 0x004020) >> P;
|
||||
case 0x00000003: return read_pll(priv, 0x004028) >> P;
|
||||
case 0x00000000: return clk->base.read(&clk->base, nv_clk_src_crystal) >> P;
|
||||
case 0x00000001: return clk->base.read(&clk->base, nv_clk_src_dom6);
|
||||
case 0x00000002: return read_pll(clk, 0x004020) >> P;
|
||||
case 0x00000003: return read_pll(clk, 0x004028) >> P;
|
||||
}
|
||||
break;
|
||||
case nv_clk_src_shader:
|
||||
P = (nv_rd32(priv, 0x004020) & 0x00070000) >> 16;
|
||||
P = (nv_rd32(clk, 0x004020) & 0x00070000) >> 16;
|
||||
switch (mast & 0x00000030) {
|
||||
case 0x00000000:
|
||||
if (mast & 0x00000080)
|
||||
return clk->read(clk, nv_clk_src_host) >> P;
|
||||
return clk->read(clk, nv_clk_src_crystal) >> P;
|
||||
return clk->base.read(&clk->base, nv_clk_src_host) >> P;
|
||||
return clk->base.read(&clk->base, nv_clk_src_crystal) >> P;
|
||||
case 0x00000010: break;
|
||||
case 0x00000020: return read_pll(priv, 0x004028) >> P;
|
||||
case 0x00000030: return read_pll(priv, 0x004020) >> P;
|
||||
case 0x00000020: return read_pll(clk, 0x004028) >> P;
|
||||
case 0x00000030: return read_pll(clk, 0x004020) >> P;
|
||||
}
|
||||
break;
|
||||
case nv_clk_src_mem:
|
||||
P = (nv_rd32(priv, 0x004008) & 0x00070000) >> 16;
|
||||
if (nv_rd32(priv, 0x004008) & 0x00000200) {
|
||||
P = (nv_rd32(clk, 0x004008) & 0x00070000) >> 16;
|
||||
if (nv_rd32(clk, 0x004008) & 0x00000200) {
|
||||
switch (mast & 0x0000c000) {
|
||||
case 0x00000000:
|
||||
return clk->read(clk, nv_clk_src_crystal) >> P;
|
||||
return clk->base.read(&clk->base, nv_clk_src_crystal) >> P;
|
||||
case 0x00008000:
|
||||
case 0x0000c000:
|
||||
return clk->read(clk, nv_clk_src_href) >> P;
|
||||
return clk->base.read(&clk->base, nv_clk_src_href) >> P;
|
||||
}
|
||||
} else {
|
||||
return read_pll(priv, 0x004008) >> P;
|
||||
return read_pll(clk, 0x004008) >> P;
|
||||
}
|
||||
break;
|
||||
case nv_clk_src_vdec:
|
||||
P = (read_div(priv) & 0x00000700) >> 8;
|
||||
switch (nv_device(priv)->chipset) {
|
||||
P = (read_div(clk) & 0x00000700) >> 8;
|
||||
switch (nv_device(clk)->chipset) {
|
||||
case 0x84:
|
||||
case 0x86:
|
||||
case 0x92:
|
||||
@ -258,51 +255,51 @@ nv50_clk_read(struct nvkm_clk *clk, enum nv_clk_src src)
case 0xa0:
switch (mast & 0x00000c00) {
case 0x00000000:
if (nv_device(priv)->chipset == 0xa0) /* wtf?? */
return clk->read(clk, nv_clk_src_core) >> P;
return clk->read(clk, nv_clk_src_crystal) >> P;
if (nv_device(clk)->chipset == 0xa0) /* wtf?? */
return clk->base.read(&clk->base, nv_clk_src_core) >> P;
return clk->base.read(&clk->base, nv_clk_src_crystal) >> P;
case 0x00000400:
return 0;
case 0x00000800:
if (mast & 0x01000000)
return read_pll(priv, 0x004028) >> P;
return read_pll(priv, 0x004030) >> P;
return read_pll(clk, 0x004028) >> P;
return read_pll(clk, 0x004030) >> P;
case 0x00000c00:
return clk->read(clk, nv_clk_src_core) >> P;
return clk->base.read(&clk->base, nv_clk_src_core) >> P;
}
break;
case 0x98:
switch (mast & 0x00000c00) {
case 0x00000000:
return clk->read(clk, nv_clk_src_core) >> P;
return clk->base.read(&clk->base, nv_clk_src_core) >> P;
case 0x00000400:
return 0;
case 0x00000800:
return clk->read(clk, nv_clk_src_hclkm3d2) >> P;
return clk->base.read(&clk->base, nv_clk_src_hclkm3d2) >> P;
case 0x00000c00:
return clk->read(clk, nv_clk_src_mem) >> P;
return clk->base.read(&clk->base, nv_clk_src_mem) >> P;
}
break;
}
break;
case nv_clk_src_dom6:
switch (nv_device(priv)->chipset) {
switch (nv_device(clk)->chipset) {
case 0x50:
case 0xa0:
return read_pll(priv, 0x00e810) >> 2;
return read_pll(clk, 0x00e810) >> 2;
case 0x84:
case 0x86:
case 0x92:
case 0x94:
case 0x96:
case 0x98:
P = (read_div(priv) & 0x00000007) >> 0;
P = (read_div(clk) & 0x00000007) >> 0;
switch (mast & 0x0c000000) {
case 0x00000000: return clk->read(clk, nv_clk_src_href);
case 0x00000000: return clk->base.read(&clk->base, nv_clk_src_href);
case 0x04000000: break;
case 0x08000000: return clk->read(clk, nv_clk_src_hclk);
case 0x08000000: return clk->base.read(&clk->base, nv_clk_src_hclk);
case 0x0c000000:
return clk->read(clk, nv_clk_src_hclkm3) >> P;
return clk->base.read(&clk->base, nv_clk_src_hclkm3) >> P;
}
break;
default:
@ -312,14 +309,14 @@ nv50_clk_read(struct nvkm_clk *clk, enum nv_clk_src src)
break;
}

nv_debug(priv, "unknown clock source %d 0x%08x\n", src, mast);
nv_debug(clk, "unknown clock source %d 0x%08x\n", src, mast);
return -EINVAL;
}

static u32
calc_pll(struct nv50_clk_priv *priv, u32 reg, u32 clk, int *N, int *M, int *P)
calc_pll(struct nv50_clk *clk, u32 reg, u32 idx, int *N, int *M, int *P)
{
struct nvkm_bios *bios = nvkm_bios(priv);
struct nvkm_bios *bios = nvkm_bios(clk);
struct nvbios_pll pll;
int ret;

@ -328,11 +325,11 @@ calc_pll(struct nv50_clk_priv *priv, u32 reg, u32 clk, int *N, int *M, int *P)
return 0;

pll.vco2.max_freq = 0;
pll.refclk = read_pll_ref(priv, reg);
pll.refclk = read_pll_ref(clk, reg);
if (!pll.refclk)
return 0;

return nv04_pll_calc(nv_subdev(priv), &pll, clk, N, M, NULL, NULL, P);
return nv04_pll_calc(nv_subdev(clk), &pll, idx, N, M, NULL, NULL, P);
}

static inline u32
@ -360,10 +357,10 @@ clk_same(u32 a, u32 b)
}

static int
nv50_clk_calc(struct nvkm_clk *clk, struct nvkm_cstate *cstate)
nv50_clk_calc(struct nvkm_clk *obj, struct nvkm_cstate *cstate)
{
struct nv50_clk_priv *priv = (void *)clk;
struct nv50_clk_hwsq *hwsq = &priv->hwsq;
struct nv50_clk *clk = container_of(obj, typeof(*clk), base);
struct nv50_clk_hwsq *hwsq = &clk->hwsq;
const int shader = cstate->domain[nv_clk_src_shader];
const int core = cstate->domain[nv_clk_src_core];
const int vdec = cstate->domain[nv_clk_src_vdec];
@ -392,15 +389,15 @@ nv50_clk_calc(struct nvkm_clk *clk, struct nvkm_cstate *cstate)
freq = calc_div(core, vdec, &P1);

/* see how close we can get using xpll/hclk as a source */
if (nv_device(priv)->chipset != 0x98)
out = read_pll(priv, 0x004030);
if (nv_device(clk)->chipset != 0x98)
out = read_pll(clk, 0x004030);
else
out = clk->read(clk, nv_clk_src_hclkm3d2);
out = clk->base.read(&clk->base, nv_clk_src_hclkm3d2);
out = calc_div(out, vdec, &P2);

/* select whichever gets us closest */
if (abs(vdec - freq) <= abs(vdec - out)) {
if (nv_device(priv)->chipset != 0x98)
if (nv_device(clk)->chipset != 0x98)
mastv |= 0x00000c00;
divsv |= P1 << 8;
} else {
@ -416,14 +413,14 @@ nv50_clk_calc(struct nvkm_clk *clk, struct nvkm_cstate *cstate)
 * of the host clock frequency
 */
if (dom6) {
if (clk_same(dom6, clk->read(clk, nv_clk_src_href))) {
if (clk_same(dom6, clk->base.read(&clk->base, nv_clk_src_href))) {
mastv |= 0x00000000;
} else
if (clk_same(dom6, clk->read(clk, nv_clk_src_hclk))) {
if (clk_same(dom6, clk->base.read(&clk->base, nv_clk_src_hclk))) {
mastv |= 0x08000000;
} else {
freq = clk->read(clk, nv_clk_src_hclk) * 3;
freq = calc_div(freq, dom6, &P1);
freq = clk->base.read(&clk->base, nv_clk_src_hclk) * 3;
calc_div(freq, dom6, &P1);

mastv |= 0x0c000000;
divsv |= P1;
@ -443,13 +440,13 @@ nv50_clk_calc(struct nvkm_clk *clk, struct nvkm_cstate *cstate)
/* core/shader: disconnect nvclk/sclk from their PLLs (nvclk to dom6,
 * sclk to hclk) before reprogramming
 */
if (nv_device(priv)->chipset < 0x92)
if (nv_device(clk)->chipset < 0x92)
clk_mask(hwsq, mast, 0x001000b0, 0x00100080);
else
clk_mask(hwsq, mast, 0x000000b3, 0x00000081);

/* core: for the moment at least, always use nvpll */
freq = calc_pll(priv, 0x4028, core, &N, &M, &P1);
freq = calc_pll(clk, 0x4028, core, &N, &M, &P1);
if (freq == 0)
return -ERANGE;

@ -467,7 +464,7 @@ nv50_clk_calc(struct nvkm_clk *clk, struct nvkm_cstate *cstate)
clk_mask(hwsq, spll[0], 0xc03f0100, (P1 << 19) | (P1 << 16));
clk_mask(hwsq, mast, 0x00100033, 0x00000023);
} else {
freq = calc_pll(priv, 0x4020, shader, &N, &M, &P1);
freq = calc_pll(clk, 0x4020, shader, &N, &M, &P1);
if (freq == 0)
return -ERANGE;

@ -485,17 +482,17 @@ nv50_clk_calc(struct nvkm_clk *clk, struct nvkm_cstate *cstate)
}

static int
nv50_clk_prog(struct nvkm_clk *clk)
nv50_clk_prog(struct nvkm_clk *obj)
{
struct nv50_clk_priv *priv = (void *)clk;
return clk_exec(&priv->hwsq, true);
struct nv50_clk *clk = container_of(obj, typeof(*clk), base);
return clk_exec(&clk->hwsq, true);
}

static void
nv50_clk_tidy(struct nvkm_clk *clk)
nv50_clk_tidy(struct nvkm_clk *obj)
{
struct nv50_clk_priv *priv = (void *)clk;
clk_exec(&priv->hwsq, false);
struct nv50_clk *clk = container_of(obj, typeof(*clk), base);
clk_exec(&clk->hwsq, false);
}

int
@ -504,37 +501,37 @@ nv50_clk_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
	      struct nvkm_object **pobject)
{
struct nv50_clk_oclass *pclass = (void *)oclass;
struct nv50_clk_priv *priv;
struct nv50_clk *clk;
int ret;

ret = nvkm_clk_create(parent, engine, oclass, pclass->domains,
		      NULL, 0, nv_device(parent)->chipset == 0xa0,
		      &priv);
*pobject = nv_object(priv);
		      &clk);
*pobject = nv_object(clk);
if (ret)
return ret;

priv->hwsq.r_fifo = hwsq_reg(0x002504);
priv->hwsq.r_spll[0] = hwsq_reg(0x004020);
priv->hwsq.r_spll[1] = hwsq_reg(0x004024);
priv->hwsq.r_nvpll[0] = hwsq_reg(0x004028);
priv->hwsq.r_nvpll[1] = hwsq_reg(0x00402c);
switch (nv_device(priv)->chipset) {
clk->hwsq.r_fifo = hwsq_reg(0x002504);
clk->hwsq.r_spll[0] = hwsq_reg(0x004020);
clk->hwsq.r_spll[1] = hwsq_reg(0x004024);
clk->hwsq.r_nvpll[0] = hwsq_reg(0x004028);
clk->hwsq.r_nvpll[1] = hwsq_reg(0x00402c);
switch (nv_device(clk)->chipset) {
case 0x92:
case 0x94:
case 0x96:
priv->hwsq.r_divs = hwsq_reg(0x004800);
clk->hwsq.r_divs = hwsq_reg(0x004800);
break;
default:
priv->hwsq.r_divs = hwsq_reg(0x004700);
clk->hwsq.r_divs = hwsq_reg(0x004700);
break;
}
priv->hwsq.r_mast = hwsq_reg(0x00c040);
clk->hwsq.r_mast = hwsq_reg(0x00c040);

priv->base.read = nv50_clk_read;
priv->base.calc = nv50_clk_calc;
priv->base.prog = nv50_clk_prog;
priv->base.tidy = nv50_clk_tidy;
clk->base.read = nv50_clk_read;
clk->base.calc = nv50_clk_calc;
clk->base.prog = nv50_clk_prog;
clk->base.tidy = nv50_clk_tidy;
return 0;
}

@ -12,7 +12,7 @@ struct nv50_clk_hwsq {
struct hwsq_reg r_mast;
};

struct nv50_clk_priv {
struct nv50_clk {
struct nvkm_clk base;
struct nv50_clk_hwsq hwsq;
};
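The recurring change across these hunks is replacing the unchecked `(void *)` downcast from `struct nvkm_clk *` to the implementation-private structure with `container_of()`, which recovers the enclosing structure from a pointer to its embedded `base` member. A minimal user-space sketch of that pattern follows; the struct and function names (base_clk, nv50_clk_sketch, prog) are illustrative stand-ins, not the real nvkm definitions, and container_of is re-declared locally since the kernel header is not available here.

/* Sketch of the container_of pattern used in this commit (assumed,
 * simplified types; not the actual nouveau structures).
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct base_clk {            /* plays the role of struct nvkm_clk */
	int dstate;
};

struct nv50_clk_sketch {     /* plays the role of struct nv50_clk */
	struct base_clk base;    /* embedded base object */
	int hwsq_state;
};

/* Callback receives only the base pointer, as nv50_clk_prog() does... */
static int prog(struct base_clk *obj)
{
	/* ...and container_of() walks back to the enclosing structure. */
	struct nv50_clk_sketch *clk =
		container_of(obj, struct nv50_clk_sketch, base);
	return clk->hwsq_state;
}

int main(void)
{
	struct nv50_clk_sketch clk = {
		.base = { .dstate = 1 },
		.hwsq_state = 42,
	};

	/* Derived fields remain reachable from the base pointer. */
	printf("%d\n", prog(&clk.base));
	return 0;
}

Compared with the old `(void *)clk` cast, container_of() documents which member the pointer refers to and lets the compiler check the member's type, which is presumably why the rename to an explicit `base` member accompanies it here.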