1 /*
2  * Copyright 2012 Red Hat Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: Ben Skeggs
23  */
24 #include "gt215.h"
25 #include "pll.h"
26 
27 #include <subdev/bios.h>
28 #include <subdev/bios/pll.h>
29 #include <subdev/timer.h>
30 
/* Per-device clock state for MCP77: the source selections and PLL
 * register values computed by mcp77_clk_calc() and later written to
 * hardware by mcp77_clk_prog(). */
struct mcp77_clk {
	struct nvkm_clk base;
	enum nv_clk_src csrc, ssrc, vsrc;	/* core/shader/vdec clock sources */
	u32 cctrl, sctrl;	/* core/shader PLL ctrl words, divider/shift in bits 18:16 */
	u32 ccoef, scoef;	/* core/shader PLL coefficients: (N << 8) | M */
	u32 cpost, spost;	/* core/shader PLL post-divider register values */
	u32 vdiv;	/* vdec clock divider (written to 0x4600) */
};
39 
/* Raw read of the vdec clock divider register (0x4600); callers mask
 * out the field they need. */
static u32
read_div(struct mcp77_clk *clk)
{
	return nv_rd32(clk, 0x004600);
}
45 
46 static u32
47 read_pll(struct mcp77_clk *clk, u32 base)
48 {
49 	u32 ctrl = nv_rd32(clk, base + 0);
50 	u32 coef = nv_rd32(clk, base + 4);
51 	u32 ref = clk->base.read(&clk->base, nv_clk_src_href);
52 	u32 post_div = 0;
53 	u32 clock = 0;
54 	int N1, M1;
55 
56 	switch (base){
57 	case 0x4020:
58 		post_div = 1 << ((nv_rd32(clk, 0x4070) & 0x000f0000) >> 16);
59 		break;
60 	case 0x4028:
61 		post_div = (nv_rd32(clk, 0x4040) & 0x000f0000) >> 16;
62 		break;
63 	default:
64 		break;
65 	}
66 
67 	N1 = (coef & 0x0000ff00) >> 8;
68 	M1 = (coef & 0x000000ff);
69 	if ((ctrl & 0x80000000) && M1) {
70 		clock = ref * N1 / M1;
71 		clock = clock / post_div;
72 	}
73 
74 	return clock;
75 }
76 
/* Report the current frequency (kHz) of clock source @src.
 *
 * 0x00c054 ("mast") is the master clock mux register; the bitfields
 * decoded below select between crystal, href (the PCIE reference
 * clock), fixed href multiples and the core/shader PLLs.  Returns 0
 * for sources parked on a disabled or undecoded input.
 */
static int
mcp77_clk_read(struct nvkm_clk *obj, enum nv_clk_src src)
{
	struct mcp77_clk *clk = container_of(obj, typeof(*clk), base);
	u32 mast = nv_rd32(clk, 0x00c054);
	u32 P = 0;	/* post-divider right-shift */

	switch (src) {
	case nv_clk_src_crystal:
		return nv_device(clk)->crystal;
	case nv_clk_src_href:
		return 100000; /* PCIE reference clock */
	case nv_clk_src_hclkm4:
		/* href * 4 */
		return clk->base.read(&clk->base, nv_clk_src_href) * 4;
	case nv_clk_src_hclkm2d3:
		/* href * 2 / 3 */
		return clk->base.read(&clk->base, nv_clk_src_href) * 2 / 3;
	case nv_clk_src_host:
		switch (mast & 0x000c0000) {
		case 0x00000000: return clk->base.read(&clk->base, nv_clk_src_hclkm2d3);
		case 0x00040000: break;	/* undecoded: falls out to "unknown" below */
		case 0x00080000: return clk->base.read(&clk->base, nv_clk_src_hclkm4);
		case 0x000c0000: return clk->base.read(&clk->base, nv_clk_src_cclk);
		}
		break;
	case nv_clk_src_core:
		/* post-shift lives in bits 18:16 of the NVPLL ctrl register */
		P = (nv_rd32(clk, 0x004028) & 0x00070000) >> 16;

		switch (mast & 0x00000003) {
		case 0x00000000: return clk->base.read(&clk->base, nv_clk_src_crystal) >> P;
		case 0x00000001: return 0;	/* disabled/parked */
		case 0x00000002: return clk->base.read(&clk->base, nv_clk_src_hclkm4) >> P;
		case 0x00000003: return read_pll(clk, 0x004028) >> P;
		}
		break;
	case nv_clk_src_cclk:
		/* cclk mirrors the core clock unless explicitly remuxed */
		if ((mast & 0x03000000) != 0x03000000)
			return clk->base.read(&clk->base, nv_clk_src_core);

		if ((mast & 0x00000200) == 0x00000000)
			return clk->base.read(&clk->base, nv_clk_src_core);

		switch (mast & 0x00000c00) {
		case 0x00000000: return clk->base.read(&clk->base, nv_clk_src_href);
		case 0x00000400: return clk->base.read(&clk->base, nv_clk_src_hclkm4);
		case 0x00000800: return clk->base.read(&clk->base, nv_clk_src_hclkm2d3);
		default: return 0;
		}
	case nv_clk_src_shader:
		/* post-shift lives in bits 18:16 of the SPLL ctrl register */
		P = (nv_rd32(clk, 0x004020) & 0x00070000) >> 16;
		switch (mast & 0x00000030) {
		case 0x00000000:
			if (mast & 0x00000040)
				return clk->base.read(&clk->base, nv_clk_src_href) >> P;
			return clk->base.read(&clk->base, nv_clk_src_crystal) >> P;
		case 0x00000010: break;	/* undecoded: falls out to "unknown" below */
		case 0x00000020: return read_pll(clk, 0x004028) >> P;	/* shader off NVPLL */
		case 0x00000030: return read_pll(clk, 0x004020) >> P;
		}
		break;
	case nv_clk_src_mem:
		/* memory clock not handled on this chipset */
		return 0;
		break;
	case nv_clk_src_vdec:
		P = (read_div(clk) & 0x00000700) >> 8;

		switch (mast & 0x00400000) {
		case 0x00400000:
			return clk->base.read(&clk->base, nv_clk_src_core) >> P;
			break;
		default:
			/* fixed 500MHz source, divided down */
			return 500000 >> P;
			break;
		}
		break;
	default:
		break;
	}

	nv_debug(clk, "unknown clock source %d 0x%08x\n", src, mast);
	return 0;
}
158 
159 static u32
160 calc_pll(struct mcp77_clk *clk, u32 reg,
161 	 u32 clock, int *N, int *M, int *P)
162 {
163 	struct nvkm_bios *bios = nvkm_bios(clk);
164 	struct nvbios_pll pll;
165 	int ret;
166 
167 	ret = nvbios_pll_parse(bios, reg, &pll);
168 	if (ret)
169 		return 0;
170 
171 	pll.vco2.max_freq = 0;
172 	pll.refclk = clk->base.read(&clk->base, nv_clk_src_href);
173 	if (!pll.refclk)
174 		return 0;
175 
176 	return nv04_pll_calc(nv_subdev(clk), &pll, clock, N, M, NULL, NULL, P);
177 }
178 
179 static inline u32
180 calc_P(u32 src, u32 target, int *div)
181 {
182 	u32 clk0 = src, clk1 = src;
183 	for (*div = 0; *div <= 7; (*div)++) {
184 		if (clk0 <= target) {
185 			clk1 = clk0 << (*div ? 1 : 0);
186 			break;
187 		}
188 		clk0 >>= 1;
189 	}
190 
191 	if (target - clk0 <= clk1 - target)
192 		return clk0;
193 	(*div)--;
194 	return clk1;
195 }
196 
197 static int
198 mcp77_clk_calc(struct nvkm_clk *obj, struct nvkm_cstate *cstate)
199 {
200 	struct mcp77_clk *clk = container_of(obj, typeof(*clk), base);
201 	const int shader = cstate->domain[nv_clk_src_shader];
202 	const int core = cstate->domain[nv_clk_src_core];
203 	const int vdec = cstate->domain[nv_clk_src_vdec];
204 	u32 out = 0, clock = 0;
205 	int N, M, P1, P2 = 0;
206 	int divs = 0;
207 
208 	/* cclk: find suitable source, disable PLL if we can */
209 	if (core < clk->base.read(&clk->base, nv_clk_src_hclkm4))
210 		out = calc_P(clk->base.read(&clk->base, nv_clk_src_hclkm4), core, &divs);
211 
212 	/* Calculate clock * 2, so shader clock can use it too */
213 	clock = calc_pll(clk, 0x4028, (core << 1), &N, &M, &P1);
214 
215 	if (abs(core - out) <= abs(core - (clock >> 1))) {
216 		clk->csrc = nv_clk_src_hclkm4;
217 		clk->cctrl = divs << 16;
218 	} else {
219 		/* NVCTRL is actually used _after_ NVPOST, and after what we
220 		 * call NVPLL. To make matters worse, NVPOST is an integer
221 		 * divider instead of a right-shift number. */
222 		if(P1 > 2) {
223 			P2 = P1 - 2;
224 			P1 = 2;
225 		}
226 
227 		clk->csrc = nv_clk_src_core;
228 		clk->ccoef = (N << 8) | M;
229 
230 		clk->cctrl = (P2 + 1) << 16;
231 		clk->cpost = (1 << P1) << 16;
232 	}
233 
234 	/* sclk: nvpll + divisor, href or spll */
235 	out = 0;
236 	if (shader == clk->base.read(&clk->base, nv_clk_src_href)) {
237 		clk->ssrc = nv_clk_src_href;
238 	} else {
239 		clock = calc_pll(clk, 0x4020, shader, &N, &M, &P1);
240 		if (clk->csrc == nv_clk_src_core)
241 			out = calc_P((core << 1), shader, &divs);
242 
243 		if (abs(shader - out) <=
244 		    abs(shader - clock) &&
245 		   (divs + P2) <= 7) {
246 			clk->ssrc = nv_clk_src_core;
247 			clk->sctrl = (divs + P2) << 16;
248 		} else {
249 			clk->ssrc = nv_clk_src_shader;
250 			clk->scoef = (N << 8) | M;
251 			clk->sctrl = P1 << 16;
252 		}
253 	}
254 
255 	/* vclk */
256 	out = calc_P(core, vdec, &divs);
257 	clock = calc_P(500000, vdec, &P1);
258 	if(abs(vdec - out) <= abs(vdec - clock)) {
259 		clk->vsrc = nv_clk_src_cclk;
260 		clk->vdiv = divs << 16;
261 	} else {
262 		clk->vsrc = nv_clk_src_vdec;
263 		clk->vdiv = P1 << 16;
264 	}
265 
266 	/* Print strategy! */
267 	nv_debug(clk, "nvpll: %08x %08x %08x\n",
268 			clk->ccoef, clk->cpost, clk->cctrl);
269 	nv_debug(clk, " spll: %08x %08x %08x\n",
270 			clk->scoef, clk->spost, clk->sctrl);
271 	nv_debug(clk, " vdiv: %08x\n", clk->vdiv);
272 	if (clk->csrc == nv_clk_src_hclkm4)
273 		nv_debug(clk, "core: hrefm4\n");
274 	else
275 		nv_debug(clk, "core: nvpll\n");
276 
277 	if (clk->ssrc == nv_clk_src_hclkm4)
278 		nv_debug(clk, "shader: hrefm4\n");
279 	else if (clk->ssrc == nv_clk_src_core)
280 		nv_debug(clk, "shader: nvpll\n");
281 	else
282 		nv_debug(clk, "shader: spll\n");
283 
284 	if (clk->vsrc == nv_clk_src_hclkm4)
285 		nv_debug(clk, "vdec: 500MHz\n");
286 	else
287 		nv_debug(clk, "vdec: core\n");
288 
289 	return 0;
290 }
291 
/* Program the clocks computed by mcp77_clk_calc() into hardware.
 *
 * Sequence: park the master mux (0xc054) on safe href-derived
 * sources, program and enable the required PLLs, wait for them to
 * report lock, then flip the mux to the new sources.  On any failure
 * the final mux write is skipped so the GPU keeps running from the
 * safe clocks, and unused PLLs/dividers are powered back down.
 */
static int
mcp77_clk_prog(struct nvkm_clk *obj)
{
	struct mcp77_clk *clk = container_of(obj, typeof(*clk), base);
	u32 pllmask = 0, mast;
	unsigned long flags;
	unsigned long *f = &flags;
	int ret = 0;

	ret = gt215_clk_pre(&clk->base, f);
	if (ret)
		goto out;

	/* First switch to safe clocks: href */
	mast = nv_mask(clk, 0xc054, 0x03400e70, 0x03400640);
	mast &= ~0x00400e73;
	mast |= 0x03000000;

	switch (clk->csrc) {
	case nv_clk_src_hclkm4:
		/* no PLL needed, just the post-shift */
		nv_mask(clk, 0x4028, 0x00070000, clk->cctrl);
		mast |= 0x00000002;
		break;
	case nv_clk_src_core:
		/* coefficients first, then ctrl with enable bit (31),
		 * then the integer post-divider */
		nv_wr32(clk, 0x402c, clk->ccoef);
		nv_wr32(clk, 0x4028, 0x80000000 | clk->cctrl);
		nv_wr32(clk, 0x4040, clk->cpost);
		pllmask |= (0x3 << 8);	/* NVPLL lock bits to wait for */
		mast |= 0x00000003;
		break;
	default:
		nv_warn(clk,"Reclocking failed: unknown core clock\n");
		goto resume;
	}

	switch (clk->ssrc) {
	case nv_clk_src_href:
		nv_mask(clk, 0x4020, 0x00070000, 0x00000000);
		/* mast |= 0x00000000; */
		break;
	case nv_clk_src_core:
		/* shader divided down from the core PLL */
		nv_mask(clk, 0x4020, 0x00070000, clk->sctrl);
		mast |= 0x00000020;
		break;
	case nv_clk_src_shader:
		/* same write order as the core PLL above */
		nv_wr32(clk, 0x4024, clk->scoef);
		nv_wr32(clk, 0x4020, 0x80000000 | clk->sctrl);
		nv_wr32(clk, 0x4070, clk->spost);
		pllmask |= (0x3 << 12);	/* SPLL lock bits to wait for */
		mast |= 0x00000030;
		break;
	default:
		nv_warn(clk,"Reclocking failed: unknown sclk clock\n");
		goto resume;
	}

	/* wait for all enabled PLLs to report lock before switching */
	if (!nv_wait(clk, 0x004080, pllmask, pllmask)) {
		nv_warn(clk,"Reclocking failed: unstable PLLs\n");
		goto resume;
	}

	switch (clk->vsrc) {
	case nv_clk_src_cclk:
		mast |= 0x00400000;
		/* fallthrough - both sources need the divider written */
	default:
		nv_wr32(clk, 0x4600, clk->vdiv);
	}

	/* commit the new source selection */
	nv_wr32(clk, 0xc054, mast);

resume:
	/* Disable some PLLs and dividers when unused */
	if (clk->csrc != nv_clk_src_core) {
		nv_wr32(clk, 0x4040, 0x00000000);
		nv_mask(clk, 0x4028, 0x80000000, 0x00000000);
	}

	if (clk->ssrc != nv_clk_src_shader) {
		nv_wr32(clk, 0x4070, 0x00000000);
		nv_mask(clk, 0x4020, 0x80000000, 0x00000000);
	}

out:
	/* NOTE(review): -EBUSY presumably means gt215_clk_pre() failed
	 * before taking the lock, so pass NULL to skip the unlock in
	 * gt215_clk_post() — confirm against gt215.c */
	if (ret == -EBUSY)
		f = NULL;

	gt215_clk_post(&clk->base, f);
	return ret;
}
381 
/* Post-reclock tidy hook; nothing to clean up on this chipset, but
 * the nvkm_clk interface requires the callback to exist. */
static void
mcp77_clk_tidy(struct nvkm_clk *obj)
{
}
386 
/* Clock domains exposed to the nvkm clock core.  core/shader/vdec are
 * the tunable domains; crystal and href are read-only references.
 * NOTE(review): the trailing 1000 is presumably a frequency scale
 * factor for the perf tables — confirm against struct nvkm_domain. */
static struct nvkm_domain
mcp77_domains[] = {
	{ nv_clk_src_crystal, 0xff },
	{ nv_clk_src_href   , 0xff },
	{ nv_clk_src_core   , 0xff, 0, "core", 1000 },
	{ nv_clk_src_shader , 0xff, 0, "shader", 1000 },
	{ nv_clk_src_vdec   , 0xff, 0, "vdec", 1000 },
	{ nv_clk_src_max }
};
396 
/* Constructor: create the base clock subdev with the mcp77 domain
 * table and wire up the chipset-specific read/calc/prog/tidy hooks. */
static int
mcp77_clk_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
	       struct nvkm_oclass *oclass, void *data, u32 size,
	       struct nvkm_object **pobject)
{
	struct mcp77_clk *clk;
	int ret;

	ret = nvkm_clk_create(parent, engine, oclass, mcp77_domains,
			      NULL, 0, true, &clk);
	/* *pobject is assigned before the error check — nvkm convention
	 * so the core can destroy a partially-constructed object */
	*pobject = nv_object(clk);
	if (ret)
		return ret;

	clk->base.read = mcp77_clk_read;
	clk->base.calc = mcp77_clk_calc;
	clk->base.prog = mcp77_clk_prog;
	clk->base.tidy = mcp77_clk_tidy;
	return 0;
}
417 
/* Object class for the MCP77 (chipset 0xaa) clock subdev; only the
 * constructor is chipset-specific, dtor/init/fini use the generic
 * nvkm_clk implementations. */
struct nvkm_oclass *
mcp77_clk_oclass = &(struct nvkm_oclass) {
	.handle = NV_SUBDEV(CLK, 0xaa),
	.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = mcp77_clk_ctor,
		.dtor = _nvkm_clk_dtor,
		.init = _nvkm_clk_init,
		.fini = _nvkm_clk_fini,
	},
};
428