1 /*
2  * Copyright 2012 Red Hat Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: Ben Skeggs
23  */
24 #include "gt215.h"
25 #include "pll.h"
26 
27 #include <subdev/bios.h>
28 #include <subdev/bios/pll.h>
29 #include <subdev/timer.h>
30 
/* Per-device clock state for MCP77/MCP79: the source selected for the
 * core (c*), shader (s*) and vdec (v*) clocks, plus the precomputed
 * register values that mcp77_clk_prog() writes out. */
struct mcp77_clk {
	struct nvkm_clk base;
	enum nv_clk_src csrc, ssrc, vsrc;	/* chosen clock sources */
	u32 cctrl, sctrl;	/* PLL control words (P-divider field) */
	u32 ccoef, scoef;	/* PLL coefficient words (N << 8 | M) */
	u32 cpost, spost;	/* PLL post-divider words */
	u32 vdiv;		/* vdec divider (written to 0x4600) */
};
39 
40 static u32
41 read_div(struct mcp77_clk *clk)
42 {
43 	struct nvkm_device *device = clk->base.subdev.device;
44 	return nvkm_rd32(device, 0x004600);
45 }
46 
47 static u32
48 read_pll(struct mcp77_clk *clk, u32 base)
49 {
50 	struct nvkm_device *device = clk->base.subdev.device;
51 	u32 ctrl = nvkm_rd32(device, base + 0);
52 	u32 coef = nvkm_rd32(device, base + 4);
53 	u32 ref = clk->base.read(&clk->base, nv_clk_src_href);
54 	u32 post_div = 0;
55 	u32 clock = 0;
56 	int N1, M1;
57 
58 	switch (base){
59 	case 0x4020:
60 		post_div = 1 << ((nvkm_rd32(device, 0x4070) & 0x000f0000) >> 16);
61 		break;
62 	case 0x4028:
63 		post_div = (nvkm_rd32(device, 0x4040) & 0x000f0000) >> 16;
64 		break;
65 	default:
66 		break;
67 	}
68 
69 	N1 = (coef & 0x0000ff00) >> 8;
70 	M1 = (coef & 0x000000ff);
71 	if ((ctrl & 0x80000000) && M1) {
72 		clock = ref * N1 / M1;
73 		clock = clock / post_div;
74 	}
75 
76 	return clock;
77 }
78 
/* Report the current frequency (kHz) of clock source @src by decoding
 * the clock source mux register 0x00c054 ("mast") and the PLL/divider
 * registers.  Returns 0 for unknown or currently-stopped sources.
 */
static int
mcp77_clk_read(struct nvkm_clk *obj, enum nv_clk_src src)
{
	struct mcp77_clk *clk = container_of(obj, typeof(*clk), base);
	struct nvkm_device *device = clk->base.subdev.device;
	u32 mast = nvkm_rd32(device, 0x00c054);
	u32 P = 0;

	switch (src) {
	case nv_clk_src_crystal:
		return device->crystal;
	case nv_clk_src_href:
		return 100000; /* PCIE reference clock */
	case nv_clk_src_hclkm4:
		return clk->base.read(&clk->base, nv_clk_src_href) * 4;
	case nv_clk_src_hclkm2d3:
		return clk->base.read(&clk->base, nv_clk_src_href) * 2 / 3;
	case nv_clk_src_host:
		/* host clock source select: mast[19:18] */
		switch (mast & 0x000c0000) {
		case 0x00000000: return clk->base.read(&clk->base, nv_clk_src_hclkm2d3);
		case 0x00040000: break; /* unknown setting -> debug + 0 below */
		case 0x00080000: return clk->base.read(&clk->base, nv_clk_src_hclkm4);
		case 0x000c0000: return clk->base.read(&clk->base, nv_clk_src_cclk);
		}
		break;
	case nv_clk_src_core:
		/* core PLL P (right-shift) divider: 0x4028[18:16] */
		P = (nvkm_rd32(device, 0x004028) & 0x00070000) >> 16;

		switch (mast & 0x00000003) {
		case 0x00000000: return clk->base.read(&clk->base, nv_clk_src_crystal) >> P;
		case 0x00000001: return 0; /* clock stopped */
		case 0x00000002: return clk->base.read(&clk->base, nv_clk_src_hclkm4) >> P;
		case 0x00000003: return read_pll(clk, 0x004028) >> P;
		}
		break;
	case nv_clk_src_cclk:
		/* cclk follows core unless the bypass path is fully enabled */
		if ((mast & 0x03000000) != 0x03000000)
			return clk->base.read(&clk->base, nv_clk_src_core);

		if ((mast & 0x00000200) == 0x00000000)
			return clk->base.read(&clk->base, nv_clk_src_core);

		switch (mast & 0x00000c00) {
		case 0x00000000: return clk->base.read(&clk->base, nv_clk_src_href);
		case 0x00000400: return clk->base.read(&clk->base, nv_clk_src_hclkm4);
		case 0x00000800: return clk->base.read(&clk->base, nv_clk_src_hclkm2d3);
		default: return 0;
		}
	case nv_clk_src_shader:
		/* shader PLL P (right-shift) divider: 0x4020[18:16] */
		P = (nvkm_rd32(device, 0x004020) & 0x00070000) >> 16;
		switch (mast & 0x00000030) {
		case 0x00000000:
			if (mast & 0x00000040)
				return clk->base.read(&clk->base, nv_clk_src_href) >> P;
			return clk->base.read(&clk->base, nv_clk_src_crystal) >> P;
		case 0x00000010: break; /* unknown setting -> debug + 0 below */
		case 0x00000020: return read_pll(clk, 0x004028) >> P;
		case 0x00000030: return read_pll(clk, 0x004020) >> P;
		}
		break;
	case nv_clk_src_mem:
		/* NOTE(review): no memory clock reported here -- presumably
		 * because these IGPs use host memory; confirm. */
		return 0;
		break;
	case nv_clk_src_vdec:
		P = (read_div(clk) & 0x00000700) >> 8;

		switch (mast & 0x00400000) {
		case 0x00400000:
			/* vdec sourced from the core clock */
			return clk->base.read(&clk->base, nv_clk_src_core) >> P;
			break;
		default:
			/* vdec sourced from a fixed 500MHz input */
			return 500000 >> P;
			break;
		}
		break;
	default:
		break;
	}

	nv_debug(clk, "unknown clock source %d 0x%08x\n", src, mast);
	return 0;
}
161 
162 static u32
163 calc_pll(struct mcp77_clk *clk, u32 reg,
164 	 u32 clock, int *N, int *M, int *P)
165 {
166 	struct nvkm_bios *bios = nvkm_bios(clk);
167 	struct nvbios_pll pll;
168 	int ret;
169 
170 	ret = nvbios_pll_parse(bios, reg, &pll);
171 	if (ret)
172 		return 0;
173 
174 	pll.vco2.max_freq = 0;
175 	pll.refclk = clk->base.read(&clk->base, nv_clk_src_href);
176 	if (!pll.refclk)
177 		return 0;
178 
179 	return nv04_pll_calc(nv_subdev(clk), &pll, clock, N, M, NULL, NULL, P);
180 }
181 
/* Pick a right-shift divider (0..7) bringing @src as close as possible
 * to @target.  Stores the chosen shift in *div and returns the resulting
 * frequency (the nearest of the two bracketing candidates).
 */
static inline u32
calc_P(u32 src, u32 target, int *div)
{
	u32 lo = src, hi = src;

	*div = 0;
	while (*div <= 7) {
		if (lo <= target) {
			/* hi is the next-larger candidate (one shift less) */
			hi = *div ? lo << 1 : lo;
			break;
		}
		lo >>= 1;
		(*div)++;
	}

	if (target - lo <= hi - target)
		return lo;

	/* hi is closer: back off one shift step */
	(*div)--;
	return hi;
}
199 
200 static int
201 mcp77_clk_calc(struct nvkm_clk *obj, struct nvkm_cstate *cstate)
202 {
203 	struct mcp77_clk *clk = container_of(obj, typeof(*clk), base);
204 	const int shader = cstate->domain[nv_clk_src_shader];
205 	const int core = cstate->domain[nv_clk_src_core];
206 	const int vdec = cstate->domain[nv_clk_src_vdec];
207 	u32 out = 0, clock = 0;
208 	int N, M, P1, P2 = 0;
209 	int divs = 0;
210 
211 	/* cclk: find suitable source, disable PLL if we can */
212 	if (core < clk->base.read(&clk->base, nv_clk_src_hclkm4))
213 		out = calc_P(clk->base.read(&clk->base, nv_clk_src_hclkm4), core, &divs);
214 
215 	/* Calculate clock * 2, so shader clock can use it too */
216 	clock = calc_pll(clk, 0x4028, (core << 1), &N, &M, &P1);
217 
218 	if (abs(core - out) <= abs(core - (clock >> 1))) {
219 		clk->csrc = nv_clk_src_hclkm4;
220 		clk->cctrl = divs << 16;
221 	} else {
222 		/* NVCTRL is actually used _after_ NVPOST, and after what we
223 		 * call NVPLL. To make matters worse, NVPOST is an integer
224 		 * divider instead of a right-shift number. */
225 		if(P1 > 2) {
226 			P2 = P1 - 2;
227 			P1 = 2;
228 		}
229 
230 		clk->csrc = nv_clk_src_core;
231 		clk->ccoef = (N << 8) | M;
232 
233 		clk->cctrl = (P2 + 1) << 16;
234 		clk->cpost = (1 << P1) << 16;
235 	}
236 
237 	/* sclk: nvpll + divisor, href or spll */
238 	out = 0;
239 	if (shader == clk->base.read(&clk->base, nv_clk_src_href)) {
240 		clk->ssrc = nv_clk_src_href;
241 	} else {
242 		clock = calc_pll(clk, 0x4020, shader, &N, &M, &P1);
243 		if (clk->csrc == nv_clk_src_core)
244 			out = calc_P((core << 1), shader, &divs);
245 
246 		if (abs(shader - out) <=
247 		    abs(shader - clock) &&
248 		   (divs + P2) <= 7) {
249 			clk->ssrc = nv_clk_src_core;
250 			clk->sctrl = (divs + P2) << 16;
251 		} else {
252 			clk->ssrc = nv_clk_src_shader;
253 			clk->scoef = (N << 8) | M;
254 			clk->sctrl = P1 << 16;
255 		}
256 	}
257 
258 	/* vclk */
259 	out = calc_P(core, vdec, &divs);
260 	clock = calc_P(500000, vdec, &P1);
261 	if(abs(vdec - out) <= abs(vdec - clock)) {
262 		clk->vsrc = nv_clk_src_cclk;
263 		clk->vdiv = divs << 16;
264 	} else {
265 		clk->vsrc = nv_clk_src_vdec;
266 		clk->vdiv = P1 << 16;
267 	}
268 
269 	/* Print strategy! */
270 	nv_debug(clk, "nvpll: %08x %08x %08x\n",
271 			clk->ccoef, clk->cpost, clk->cctrl);
272 	nv_debug(clk, " spll: %08x %08x %08x\n",
273 			clk->scoef, clk->spost, clk->sctrl);
274 	nv_debug(clk, " vdiv: %08x\n", clk->vdiv);
275 	if (clk->csrc == nv_clk_src_hclkm4)
276 		nv_debug(clk, "core: hrefm4\n");
277 	else
278 		nv_debug(clk, "core: nvpll\n");
279 
280 	if (clk->ssrc == nv_clk_src_hclkm4)
281 		nv_debug(clk, "shader: hrefm4\n");
282 	else if (clk->ssrc == nv_clk_src_core)
283 		nv_debug(clk, "shader: nvpll\n");
284 	else
285 		nv_debug(clk, "shader: spll\n");
286 
287 	if (clk->vsrc == nv_clk_src_hclkm4)
288 		nv_debug(clk, "vdec: 500MHz\n");
289 	else
290 		nv_debug(clk, "vdec: core\n");
291 
292 	return 0;
293 }
294 
/* Program the register values computed by mcp77_clk_calc().  The chip is
 * first parked on safe (href-derived) clocks, the PLLs/dividers are
 * reprogrammed and waited on, then the source mux (0xc054) is switched
 * to the new configuration.
 */
static int
mcp77_clk_prog(struct nvkm_clk *obj)
{
	struct mcp77_clk *clk = container_of(obj, typeof(*clk), base);
	struct nvkm_device *device = clk->base.subdev.device;
	u32 pllmask = 0, mast;
	unsigned long flags;
	unsigned long *f = &flags;
	int ret = 0;

	ret = gt215_clk_pre(&clk->base, f);
	if (ret)
		goto out;

	/* First switch to safe clocks: href */
	mast = nvkm_mask(device, 0xc054, 0x03400e70, 0x03400640);
	mast &= ~0x00400e73;
	mast |= 0x03000000;

	switch (clk->csrc) {
	case nv_clk_src_hclkm4:
		nvkm_mask(device, 0x4028, 0x00070000, clk->cctrl);
		mast |= 0x00000002;
		break;
	case nv_clk_src_core:
		nvkm_wr32(device, 0x402c, clk->ccoef);
		nvkm_wr32(device, 0x4028, 0x80000000 | clk->cctrl);
		nvkm_wr32(device, 0x4040, clk->cpost);
		pllmask |= (0x3 << 8);	/* core PLL lock bits, waited on below */
		mast |= 0x00000003;
		break;
	default:
		nv_warn(clk,"Reclocking failed: unknown core clock\n");
		goto resume;
	}

	switch (clk->ssrc) {
	case nv_clk_src_href:
		nvkm_mask(device, 0x4020, 0x00070000, 0x00000000);
		/* mast |= 0x00000000; */
		break;
	case nv_clk_src_core:
		nvkm_mask(device, 0x4020, 0x00070000, clk->sctrl);
		mast |= 0x00000020;
		break;
	case nv_clk_src_shader:
		nvkm_wr32(device, 0x4024, clk->scoef);
		nvkm_wr32(device, 0x4020, 0x80000000 | clk->sctrl);
		nvkm_wr32(device, 0x4070, clk->spost);
		pllmask |= (0x3 << 12);	/* shader PLL lock bits, waited on below */
		mast |= 0x00000030;
		break;
	default:
		nv_warn(clk,"Reclocking failed: unknown sclk clock\n");
		goto resume;
	}

	/* Wait for the enabled PLLs to stabilise before switching the mux. */
	if (!nv_wait(clk, 0x004080, pllmask, pllmask)) {
		nv_warn(clk,"Reclocking failed: unstable PLLs\n");
		goto resume;
	}

	switch (clk->vsrc) {
	case nv_clk_src_cclk:
		mast |= 0x00400000;
		/* fallthrough: the vdec divider is written for either source */
	default:
		nvkm_wr32(device, 0x4600, clk->vdiv);
	}

	nvkm_wr32(device, 0xc054, mast);

resume:
	/* Disable some PLLs and dividers when unused */
	if (clk->csrc != nv_clk_src_core) {
		nvkm_wr32(device, 0x4040, 0x00000000);
		nvkm_mask(device, 0x4028, 0x80000000, 0x00000000);
	}

	if (clk->ssrc != nv_clk_src_shader) {
		nvkm_wr32(device, 0x4070, 0x00000000);
		nvkm_mask(device, 0x4020, 0x80000000, 0x00000000);
	}

out:
	/* NOTE(review): -EBUSY from gt215_clk_pre() appears to mean the
	 * lock was never taken, so pass NULL to skip unlocking -- confirm
	 * against gt215_clk_pre()/gt215_clk_post(). */
	if (ret == -EBUSY)
		f = NULL;

	gt215_clk_post(&clk->base, f);
	return ret;
}
385 
/* Intentional no-op: nothing to clean up after a reclock attempt here,
 * but the nvkm_clk interface expects a tidy() hook. */
static void
mcp77_clk_tidy(struct nvkm_clk *obj)
{
}
390 
/* Clock domains exposed by this subdev.  crystal/href are fixed inputs;
 * core, shader and vdec are the adjustable domains (the trailing 1000
 * presumably scales perf-table values -- see nvkm_domain users). */
static struct nvkm_domain
mcp77_domains[] = {
	{ nv_clk_src_crystal, 0xff },
	{ nv_clk_src_href   , 0xff },
	{ nv_clk_src_core   , 0xff, 0, "core", 1000 },
	{ nv_clk_src_shader , 0xff, 0, "shader", 1000 },
	{ nv_clk_src_vdec   , 0xff, 0, "vdec", 1000 },
	{ nv_clk_src_max }
};
400 
/* Constructor: create the base clock object with the MCP77 domain list
 * and hook up the MCP77-specific read/calc/prog/tidy methods. */
static int
mcp77_clk_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
	       struct nvkm_oclass *oclass, void *data, u32 size,
	       struct nvkm_object **pobject)
{
	struct mcp77_clk *clk;
	int ret;

	ret = nvkm_clk_create(parent, engine, oclass, mcp77_domains,
			      NULL, 0, true, &clk);
	/* *pobject is set before the error check so the core can destroy a
	 * partially-constructed object on failure (standard nvkm pattern). */
	*pobject = nv_object(clk);
	if (ret)
		return ret;

	clk->base.read = mcp77_clk_read;
	clk->base.calc = mcp77_clk_calc;
	clk->base.prog = mcp77_clk_prog;
	clk->base.tidy = mcp77_clk_tidy;
	return 0;
}
421 
/* Object class for the MCP77/MCP79 (chipset 0xaa) clock subdev; dtor,
 * init and fini use the generic nvkm_clk implementations. */
struct nvkm_oclass *
mcp77_clk_oclass = &(struct nvkm_oclass) {
	.handle = NV_SUBDEV(CLK, 0xaa),
	.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = mcp77_clk_ctor,
		.dtor = _nvkm_clk_dtor,
		.init = _nvkm_clk_init,
		.fini = _nvkm_clk_fini,
	},
};
432