1 /*
2  * Copyright 2012 Red Hat Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: Ben Skeggs
23  */
24 #include "gt215.h"
25 #include "pll.h"
26 
27 #include <subdev/bios.h>
28 #include <subdev/bios/pll.h>
29 #include <subdev/timer.h>
30 
/* Per-device clock state for MCP77/MCP79 IGPs.  The src/ctrl/coef/post
 * fields are computed by mcp77_clk_calc() and written to the hardware
 * by mcp77_clk_prog(). */
struct mcp77_clk {
	struct nvkm_clk base;
	enum nv_clk_src csrc, ssrc, vsrc;	/* core/shader/vdec clock sources */
	u32 cctrl, sctrl;			/* control/divider register values */
	u32 ccoef, scoef;			/* PLL coefficients ((N << 8) | M) */
	u32 cpost, spost;			/* PLL post-divider register values */
	u32 vdiv;				/* vdec clock divider (reg 0x4600) */
};
39 
40 static u32
41 read_div(struct mcp77_clk *clk)
42 {
43 	struct nvkm_device *device = clk->base.subdev.device;
44 	return nvkm_rd32(device, 0x004600);
45 }
46 
47 static u32
48 read_pll(struct mcp77_clk *clk, u32 base)
49 {
50 	struct nvkm_device *device = clk->base.subdev.device;
51 	u32 ctrl = nvkm_rd32(device, base + 0);
52 	u32 coef = nvkm_rd32(device, base + 4);
53 	u32 ref = clk->base.read(&clk->base, nv_clk_src_href);
54 	u32 post_div = 0;
55 	u32 clock = 0;
56 	int N1, M1;
57 
58 	switch (base){
59 	case 0x4020:
60 		post_div = 1 << ((nvkm_rd32(device, 0x4070) & 0x000f0000) >> 16);
61 		break;
62 	case 0x4028:
63 		post_div = (nvkm_rd32(device, 0x4040) & 0x000f0000) >> 16;
64 		break;
65 	default:
66 		break;
67 	}
68 
69 	N1 = (coef & 0x0000ff00) >> 8;
70 	M1 = (coef & 0x000000ff);
71 	if ((ctrl & 0x80000000) && M1) {
72 		clock = ref * N1 / M1;
73 		clock = clock / post_div;
74 	}
75 
76 	return clock;
77 }
78 
79 static int
80 mcp77_clk_read(struct nvkm_clk *obj, enum nv_clk_src src)
81 {
82 	struct mcp77_clk *clk = container_of(obj, typeof(*clk), base);
83 	struct nvkm_subdev *subdev = &clk->base.subdev;
84 	struct nvkm_device *device = subdev->device;
85 	u32 mast = nvkm_rd32(device, 0x00c054);
86 	u32 P = 0;
87 
88 	switch (src) {
89 	case nv_clk_src_crystal:
90 		return device->crystal;
91 	case nv_clk_src_href:
92 		return 100000; /* PCIE reference clock */
93 	case nv_clk_src_hclkm4:
94 		return clk->base.read(&clk->base, nv_clk_src_href) * 4;
95 	case nv_clk_src_hclkm2d3:
96 		return clk->base.read(&clk->base, nv_clk_src_href) * 2 / 3;
97 	case nv_clk_src_host:
98 		switch (mast & 0x000c0000) {
99 		case 0x00000000: return clk->base.read(&clk->base, nv_clk_src_hclkm2d3);
100 		case 0x00040000: break;
101 		case 0x00080000: return clk->base.read(&clk->base, nv_clk_src_hclkm4);
102 		case 0x000c0000: return clk->base.read(&clk->base, nv_clk_src_cclk);
103 		}
104 		break;
105 	case nv_clk_src_core:
106 		P = (nvkm_rd32(device, 0x004028) & 0x00070000) >> 16;
107 
108 		switch (mast & 0x00000003) {
109 		case 0x00000000: return clk->base.read(&clk->base, nv_clk_src_crystal) >> P;
110 		case 0x00000001: return 0;
111 		case 0x00000002: return clk->base.read(&clk->base, nv_clk_src_hclkm4) >> P;
112 		case 0x00000003: return read_pll(clk, 0x004028) >> P;
113 		}
114 		break;
115 	case nv_clk_src_cclk:
116 		if ((mast & 0x03000000) != 0x03000000)
117 			return clk->base.read(&clk->base, nv_clk_src_core);
118 
119 		if ((mast & 0x00000200) == 0x00000000)
120 			return clk->base.read(&clk->base, nv_clk_src_core);
121 
122 		switch (mast & 0x00000c00) {
123 		case 0x00000000: return clk->base.read(&clk->base, nv_clk_src_href);
124 		case 0x00000400: return clk->base.read(&clk->base, nv_clk_src_hclkm4);
125 		case 0x00000800: return clk->base.read(&clk->base, nv_clk_src_hclkm2d3);
126 		default: return 0;
127 		}
128 	case nv_clk_src_shader:
129 		P = (nvkm_rd32(device, 0x004020) & 0x00070000) >> 16;
130 		switch (mast & 0x00000030) {
131 		case 0x00000000:
132 			if (mast & 0x00000040)
133 				return clk->base.read(&clk->base, nv_clk_src_href) >> P;
134 			return clk->base.read(&clk->base, nv_clk_src_crystal) >> P;
135 		case 0x00000010: break;
136 		case 0x00000020: return read_pll(clk, 0x004028) >> P;
137 		case 0x00000030: return read_pll(clk, 0x004020) >> P;
138 		}
139 		break;
140 	case nv_clk_src_mem:
141 		return 0;
142 		break;
143 	case nv_clk_src_vdec:
144 		P = (read_div(clk) & 0x00000700) >> 8;
145 
146 		switch (mast & 0x00400000) {
147 		case 0x00400000:
148 			return clk->base.read(&clk->base, nv_clk_src_core) >> P;
149 			break;
150 		default:
151 			return 500000 >> P;
152 			break;
153 		}
154 		break;
155 	default:
156 		break;
157 	}
158 
159 	nvkm_debug(subdev, "unknown clock source %d %08x\n", src, mast);
160 	return 0;
161 }
162 
163 static u32
164 calc_pll(struct mcp77_clk *clk, u32 reg,
165 	 u32 clock, int *N, int *M, int *P)
166 {
167 	struct nvkm_bios *bios = nvkm_bios(clk);
168 	struct nvbios_pll pll;
169 	int ret;
170 
171 	ret = nvbios_pll_parse(bios, reg, &pll);
172 	if (ret)
173 		return 0;
174 
175 	pll.vco2.max_freq = 0;
176 	pll.refclk = clk->base.read(&clk->base, nv_clk_src_href);
177 	if (!pll.refclk)
178 		return 0;
179 
180 	return nv04_pll_calc(nv_subdev(clk), &pll, clock, N, M, NULL, NULL, P);
181 }
182 
183 static inline u32
184 calc_P(u32 src, u32 target, int *div)
185 {
186 	u32 clk0 = src, clk1 = src;
187 	for (*div = 0; *div <= 7; (*div)++) {
188 		if (clk0 <= target) {
189 			clk1 = clk0 << (*div ? 1 : 0);
190 			break;
191 		}
192 		clk0 >>= 1;
193 	}
194 
195 	if (target - clk0 <= clk1 - target)
196 		return clk0;
197 	(*div)--;
198 	return clk1;
199 }
200 
201 static int
202 mcp77_clk_calc(struct nvkm_clk *obj, struct nvkm_cstate *cstate)
203 {
204 	struct mcp77_clk *clk = container_of(obj, typeof(*clk), base);
205 	const int shader = cstate->domain[nv_clk_src_shader];
206 	const int core = cstate->domain[nv_clk_src_core];
207 	const int vdec = cstate->domain[nv_clk_src_vdec];
208 	struct nvkm_subdev *subdev = &clk->base.subdev;
209 	u32 out = 0, clock = 0;
210 	int N, M, P1, P2 = 0;
211 	int divs = 0;
212 
213 	/* cclk: find suitable source, disable PLL if we can */
214 	if (core < clk->base.read(&clk->base, nv_clk_src_hclkm4))
215 		out = calc_P(clk->base.read(&clk->base, nv_clk_src_hclkm4), core, &divs);
216 
217 	/* Calculate clock * 2, so shader clock can use it too */
218 	clock = calc_pll(clk, 0x4028, (core << 1), &N, &M, &P1);
219 
220 	if (abs(core - out) <= abs(core - (clock >> 1))) {
221 		clk->csrc = nv_clk_src_hclkm4;
222 		clk->cctrl = divs << 16;
223 	} else {
224 		/* NVCTRL is actually used _after_ NVPOST, and after what we
225 		 * call NVPLL. To make matters worse, NVPOST is an integer
226 		 * divider instead of a right-shift number. */
227 		if(P1 > 2) {
228 			P2 = P1 - 2;
229 			P1 = 2;
230 		}
231 
232 		clk->csrc = nv_clk_src_core;
233 		clk->ccoef = (N << 8) | M;
234 
235 		clk->cctrl = (P2 + 1) << 16;
236 		clk->cpost = (1 << P1) << 16;
237 	}
238 
239 	/* sclk: nvpll + divisor, href or spll */
240 	out = 0;
241 	if (shader == clk->base.read(&clk->base, nv_clk_src_href)) {
242 		clk->ssrc = nv_clk_src_href;
243 	} else {
244 		clock = calc_pll(clk, 0x4020, shader, &N, &M, &P1);
245 		if (clk->csrc == nv_clk_src_core)
246 			out = calc_P((core << 1), shader, &divs);
247 
248 		if (abs(shader - out) <=
249 		    abs(shader - clock) &&
250 		   (divs + P2) <= 7) {
251 			clk->ssrc = nv_clk_src_core;
252 			clk->sctrl = (divs + P2) << 16;
253 		} else {
254 			clk->ssrc = nv_clk_src_shader;
255 			clk->scoef = (N << 8) | M;
256 			clk->sctrl = P1 << 16;
257 		}
258 	}
259 
260 	/* vclk */
261 	out = calc_P(core, vdec, &divs);
262 	clock = calc_P(500000, vdec, &P1);
263 	if(abs(vdec - out) <= abs(vdec - clock)) {
264 		clk->vsrc = nv_clk_src_cclk;
265 		clk->vdiv = divs << 16;
266 	} else {
267 		clk->vsrc = nv_clk_src_vdec;
268 		clk->vdiv = P1 << 16;
269 	}
270 
271 	/* Print strategy! */
272 	nvkm_debug(subdev, "nvpll: %08x %08x %08x\n",
273 		   clk->ccoef, clk->cpost, clk->cctrl);
274 	nvkm_debug(subdev, " spll: %08x %08x %08x\n",
275 		   clk->scoef, clk->spost, clk->sctrl);
276 	nvkm_debug(subdev, " vdiv: %08x\n", clk->vdiv);
277 	if (clk->csrc == nv_clk_src_hclkm4)
278 		nvkm_debug(subdev, "core: hrefm4\n");
279 	else
280 		nvkm_debug(subdev, "core: nvpll\n");
281 
282 	if (clk->ssrc == nv_clk_src_hclkm4)
283 		nvkm_debug(subdev, "shader: hrefm4\n");
284 	else if (clk->ssrc == nv_clk_src_core)
285 		nvkm_debug(subdev, "shader: nvpll\n");
286 	else
287 		nvkm_debug(subdev, "shader: spll\n");
288 
289 	if (clk->vsrc == nv_clk_src_hclkm4)
290 		nvkm_debug(subdev, "vdec: 500MHz\n");
291 	else
292 		nvkm_debug(subdev, "vdec: core\n");
293 
294 	return 0;
295 }
296 
/*
 * mcp77_clk_prog - program the configuration chosen by mcp77_clk_calc().
 *
 * Sequence (order matters): pause engine activity (gt215_clk_pre),
 * switch everything onto safe href-derived clocks, program the
 * PLLs/dividers, wait for PLL lock, then commit the new source
 * selection via register 0x00c054.  Returns 0 or a negative errno.
 */
static int
mcp77_clk_prog(struct nvkm_clk *obj)
{
	struct mcp77_clk *clk = container_of(obj, typeof(*clk), base);
	struct nvkm_subdev *subdev = &clk->base.subdev;
	struct nvkm_device *device = subdev->device;
	u32 pllmask = 0, mast;	/* pllmask: lock bits to poll in 0x004080 */
	unsigned long flags;
	unsigned long *f = &flags;	/* NULLed on -EBUSY before gt215_clk_post() */
	int ret = 0;

	ret = gt215_clk_pre(&clk->base, f);
	if (ret)
		goto out;

	/* First switch to safe clocks: href */
	mast = nvkm_mask(device, 0xc054, 0x03400e70, 0x03400640);
	mast &= ~0x00400e73;
	mast |= 0x03000000;

	switch (clk->csrc) {
	case nv_clk_src_hclkm4:
		/* core from href*4 through a divider, core PLL unused */
		nvkm_mask(device, 0x4028, 0x00070000, clk->cctrl);
		mast |= 0x00000002;
		break;
	case nv_clk_src_core:
		/* program coefficients, enable the PLL, set post-divider,
		 * and remember to wait for its lock bits below */
		nvkm_wr32(device, 0x402c, clk->ccoef);
		nvkm_wr32(device, 0x4028, 0x80000000 | clk->cctrl);
		nvkm_wr32(device, 0x4040, clk->cpost);
		pllmask |= (0x3 << 8);
		mast |= 0x00000003;
		break;
	default:
		nvkm_warn(subdev, "Reclocking failed: unknown core clock\n");
		goto resume;
	}

	switch (clk->ssrc) {
	case nv_clk_src_href:
		nvkm_mask(device, 0x4020, 0x00070000, 0x00000000);
		/* mast |= 0x00000000; */
		break;
	case nv_clk_src_core:
		/* shader derived from the core PLL via its own divider */
		nvkm_mask(device, 0x4020, 0x00070000, clk->sctrl);
		mast |= 0x00000020;
		break;
	case nv_clk_src_shader:
		nvkm_wr32(device, 0x4024, clk->scoef);
		nvkm_wr32(device, 0x4020, 0x80000000 | clk->sctrl);
		nvkm_wr32(device, 0x4070, clk->spost);
		pllmask |= (0x3 << 12);
		mast |= 0x00000030;
		break;
	default:
		nvkm_warn(subdev, "Reclocking failed: unknown sclk clock\n");
		goto resume;
	}

	/* Wait up to 2ms for all enabled PLLs to report lock. */
	if (nvkm_msec(device, 2000,
		u32 tmp = nvkm_rd32(device, 0x004080) & pllmask;
		if (tmp == pllmask)
			break;
	) < 0)
		goto resume;

	switch (clk->vsrc) {
	case nv_clk_src_cclk:
		mast |= 0x00400000;
		/* fallthrough - the divider is written for both sources */
	default:
		nvkm_wr32(device, 0x4600, clk->vdiv);
	}

	/* Commit the new source selection in one write. */
	nvkm_wr32(device, 0xc054, mast);

resume:
	/* Disable some PLLs and dividers when unused */
	if (clk->csrc != nv_clk_src_core) {
		nvkm_wr32(device, 0x4040, 0x00000000);
		nvkm_mask(device, 0x4028, 0x80000000, 0x00000000);
	}

	if (clk->ssrc != nv_clk_src_shader) {
		nvkm_wr32(device, 0x4070, 0x00000000);
		nvkm_mask(device, 0x4020, 0x80000000, 0x00000000);
	}

out:
	if (ret == -EBUSY)
		f = NULL;

	gt215_clk_post(&clk->base, f);
	return ret;
}
390 
/* Post-reclock cleanup hook: nothing to do on this chipset, but the
 * nvkm clk core expects the callback to exist. */
static void
mcp77_clk_tidy(struct nvkm_clk *obj)
{
}
395 
/* Clock domains controllable on this chipset, terminated by
 * nv_clk_src_max; passed to nvkm_clk_create() in mcp77_clk_ctor(). */
static struct nvkm_domain
mcp77_domains[] = {
	{ nv_clk_src_crystal, 0xff },
	{ nv_clk_src_href   , 0xff },
	{ nv_clk_src_core   , 0xff, 0, "core", 1000 },
	{ nv_clk_src_shader , 0xff, 0, "shader", 1000 },
	{ nv_clk_src_vdec   , 0xff, 0, "vdec", 1000 },
	{ nv_clk_src_max }
};
405 
/*
 * mcp77_clk_ctor - construct the MCP77 clock subdev object.
 *
 * Creates the base clock object with the MCP77 domain list and installs
 * the read/calc/prog/tidy implementations.  NOTE: *pobject is assigned
 * before the error check — presumably so the nvkm object core can tear
 * down a partially-constructed object; keep that ordering.
 */
static int
mcp77_clk_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
	       struct nvkm_oclass *oclass, void *data, u32 size,
	       struct nvkm_object **pobject)
{
	struct mcp77_clk *clk;
	int ret;

	ret = nvkm_clk_create(parent, engine, oclass, mcp77_domains,
			      NULL, 0, true, &clk);
	*pobject = nv_object(clk);
	if (ret)
		return ret;

	clk->base.read = mcp77_clk_read;
	clk->base.calc = mcp77_clk_calc;
	clk->base.prog = mcp77_clk_prog;
	clk->base.tidy = mcp77_clk_tidy;
	return 0;
}
426 
/* Object class binding the CLK subdev for chipset 0xaa to the
 * constructor above; dtor/init/fini use the generic nvkm_clk helpers. */
struct nvkm_oclass *
mcp77_clk_oclass = &(struct nvkm_oclass) {
	.handle = NV_SUBDEV(CLK, 0xaa),
	.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = mcp77_clk_ctor,
		.dtor = _nvkm_clk_dtor,
		.init = _nvkm_clk_init,
		.fini = _nvkm_clk_fini,
	},
};
437