/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 *          Roy Spliet
 */
#include "gt215.h"
#include "pll.h"

#include <engine/fifo.h>
#include <subdev/bios.h>
#include <subdev/bios/pll.h>
#include <subdev/timer.h>

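/* Per-instance state: the base clock object, plus per-domain programming
 * info filled in by ->calc and applied by ->prog. */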
struct gt215_clk {
	struct nvkm_clk base;
	struct gt215_clk_info eng[nv_clk_src_max];
};

static u32 read_clk(struct gt215_clk *, int, bool);
static u32 read_pll(struct gt215_clk *, int, u32);

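/* Determine the reference frequency feeding clock source @idx: either the
 * crystal directly, or one of the two 0xe8xx RPLLs, depending on the
 * source-select bits in 0x4120 + 4*idx. */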
static u32
read_vco(struct gt215_clk *clk, int idx)
{
	struct nvkm_device *device = clk->base.subdev.device;
	u32 sctl = nvkm_rd32(device, 0x4120 + (idx * 4));

	switch (sctl & 0x00000030) {
	case 0x00000000:
		return device->crystal;
	case 0x00000020:
		return read_pll(clk, 0x41, 0x00e820);
	case 0x00000030:
		return read_pll(clk, 0x42, 0x00e8a0);
	default:
		return 0;
	}
}

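/* Read back the current frequency (in kHz) of clock source @idx: handles
 * the fixed-reference sources (idx >= 0x40), the 108MHz alternate output
 * and the divided-VCO path; returns 0 for a disabled source unless
 * @ignore_en is set. */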
static u32
read_clk(struct gt215_clk *clk, int idx, bool ignore_en)
{
	struct nvkm_device *device = clk->base.subdev.device;
	u32 sctl, sdiv, sclk;

	/* refclk for the 0xe8xx plls is a fixed frequency */
	if (idx >= 0x40) {
		if (device->chipset == 0xaf) {
			/* no joke.. seriously.. sigh.. */
			return nvkm_rd32(device, 0x00471c) * 1000;
		}

		return device->crystal;
	}

	sctl = nvkm_rd32(device, 0x4120 + (idx * 4));
	if (!ignore_en && !(sctl & 0x00000100))
		return 0;

	/* out_alt */
	if (sctl & 0x00000400)
		return 108000;

	/* vco_out */
	switch (sctl & 0x00003000) {
	case 0x00000000:
		if (!(sctl & 0x00000200))
			return device->crystal;
		return 0;
	case 0x00002000:
		if (sctl & 0x00000040)
			return 108000;
		return 100000;
	case 0x00003000:
		/* vco_enable */
		if (!(sctl & 0x00000001))
			return 0;

		sclk = read_vco(clk, idx);
		sdiv = ((sctl & 0x003f0000) >> 16) + 2;
		return (sclk * 2) / sdiv;
	default:
		return 0;
	}
}

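/* Read back the output frequency of the PLL at register offset @pll, either
 * from its N/M/P coefficients applied to the input clock, or from the
 * bypass source when the PLL is bypassed. */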
static u32
read_pll(struct gt215_clk *clk, int idx, u32 pll)
{
	struct nvkm_device *device = clk->base.subdev.device;
	u32 ctrl = nvkm_rd32(device, pll + 0);
	u32 sclk = 0, P = 1, N = 1, M = 1;

	if (!(ctrl & 0x00000008)) {
		if (ctrl & 0x00000001) {
			u32 coef = nvkm_rd32(device, pll + 4);
			M = (coef & 0x000000ff) >> 0;
			N = (coef & 0x0000ff00) >> 8;
			P = (coef & 0x003f0000) >> 16;

			/* no post-divider on these..
			 * XXX: it looks more like two post-"dividers" that
			 * cross each other out in the default RPLL config */
			if ((pll & 0x00ff00) == 0x00e800)
				P = 1;

			sclk = read_clk(clk, 0x00 + idx, false);
		}
	} else {
		sclk = read_clk(clk, 0x10 + idx, false);
	}

	if (M * P)
		return sclk * N / (M * P);

	return 0;
}

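/* ->read: report the current frequency of a clock domain in kHz. */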
static int
gt215_clk_read(struct nvkm_clk *obj, enum nv_clk_src src)
{
	struct gt215_clk *clk = container_of(obj, typeof(*clk), base);
	struct nvkm_subdev *subdev = &clk->base.subdev;
	struct nvkm_device *device = subdev->device;
	u32 hsrc;

	switch (src) {
	case nv_clk_src_crystal:
		return device->crystal;
	case nv_clk_src_core:
	case nv_clk_src_core_intm:
		return read_pll(clk, 0x00, 0x4200);
	case nv_clk_src_shader:
		return read_pll(clk, 0x01, 0x4220);
	case nv_clk_src_mem:
		return read_pll(clk, 0x02, 0x4000);
	case nv_clk_src_disp:
		return read_clk(clk, 0x20, false);
	case nv_clk_src_vdec:
		return read_clk(clk, 0x21, false);
	case nv_clk_src_daemon:
		return read_clk(clk, 0x25, false);
	case nv_clk_src_host:
		hsrc = (nvkm_rd32(device, 0xc040) & 0x30000000) >> 28;
		switch (hsrc) {
		case 0:
			return read_clk(clk, 0x1d, false);
		case 2:
		case 3:
			return 277000;
		default:
			nvkm_error(subdev, "unknown HOST clock source %d\n", hsrc);
			return -EINVAL;
		}
	default:
		nvkm_error(subdev, "invalid clock source %d\n", src);
		return -EINVAL;
	}

	return 0;
}

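/* Compute a divider-only configuration approximating @khz from clock source
 * @idx.  Fills in info->clk and returns the resulting frequency, or -ERANGE
 * if no suitable divider exists. */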
int
gt215_clk_info(struct nvkm_clk *obj, int idx, u32 khz,
	       struct gt215_clk_info *info)
{
	struct gt215_clk *clk = container_of(obj, typeof(*clk), base);
	u32 oclk, sclk, sdiv;
	s32 diff;

	info->clk = 0;

	switch (khz) {
	case 27000:
		info->clk = 0x00000100;
		return khz;
	case 100000:
		info->clk = 0x00002100;
		return khz;
	case 108000:
		info->clk = 0x00002140;
		return khz;
	default:
		sclk = read_vco(clk, idx);
		sdiv = min((sclk * 2) / khz, (u32)65);
		oclk = (sclk * 2) / sdiv;
		diff = ((khz + 3000) - oclk);

		/* When imprecise, play it safe and aim for a clock lower than
		 * desired rather than higher */
		if (diff < 0) {
			sdiv++;
			oclk = (sclk * 2) / sdiv;
		}

		/* divider can go as low as 2, limited here because NVIDIA
		 * and the VBIOS on my NVA8 seem to prefer using the PLL
		 * for 810MHz - is there a good reason?
		 * XXX: PLLs with refclk 810MHz?  */
		if (sdiv > 4) {
			info->clk = (((sdiv - 2) << 16) | 0x00003100);
			return oclk;
		}

		break;
	}

	return -ERANGE;
}

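/* Compute the full configuration for @khz: use a plain divider when it gets
 * close enough to the target, otherwise fall back to programming the PLL
 * at @pll. */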
int
gt215_pll_info(struct nvkm_clk *clock, int idx, u32 pll, u32 khz,
	       struct gt215_clk_info *info)
{
	struct gt215_clk *clk = (void *)clock;
	struct nvkm_subdev *subdev = &clk->base.subdev;
	struct nvbios_pll limits;
	int P, N, M, diff;
	int ret;

	info->pll = 0;

	/* If a divider alone gets us within [-2, 3) MHz of the target, we'll
	 * disable the PLL and use the divider instead. */
	ret = gt215_clk_info(clock, idx, khz, info);
	diff = khz - ret;
	if (!pll || (diff >= -2000 && diff < 3000)) {
		goto out;
	}

	/* Try with PLL */
	ret = nvbios_pll_parse(subdev->device->bios, pll, &limits);
	if (ret)
		return ret;

	ret = gt215_clk_info(clock, idx - 0x10, limits.refclk, info);
	if (ret != limits.refclk)
		return -EINVAL;

	ret = gt215_pll_calc(subdev, &limits, khz, &N, NULL, &M, &P);
	if (ret >= 0) {
		info->pll = (P << 16) | (N << 8) | M;
	}

out:
	info->fb_delay = max(((khz + 7566) / 15133), (u32) 18);
	return ret ? ret : -ERANGE;
}

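/* Work out the settings for a single domain of @cstate and cache them in
 * clk->eng[] for gt215_clk_prog(). */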
static int
calc_clk(struct gt215_clk *clk, struct nvkm_cstate *cstate,
	 int idx, u32 pll, int dom)
{
	int ret = gt215_pll_info(&clk->base, idx, pll, cstate->domain[dom],
				 &clk->eng[dom]);
	if (ret >= 0)
		return 0;
	return ret;
}

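/* The host clock either runs from the fixed 277MHz source or from clock
 * source 0x1d; pick whichever matches the requested cstate. */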
static int
calc_host(struct gt215_clk *clk, struct nvkm_cstate *cstate)
{
	int ret = 0;
	u32 kHz = cstate->domain[nv_clk_src_host];
	struct gt215_clk_info *info = &clk->eng[nv_clk_src_host];

	if (kHz == 277000) {
		info->clk = 0;
		info->host_out = NVA3_HOST_277;
		return 0;
	}

	info->host_out = NVA3_HOST_CLK;

	ret = gt215_clk_info(&clk->base, 0x1d, kHz, info);
	if (ret >= 0)
		return 0;

	return ret;
}

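/* Quiesce the GPU before reclocking: halt the execution engines, wait for
 * the interrupt handler to finish, and pause the FIFO. */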
int
gt215_clk_pre(struct nvkm_clk *clk, unsigned long *flags)
{
	struct nvkm_device *device = clk->subdev.device;
	struct nvkm_fifo *fifo = device->fifo;

	/* halt and idle execution engines */
	nvkm_mask(device, 0x020060, 0x00070000, 0x00000000);
	nvkm_mask(device, 0x002504, 0x00000001, 0x00000001);
	/* Wait until the interrupt handler is finished */
	if (nvkm_msec(device, 2000,
		if (!nvkm_rd32(device, 0x000100))
			break;
	) < 0)
		return -EBUSY;

	if (fifo)
		fifo->pause(fifo, flags);

	if (nvkm_msec(device, 2000,
		if (nvkm_rd32(device, 0x002504) & 0x00000010)
			break;
	) < 0)
		return -EIO;

	if (nvkm_msec(device, 2000,
		u32 tmp = nvkm_rd32(device, 0x002504) & 0x0000003f;
		if (tmp == 0x0000003f)
			break;
	) < 0)
		return -EIO;

	return 0;
}

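/* Undo gt215_clk_pre(): restart the FIFO (if it was paused) and re-enable
 * the execution engines. */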
void
gt215_clk_post(struct nvkm_clk *clk, unsigned long *flags)
{
	struct nvkm_device *device = clk->subdev.device;
	struct nvkm_fifo *fifo = device->fifo;

	if (fifo && flags)
		fifo->start(fifo, flags);

	nvkm_mask(device, 0x002504, 0x00000001, 0x00000000);
	nvkm_mask(device, 0x020060, 0x00070000, 0x00040000);
}

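/* Clear both the enable and VCO-enable bits of a clock source. */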
static void
disable_clk_src(struct gt215_clk *clk, u32 src)
{
	struct nvkm_device *device = clk->base.subdev.device;
	nvkm_mask(device, src, 0x00000100, 0x00000000);
	nvkm_mask(device, src, 0x00000001, 0x00000000);
}

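/* Program a PLL-backed domain.  If a PLL configuration was computed, switch
 * to a non-PLL clock first, write the new coefficients, wait for lock and
 * switch back; otherwise run the domain from the divided clock source and
 * disable the PLL. */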
static void
prog_pll(struct gt215_clk *clk, int idx, u32 pll, int dom)
{
	struct gt215_clk_info *info = &clk->eng[dom];
	struct nvkm_device *device = clk->base.subdev.device;
	const u32 src0 = 0x004120 + (idx * 4);
	const u32 src1 = 0x004160 + (idx * 4);
	const u32 ctrl = pll + 0;
	const u32 coef = pll + 4;
	u32 bypass;

	if (info->pll) {
		/* Always start from a non-PLL clock */
		bypass = nvkm_rd32(device, ctrl) & 0x00000008;
		if (!bypass) {
			nvkm_mask(device, src1, 0x00000101, 0x00000101);
			nvkm_mask(device, ctrl, 0x00000008, 0x00000008);
			udelay(20);
		}

		nvkm_mask(device, src0, 0x003f3141, 0x00000101 | info->clk);
		nvkm_wr32(device, coef, info->pll);
		nvkm_mask(device, ctrl, 0x00000015, 0x00000015);
		nvkm_mask(device, ctrl, 0x00000010, 0x00000000);
		if (nvkm_msec(device, 2000,
			if (nvkm_rd32(device, ctrl) & 0x00020000)
				break;
		) < 0) {
			nvkm_mask(device, ctrl, 0x00000010, 0x00000010);
			nvkm_mask(device, src0, 0x00000101, 0x00000000);
			return;
		}
		nvkm_mask(device, ctrl, 0x00000010, 0x00000010);
		nvkm_mask(device, ctrl, 0x00000008, 0x00000000);
		disable_clk_src(clk, src1);
	} else {
		nvkm_mask(device, src1, 0x003f3141, 0x00000101 | info->clk);
		nvkm_mask(device, ctrl, 0x00000018, 0x00000018);
		udelay(20);
		nvkm_mask(device, ctrl, 0x00000001, 0x00000000);
		disable_clk_src(clk, src0);
	}
}

static void
prog_clk(struct gt215_clk *clk, int idx, int dom)
{
	struct gt215_clk_info *info = &clk->eng[dom];
	struct nvkm_device *device = clk->base.subdev.device;
	nvkm_mask(device, 0x004120 + (idx * 4), 0x003f3141, 0x00000101 | info->clk);
}

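/* Switch the host clock between the fixed 277MHz source and clock source
 * 0x1d, as decided by calc_host(). */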
static void
prog_host(struct gt215_clk *clk)
{
	struct gt215_clk_info *info = &clk->eng[nv_clk_src_host];
	struct nvkm_device *device = clk->base.subdev.device;
	u32 hsrc = (nvkm_rd32(device, 0xc040));

	switch (info->host_out) {
	case NVA3_HOST_277:
		if ((hsrc & 0x30000000) == 0) {
			nvkm_wr32(device, 0xc040, hsrc | 0x20000000);
			disable_clk_src(clk, 0x4194);
		}
		break;
	case NVA3_HOST_CLK:
		prog_clk(clk, 0x1d, nv_clk_src_host);
		if ((hsrc & 0x30000000) >= 0x20000000) {
			nvkm_wr32(device, 0xc040, hsrc & ~0x30000000);
		}
		break;
	default:
		break;
	}

	/* This seems to be a clock gating factor on idle, always set to 64 */
	nvkm_wr32(device, 0xc044, 0x3e);
}

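/* Reclock the core domain, making sure the framebuffer access delay at
 * 0x10002c never drops below what either the old or the new clock needs
 * during the transition. */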
static void
prog_core(struct gt215_clk *clk, int dom)
{
	struct gt215_clk_info *info = &clk->eng[dom];
	struct nvkm_device *device = clk->base.subdev.device;
	u32 fb_delay = nvkm_rd32(device, 0x10002c);

	if (fb_delay < info->fb_delay)
		nvkm_wr32(device, 0x10002c, info->fb_delay);

	prog_pll(clk, 0x00, 0x004200, dom);

	if (fb_delay > info->fb_delay)
		nvkm_wr32(device, 0x10002c, info->fb_delay);
}

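/* ->calc: work out the settings for every domain of @cstate, including the
 * intermediate core clock used while the core PLL is being reprogrammed. */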
static int
gt215_clk_calc(struct nvkm_clk *obj, struct nvkm_cstate *cstate)
{
	struct gt215_clk *clk = container_of(obj, typeof(*clk), base);
	struct gt215_clk_info *core = &clk->eng[nv_clk_src_core];
	int ret;

	if ((ret = calc_clk(clk, cstate, 0x10, 0x4200, nv_clk_src_core)) ||
	    (ret = calc_clk(clk, cstate, 0x11, 0x4220, nv_clk_src_shader)) ||
	    (ret = calc_clk(clk, cstate, 0x20, 0x0000, nv_clk_src_disp)) ||
	    (ret = calc_clk(clk, cstate, 0x21, 0x0000, nv_clk_src_vdec)) ||
	    (ret = calc_host(clk, cstate)))
		return ret;

	/* XXX: Should be reading the highest bit in the VBIOS clock to decide
	 * whether to use a PLL or not... but using a PLL defeats the purpose */
	if (core->pll) {
		ret = gt215_clk_info(&clk->base, 0x10,
				     cstate->domain[nv_clk_src_core_intm],
				     &clk->eng[nv_clk_src_core_intm]);
		if (ret < 0)
			return ret;
	}

	return 0;
}

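/* ->prog: apply the settings computed by gt215_clk_calc() with the GPU
 * quiesced by gt215_clk_pre()/gt215_clk_post(). */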
static int
gt215_clk_prog(struct nvkm_clk *obj)
{
	struct gt215_clk *clk = container_of(obj, typeof(*clk), base);
	struct gt215_clk_info *core = &clk->eng[nv_clk_src_core];
	int ret = 0;
	unsigned long flags;
	unsigned long *f = &flags;

	ret = gt215_clk_pre(&clk->base, f);
	if (ret)
		goto out;

	if (core->pll)
		prog_core(clk, nv_clk_src_core_intm);

	prog_core(clk, nv_clk_src_core);
	prog_pll(clk, 0x01, 0x004220, nv_clk_src_shader);
	prog_clk(clk, 0x20, nv_clk_src_disp);
	prog_clk(clk, 0x21, nv_clk_src_vdec);
	prog_host(clk);

out:
	if (ret == -EBUSY)
		f = NULL;

	gt215_clk_post(&clk->base, f);
	return ret;
}

static void
gt215_clk_tidy(struct nvkm_clk *obj)
{
}

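/* Clock domains handled by this implementation. */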
static struct nvkm_domain
gt215_domain[] = {
	{ nv_clk_src_crystal  , 0xff },
	{ nv_clk_src_core     , 0x00, 0, "core", 1000 },
	{ nv_clk_src_shader   , 0x01, 0, "shader", 1000 },
	{ nv_clk_src_mem      , 0x02, 0, "memory", 1000 },
	{ nv_clk_src_vdec     , 0x03 },
	{ nv_clk_src_disp     , 0x04 },
	{ nv_clk_src_host     , 0x05 },
	{ nv_clk_src_core_intm, 0x06 },
	{ nv_clk_src_max }
};

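/* Register the clock domains and hook up the gt215 methods. */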
static int
gt215_clk_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
	       struct nvkm_oclass *oclass, void *data, u32 size,
	       struct nvkm_object **pobject)
{
	struct gt215_clk *clk;
	int ret;

	ret = nvkm_clk_create(parent, engine, oclass, gt215_domain,
			      NULL, 0, true, &clk);
	*pobject = nv_object(clk);
	if (ret)
		return ret;

	clk->base.read = gt215_clk_read;
	clk->base.calc = gt215_clk_calc;
	clk->base.prog = gt215_clk_prog;
	clk->base.tidy = gt215_clk_tidy;
	return 0;
}

struct nvkm_oclass
gt215_clk_oclass = {
	.handle = NV_SUBDEV(CLK, 0xa3),
	.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = gt215_clk_ctor,
		.dtor = _nvkm_clk_dtor,
		.init = _nvkm_clk_init,
		.fini = _nvkm_clk_fini,
	},
};