/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "priv.h"

#include <core/client.h>
#include <core/option.h>

#include <nvif/class.h>
#include <nvif/ioctl.h>
#include <nvif/unpack.h>

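/* Count the performance domains currently registered with the PM engine. */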
static u8
nvkm_pm_count_perfdom(struct nvkm_pm *pm)
{
	struct nvkm_perfdom *dom;
	u8 domain_nr = 0;

	list_for_each_entry(dom, &pm->domains, head)
		domain_nr++;
	return domain_nr;
}

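/* Count the named (exposed) signals within a performance domain. */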
static u16
nvkm_perfdom_count_perfsig(struct nvkm_perfdom *dom)
{
	u16 signal_nr = 0;
	int i;

	if (dom) {
		for (i = 0; i < dom->signal_nr; i++) {
			if (dom->signal[i].name)
				signal_nr++;
		}
	}
	return signal_nr;
}

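/* Look up a domain by its index in the PM engine's list of domains. */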
static struct nvkm_perfdom *
nvkm_perfdom_find(struct nvkm_pm *pm, int di)
{
	struct nvkm_perfdom *dom;
	int tmp = 0;

	list_for_each_entry(dom, &pm->domains, head) {
		if (tmp++ == di)
			return dom;
	}
	return NULL;
}

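/* Look up a signal by domain index 'di' and signal index 'si'.  If *pdom
 * is NULL, the domain is resolved first and cached back through *pdom for
 * subsequent lookups. */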
struct nvkm_perfsig *
nvkm_perfsig_find(struct nvkm_pm *pm, u8 di, u8 si, struct nvkm_perfdom **pdom)
{
	struct nvkm_perfdom *dom = *pdom;

	if (dom == NULL) {
		dom = nvkm_perfdom_find(pm, di);
		if (dom == NULL)
			return NULL;
		*pdom = dom;
	}

	if (!dom->signal[si].name)
		return NULL;
	return &dom->signal[si];
}

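/* Count the sources attached to a signal. */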
static u8
nvkm_perfsig_count_perfsrc(struct nvkm_perfsig *sig)
{
	u8 source_nr = 0, i;

	for (i = 0; i < ARRAY_SIZE(sig->source); i++) {
		if (sig->source[i])
			source_nr++;
	}
	return source_nr;
}

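/* Look up a source by its global ID 'si', but only if the given signal
 * actually references that source. */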
static struct nvkm_perfsrc *
nvkm_perfsrc_find(struct nvkm_pm *pm, struct nvkm_perfsig *sig, int si)
{
	struct nvkm_perfsrc *src;
	bool found = false;
	int tmp = 1; /* source IDs start from 1 */
	u8 i;

	for (i = 0; i < ARRAY_SIZE(sig->source) && sig->source[i]; i++) {
		if (sig->source[i] == si) {
			found = true;
			break;
		}
	}

	if (found) {
		list_for_each_entry(src, &pm->sources, head) {
			if (tmp++ == si)
				return src;
		}
	}

	return NULL;
}

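/* Program the source multiplexers for every source referenced by a counter,
 * setting the enable bit where the source requires one. */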
static int
nvkm_perfsrc_enable(struct nvkm_pm *pm, struct nvkm_perfctr *ctr)
{
	struct nvkm_perfdom *dom = NULL;
	struct nvkm_perfsig *sig;
	struct nvkm_perfsrc *src;
	u32 mask, value;
	int i, j;

	for (i = 0; i < 4; i++) {
		for (j = 0; j < 8 && ctr->source[i][j]; j++) {
			sig = nvkm_perfsig_find(pm, ctr->domain,
						ctr->signal[i], &dom);
			if (!sig)
				return -EINVAL;

			src = nvkm_perfsrc_find(pm, sig, ctr->source[i][j]);
			if (!src)
				return -EINVAL;

			/* set enable bit if needed */
			mask = value = 0x00000000;
			if (src->enable)
				mask = value = 0x80000000;
			mask  |= (src->mask << src->shift);
			value |= ((ctr->source[i][j] >> 32) << src->shift);

			/* enable the source */
			nv_mask(pm, src->addr, mask, value);
			nv_debug(pm, "enabled source 0x%08x 0x%08x 0x%08x\n",
				 src->addr, mask, value);
		}
	}
	return 0;
}

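/* Clear the multiplexer field (and enable bit, if any) of every source
 * referenced by a counter. */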
static int
nvkm_perfsrc_disable(struct nvkm_pm *pm, struct nvkm_perfctr *ctr)
{
	struct nvkm_perfdom *dom = NULL;
	struct nvkm_perfsig *sig;
	struct nvkm_perfsrc *src;
	u32 mask;
	int i, j;

	for (i = 0; i < 4; i++) {
		for (j = 0; j < 8 && ctr->source[i][j]; j++) {
			sig = nvkm_perfsig_find(pm, ctr->domain,
						ctr->signal[i], &dom);
			if (!sig)
				return -EINVAL;

			src = nvkm_perfsrc_find(pm, sig, ctr->source[i][j]);
			if (!src)
				return -EINVAL;

			/* unset enable bit if needed */
			mask = 0x00000000;
			if (src->enable)
				mask = 0x80000000;
			mask |= (src->mask << src->shift);

			/* disable the source */
			nv_mask(pm, src->addr, mask, 0);
			nv_debug(pm, "disabled source 0x%08x 0x%08x\n",
				 src->addr, mask);
		}
	}
	return 0;
}

/*******************************************************************************
 * Perfdom object classes
 ******************************************************************************/
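/* Program each configured counter of the domain, enable its sources, and
 * start the first batch of sampling. */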
static int
nvkm_perfdom_init(struct nvkm_object *object, void *data, u32 size)
{
	union {
		struct nvif_perfdom_init none;
	} *args = data;
	struct nvkm_pm *pm = (void *)object->engine;
	struct nvkm_perfdom *dom = (void *)object;
	int ret, i;

	nv_ioctl(object, "perfdom init size %d\n", size);
	if (nvif_unvers(args->none)) {
		nv_ioctl(object, "perfdom init\n");
	} else
		return ret;

	for (i = 0; i < 4; i++) {
		if (dom->ctr[i]) {
			dom->func->init(pm, dom, dom->ctr[i]);

			/* enable sources */
			nvkm_perfsrc_enable(pm, dom->ctr[i]);
		}
	}

	/* start next batch of counters for sampling */
	dom->func->next(pm, dom);
	return 0;
}

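/* Latch the current batch of counters on every domain and begin collecting
 * the next one. */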
static int
nvkm_perfdom_sample(struct nvkm_object *object, void *data, u32 size)
{
	union {
		struct nvif_perfdom_sample none;
	} *args = data;
	struct nvkm_pm *pm = (void *)object->engine;
	struct nvkm_perfdom *dom;
	int ret;

	nv_ioctl(object, "perfdom sample size %d\n", size);
	if (nvif_unvers(args->none)) {
		nv_ioctl(object, "perfdom sample\n");
	} else
		return ret;
	pm->sequence++;

	/* sample previous batch of counters */
	list_for_each_entry(dom, &pm->domains, head)
		dom->func->next(pm, dom);

	return 0;
}

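/* Read back the latched counter values and the domain clock; -EAGAIN is
 * returned until a sample is available. */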
static int
nvkm_perfdom_read(struct nvkm_object *object, void *data, u32 size)
{
	union {
		struct nvif_perfdom_read_v0 v0;
	} *args = data;
	struct nvkm_pm *pm = (void *)object->engine;
	struct nvkm_perfdom *dom = (void *)object;
	int ret, i;

	nv_ioctl(object, "perfdom read size %d\n", size);
	if (nvif_unpack(args->v0, 0, 0, false)) {
		nv_ioctl(object, "perfdom read vers %d\n", args->v0.version);
	} else
		return ret;

	for (i = 0; i < 4; i++) {
		if (dom->ctr[i])
			dom->func->read(pm, dom, dom->ctr[i]);
	}

	if (!dom->clk)
		return -EAGAIN;

	for (i = 0; i < 4; i++)
		if (dom->ctr[i])
			args->v0.ctr[i] = dom->ctr[i]->ctr;
	args->v0.clk = dom->clk;
	return 0;
}

static int
nvkm_perfdom_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
{
	switch (mthd) {
	case NVIF_PERFDOM_V0_INIT:
		return nvkm_perfdom_init(object, data, size);
	case NVIF_PERFDOM_V0_SAMPLE:
		return nvkm_perfdom_sample(object, data, size);
	case NVIF_PERFDOM_V0_READ:
		return nvkm_perfdom_read(object, data, size);
	default:
		break;
	}
	return -EINVAL;
}

static void
nvkm_perfdom_dtor(struct nvkm_object *object)
{
	struct nvkm_pm *pm = (void *)object->engine;
	struct nvkm_perfdom *dom = (void *)object;
	int i;

	for (i = 0; i < 4; i++) {
		struct nvkm_perfctr *ctr = dom->ctr[i];
		if (ctr) {
			nvkm_perfsrc_disable(pm, ctr);
			if (ctr->head.next)
				list_del(&ctr->head);
		}
		kfree(ctr);
	}
	nvkm_object_destroy(&dom->base);
}

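/* Allocate a counter, bind it to up to four signals (and their sources)
 * within the domain, and queue it on the domain's counter list. */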
static int
nvkm_perfctr_new(struct nvkm_perfdom *dom, int slot, u8 domain,
		 struct nvkm_perfsig *signal[4], u64 source[4][8],
		 u16 logic_op, struct nvkm_perfctr **pctr)
{
	struct nvkm_perfctr *ctr;
	int i, j;

	if (!dom)
		return -EINVAL;

	ctr = *pctr = kzalloc(sizeof(*ctr), GFP_KERNEL);
	if (!ctr)
		return -ENOMEM;

	ctr->domain   = domain;
	ctr->logic_op = logic_op;
	ctr->slot     = slot;
	for (i = 0; i < 4; i++) {
		if (signal[i]) {
			ctr->signal[i] = signal[i] - dom->signal;
			for (j = 0; j < 8; j++)
				ctr->source[i][j] = source[i][j];
		}
	}
	list_add_tail(&ctr->head, &dom->list);

	return 0;
}

static int
nvkm_perfdom_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
		  struct nvkm_oclass *oclass, void *data, u32 size,
		  struct nvkm_object **pobject)
{
	union {
		struct nvif_perfdom_v0 v0;
	} *args = data;
	struct nvkm_pm *pm = (void *)engine;
	struct nvkm_perfdom *sdom = NULL;
	struct nvkm_perfctr *ctr[4] = {};
	struct nvkm_perfdom *dom;
	int c, s, m;
	int ret;

	nv_ioctl(parent, "create perfdom size %d\n", size);
	if (nvif_unpack(args->v0, 0, 0, false)) {
		nv_ioctl(parent, "create perfdom vers %d dom %d mode %02x\n",
			 args->v0.version, args->v0.domain, args->v0.mode);
	} else
		return ret;

	for (c = 0; c < ARRAY_SIZE(args->v0.ctr); c++) {
		struct nvkm_perfsig *sig[4] = {};
		u64 src[4][8] = {};

		for (s = 0; s < ARRAY_SIZE(args->v0.ctr[c].signal); s++) {
			sig[s] = nvkm_perfsig_find(pm, args->v0.domain,
						   args->v0.ctr[c].signal[s],
						   &sdom);
			if (args->v0.ctr[c].signal[s] && !sig[s])
				return -EINVAL;

			for (m = 0; m < 8; m++) {
				src[s][m] = args->v0.ctr[c].source[s][m];
				if (src[s][m] && !nvkm_perfsrc_find(pm, sig[s],
								    src[s][m]))
					return -EINVAL;
			}
		}

		ret = nvkm_perfctr_new(sdom, c, args->v0.domain, sig, src,
				       args->v0.ctr[c].logic_op, &ctr[c]);
		if (ret)
			return ret;
	}

	if (!sdom)
		return -EINVAL;

	ret = nvkm_object_create(parent, engine, oclass, 0, &dom);
	*pobject = nv_object(dom);
	if (ret)
		return ret;

	dom->func = sdom->func;
	dom->addr = sdom->addr;
	dom->mode = args->v0.mode;
	for (c = 0; c < ARRAY_SIZE(ctr); c++)
		dom->ctr[c] = ctr[c];
	return 0;
}

static struct nvkm_ofuncs
nvkm_perfdom_ofuncs = {
	.ctor = nvkm_perfdom_ctor,
	.dtor = nvkm_perfdom_dtor,
	.init = nvkm_object_init,
	.fini = nvkm_object_fini,
	.mthd = nvkm_perfdom_mthd,
};

/*******************************************************************************
 * Perfmon object classes
 ******************************************************************************/
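/* The query methods below are iterators: 'iter' encodes (index + 1), with 0
 * requesting the first entry; an 'iter' of 0xff (0xffff for signals) marks
 * the end of the enumeration. */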
static int
nvkm_perfmon_mthd_query_domain(struct nvkm_object *object, void *data, u32 size)
{
	union {
		struct nvif_perfmon_query_domain_v0 v0;
	} *args = data;
	struct nvkm_pm *pm = (void *)object->engine;
	struct nvkm_perfdom *dom;
	u8 domain_nr;
	int di, ret;

	nv_ioctl(object, "perfmon query domain size %d\n", size);
	if (nvif_unpack(args->v0, 0, 0, false)) {
		nv_ioctl(object, "perfmon domain vers %d iter %02x\n",
			 args->v0.version, args->v0.iter);
		di = (args->v0.iter & 0xff) - 1;
	} else
		return ret;

	domain_nr = nvkm_pm_count_perfdom(pm);
	if (di >= (int)domain_nr)
		return -EINVAL;

	if (di >= 0) {
		dom = nvkm_perfdom_find(pm, di);
		if (dom == NULL)
			return -EINVAL;

		args->v0.id         = di;
		args->v0.signal_nr  = nvkm_perfdom_count_perfsig(dom);
		strncpy(args->v0.name, dom->name, sizeof(args->v0.name));

		/* Currently only global counters (PCOUNTER) are implemented
		 * but this will be different for local counters (MP). */
		args->v0.counter_nr = 4;
	}

	if (++di < domain_nr) {
		args->v0.iter = ++di;
		return 0;
	}

	args->v0.iter = 0xff;
	return 0;
}

static int
nvkm_perfmon_mthd_query_signal(struct nvkm_object *object, void *data, u32 size)
{
	union {
		struct nvif_perfmon_query_signal_v0 v0;
	} *args = data;
	struct nvkm_device *device = nv_device(object);
	struct nvkm_pm *pm = (void *)object->engine;
	struct nvkm_perfdom *dom;
	struct nvkm_perfsig *sig;
	const bool all = nvkm_boolopt(device->cfgopt, "NvPmShowAll", false);
	const bool raw = nvkm_boolopt(device->cfgopt, "NvPmUnnamed", all);
	int ret, si;

	nv_ioctl(object, "perfmon query signal size %d\n", size);
	if (nvif_unpack(args->v0, 0, 0, false)) {
		nv_ioctl(object,
			 "perfmon query signal vers %d dom %d iter %04x\n",
			 args->v0.version, args->v0.domain, args->v0.iter);
		si = (args->v0.iter & 0xffff) - 1;
	} else
		return ret;

	dom = nvkm_perfdom_find(pm, args->v0.domain);
	if (dom == NULL || si >= (int)dom->signal_nr)
		return -EINVAL;

	if (si >= 0) {
		sig = &dom->signal[si];
		if (raw || !sig->name) {
			snprintf(args->v0.name, sizeof(args->v0.name),
				 "/%s/%02x", dom->name, si);
		} else {
			strncpy(args->v0.name, sig->name,
				sizeof(args->v0.name));
		}

		args->v0.signal = si;
		args->v0.source_nr = nvkm_perfsig_count_perfsrc(sig);
	}

	while (++si < dom->signal_nr) {
		if (all || dom->signal[si].name) {
			args->v0.iter = ++si;
			return 0;
		}
	}

	args->v0.iter = 0xffff;
	return 0;
}

static int
nvkm_perfmon_mthd_query_source(struct nvkm_object *object, void *data, u32 size)
{
	union {
		struct nvif_perfmon_query_source_v0 v0;
	} *args = data;
	struct nvkm_pm *pm = (void *)object->engine;
	struct nvkm_perfdom *dom = NULL;
	struct nvkm_perfsig *sig;
	struct nvkm_perfsrc *src;
	u8 source_nr = 0;
	int si, ret;

	nv_ioctl(object, "perfmon query source size %d\n", size);
	if (nvif_unpack(args->v0, 0, 0, false)) {
		nv_ioctl(object,
			 "perfmon source vers %d dom %d sig %02x iter %02x\n",
			 args->v0.version, args->v0.domain, args->v0.signal,
			 args->v0.iter);
		si = (args->v0.iter & 0xff) - 1;
	} else
		return ret;

	sig = nvkm_perfsig_find(pm, args->v0.domain, args->v0.signal, &dom);
	if (!sig)
		return -EINVAL;

	source_nr = nvkm_perfsig_count_perfsrc(sig);
	if (si >= (int)source_nr)
		return -EINVAL;

	if (si >= 0) {
		src = nvkm_perfsrc_find(pm, sig, sig->source[si]);
		if (!src)
			return -EINVAL;

		args->v0.source = sig->source[si];
		args->v0.mask   = src->mask;
		strncpy(args->v0.name, src->name, sizeof(args->v0.name));
	}

	if (++si < source_nr) {
		args->v0.iter = ++si;
		return 0;
	}

	args->v0.iter = 0xff;
	return 0;
}

static int
nvkm_perfmon_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
{
	switch (mthd) {
	case NVIF_PERFMON_V0_QUERY_DOMAIN:
		return nvkm_perfmon_mthd_query_domain(object, data, size);
	case NVIF_PERFMON_V0_QUERY_SIGNAL:
		return nvkm_perfmon_mthd_query_signal(object, data, size);
	case NVIF_PERFMON_V0_QUERY_SOURCE:
		return nvkm_perfmon_mthd_query_source(object, data, size);
	default:
		break;
	}
	return -EINVAL;
}

static struct nvkm_oclass
nvkm_perfmon_sclass[] = {
	{ .handle = NVIF_IOCTL_NEW_V0_PERFDOM,
	  .ofuncs = &nvkm_perfdom_ofuncs,
	},
	{}
};

static int
nvkm_perfmon_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
		  struct nvkm_oclass *oclass, void *data, u32 size,
		  struct nvkm_object **pobject)
{
	struct nvkm_parent *perfmon;
	int ret = nvkm_parent_create(parent, engine, oclass, 0,
				     nvkm_perfmon_sclass, 0, &perfmon);
	*pobject = perfmon ? &perfmon->object : NULL;
	return ret;
}

static struct nvkm_ofuncs
nvkm_perfmon_ofuncs = {
	.ctor = nvkm_perfmon_ctor,
	.dtor = _nvkm_parent_dtor,
	.init = _nvkm_parent_init,
	.fini = _nvkm_parent_fini,
	.mthd = nvkm_perfmon_mthd,
};

struct nvkm_oclass
nvkm_pm_sclass[] = {
	{
	  .handle = NVIF_IOCTL_NEW_V0_PERFMON,
	  .ofuncs = &nvkm_perfmon_ofuncs,
	},
	{},
};

/*******************************************************************************
 * PPM context
 ******************************************************************************/
static void
nvkm_perfctx_dtor(struct nvkm_object *object)
{
	struct nvkm_pm *pm = (void *)object->engine;
	struct nvkm_perfctx *ctx = (void *)object;

	mutex_lock(&nv_subdev(pm)->mutex);
	nvkm_engctx_destroy(&ctx->base);
	if (pm->context == ctx)
		pm->context = NULL;
	mutex_unlock(&nv_subdev(pm)->mutex);
}

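/* Only a single client context may own the PM engine at a time; creating a
 * second context fails with -EBUSY. */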
static int
nvkm_perfctx_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
		  struct nvkm_oclass *oclass, void *data, u32 size,
		  struct nvkm_object **pobject)
{
	struct nvkm_pm *pm = (void *)engine;
	struct nvkm_perfctx *ctx;
	int ret;

	/* no context needed for perfdom objects... */
	if (nv_mclass(parent) != NV_DEVICE) {
		atomic_inc(&parent->refcount);
		*pobject = parent;
		return 1;
	}

	ret = nvkm_engctx_create(parent, engine, oclass, NULL, 0, 0, 0, &ctx);
	*pobject = nv_object(ctx);
	if (ret)
		return ret;

	mutex_lock(&nv_subdev(pm)->mutex);
	if (pm->context == NULL)
		pm->context = ctx;
	if (ctx != pm->context)
		ret = -EBUSY;
	mutex_unlock(&nv_subdev(pm)->mutex);

	return ret;
}

struct nvkm_oclass
nvkm_pm_cclass = {
	.handle = NV_ENGCTX(PM, 0x00),
	.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = nvkm_perfctx_ctor,
		.dtor = nvkm_perfctx_dtor,
		.init = _nvkm_engctx_init,
		.fini = _nvkm_engctx_fini,
	},
};

/*******************************************************************************
 * PPM engine/subdev functions
 ******************************************************************************/
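/* Register the sources (signal multiplexers) described by 'spec' for a
 * signal, creating global nvkm_perfsrc entries as needed and storing their
 * IDs (starting at 1) in the signal. */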
int
nvkm_perfsrc_new(struct nvkm_pm *pm, struct nvkm_perfsig *sig,
		 const struct nvkm_specsrc *spec)
{
	const struct nvkm_specsrc *ssrc;
	const struct nvkm_specmux *smux;
	struct nvkm_perfsrc *src;
	u8 source_nr = 0;

	if (!spec) {
		/* No sources are defined for this signal. */
		return 0;
	}

	ssrc = spec;
	while (ssrc->name) {
		smux = ssrc->mux;
		while (smux->name) {
			bool found = false;
			u8 source_id = 0;
			u32 len;

			list_for_each_entry(src, &pm->sources, head) {
				if (src->addr == ssrc->addr &&
				    src->shift == smux->shift) {
					found = true;
					break;
				}
				source_id++;
			}

			if (!found) {
				src = kzalloc(sizeof(*src), GFP_KERNEL);
				if (!src)
					return -ENOMEM;

				src->addr   = ssrc->addr;
				src->mask   = smux->mask;
				src->shift  = smux->shift;
				src->enable = smux->enable;

				len = strlen(ssrc->name) +
				      strlen(smux->name) + 2;
				src->name = kzalloc(len, GFP_KERNEL);
				if (!src->name) {
					kfree(src);
					return -ENOMEM;
				}
				snprintf(src->name, len, "%s_%s", ssrc->name,
					 smux->name);

				list_add_tail(&src->head, &pm->sources);
			}

			/* don't overflow the fixed-size per-signal source table */
			if (source_nr >= ARRAY_SIZE(sig->source))
				return -EINVAL;

			sig->source[source_nr++] = source_id + 1;
			smux++;
		}
		ssrc++;
	}

	return 0;
}

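/* Instantiate performance domains from a specdom table.  When 'mask' is
 * non-zero, one set of domains is created per enabled unit, with units
 * spaced 'size_unit' apart and domains within a unit spaced 'size_domain'
 * apart. */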
int
nvkm_perfdom_new(struct nvkm_pm *pm, const char *name, u32 mask,
		 u32 base, u32 size_unit, u32 size_domain,
		 const struct nvkm_specdom *spec)
{
	const struct nvkm_specdom *sdom;
	const struct nvkm_specsig *ssig;
	struct nvkm_perfdom *dom;
	int ret, i;

	for (i = 0; i == 0 || mask; i++) {
		u32 addr = base + (i * size_unit);
		if (i && !(mask & (1 << i)))
			continue;

		sdom = spec;
		while (sdom->signal_nr) {
			dom = kzalloc(sizeof(*dom) + sdom->signal_nr *
				      sizeof(*dom->signal), GFP_KERNEL);
			if (!dom)
				return -ENOMEM;

			if (mask) {
				snprintf(dom->name, sizeof(dom->name),
					 "%s/%02x/%02x", name, i,
					 (int)(sdom - spec));
			} else {
				snprintf(dom->name, sizeof(dom->name),
					 "%s/%02x", name, (int)(sdom - spec));
			}

			list_add_tail(&dom->head, &pm->domains);
			INIT_LIST_HEAD(&dom->list);
			dom->func = sdom->func;
			dom->addr = addr;
			dom->signal_nr = sdom->signal_nr;

			ssig = (sdom++)->signal;
			while (ssig->name) {
				struct nvkm_perfsig *sig =
					&dom->signal[ssig->signal];
				sig->name = ssig->name;
				ret = nvkm_perfsrc_new(pm, sig, ssig->source);
				if (ret)
					return ret;
				ssig++;
			}

			addr += size_domain;
		}

		mask &= ~(1 << i);
	}

	return 0;
}

int
_nvkm_pm_fini(struct nvkm_object *object, bool suspend)
{
	struct nvkm_pm *pm = (void *)object;
	return nvkm_engine_fini(&pm->engine, suspend);
}

int
_nvkm_pm_init(struct nvkm_object *object)
{
	struct nvkm_pm *pm = (void *)object;
	return nvkm_engine_init(&pm->engine);
}

void
_nvkm_pm_dtor(struct nvkm_object *object)
{
	struct nvkm_pm *pm = (void *)object;
	struct nvkm_perfdom *dom, *next_dom;
	struct nvkm_perfsrc *src, *next_src;

	list_for_each_entry_safe(dom, next_dom, &pm->domains, head) {
		list_del(&dom->head);
		kfree(dom);
	}

	list_for_each_entry_safe(src, next_src, &pm->sources, head) {
		list_del(&src->head);
		kfree(src->name);
		kfree(src);
	}

	nvkm_engine_destroy(&pm->engine);
}

int
nvkm_pm_create_(struct nvkm_object *parent, struct nvkm_object *engine,
		struct nvkm_oclass *oclass, int length, void **pobject)
{
	struct nvkm_pm *pm;
	int ret;

	ret = nvkm_engine_create_(parent, engine, oclass, true, "PPM",
				  "pm", length, pobject);
	pm = *pobject;
	if (ret)
		return ret;

	INIT_LIST_HEAD(&pm->domains);
	INIT_LIST_HEAD(&pm->sources);
	return 0;
}