// SPDX-License-Identifier: GPL-2.0
/*
 * Intel(R) Trace Hub Global Trace Hub
 *
 * Copyright (C) 2014-2015 Intel Corporation.
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/bitmap.h>
#include <linux/pm_runtime.h>

#include "intel_th.h"
#include "gth.h"

struct gth_device;

/**
 * struct gth_output - GTH view on an output port
 * @gth:	backlink to the GTH device
 * @output:	link to output device's output descriptor
 * @index:	output port number
 * @port_type:	one of GTH_* port type values
 * @master:	bitmap of masters configured for this output
 */
struct gth_output {
	struct gth_device	*gth;
	struct intel_th_output	*output;
	unsigned int		index;
	unsigned int		port_type;
	DECLARE_BITMAP(master, TH_CONFIGURABLE_MASTERS + 1);
};

/**
 * struct gth_device - GTH device
 * @dev:	driver core's device
 * @base:	register window base address
 * @output_group:	attributes describing output ports
 * @master_group:	attributes describing master assignments
 * @output:		output ports
 * @master:		master/output port assignments
 * @gth_lock:		serializes accesses to GTH bits
 */
struct gth_device {
	struct device		*dev;
	void __iomem		*base;

	struct attribute_group	output_group;
	struct attribute_group	master_group;
	struct gth_output	output[TH_POSSIBLE_OUTPUTS];
	signed char		master[TH_CONFIGURABLE_MASTERS + 1];
	spinlock_t		gth_lock;
};

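/*
 * Per-port output configuration lives in GTHOPT0/GTHOPT1: each register
 * packs four 8-bit per-port fields (ports 0-3 in GTHOPT0, ports 4-7 in
 * GTHOPT1), which the two helpers below read and update.
 */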
static void gth_output_set(struct gth_device *gth, int port,
			   unsigned int config)
{
	unsigned long reg = port & 4 ? REG_GTH_GTHOPT1 : REG_GTH_GTHOPT0;
	u32 val;
	int shift = (port & 3) * 8;

	val = ioread32(gth->base + reg);
	val &= ~(0xff << shift);
	val |= config << shift;
	iowrite32(val, gth->base + reg);
}

static unsigned int gth_output_get(struct gth_device *gth, int port)
{
	unsigned long reg = port & 4 ? REG_GTH_GTHOPT1 : REG_GTH_GTHOPT0;
	u32 val;
	int shift = (port & 3) * 8;

	val = ioread32(gth->base + reg);
	val &= 0xff << shift;
	val >>= shift;

	return val;
}

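/*
 * The SMC frequency is a 16-bit per-port field, two ports per SMCR
 * register; the register offset and shift below follow from that layout.
 */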
static void gth_smcfreq_set(struct gth_device *gth, int port,
			    unsigned int freq)
{
	unsigned long reg = REG_GTH_SMCR0 + ((port / 2) * 4);
	int shift = (port & 1) * 16;
	u32 val;

	val = ioread32(gth->base + reg);
	val &= ~(0xffff << shift);
	val |= freq << shift;
	iowrite32(val, gth->base + reg);
}

static unsigned int gth_smcfreq_get(struct gth_device *gth, int port)
{
	unsigned long reg = REG_GTH_SMCR0 + ((port / 2) * 4);
	int shift = (port & 1) * 16;
	u32 val;

	val = ioread32(gth->base + reg);
	val &= 0xffff << shift;
	val >>= shift;

	return val;
}

/*
 * "masters" attribute group
 */

struct master_attribute {
	struct device_attribute	attr;
	struct gth_device	*gth;
	unsigned int		master;
};

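/*
 * Each SWDEST register packs eight 4-bit destination fields, one per
 * master: bit 3 enables routing and bits 2:0 select the output port.
 * Masters beyond the individually configurable range all share the single
 * GSWTDEST field.  Passing port < 0 clears the field, i.e. disables the
 * master.
 */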
static void
gth_master_set(struct gth_device *gth, unsigned int master, int port)
{
	unsigned int reg = REG_GTH_SWDEST0 + ((master >> 1) & ~3u);
	unsigned int shift = (master & 0x7) * 4;
	u32 val;

	if (master >= 256) {
		reg = REG_GTH_GSWTDEST;
		shift = 0;
	}

	val = ioread32(gth->base + reg);
	val &= ~(0xf << shift);
	if (port >= 0)
		val |= (0x8 | port) << shift;
	iowrite32(val, gth->base + reg);
}

static ssize_t master_attr_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct master_attribute *ma =
		container_of(attr, struct master_attribute, attr);
	struct gth_device *gth = ma->gth;
	size_t count;
	int port;

	spin_lock(&gth->gth_lock);
	port = gth->master[ma->master];
	spin_unlock(&gth->gth_lock);

	if (port >= 0)
		count = snprintf(buf, PAGE_SIZE, "%x\n", port);
	else
		count = snprintf(buf, PAGE_SIZE, "disabled\n");

	return count;
}

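/*
 * Writing a decimal port number connects the master to that output port;
 * writing -1 disconnects it.  If the affected port is currently active,
 * the new routing is programmed into the hardware right away.
 */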
static ssize_t master_attr_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct master_attribute *ma =
		container_of(attr, struct master_attribute, attr);
	struct gth_device *gth = ma->gth;
	int old_port, port;

	if (kstrtoint(buf, 10, &port) < 0)
		return -EINVAL;

	if (port >= TH_POSSIBLE_OUTPUTS || port < -1)
		return -EINVAL;

	spin_lock(&gth->gth_lock);

	/* disconnect from the previous output port, if any */
	old_port = gth->master[ma->master];
	if (old_port >= 0) {
		gth->master[ma->master] = -1;
		clear_bit(ma->master, gth->output[old_port].master);

		/*
		 * if the port is active, program this setting,
		 * implies that runtime PM is on
		 */
		if (gth->output[old_port].output->active)
			gth_master_set(gth, ma->master, -1);
	}

	/* connect to the new output port, if any */
	if (port >= 0) {
		/* check if there's a driver for this port */
		if (!gth->output[port].output) {
			count = -ENODEV;
			goto unlock;
		}

		set_bit(ma->master, gth->output[port].master);

		/* if the port is active, program this setting, see above */
		if (gth->output[port].output->active)
			gth_master_set(gth, ma->master, port);
	}

	gth->master[ma->master] = port;

unlock:
	spin_unlock(&gth->gth_lock);

	return count;
}

struct output_attribute {
	struct device_attribute attr;
	struct gth_device	*gth;
	unsigned int		port;
	unsigned int		parm;
};

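/*
 * Table of per-port parameters exposed via the "outputs" sysfs group: each
 * entry names the attribute, points at the accessor pair that reads/writes
 * the underlying register, gives the bitmask that the parameter occupies
 * within the per-port field, and says whether it is readable/writable.
 */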
#define OUTPUT_PARM(_name, _mask, _r, _w, _what)			\
	[TH_OUTPUT_PARM(_name)] = { .name = __stringify(_name),	\
				    .get = gth_ ## _what ## _get,	\
				    .set = gth_ ## _what ## _set,	\
				    .mask = (_mask),			\
				    .readable = (_r),			\
				    .writable = (_w) }

static const struct output_parm {
	const char	*name;
	unsigned int	(*get)(struct gth_device *gth, int port);
	void		(*set)(struct gth_device *gth, int port,
			       unsigned int val);
	unsigned int	mask;
	unsigned int	readable : 1,
			writable : 1;
} output_parms[] = {
	OUTPUT_PARM(port,	0x7,	1, 0, output),
	OUTPUT_PARM(null,	BIT(3),	1, 1, output),
	OUTPUT_PARM(drop,	BIT(4),	1, 1, output),
	OUTPUT_PARM(reset,	BIT(5),	1, 0, output),
	OUTPUT_PARM(flush,	BIT(7),	0, 1, output),
	OUTPUT_PARM(smcfreq,	0xffff,	1, 1, smcfreq),
};

static void
gth_output_parm_set(struct gth_device *gth, int port, unsigned int parm,
		    unsigned int val)
{
	unsigned int config = output_parms[parm].get(gth, port);
	unsigned int mask = output_parms[parm].mask;
	unsigned int shift = __ffs(mask);

	config &= ~mask;
	config |= (val << shift) & mask;
	output_parms[parm].set(gth, port, config);
}

static unsigned int
gth_output_parm_get(struct gth_device *gth, int port, unsigned int parm)
{
	unsigned int config = output_parms[parm].get(gth, port);
	unsigned int mask = output_parms[parm].mask;
	unsigned int shift = __ffs(mask);

	config &= mask;
	config >>= shift;
	return config;
}

/*
 * Reset outputs and sources
 */
static int intel_th_gth_reset(struct gth_device *gth)
{
	u32 reg;
	int port, i;

	reg = ioread32(gth->base + REG_GTH_SCRPD0);
	if (reg & SCRPD_DEBUGGER_IN_USE)
		return -EBUSY;

	/* Always save/restore STH and TU registers in S0ix entry/exit */
	reg |= SCRPD_STH_IS_ENABLED | SCRPD_TRIGGER_IS_ENABLED;
	iowrite32(reg, gth->base + REG_GTH_SCRPD0);

	/* output ports */
	for (port = 0; port < 8; port++) {
		if (gth_output_parm_get(gth, port, TH_OUTPUT_PARM(port)) ==
		    GTH_NONE)
			continue;

		gth_output_set(gth, port, 0);
		gth_smcfreq_set(gth, port, 16);
	}
	/* disable overrides */
	iowrite32(0, gth->base + REG_GTH_DESTOVR);

	/* masters swdest_0~31 and gswdest */
	for (i = 0; i < 33; i++)
		iowrite32(0, gth->base + REG_GTH_SWDEST0 + i * 4);

	/* sources */
	iowrite32(0, gth->base + REG_GTH_SCR);
	iowrite32(0xfc, gth->base + REG_GTH_SCR2);

	/* setup CTS for single trigger */
	iowrite32(CTS_EVENT_ENABLE_IF_ANYTHING, gth->base + REG_CTS_C0S0_EN);
	iowrite32(CTS_ACTION_CONTROL_SET_STATE(CTS_STATE_IDLE) |
		  CTS_ACTION_CONTROL_TRIGGER, gth->base + REG_CTS_C0S0_ACT);

	return 0;
}

/*
 * "outputs" attribute group
 */

static ssize_t output_attr_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct output_attribute *oa =
		container_of(attr, struct output_attribute, attr);
	struct gth_device *gth = oa->gth;
	size_t count;

	pm_runtime_get_sync(dev);

	spin_lock(&gth->gth_lock);
	count = snprintf(buf, PAGE_SIZE, "%x\n",
			 gth_output_parm_get(gth, oa->port, oa->parm));
	spin_unlock(&gth->gth_lock);

	pm_runtime_put(dev);

	return count;
}

static ssize_t output_attr_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct output_attribute *oa =
		container_of(attr, struct output_attribute, attr);
	struct gth_device *gth = oa->gth;
	unsigned int config;

	if (kstrtouint(buf, 16, &config) < 0)
		return -EINVAL;

	pm_runtime_get_sync(dev);

	spin_lock(&gth->gth_lock);
	gth_output_parm_set(gth, oa->port, oa->parm, config);
	spin_unlock(&gth->gth_lock);

	pm_runtime_put(dev);

	return count;
}

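/*
 * Build the "masters" sysfs group: one attribute per configurable master
 * plus a trailing "<N>+" entry that stands for all higher master numbers.
 */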
static int intel_th_master_attributes(struct gth_device *gth)
{
	struct master_attribute *master_attrs;
	struct attribute **attrs;
	int i, nattrs = TH_CONFIGURABLE_MASTERS + 2;

	attrs = devm_kcalloc(gth->dev, nattrs, sizeof(void *), GFP_KERNEL);
	if (!attrs)
		return -ENOMEM;

	master_attrs = devm_kcalloc(gth->dev, nattrs,
				    sizeof(struct master_attribute),
				    GFP_KERNEL);
	if (!master_attrs)
		return -ENOMEM;

	for (i = 0; i < TH_CONFIGURABLE_MASTERS + 1; i++) {
		char *name;

		name = devm_kasprintf(gth->dev, GFP_KERNEL, "%d%s", i,
				      i == TH_CONFIGURABLE_MASTERS ? "+" : "");
		if (!name)
			return -ENOMEM;

		master_attrs[i].attr.attr.name = name;
		master_attrs[i].attr.attr.mode = S_IRUGO | S_IWUSR;
		master_attrs[i].attr.show = master_attr_show;
		master_attrs[i].attr.store = master_attr_store;

		sysfs_attr_init(&master_attrs[i].attr.attr);
		attrs[i] = &master_attrs[i].attr.attr;

		master_attrs[i].gth = gth;
		master_attrs[i].master = i;
	}

	gth->master_group.name	= "masters";
	gth->master_group.attrs = attrs;

	return sysfs_create_group(&gth->dev->kobj, &gth->master_group);
}

static int intel_th_output_attributes(struct gth_device *gth)
{
	struct output_attribute *out_attrs;
	struct attribute **attrs;
	int i, j, nouts = TH_POSSIBLE_OUTPUTS;
	int nparms = ARRAY_SIZE(output_parms);
	int nattrs = nouts * nparms + 1;

	attrs = devm_kcalloc(gth->dev, nattrs, sizeof(void *), GFP_KERNEL);
	if (!attrs)
		return -ENOMEM;

	out_attrs = devm_kcalloc(gth->dev, nattrs,
				 sizeof(struct output_attribute),
				 GFP_KERNEL);
	if (!out_attrs)
		return -ENOMEM;

	for (i = 0; i < nouts; i++) {
		for (j = 0; j < nparms; j++) {
			unsigned int idx = i * nparms + j;
			char *name;

			name = devm_kasprintf(gth->dev, GFP_KERNEL, "%d_%s", i,
					      output_parms[j].name);
			if (!name)
				return -ENOMEM;

			out_attrs[idx].attr.attr.name = name;

			if (output_parms[j].readable) {
				out_attrs[idx].attr.attr.mode |= S_IRUGO;
				out_attrs[idx].attr.show = output_attr_show;
			}

			if (output_parms[j].writable) {
				out_attrs[idx].attr.attr.mode |= S_IWUSR;
				out_attrs[idx].attr.store = output_attr_store;
			}

			sysfs_attr_init(&out_attrs[idx].attr.attr);
			attrs[idx] = &out_attrs[idx].attr.attr;

			out_attrs[idx].gth = gth;
			out_attrs[idx].port = i;
			out_attrs[idx].parm = j;
		}
	}

	gth->output_group.name	= "outputs";
	gth->output_group.attrs = attrs;

	return sysfs_create_group(&gth->dev->kobj, &gth->output_group);
}

/**
 * intel_th_gth_stop() - stop tracing to an output device
 * @gth:		GTH device
 * @output:		output device's descriptor
 * @capture_done:	set when no more traces will be captured
 *
 * This will stop tracing using the force storeEn off signal and wait for the
 * pipelines to be empty for the corresponding output port.
 */
static void intel_th_gth_stop(struct gth_device *gth,
			      struct intel_th_output *output,
			      bool capture_done)
{
	struct intel_th_device *outdev =
		container_of(output, struct intel_th_device, output);
	struct intel_th_driver *outdrv =
		to_intel_th_driver(outdev->dev.driver);
	unsigned long count;
	u32 reg;
	u32 scr2 = 0xfc | (capture_done ? 1 : 0);

	iowrite32(0, gth->base + REG_GTH_SCR);
	iowrite32(scr2, gth->base + REG_GTH_SCR2);

	/* wait on pipeline empty for the given port */
	for (reg = 0, count = GTH_PLE_WAITLOOP_DEPTH;
	     count && !(reg & BIT(output->port)); count--) {
		reg = ioread32(gth->base + REG_GTH_STAT);
		cpu_relax();
	}

	if (!count)
		dev_dbg(gth->dev, "timeout waiting for GTH[%d] PLE\n",
			output->port);

	/* wait on output pipeline empty */
	if (outdrv->wait_empty)
		outdrv->wait_empty(outdev);

	/* clear force capture done for next captures */
	iowrite32(0xfc, gth->base + REG_GTH_SCR2);
}

/**
 * intel_th_gth_start() - start tracing to an output device
 * @gth:	GTH device
 * @output:	output device's descriptor
 *
 * This will start tracing using the force storeEn signal.
 */
static void intel_th_gth_start(struct gth_device *gth,
			       struct intel_th_output *output)
{
	u32 scr = 0xfc0000;

	if (output->multiblock)
		scr |= 0xff;

	iowrite32(scr, gth->base + REG_GTH_SCR);
	iowrite32(0, gth->base + REG_GTH_SCR2);
}

/**
 * intel_th_gth_disable() - disable tracing to an output device
 * @thdev:	GTH device
 * @output:	output device's descriptor
 *
 * This will deconfigure all masters set to output to this device,
 * disable tracing using the force storeEn off signal and wait for the
 * "pipeline empty" bit for the corresponding output port.
 */
static void intel_th_gth_disable(struct intel_th_device *thdev,
				 struct intel_th_output *output)
{
	struct gth_device *gth = dev_get_drvdata(&thdev->dev);
	int master;
	u32 reg;

	spin_lock(&gth->gth_lock);
	output->active = false;

	for_each_set_bit(master, gth->output[output->port].master,
			 TH_CONFIGURABLE_MASTERS + 1) {
		gth_master_set(gth, master, -1);
	}
	spin_unlock(&gth->gth_lock);

	intel_th_gth_stop(gth, output, true);

	reg = ioread32(gth->base + REG_GTH_SCRPD0);
	reg &= ~output->scratchpad;
	iowrite32(reg, gth->base + REG_GTH_SCRPD0);
}

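/*
 * Clear the CTC resync control bit in the TSCU; this is done from
 * intel_th_gth_enable() before (re)starting a capture when the
 * tscu_enable capability is set.
 */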
static void gth_tscu_resync(struct gth_device *gth)
{
	u32 reg;

	reg = ioread32(gth->base + REG_TSCU_TSUCTRL);
	reg &= ~TSUCTRL_CTCRESYNC;
	iowrite32(reg, gth->base + REG_TSCU_TSUCTRL);
}

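/**
 * intel_th_gth_prepare() - prepare an output port for programming
 * @thdev:	GTH device
 * @output:	output device's descriptor
 *
 * Spin (bounded by GTH_PLE_WAITLOOP_DEPTH) until the output port reports
 * being in reset, so that it is quiescent before it gets programmed.
 */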
static void intel_th_gth_prepare(struct intel_th_device *thdev,
				 struct intel_th_output *output)
{
	struct gth_device *gth = dev_get_drvdata(&thdev->dev);
	int count;

	/*
	 * Wait until the output port is in reset before we start
	 * programming it.
	 */
	for (count = GTH_PLE_WAITLOOP_DEPTH;
	     count && !(gth_output_get(gth, output->port) & BIT(5)); count--)
		cpu_relax();
}

/**
 * intel_th_gth_enable() - enable tracing to an output device
 * @thdev:	GTH device
 * @output:	output device's descriptor
 *
 * This will configure all masters set to output to this device and
 * enable tracing using the force storeEn signal.
 */
static void intel_th_gth_enable(struct intel_th_device *thdev,
				struct intel_th_output *output)
{
	struct gth_device *gth = dev_get_drvdata(&thdev->dev);
	struct intel_th *th = to_intel_th(thdev);
	int master;
	u32 scrpd;

	spin_lock(&gth->gth_lock);
	for_each_set_bit(master, gth->output[output->port].master,
			 TH_CONFIGURABLE_MASTERS + 1) {
		gth_master_set(gth, master, output->port);
	}

	output->active = true;
	spin_unlock(&gth->gth_lock);

	if (INTEL_TH_CAP(th, tscu_enable))
		gth_tscu_resync(gth);

	scrpd = ioread32(gth->base + REG_GTH_SCRPD0);
	scrpd |= output->scratchpad;
	iowrite32(scrpd, gth->base + REG_GTH_SCRPD0);

	intel_th_gth_start(gth, output);
}

/**
 * intel_th_gth_switch() - execute a switch sequence
 * @thdev:	GTH device
 * @output:	output device's descriptor
 *
 * This will execute a switch sequence that will trigger a switch window
 * when tracing to MSC in multi-block mode.
 */
static void intel_th_gth_switch(struct intel_th_device *thdev,
				struct intel_th_output *output)
{
	struct gth_device *gth = dev_get_drvdata(&thdev->dev);
	unsigned long count;
	u32 reg;

	/* trigger */
	iowrite32(0, gth->base + REG_CTS_CTL);
	iowrite32(CTS_CTL_SEQUENCER_ENABLE, gth->base + REG_CTS_CTL);
	/* wait on trigger status */
	for (reg = 0, count = CTS_TRIG_WAITLOOP_DEPTH;
	     count && !(reg & BIT(4)); count--) {
		reg = ioread32(gth->base + REG_CTS_STAT);
		cpu_relax();
	}
	if (!count)
		dev_dbg(&thdev->dev, "timeout waiting for CTS Trigger\n");

	/* De-assert the trigger */
	iowrite32(0, gth->base + REG_CTS_CTL);

	intel_th_gth_stop(gth, output, false);
	intel_th_gth_start(gth, output);
}

/**
 * intel_th_gth_assign() - assign output device to a GTH output port
 * @thdev:	GTH device
 * @othdev:	output device
 *
 * This will match a given output device parameters against present
 * output ports on the GTH and fill out relevant bits in output device's
 * descriptor.
 *
 * Return:	0 on success, -errno on error.
 */
static int intel_th_gth_assign(struct intel_th_device *thdev,
			       struct intel_th_device *othdev)
{
	struct gth_device *gth = dev_get_drvdata(&thdev->dev);
	int i, id;

	if (thdev->host_mode)
		return -EBUSY;

	if (othdev->type != INTEL_TH_OUTPUT)
		return -EINVAL;

	for (i = 0, id = 0; i < TH_POSSIBLE_OUTPUTS; i++) {
		if (gth->output[i].port_type != othdev->output.type)
			continue;

		if (othdev->id == -1 || othdev->id == id)
			goto found;

		id++;
	}

	return -ENOENT;

found:
	spin_lock(&gth->gth_lock);
	othdev->output.port = i;
	othdev->output.active = false;
	gth->output[i].output = &othdev->output;
	spin_unlock(&gth->gth_lock);

	return 0;
}

/**
 * intel_th_gth_unassign() - deassociate an output device from its output port
 * @thdev:	GTH device
 * @othdev:	output device
 */
static void intel_th_gth_unassign(struct intel_th_device *thdev,
				  struct intel_th_device *othdev)
{
	struct gth_device *gth = dev_get_drvdata(&thdev->dev);
	int port = othdev->output.port;
	int master;

	if (thdev->host_mode)
		return;

	spin_lock(&gth->gth_lock);
	othdev->output.port = -1;
	othdev->output.active = false;
	gth->output[port].output = NULL;
	for (master = 0; master < TH_CONFIGURABLE_MASTERS + 1; master++)
		if (gth->master[master] == port)
			gth->master[master] = -1;
	spin_unlock(&gth->gth_lock);
}

static int
intel_th_gth_set_output(struct intel_th_device *thdev, unsigned int master)
{
	struct gth_device *gth = dev_get_drvdata(&thdev->dev);
	int port = 0; /* FIXME: make default output configurable */

	/*
	 * everything above TH_CONFIGURABLE_MASTERS is controlled by the
	 * same register
	 */
	if (master > TH_CONFIGURABLE_MASTERS)
		master = TH_CONFIGURABLE_MASTERS;

	spin_lock(&gth->gth_lock);
	if (gth->master[master] == -1) {
		set_bit(master, gth->output[port].master);
		gth->master[master] = port;
	}
	spin_unlock(&gth->gth_lock);

	return 0;
}

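/*
 * Map the GTH register window, reset the hardware (unless a debugger owns
 * it, in which case fall back to host mode), discover which output ports
 * are present and create the "masters" and "outputs" sysfs groups.
 */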
static int intel_th_gth_probe(struct intel_th_device *thdev)
{
	struct device *dev = &thdev->dev;
	struct intel_th *th = dev_get_drvdata(dev->parent);
	struct gth_device *gth;
	struct resource *res;
	void __iomem *base;
	int i, ret;

	res = intel_th_device_get_resource(thdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	base = devm_ioremap(dev, res->start, resource_size(res));
	if (!base)
		return -ENOMEM;

	gth = devm_kzalloc(dev, sizeof(*gth), GFP_KERNEL);
	if (!gth)
		return -ENOMEM;

	gth->dev = dev;
	gth->base = base;
	spin_lock_init(&gth->gth_lock);

	dev_set_drvdata(dev, gth);

	/*
	 * Host mode can be signalled via SW means or via SCRPD_DEBUGGER_IN_USE
	 * bit. Either way, don't reset HW in this case, and don't export any
	 * capture configuration attributes. Also, refuse to assign output
	 * drivers to ports, see intel_th_gth_assign().
	 */
	if (thdev->host_mode)
		return 0;

	ret = intel_th_gth_reset(gth);
	if (ret) {
		if (ret != -EBUSY)
			return ret;

		thdev->host_mode = true;

		return 0;
	}

	for (i = 0; i < TH_CONFIGURABLE_MASTERS + 1; i++)
		gth->master[i] = -1;

	for (i = 0; i < TH_POSSIBLE_OUTPUTS; i++) {
		gth->output[i].gth = gth;
		gth->output[i].index = i;
		gth->output[i].port_type =
			gth_output_parm_get(gth, i, TH_OUTPUT_PARM(port));
		if (gth->output[i].port_type == GTH_NONE)
			continue;

		ret = intel_th_output_enable(th, gth->output[i].port_type);
		/* -ENODEV is ok, we just won't have that device enumerated */
		if (ret && ret != -ENODEV)
			return ret;
	}

	if (intel_th_output_attributes(gth) ||
	    intel_th_master_attributes(gth)) {
		pr_warn("Can't initialize sysfs attributes\n");

		if (gth->output_group.attrs)
			sysfs_remove_group(&gth->dev->kobj, &gth->output_group);
		return -ENOMEM;
	}

	return 0;
}

static void intel_th_gth_remove(struct intel_th_device *thdev)
{
	struct gth_device *gth = dev_get_drvdata(&thdev->dev);

	sysfs_remove_group(&gth->dev->kobj, &gth->output_group);
	sysfs_remove_group(&gth->dev->kobj, &gth->master_group);
}

static struct intel_th_driver intel_th_gth_driver = {
	.probe		= intel_th_gth_probe,
	.remove		= intel_th_gth_remove,
	.assign		= intel_th_gth_assign,
	.unassign	= intel_th_gth_unassign,
	.set_output	= intel_th_gth_set_output,
	.prepare	= intel_th_gth_prepare,
	.enable		= intel_th_gth_enable,
	.trig_switch	= intel_th_gth_switch,
	.disable	= intel_th_gth_disable,
	.driver	= {
		.name	= "gth",
		.owner	= THIS_MODULE,
	},
};

module_driver(intel_th_gth_driver,
	      intel_th_driver_register,
	      intel_th_driver_unregister);

MODULE_ALIAS("intel_th_switch");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Intel(R) Trace Hub Global Trace Hub driver");
MODULE_AUTHOR("Alexander Shishkin <alexander.shishkin@linux.intel.com>");