// SPDX-License-Identifier: GPL-2.0-only
/*
 * OMAP L3 Interconnect error handling driver
 *
 * Copyright (C) 2011-2015 Texas Instruments Incorporated - http://www.ti.com/
 *	Santosh Shilimkar <santosh.shilimkar@ti.com>
 *	Sricharan <r.sricharan@ti.com>
 */
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include "omap_l3_noc.h"

/**
 * l3_handle_target() - Handle target-specific parsing and reporting
 * @l3:		pointer to l3 struct
 * @base:	base address of clkdm
 * @flag_mux:	flagmux corresponding to the event
 * @err_src:	error source index of the slave (target)
 *
 * This does the second part of the error interrupt handling:
 *	3) Parse the slave information
 *	4) Print the logged information.
 *	5) Add dump stack to provide kernel trace.
 *	6) Clear the source if known.
 *
 * This handles two types of errors:
 *	1) Custom errors in L3:
 *		Targets like DMM/FW/EMIF generate an SRESP=ERR error
 *	2) Standard L3 error:
 *		- Unsupported CMD.
 *			L3 tries to access target while it is idle
 *		- OCP disconnect.
 *		- Address hole error:
 *			If DSS/ISS/FDIF/USBHOSTFS access a target where they
 *			do not have connectivity, the error is logged in
 *			their default target which is DMM2.
 *
 *	On High Secure devices, firewall errors are possible and those
 *	can be trapped as well. But the trapping is implemented as part
 *	of the secure software and hence need not be implemented here.
 */
static int l3_handle_target(struct omap_l3 *l3, void __iomem *base,
			    struct l3_flagmux_data *flag_mux, int err_src)
{
	int k;
	u32 std_err_main, clear, masterid;
	u8 op_code, m_req_info;
	void __iomem *l3_targ_base;
	void __iomem *l3_targ_stderr, *l3_targ_slvofslsb, *l3_targ_mstaddr;
	void __iomem *l3_targ_hdr, *l3_targ_info;
	struct l3_target_data *l3_targ_inst;
	struct l3_masters_data *master;
	char *target_name, *master_name = "UN IDENTIFIED";
	char *err_description;
	char err_string[30] = { 0 };
	char info_string[60] = { 0 };

	/* We do not expect err_src to go out of bounds */
	BUG_ON(err_src > MAX_CLKDM_TARGETS);

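	/*
	 * Map the error source index onto the target descriptor for this
	 * clock domain; indices without a matching descriptor are treated
	 * as unsupported so the caller can mask them off.
	 */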
	if (err_src < flag_mux->num_targ_data) {
		l3_targ_inst = &flag_mux->l3_targ[err_src];
		target_name = l3_targ_inst->name;
		l3_targ_base = base + l3_targ_inst->offset;
	} else {
		target_name = L3_TARGET_NOT_SUPPORTED;
	}

	if (target_name == L3_TARGET_NOT_SUPPORTED)
		return -ENODEV;

	/* Read the stderrlog_main_source from clk domain */
	l3_targ_stderr = l3_targ_base + L3_TARG_STDERRLOG_MAIN;
	l3_targ_slvofslsb = l3_targ_base + L3_TARG_STDERRLOG_SLVOFSLSB;

	std_err_main = readl_relaxed(l3_targ_stderr);

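	/*
	 * STDERRLOG_MAIN tells us whether this is a standard NoC error or a
	 * custom (SRESP=ERR) error raised by the target itself; each type
	 * logs the master address, opcode and request info in a different
	 * set of registers, so pick the matching set here.
	 */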
	switch (std_err_main & CUSTOM_ERROR) {
	case STANDARD_ERROR:
		err_description = "Standard";
		snprintf(err_string, sizeof(err_string),
			 ": At Address: 0x%08X ",
			 readl_relaxed(l3_targ_slvofslsb));

		l3_targ_mstaddr = l3_targ_base + L3_TARG_STDERRLOG_MSTADDR;
		l3_targ_hdr = l3_targ_base + L3_TARG_STDERRLOG_HDR;
		l3_targ_info = l3_targ_base + L3_TARG_STDERRLOG_INFO;
		break;

	case CUSTOM_ERROR:
		err_description = "Custom";

		l3_targ_mstaddr = l3_targ_base +
				  L3_TARG_STDERRLOG_CINFO_MSTADDR;
		l3_targ_hdr = l3_targ_base + L3_TARG_STDERRLOG_CINFO_OPCODE;
		l3_targ_info = l3_targ_base + L3_TARG_STDERRLOG_CINFO_INFO;
		break;

	default:
		/* Nothing to be handled here as of now */
		return 0;
	}

	/* STDERRLOG_MSTADDR Stores the NTTP master address. */
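	/*
	 * The master ID is extracted with the SoC-specific mask and shift:
	 * for example, with a hypothetical mask of 0xFC (bits 7:2), a raw
	 * value of 0x54 would decode to master ID 0x15.
	 */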
	masterid = (readl_relaxed(l3_targ_mstaddr) &
		    l3->mst_addr_mask) >> __ffs(l3->mst_addr_mask);

	for (k = 0, master = l3->l3_masters; k < l3->num_masters;
	     k++, master++) {
		if (masterid == master->id) {
			master_name = master->name;
			break;
		}
	}

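	/*
	 * Decode the OCP command from the low three bits of the header
	 * (used to index l3_transaction_type[]) and the request info bits
	 * that describe the mode of the offending access.
	 */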
	op_code = readl_relaxed(l3_targ_hdr) & 0x7;

	m_req_info = readl_relaxed(l3_targ_info) & 0xF;
	snprintf(info_string, sizeof(info_string),
		 ": %s in %s mode during %s access",
		 (m_req_info & BIT(0)) ? "Opcode Fetch" : "Data Access",
		 (m_req_info & BIT(1)) ? "Supervisor" : "User",
		 (m_req_info & BIT(3)) ? "Debug" : "Functional");

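	/* WARN() prints the report and also dumps the kernel stack trace. */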
	WARN(true,
	     "%s:L3 %s Error: MASTER %s TARGET %s (%s)%s%s\n",
	     dev_name(l3->dev),
	     err_description,
	     master_name, target_name,
	     l3_transaction_type[op_code],
	     err_string, info_string);

	/* clear the std error log */
	clear = std_err_main | CLEAR_STDERR_LOG;
	writel_relaxed(clear, l3_targ_stderr);

	return 0;
}

/**
 * l3_interrupt_handler() - interrupt handler for l3 events
 * @irq:	irq number
 * @_l3:	pointer to l3 structure
 *
 * Interrupt Handler for L3 error detection.
 *	1) Identify the L3 clockdomain partition to which the error belongs.
 *	2) Identify the slave where the error information is logged
 *	... handle the slave event..
 *	7) if the slave is unknown, mask out the slave.
 */
static irqreturn_t l3_interrupt_handler(int irq, void *_l3)
{
	struct omap_l3 *l3 = _l3;
	int inttype, i, ret;
	int err_src = 0;
	u32 err_reg, mask_val;
	void __iomem *base, *mask_reg;
	struct l3_flagmux_data *flag_mux;

	/* Get the Type of interrupt */
	inttype = irq == l3->app_irq ? L3_APPLICATION_ERROR : L3_DEBUG_ERROR;

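	/*
	 * Each module has a flag mux with separate application and debug
	 * variants of the REGERR and MASK registers; the two variants sit
	 * 8 bytes apart, which is what the (inttype << 3) offset selects.
	 */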
	for (i = 0; i < l3->num_modules; i++) {
		/*
		 * Read the regerr register of the clock domain
		 * to determine the source
		 */
		base = l3->l3_base[i];
		flag_mux = l3->l3_flagmux[i];
		err_reg = readl_relaxed(base + flag_mux->offset +
					L3_FLAGMUX_REGERR0 + (inttype << 3));

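		/*
		 * Drop status bits for sources that were previously found
		 * to be unclearable and have been masked off below.
		 */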
		err_reg &= ~(inttype ? flag_mux->mask_app_bits :
				flag_mux->mask_dbg_bits);

		/* Get the corresponding error and analyse */
		if (err_reg) {
			/* Identify the source from control status register */
			err_src = __ffs(err_reg);

			ret = l3_handle_target(l3, base, flag_mux, err_src);

			/*
			 * Certain platforms may have "undocumented" status
			 * pending on boot. So don't generate a severe warning
			 * here. Just mask it off to prevent the error from
			 * recurring and locking up the system.
			 */
			if (ret) {
				dev_err(l3->dev,
					"L3 %s error: target %d mod:%d %s\n",
					inttype ? "debug" : "application",
					err_src, i, "(unclearable)");

				mask_reg = base + flag_mux->offset +
					   L3_FLAGMUX_MASK0 + (inttype << 3);
				mask_val = readl_relaxed(mask_reg);
				mask_val &= ~(1 << err_src);
				writel_relaxed(mask_val, mask_reg);

				/* Mark these bits as to be ignored */
				if (inttype)
					flag_mux->mask_app_bits |= 1 << err_src;
				else
					flag_mux->mask_dbg_bits |= 1 << err_src;
			}

			/* Error found and handled, so stop scanning modules */
			return IRQ_HANDLED;
		}
	}

	dev_err(l3->dev, "L3 %s IRQ not handled!!\n",
		inttype ? "debug" : "application");

	return IRQ_NONE;
}

static const struct of_device_id l3_noc_match[] = {
	{.compatible = "ti,omap4-l3-noc", .data = &omap4_l3_data},
	{.compatible = "ti,omap5-l3-noc", .data = &omap5_l3_data},
	{.compatible = "ti,dra7-l3-noc", .data = &dra_l3_data},
	{.compatible = "ti,am4372-l3-noc", .data = &am4372_l3_data},
	{},
};
MODULE_DEVICE_TABLE(of, l3_noc_match);

static int omap_l3_probe(struct platform_device *pdev)
{
	const struct of_device_id *of_id;
	static struct omap_l3 *l3;
	int ret, i, res_idx;

	of_id = of_match_device(l3_noc_match, &pdev->dev);
	if (!of_id) {
		dev_err(&pdev->dev, "OF data missing\n");
		return -EINVAL;
	}

	l3 = devm_kzalloc(&pdev->dev, sizeof(*l3), GFP_KERNEL);
	if (!l3)
		return -ENOMEM;

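	/*
	 * Start from the const SoC description and copy it into the
	 * per-device allocation so the ioremapped base addresses, device
	 * pointer and IRQ numbers can be filled in below.
	 */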
	memcpy(l3, of_id->data, sizeof(*l3));
	l3->dev = &pdev->dev;
	platform_set_drvdata(pdev, l3);

	/* Get mem resources */
	for (i = 0, res_idx = 0; i < l3->num_modules; i++) {
		struct resource	*res;

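		/*
		 * Submodules reuse the base mapped for the previous module
		 * instead of consuming a memory resource of their own.
		 */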
		if (l3->l3_base[i] == L3_BASE_IS_SUBMODULE) {
			/* First entry cannot be submodule */
			BUG_ON(i == 0);
			l3->l3_base[i] = l3->l3_base[i - 1];
			continue;
		}
		res = platform_get_resource(pdev, IORESOURCE_MEM, res_idx);
		l3->l3_base[i] = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(l3->l3_base[i])) {
			dev_err(l3->dev, "ioremap %d failed\n", i);
			return PTR_ERR(l3->l3_base[i]);
		}
		res_idx++;
	}

	/*
	 * Set up the interrupt handlers
	 */
	l3->debug_irq = platform_get_irq(pdev, 0);
	ret = devm_request_irq(l3->dev, l3->debug_irq, l3_interrupt_handler,
			       IRQF_NO_THREAD, "l3-dbg-irq", l3);
	if (ret) {
		dev_err(l3->dev, "request_irq failed for %d\n",
			l3->debug_irq);
		return ret;
	}

	l3->app_irq = platform_get_irq(pdev, 1);
	ret = devm_request_irq(l3->dev, l3->app_irq, l3_interrupt_handler,
			       IRQF_NO_THREAD, "l3-app-irq", l3);
	if (ret)
		dev_err(l3->dev, "request_irq failed for %d\n", l3->app_irq);

	return ret;
}

#ifdef	CONFIG_PM_SLEEP

/**
 * l3_resume_noirq() - resume function for l3_noc
 * @dev:	pointer to l3_noc device structure
 *
 * We only have a resume handler, since the delta register
 * configuration is already maintained as part of configuring
 * the system.
 */
static int l3_resume_noirq(struct device *dev)
{
	struct omap_l3 *l3 = dev_get_drvdata(dev);
	int i;
	struct l3_flagmux_data *flag_mux;
	void __iomem *base, *mask_regx = NULL;
	u32 mask_val;

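	/*
	 * Re-apply the application and debug mask bits recorded at runtime
	 * for unclearable sources, in case the flag-mux register contents
	 * were lost across the suspend.
	 */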
	for (i = 0; i < l3->num_modules; i++) {
		base = l3->l3_base[i];
		flag_mux = l3->l3_flagmux[i];
		if (!flag_mux->mask_app_bits && !flag_mux->mask_dbg_bits)
			continue;

		mask_regx = base + flag_mux->offset + L3_FLAGMUX_MASK0 +
			   (L3_APPLICATION_ERROR << 3);
		mask_val = readl_relaxed(mask_regx);
		mask_val &= ~(flag_mux->mask_app_bits);

		writel_relaxed(mask_val, mask_regx);
		mask_regx = base + flag_mux->offset + L3_FLAGMUX_MASK0 +
			   (L3_DEBUG_ERROR << 3);
		mask_val = readl_relaxed(mask_regx);
		mask_val &= ~(flag_mux->mask_dbg_bits);

		writel_relaxed(mask_val, mask_regx);
	}

	/* Dummy read to force OCP barrier */
	if (mask_regx)
		(void)readl(mask_regx);

	return 0;
}

static const struct dev_pm_ops l3_dev_pm_ops = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(NULL, l3_resume_noirq)
};

#define L3_DEV_PM_OPS (&l3_dev_pm_ops)
#else
#define L3_DEV_PM_OPS NULL
#endif

static struct platform_driver omap_l3_driver = {
	.probe		= omap_l3_probe,
	.driver		= {
		.name		= "omap_l3_noc",
		.pm		= L3_DEV_PM_OPS,
		.of_match_table = of_match_ptr(l3_noc_match),
	},
};

static int __init omap_l3_init(void)
{
	return platform_driver_register(&omap_l3_driver);
}
postcore_initcall_sync(omap_l3_init);

static void __exit omap_l3_exit(void)
{
	platform_driver_unregister(&omap_l3_driver);
}
module_exit(omap_l3_exit);

MODULE_AUTHOR("Santosh Shilimkar");
MODULE_AUTHOR("Sricharan R");
MODULE_DESCRIPTION("OMAP L3 Interconnect error handling driver");
MODULE_LICENSE("GPL v2");