// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 BAIKAL ELECTRONICS, JSC
 *
 * Authors:
 *   Serge Semin <Sergey.Semin@baikalelectronics.ru>
 *
 * Baikal-T1 APB-bus driver
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/atomic.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/nmi.h>
#include <linux/of.h>
#include <linux/regmap.h>
#include <linux/clk.h>
#include <linux/reset.h>
#include <linux/time64.h>
#include <linux/sysfs.h>

#define APB_EHB_ISR			0x00
#define APB_EHB_ISR_PENDING		BIT(0)
#define APB_EHB_ISR_MASK		BIT(1)
#define APB_EHB_ADDR			0x04
#define APB_EHB_TIMEOUT			0x08

#define APB_EHB_TIMEOUT_MIN		0x000003FFU
#define APB_EHB_TIMEOUT_MAX		0xFFFFFFFFU

/*
 * struct bt1_apb - Baikal-T1 APB EHB private data
 * @dev: Pointer to the device structure.
 * @regs: APB EHB registers map.
 * @res: No-device error injection memory region.
 * @irq: Error IRQ number.
 * @rate: APB-bus reference clock rate.
 * @pclk: APB reference clock.
 * @prst: APB domain reset line.
 * @count: Number of errors detected.
 */
struct bt1_apb {
	struct device *dev;

	struct regmap *regs;
	void __iomem *res;
	int irq;

	unsigned long rate;
	struct clk *pclk;

	struct reset_control *prst;

	atomic_t count;
};

static const struct regmap_config bt1_apb_regmap_cfg = {
	.reg_bits = 32,
	.val_bits = 32,
	.reg_stride = 4,
	.max_register = APB_EHB_TIMEOUT,
	.fast_io = true
};

static inline unsigned long bt1_apb_n_to_timeout_us(struct bt1_apb *apb, u32 n)
{
	u64 timeout = (u64)n * USEC_PER_SEC;

	do_div(timeout, apb->rate);

	return timeout;
}

static inline unsigned long bt1_apb_timeout_to_n_us(struct bt1_apb *apb,
						    unsigned long timeout)
{
	u64 n = (u64)timeout * apb->rate;

	do_div(n, USEC_PER_SEC);

	return n;
}
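
/*
 * Worked example (the clock rate below is only an illustration, not a value
 * taken from this driver): with a 125 MHz APB reference clock the minimum
 * timeout of APB_EHB_TIMEOUT_MIN (0x3FF) bus-clock cycles converts to about
 * 1023 * 1000000 / 125000000 ~= 8 us, while APB_EHB_TIMEOUT_MAX (0xFFFFFFFF)
 * cycles comes to roughly 34.4 s. The helpers above simply scale between
 * bus-clock cycles and microseconds using the cached apb->rate.
 */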

static irqreturn_t bt1_apb_isr(int irq, void *data)
{
	struct bt1_apb *apb = data;
	u32 addr = 0;

	regmap_read(apb->regs, APB_EHB_ADDR, &addr);

	dev_crit_ratelimited(apb->dev,
		"APB-bus fault %d: Slave access timeout at 0x%08x\n",
		atomic_inc_return(&apb->count),
		addr);

	/*
	 * Print a backtrace on each CPU. This might be pointless if the fault
	 * happened on the same CPU the IRQ handler is executed on, or if the
	 * other core has proceeded with execution despite the error. But if
	 * it hasn't, the backtrace leads straight to the cause of the problem.
	 */
	trigger_all_cpu_backtrace();

	regmap_update_bits(apb->regs, APB_EHB_ISR, APB_EHB_ISR_PENDING, 0);

	return IRQ_HANDLED;
}

static void bt1_apb_clear_data(void *data)
{
	struct bt1_apb *apb = data;
	struct platform_device *pdev = to_platform_device(apb->dev);

	platform_set_drvdata(pdev, NULL);
}

static struct bt1_apb *bt1_apb_create_data(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct bt1_apb *apb;
	int ret;

	apb = devm_kzalloc(dev, sizeof(*apb), GFP_KERNEL);
	if (!apb)
		return ERR_PTR(-ENOMEM);

	ret = devm_add_action(dev, bt1_apb_clear_data, apb);
	if (ret) {
		dev_err(dev, "Can't add APB EHB data clear action\n");
		return ERR_PTR(ret);
	}

	apb->dev = dev;
	atomic_set(&apb->count, 0);
	platform_set_drvdata(pdev, apb);

	return apb;
}

static int bt1_apb_request_regs(struct bt1_apb *apb)
{
	struct platform_device *pdev = to_platform_device(apb->dev);
	void __iomem *regs;

	regs = devm_platform_ioremap_resource_byname(pdev, "ehb");
	if (IS_ERR(regs)) {
		dev_err(apb->dev, "Couldn't map APB EHB registers\n");
		return PTR_ERR(regs);
	}

	apb->regs = devm_regmap_init_mmio(apb->dev, regs, &bt1_apb_regmap_cfg);
	if (IS_ERR(apb->regs)) {
		dev_err(apb->dev, "Couldn't create APB EHB regmap\n");
		return PTR_ERR(apb->regs);
	}

	apb->res = devm_platform_ioremap_resource_byname(pdev, "nodev");
	if (IS_ERR(apb->res))
		dev_err(apb->dev, "Couldn't map reserved region\n");

	return PTR_ERR_OR_ZERO(apb->res);
}

static int bt1_apb_request_rst(struct bt1_apb *apb)
{
	int ret;

	apb->prst = devm_reset_control_get_optional_exclusive(apb->dev, "prst");
	if (IS_ERR(apb->prst))
		return dev_err_probe(apb->dev, PTR_ERR(apb->prst),
				     "Couldn't get reset control line\n");

	ret = reset_control_deassert(apb->prst);
	if (ret)
		dev_err(apb->dev, "Failed to deassert the reset line\n");

	return ret;
}

static void bt1_apb_disable_clk(void *data)
{
	struct bt1_apb *apb = data;

	clk_disable_unprepare(apb->pclk);
}

static int bt1_apb_request_clk(struct bt1_apb *apb)
{
	int ret;

	apb->pclk = devm_clk_get(apb->dev, "pclk");
	if (IS_ERR(apb->pclk))
		return dev_err_probe(apb->dev, PTR_ERR(apb->pclk),
				     "Couldn't get APB clock descriptor\n");

	ret = clk_prepare_enable(apb->pclk);
	if (ret) {
		dev_err(apb->dev, "Couldn't enable the APB clock\n");
		return ret;
	}

	ret = devm_add_action_or_reset(apb->dev, bt1_apb_disable_clk, apb);
	if (ret) {
		dev_err(apb->dev, "Can't add APB EHB clocks disable action\n");
		return ret;
	}

	apb->rate = clk_get_rate(apb->pclk);
	if (!apb->rate) {
		dev_err(apb->dev, "Invalid clock rate\n");
		return -EINVAL;
	}

	return 0;
}

static void bt1_apb_clear_irq(void *data)
{
	struct bt1_apb *apb = data;

	regmap_update_bits(apb->regs, APB_EHB_ISR, APB_EHB_ISR_MASK, 0);
}

static int bt1_apb_request_irq(struct bt1_apb *apb)
{
	struct platform_device *pdev = to_platform_device(apb->dev);
	int ret;

	apb->irq = platform_get_irq(pdev, 0);
	if (apb->irq < 0)
		return apb->irq;

	ret = devm_request_irq(apb->dev, apb->irq, bt1_apb_isr, IRQF_SHARED,
			       "bt1-apb", apb);
	if (ret) {
		dev_err(apb->dev, "Couldn't request APB EHB IRQ\n");
		return ret;
	}

	ret = devm_add_action(apb->dev, bt1_apb_clear_irq, apb);
	if (ret) {
		dev_err(apb->dev, "Can't add APB EHB IRQs clear action\n");
		return ret;
	}

	/* Unmask the IRQ and clear its pending flag. */
	regmap_update_bits(apb->regs, APB_EHB_ISR,
			   APB_EHB_ISR_PENDING | APB_EHB_ISR_MASK,
			   APB_EHB_ISR_MASK);

	return 0;
}

static ssize_t count_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct bt1_apb *apb = dev_get_drvdata(dev);

	return scnprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&apb->count));
}
static DEVICE_ATTR_RO(count);

static ssize_t timeout_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct bt1_apb *apb = dev_get_drvdata(dev);
	unsigned long timeout;
	int ret;
	u32 n;

	ret = regmap_read(apb->regs, APB_EHB_TIMEOUT, &n);
	if (ret)
		return ret;

	timeout = bt1_apb_n_to_timeout_us(apb, n);

	return scnprintf(buf, PAGE_SIZE, "%lu\n", timeout);
}

static ssize_t timeout_store(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct bt1_apb *apb = dev_get_drvdata(dev);
	unsigned long timeout;
	int ret;
	u32 n;

	if (kstrtoul(buf, 0, &timeout) < 0)
		return -EINVAL;

	n = bt1_apb_timeout_to_n_us(apb, timeout);
	n = clamp(n, APB_EHB_TIMEOUT_MIN, APB_EHB_TIMEOUT_MAX);

	ret = regmap_write(apb->regs, APB_EHB_TIMEOUT, n);

	return ret ?: count;
}
static DEVICE_ATTR_RW(timeout);

static ssize_t inject_error_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "Error injection: nodev irq\n");
}

static ssize_t inject_error_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *data, size_t count)
{
	struct bt1_apb *apb = dev_get_drvdata(dev);

	/*
	 * Either perform a dummy read from the unmapped address in the APB
	 * IO area or manually set the IRQ status.
	 */
	if (sysfs_streq(data, "nodev"))
		readl(apb->res);
	else if (sysfs_streq(data, "irq"))
		regmap_update_bits(apb->regs, APB_EHB_ISR, APB_EHB_ISR_PENDING,
				   APB_EHB_ISR_PENDING);
	else
		return -EINVAL;

	return count;
}
static DEVICE_ATTR_RW(inject_error);

static struct attribute *bt1_apb_sysfs_attrs[] = {
	&dev_attr_count.attr,
	&dev_attr_timeout.attr,
	&dev_attr_inject_error.attr,
	NULL
};
ATTRIBUTE_GROUPS(bt1_apb_sysfs);
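
/*
 * Example of exercising the attributes above from user space. The parent
 * directory under /sys/devices/platform/ depends on the board's device tree,
 * so the paths here are only illustrative:
 *
 *   cat .../count                  # APB timeout faults detected so far
 *   cat .../timeout                # current slave access timeout, in us
 *   echo 1000 > .../timeout        # request ~1000 us (clamped to HW limits)
 *   echo nodev > .../inject_error  # fault via a read from the "nodev" area
 *   echo irq > .../inject_error    # fault by setting the ISR pending flag
 */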

static void bt1_apb_remove_sysfs(void *data)
{
	struct bt1_apb *apb = data;

	device_remove_groups(apb->dev, bt1_apb_sysfs_groups);
}

static int bt1_apb_init_sysfs(struct bt1_apb *apb)
{
	int ret;

	ret = device_add_groups(apb->dev, bt1_apb_sysfs_groups);
	if (ret) {
		dev_err(apb->dev, "Failed to create EHB APB sysfs nodes\n");
		return ret;
	}

	ret = devm_add_action_or_reset(apb->dev, bt1_apb_remove_sysfs, apb);
	if (ret)
		dev_err(apb->dev, "Can't add APB EHB sysfs remove action\n");

	return ret;
}

static int bt1_apb_probe(struct platform_device *pdev)
{
	struct bt1_apb *apb;
	int ret;

	apb = bt1_apb_create_data(pdev);
	if (IS_ERR(apb))
		return PTR_ERR(apb);

	ret = bt1_apb_request_regs(apb);
	if (ret)
		return ret;

	ret = bt1_apb_request_rst(apb);
	if (ret)
		return ret;

	ret = bt1_apb_request_clk(apb);
	if (ret)
		return ret;

	ret = bt1_apb_request_irq(apb);
	if (ret)
		return ret;

	ret = bt1_apb_init_sysfs(apb);
	if (ret)
		return ret;

	return 0;
}

static const struct of_device_id bt1_apb_of_match[] = {
	{ .compatible = "baikal,bt1-apb" },
	{ }
};
MODULE_DEVICE_TABLE(of, bt1_apb_of_match);
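
/*
 * Sketch of a matching device tree node. The unit address, register ranges,
 * interrupt specifier, phandles and the secondary "simple-bus" compatible
 * below are purely illustrative; the resource names, however, are the ones
 * this driver looks up ("ehb"/"nodev" regions, "pclk" clock, "prst" reset):
 *
 *	bus@1f059000 {
 *		compatible = "baikal,bt1-apb", "simple-bus";
 *		reg = <0x1f059000 0x1000>, <0x1d000000 0x2040000>;
 *		reg-names = "ehb", "nodev";
 *		interrupts = <16>;
 *		clocks = <&ccu_sys 1>;
 *		clock-names = "pclk";
 *		resets = <&ccu_sys 1>;
 *		reset-names = "prst";
 *	};
 */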

static struct platform_driver bt1_apb_driver = {
	.probe = bt1_apb_probe,
	.driver = {
		.name = "bt1-apb",
		.of_match_table = bt1_apb_of_match
	}
};
module_platform_driver(bt1_apb_driver);

MODULE_AUTHOR("Serge Semin <Sergey.Semin@baikalelectronics.ru>");
MODULE_DESCRIPTION("Baikal-T1 APB-bus driver");
MODULE_LICENSE("GPL v2");