xref: /openbmc/linux/drivers/dma/ti/dma-crossbar.c (revision a61127c2)
1 /*
2  *  Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com
3  *  Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License version 2 as
7  * published by the Free Software Foundation.
8  *
9  */
10 #include <linux/slab.h>
11 #include <linux/err.h>
12 #include <linux/init.h>
13 #include <linux/list.h>
14 #include <linux/io.h>
15 #include <linux/of_address.h>
16 #include <linux/of_device.h>
17 #include <linux/of_dma.h>
18 
/* Crossbar flavours handled by this driver */
#define TI_XBAR_DRA7		0
#define TI_XBAR_AM335X		1
/*
 * Identity table so each of_device_id .data below can point at a u32
 * holding the crossbar type; ti_dma_xbar_probe() dereferences it to
 * pick the flavour-specific probe.
 */
static const u32 ti_xbar_type[] = {
	[TI_XBAR_DRA7] = TI_XBAR_DRA7,
	[TI_XBAR_AM335X] = TI_XBAR_AM335X,
};
25 
/* Top-level match table; .data selects the crossbar flavour to probe */
static const struct of_device_id ti_dma_xbar_match[] = {
	{
		.compatible = "ti,dra7-dma-crossbar",
		.data = &ti_xbar_type[TI_XBAR_DRA7],
	},
	{
		.compatible = "ti,am335x-edma-crossbar",
		.data = &ti_xbar_type[TI_XBAR_AM335X],
	},
	{},
};
37 
/* Crossbar on AM335x/AM437x family */
#define TI_AM335X_XBAR_LINES	64	/* default when DT omits dma-requests */

/* Per-device state for the AM335x crossbar, stored as drvdata */
struct ti_am335x_xbar_data {
	void __iomem *iomem;	/* mapped TPCC_EVT_MUX register block */

	struct dma_router dmarouter;	/* registered with the OF DMA core */

	u32 xbar_events; /* maximum number of events to select in xbar */
	u32 dma_requests; /* number of DMA requests on eDMA */
};
49 
/* Route cookie: one per active mapping, freed in ti_am335x_xbar_free() */
struct ti_am335x_xbar_map {
	u16 dma_line;	/* eDMA request line the event was routed to */
	u8 mux_val;	/* crossbar event number written into the mux */
};
54 
55 static inline void ti_am335x_xbar_write(void __iomem *iomem, int event, u8 val)
56 {
57 	/*
58 	 * TPCC_EVT_MUX_60_63 register layout is different than the
59 	 * rest, in the sense, that event 63 is mapped to lowest byte
60 	 * and event 60 is mapped to highest, handle it separately.
61 	 */
62 	if (event >= 60 && event <= 63)
63 		writeb_relaxed(val, iomem + (63 - event % 4));
64 	else
65 		writeb_relaxed(val, iomem + event);
66 }
67 
68 static void ti_am335x_xbar_free(struct device *dev, void *route_data)
69 {
70 	struct ti_am335x_xbar_data *xbar = dev_get_drvdata(dev);
71 	struct ti_am335x_xbar_map *map = route_data;
72 
73 	dev_dbg(dev, "Unmapping XBAR event %u on channel %u\n",
74 		map->mux_val, map->dma_line);
75 
76 	ti_am335x_xbar_write(xbar->iomem, map->dma_line, 0);
77 	kfree(map);
78 }
79 
80 static void *ti_am335x_xbar_route_allocate(struct of_phandle_args *dma_spec,
81 					   struct of_dma *ofdma)
82 {
83 	struct platform_device *pdev = of_find_device_by_node(ofdma->of_node);
84 	struct ti_am335x_xbar_data *xbar = platform_get_drvdata(pdev);
85 	struct ti_am335x_xbar_map *map;
86 
87 	if (dma_spec->args_count != 3)
88 		return ERR_PTR(-EINVAL);
89 
90 	if (dma_spec->args[2] >= xbar->xbar_events) {
91 		dev_err(&pdev->dev, "Invalid XBAR event number: %d\n",
92 			dma_spec->args[2]);
93 		return ERR_PTR(-EINVAL);
94 	}
95 
96 	if (dma_spec->args[0] >= xbar->dma_requests) {
97 		dev_err(&pdev->dev, "Invalid DMA request line number: %d\n",
98 			dma_spec->args[0]);
99 		return ERR_PTR(-EINVAL);
100 	}
101 
102 	/* The of_node_put() will be done in the core for the node */
103 	dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", 0);
104 	if (!dma_spec->np) {
105 		dev_err(&pdev->dev, "Can't get DMA master\n");
106 		return ERR_PTR(-EINVAL);
107 	}
108 
109 	map = kzalloc(sizeof(*map), GFP_KERNEL);
110 	if (!map) {
111 		of_node_put(dma_spec->np);
112 		return ERR_PTR(-ENOMEM);
113 	}
114 
115 	map->dma_line = (u16)dma_spec->args[0];
116 	map->mux_val = (u8)dma_spec->args[2];
117 
118 	dma_spec->args[2] = 0;
119 	dma_spec->args_count = 2;
120 
121 	dev_dbg(&pdev->dev, "Mapping XBAR event%u to DMA%u\n",
122 		map->mux_val, map->dma_line);
123 
124 	ti_am335x_xbar_write(xbar->iomem, map->dma_line, map->mux_val);
125 
126 	return map;
127 }
128 
/* DMA masters the AM335x crossbar may sit in front of (eDMA TPCC only) */
static const struct of_device_id ti_am335x_master_match[] = {
	{ .compatible = "ti,edma3-tpcc", },
	{},
};
133 
/*
 * Probe for the AM335x/AM437x event crossbar.
 *
 * Checks that the "dma-masters" phandle points at a supported eDMA TPCC,
 * sizes the mux from the DT "dma-requests" properties (falling back to
 * TI_AM335X_XBAR_LINES), clears every mux entry and registers the DMA
 * router with the OF core.
 */
static int ti_am335x_xbar_probe(struct platform_device *pdev)
{
	struct device_node *node = pdev->dev.of_node;
	const struct of_device_id *match;
	struct device_node *dma_node;
	struct ti_am335x_xbar_data *xbar;
	struct resource *res;
	void __iomem *iomem;
	int i, ret;

	if (!node)
		return -ENODEV;

	xbar = devm_kzalloc(&pdev->dev, sizeof(*xbar), GFP_KERNEL);
	if (!xbar)
		return -ENOMEM;

	dma_node = of_parse_phandle(node, "dma-masters", 0);
	if (!dma_node) {
		dev_err(&pdev->dev, "Can't get DMA master node\n");
		return -ENODEV;
	}

	match = of_match_node(ti_am335x_master_match, dma_node);
	if (!match) {
		dev_err(&pdev->dev, "DMA master is not supported\n");
		of_node_put(dma_node);
		return -EINVAL;
	}

	/* Outputs: eDMA request lines, read from the master's node */
	if (of_property_read_u32(dma_node, "dma-requests",
				 &xbar->dma_requests)) {
		dev_info(&pdev->dev,
			 "Missing XBAR output information, using %u.\n",
			 TI_AM335X_XBAR_LINES);
		xbar->dma_requests = TI_AM335X_XBAR_LINES;
	}
	of_node_put(dma_node);

	/* Inputs: selectable crossbar events, read from our own node */
	if (of_property_read_u32(node, "dma-requests", &xbar->xbar_events)) {
		dev_info(&pdev->dev,
			 "Missing XBAR input information, using %u.\n",
			 TI_AM335X_XBAR_LINES);
		xbar->xbar_events = TI_AM335X_XBAR_LINES;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	iomem = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(iomem))
		return PTR_ERR(iomem);

	xbar->iomem = iomem;

	xbar->dmarouter.dev = &pdev->dev;
	xbar->dmarouter.route_free = ti_am335x_xbar_free;

	platform_set_drvdata(pdev, xbar);

	/* Reset the crossbar */
	for (i = 0; i < xbar->dma_requests; i++)
		ti_am335x_xbar_write(xbar->iomem, i, 0);

	ret = of_dma_router_register(node, ti_am335x_xbar_route_allocate,
				     &xbar->dmarouter);

	return ret;
}
201 
/* Crossbar on DRA7xx family */
#define TI_DRA7_XBAR_OUTPUTS	127	/* default output count if DT is silent */
#define TI_DRA7_XBAR_INPUTS	256	/* default input count if DT is silent */

/* Per-device state for the DRA7 crossbar, stored as drvdata */
struct ti_dra7_xbar_data {
	void __iomem *iomem;	/* mapped crossbar register block (u16 per line) */

	struct dma_router dmarouter;	/* registered with the OF DMA core */
	struct mutex mutex;		/* protects dma_inuse allocation */
	unsigned long *dma_inuse;	/* bitmap of claimed/reserved DMA lines */

	u16 safe_val; /* Value to reset the crossbar lines */
	u32 xbar_requests; /* number of DMA requests connected to XBAR */
	u32 dma_requests; /* number of DMA requests forwarded to DMA */
	u32 dma_offset;	/* request-number offset of the DMA master (0 or 1) */
};
218 
/* Route cookie: one per active mapping, freed in ti_dra7_xbar_free() */
struct ti_dra7_xbar_map {
	u16 xbar_in;	/* crossbar input (event) that was routed */
	int xbar_out;	/* DMA request line allocated from dma_inuse */
};
223 
224 static inline void ti_dra7_xbar_write(void __iomem *iomem, int xbar, u16 val)
225 {
226 	writew_relaxed(val, iomem + (xbar * 2));
227 }
228 
229 static void ti_dra7_xbar_free(struct device *dev, void *route_data)
230 {
231 	struct ti_dra7_xbar_data *xbar = dev_get_drvdata(dev);
232 	struct ti_dra7_xbar_map *map = route_data;
233 
234 	dev_dbg(dev, "Unmapping XBAR%u (was routed to %d)\n",
235 		map->xbar_in, map->xbar_out);
236 
237 	ti_dra7_xbar_write(xbar->iomem, map->xbar_out, xbar->safe_val);
238 	mutex_lock(&xbar->mutex);
239 	clear_bit(map->xbar_out, xbar->dma_inuse);
240 	mutex_unlock(&xbar->mutex);
241 	kfree(map);
242 }
243 
244 static void *ti_dra7_xbar_route_allocate(struct of_phandle_args *dma_spec,
245 					 struct of_dma *ofdma)
246 {
247 	struct platform_device *pdev = of_find_device_by_node(ofdma->of_node);
248 	struct ti_dra7_xbar_data *xbar = platform_get_drvdata(pdev);
249 	struct ti_dra7_xbar_map *map;
250 
251 	if (dma_spec->args[0] >= xbar->xbar_requests) {
252 		dev_err(&pdev->dev, "Invalid XBAR request number: %d\n",
253 			dma_spec->args[0]);
254 		return ERR_PTR(-EINVAL);
255 	}
256 
257 	/* The of_node_put() will be done in the core for the node */
258 	dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", 0);
259 	if (!dma_spec->np) {
260 		dev_err(&pdev->dev, "Can't get DMA master\n");
261 		return ERR_PTR(-EINVAL);
262 	}
263 
264 	map = kzalloc(sizeof(*map), GFP_KERNEL);
265 	if (!map) {
266 		of_node_put(dma_spec->np);
267 		return ERR_PTR(-ENOMEM);
268 	}
269 
270 	mutex_lock(&xbar->mutex);
271 	map->xbar_out = find_first_zero_bit(xbar->dma_inuse,
272 					    xbar->dma_requests);
273 	if (map->xbar_out == xbar->dma_requests) {
274 		mutex_unlock(&xbar->mutex);
275 		dev_err(&pdev->dev, "Run out of free DMA requests\n");
276 		kfree(map);
277 		return ERR_PTR(-ENOMEM);
278 	}
279 	set_bit(map->xbar_out, xbar->dma_inuse);
280 	mutex_unlock(&xbar->mutex);
281 
282 	map->xbar_in = (u16)dma_spec->args[0];
283 
284 	dma_spec->args[0] = map->xbar_out + xbar->dma_offset;
285 
286 	dev_dbg(&pdev->dev, "Mapping XBAR%u to DMA%d\n",
287 		map->xbar_in, map->xbar_out);
288 
289 	ti_dra7_xbar_write(xbar->iomem, map->xbar_out, map->xbar_in);
290 
291 	return map;
292 }
293 
/* Request-number offsets of the supported DRA7 DMA masters */
#define TI_XBAR_EDMA_OFFSET	0
#define TI_XBAR_SDMA_OFFSET	1
/*
 * Offset values referenced by the ti_dra7_master_match .data pointers;
 * copied into xbar->dma_offset in ti_dra7_xbar_probe().
 */
static const u32 ti_dma_offset[] = {
	[TI_XBAR_EDMA_OFFSET] = 0,
	[TI_XBAR_SDMA_OFFSET] = 1,
};
300 
/* DMA masters the DRA7 crossbar may feed; .data is the request offset */
static const struct of_device_id ti_dra7_master_match[] = {
	{
		.compatible = "ti,omap4430-sdma",
		.data = &ti_dma_offset[TI_XBAR_SDMA_OFFSET],
	},
	{
		.compatible = "ti,edma3",
		.data = &ti_dma_offset[TI_XBAR_EDMA_OFFSET],
	},
	{
		.compatible = "ti,edma3-tpcc",
		.data = &ti_dma_offset[TI_XBAR_EDMA_OFFSET],
	},
	{},
};
316 
/* Mark @len bits starting at @offset as in-use in bitmap @p */
static inline void ti_dra7_xbar_reserve(int offset, int len, unsigned long *p)
{
	int i;

	for (i = 0; i < len; i++)
		set_bit(offset + i, p);
}
322 
323 static int ti_dra7_xbar_probe(struct platform_device *pdev)
324 {
325 	struct device_node *node = pdev->dev.of_node;
326 	const struct of_device_id *match;
327 	struct device_node *dma_node;
328 	struct ti_dra7_xbar_data *xbar;
329 	struct property *prop;
330 	struct resource *res;
331 	u32 safe_val;
332 	int sz;
333 	void __iomem *iomem;
334 	int i, ret;
335 
336 	if (!node)
337 		return -ENODEV;
338 
339 	xbar = devm_kzalloc(&pdev->dev, sizeof(*xbar), GFP_KERNEL);
340 	if (!xbar)
341 		return -ENOMEM;
342 
343 	dma_node = of_parse_phandle(node, "dma-masters", 0);
344 	if (!dma_node) {
345 		dev_err(&pdev->dev, "Can't get DMA master node\n");
346 		return -ENODEV;
347 	}
348 
349 	match = of_match_node(ti_dra7_master_match, dma_node);
350 	if (!match) {
351 		dev_err(&pdev->dev, "DMA master is not supported\n");
352 		of_node_put(dma_node);
353 		return -EINVAL;
354 	}
355 
356 	if (of_property_read_u32(dma_node, "dma-requests",
357 				 &xbar->dma_requests)) {
358 		dev_info(&pdev->dev,
359 			 "Missing XBAR output information, using %u.\n",
360 			 TI_DRA7_XBAR_OUTPUTS);
361 		xbar->dma_requests = TI_DRA7_XBAR_OUTPUTS;
362 	}
363 	of_node_put(dma_node);
364 
365 	xbar->dma_inuse = devm_kcalloc(&pdev->dev,
366 				       BITS_TO_LONGS(xbar->dma_requests),
367 				       sizeof(unsigned long), GFP_KERNEL);
368 	if (!xbar->dma_inuse)
369 		return -ENOMEM;
370 
371 	if (of_property_read_u32(node, "dma-requests", &xbar->xbar_requests)) {
372 		dev_info(&pdev->dev,
373 			 "Missing XBAR input information, using %u.\n",
374 			 TI_DRA7_XBAR_INPUTS);
375 		xbar->xbar_requests = TI_DRA7_XBAR_INPUTS;
376 	}
377 
378 	if (!of_property_read_u32(node, "ti,dma-safe-map", &safe_val))
379 		xbar->safe_val = (u16)safe_val;
380 
381 
382 	prop = of_find_property(node, "ti,reserved-dma-request-ranges", &sz);
383 	if (prop) {
384 		const char pname[] = "ti,reserved-dma-request-ranges";
385 		u32 (*rsv_events)[2];
386 		size_t nelm = sz / sizeof(*rsv_events);
387 		int i;
388 
389 		if (!nelm)
390 			return -EINVAL;
391 
392 		rsv_events = kcalloc(nelm, sizeof(*rsv_events), GFP_KERNEL);
393 		if (!rsv_events)
394 			return -ENOMEM;
395 
396 		ret = of_property_read_u32_array(node, pname, (u32 *)rsv_events,
397 						 nelm * 2);
398 		if (ret)
399 			return ret;
400 
401 		for (i = 0; i < nelm; i++) {
402 			ti_dra7_xbar_reserve(rsv_events[i][0], rsv_events[i][1],
403 					     xbar->dma_inuse);
404 		}
405 		kfree(rsv_events);
406 	}
407 
408 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
409 	iomem = devm_ioremap_resource(&pdev->dev, res);
410 	if (IS_ERR(iomem))
411 		return PTR_ERR(iomem);
412 
413 	xbar->iomem = iomem;
414 
415 	xbar->dmarouter.dev = &pdev->dev;
416 	xbar->dmarouter.route_free = ti_dra7_xbar_free;
417 	xbar->dma_offset = *(u32 *)match->data;
418 
419 	mutex_init(&xbar->mutex);
420 	platform_set_drvdata(pdev, xbar);
421 
422 	/* Reset the crossbar */
423 	for (i = 0; i < xbar->dma_requests; i++) {
424 		if (!test_bit(i, xbar->dma_inuse))
425 			ti_dra7_xbar_write(xbar->iomem, i, xbar->safe_val);
426 	}
427 
428 	ret = of_dma_router_register(node, ti_dra7_xbar_route_allocate,
429 				     &xbar->dmarouter);
430 	if (ret) {
431 		/* Restore the defaults for the crossbar */
432 		for (i = 0; i < xbar->dma_requests; i++) {
433 			if (!test_bit(i, xbar->dma_inuse))
434 				ti_dra7_xbar_write(xbar->iomem, i, i);
435 		}
436 	}
437 
438 	return ret;
439 }
440 
441 static int ti_dma_xbar_probe(struct platform_device *pdev)
442 {
443 	const struct of_device_id *match;
444 	int ret;
445 
446 	match = of_match_node(ti_dma_xbar_match, pdev->dev.of_node);
447 	if (unlikely(!match))
448 		return -EINVAL;
449 
450 	switch (*(u32 *)match->data) {
451 	case TI_XBAR_DRA7:
452 		ret = ti_dra7_xbar_probe(pdev);
453 		break;
454 	case TI_XBAR_AM335X:
455 		ret = ti_am335x_xbar_probe(pdev);
456 		break;
457 	default:
458 		dev_err(&pdev->dev, "Unsupported crossbar\n");
459 		ret = -ENODEV;
460 		break;
461 	}
462 
463 	return ret;
464 }
465 
/* Single platform driver covering both crossbar flavours */
static struct platform_driver ti_dma_xbar_driver = {
	.driver = {
		.name = "ti-dma-crossbar",
		.of_match_table = of_match_ptr(ti_dma_xbar_match),
	},
	.probe	= ti_dma_xbar_probe,
};
473 
/*
 * Registered at arch_initcall level so the router is available before
 * the DMA client drivers probe.
 */
static int omap_dmaxbar_init(void)
{
	return platform_driver_register(&ti_dma_xbar_driver);
}
arch_initcall(omap_dmaxbar_init);
479