// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com
 * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
 */
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>

#define TI_XBAR_DRA7		0
#define TI_XBAR_AM335X		1
static const u32 ti_xbar_type[] = {
	[TI_XBAR_DRA7] = TI_XBAR_DRA7,
	[TI_XBAR_AM335X] = TI_XBAR_AM335X,
};

static const struct of_device_id ti_dma_xbar_match[] = {
	{
		.compatible = "ti,dra7-dma-crossbar",
		.data = &ti_xbar_type[TI_XBAR_DRA7],
	},
	{
		.compatible = "ti,am335x-edma-crossbar",
		.data = &ti_xbar_type[TI_XBAR_AM335X],
	},
	{},
};

/* Crossbar on AM335x/AM437x family */
#define TI_AM335X_XBAR_LINES	64

struct ti_am335x_xbar_data {
	void __iomem *iomem;

	struct dma_router dmarouter;

	u32 xbar_events; /* maximum number of events to select in xbar */
	u32 dma_requests; /* number of DMA requests on eDMA */
};

struct ti_am335x_xbar_map {
	u16 dma_line;
	u8 mux_val;
};

static inline void ti_am335x_xbar_write(void __iomem *iomem, int event, u8 val)
{
	/*
	 * The TPCC_EVT_MUX_60_63 register layout differs from the rest:
	 * event 63 is mapped to the lowest byte and event 60 to the
	 * highest, so handle these events separately.
	 */
	if (event >= 60 && event <= 63)
		writeb_relaxed(val, iomem + (63 - event % 4));
	else
		writeb_relaxed(val, iomem + event);
}

static void ti_am335x_xbar_free(struct device *dev, void *route_data)
{
	struct ti_am335x_xbar_data *xbar = dev_get_drvdata(dev);
	struct ti_am335x_xbar_map *map = route_data;

	dev_dbg(dev, "Unmapping XBAR event %u on channel %u\n",
		map->mux_val, map->dma_line);

	ti_am335x_xbar_write(xbar->iomem, map->dma_line, 0);
	kfree(map);
}

static void *ti_am335x_xbar_route_allocate(struct of_phandle_args *dma_spec,
					   struct of_dma *ofdma)
{
	struct platform_device *pdev = of_find_device_by_node(ofdma->of_node);
	struct ti_am335x_xbar_data *xbar = platform_get_drvdata(pdev);
	struct ti_am335x_xbar_map *map;

	if (dma_spec->args_count != 3)
		return ERR_PTR(-EINVAL);

	if (dma_spec->args[2] >= xbar->xbar_events) {
		dev_err(&pdev->dev, "Invalid XBAR event number: %d\n",
			dma_spec->args[2]);
		return ERR_PTR(-EINVAL);
	}

	if (dma_spec->args[0] >= xbar->dma_requests) {
		dev_err(&pdev->dev, "Invalid DMA request line number: %d\n",
			dma_spec->args[0]);
		return ERR_PTR(-EINVAL);
	}

	/* The of_node_put() will be done in the core for the node */
	dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", 0);
	if (!dma_spec->np) {
		dev_err(&pdev->dev, "Can't get DMA master\n");
		return ERR_PTR(-EINVAL);
	}

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (!map) {
		of_node_put(dma_spec->np);
		return ERR_PTR(-ENOMEM);
	}

	map->dma_line = (u16)dma_spec->args[0];
	map->mux_val = (u8)dma_spec->args[2];

	dma_spec->args[2] = 0;
	dma_spec->args_count = 2;

	dev_dbg(&pdev->dev, "Mapping XBAR event%u to DMA%u\n",
		map->mux_val, map->dma_line);

	ti_am335x_xbar_write(xbar->iomem, map->dma_line, map->mux_val);

	return map;
}

static const struct of_device_id ti_am335x_master_match[] = {
	{ .compatible = "ti,edma3-tpcc", },
	{},
};
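
/*
 * Illustrative usage only; the label and numbers below are examples, not
 * taken from a particular board. The AM335x crossbar node is a dma-router
 * with a three-cell consumer specifier, e.g.
 *
 *	dmas = <&edma_xbar 12 0 1>;
 *
 * ti_am335x_xbar_route_allocate() above interprets cell 0 as the eDMA
 * request line (12) and cell 2 as the crossbar event to route onto it (1),
 * programs the event mux accordingly, and rewrites the specifier to the
 * two-cell form handed on to the "ti,edma3-tpcc" master (cell 1 is passed
 * through untouched).
 */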

static int ti_am335x_xbar_probe(struct platform_device *pdev)
{
	struct device_node *node = pdev->dev.of_node;
	const struct of_device_id *match;
	struct device_node *dma_node;
	struct ti_am335x_xbar_data *xbar;
	void __iomem *iomem;
	int i, ret;

	if (!node)
		return -ENODEV;

	xbar = devm_kzalloc(&pdev->dev, sizeof(*xbar), GFP_KERNEL);
	if (!xbar)
		return -ENOMEM;

	dma_node = of_parse_phandle(node, "dma-masters", 0);
	if (!dma_node) {
		dev_err(&pdev->dev, "Can't get DMA master node\n");
		return -ENODEV;
	}

	match = of_match_node(ti_am335x_master_match, dma_node);
	if (!match) {
		dev_err(&pdev->dev, "DMA master is not supported\n");
		of_node_put(dma_node);
		return -EINVAL;
	}

	if (of_property_read_u32(dma_node, "dma-requests",
				 &xbar->dma_requests)) {
		dev_info(&pdev->dev,
			 "Missing XBAR output information, using %u.\n",
			 TI_AM335X_XBAR_LINES);
		xbar->dma_requests = TI_AM335X_XBAR_LINES;
	}
	of_node_put(dma_node);

	if (of_property_read_u32(node, "dma-requests", &xbar->xbar_events)) {
		dev_info(&pdev->dev,
			 "Missing XBAR input information, using %u.\n",
			 TI_AM335X_XBAR_LINES);
		xbar->xbar_events = TI_AM335X_XBAR_LINES;
	}

	iomem = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(iomem))
		return PTR_ERR(iomem);

	xbar->iomem = iomem;

	xbar->dmarouter.dev = &pdev->dev;
	xbar->dmarouter.route_free = ti_am335x_xbar_free;

	platform_set_drvdata(pdev, xbar);

	/* Reset the crossbar */
	for (i = 0; i < xbar->dma_requests; i++)
		ti_am335x_xbar_write(xbar->iomem, i, 0);

	ret = of_dma_router_register(node, ti_am335x_xbar_route_allocate,
				     &xbar->dmarouter);

	return ret;
}

/* Crossbar on DRA7xx family */
#define TI_DRA7_XBAR_OUTPUTS	127
#define TI_DRA7_XBAR_INPUTS	256

struct ti_dra7_xbar_data {
	void __iomem *iomem;

	struct dma_router dmarouter;
	struct mutex mutex;
	unsigned long *dma_inuse;

	u16 safe_val; /* Value to reset the crossbar lines */
	u32 xbar_requests; /* number of DMA requests connected to XBAR */
	u32 dma_requests; /* number of DMA requests forwarded to DMA */
	u32 dma_offset;
};

struct ti_dra7_xbar_map {
	u16 xbar_in;
	int xbar_out;
};

static inline void ti_dra7_xbar_write(void __iomem *iomem, int xbar, u16 val)
{
	writew_relaxed(val, iomem + (xbar * 2));
}

static void ti_dra7_xbar_free(struct device *dev, void *route_data)
{
	struct ti_dra7_xbar_data *xbar = dev_get_drvdata(dev);
	struct ti_dra7_xbar_map *map = route_data;

	dev_dbg(dev, "Unmapping XBAR%u (was routed to %d)\n",
		map->xbar_in, map->xbar_out);

	ti_dra7_xbar_write(xbar->iomem, map->xbar_out, xbar->safe_val);
	mutex_lock(&xbar->mutex);
	clear_bit(map->xbar_out, xbar->dma_inuse);
	mutex_unlock(&xbar->mutex);
	kfree(map);
}

static void *ti_dra7_xbar_route_allocate(struct of_phandle_args *dma_spec,
					 struct of_dma *ofdma)
{
	struct platform_device *pdev = of_find_device_by_node(ofdma->of_node);
	struct ti_dra7_xbar_data *xbar = platform_get_drvdata(pdev);
	struct ti_dra7_xbar_map *map;

	if (dma_spec->args[0] >= xbar->xbar_requests) {
		dev_err(&pdev->dev, "Invalid XBAR request number: %d\n",
			dma_spec->args[0]);
		return ERR_PTR(-EINVAL);
	}

	/* The of_node_put() will be done in the core for the node */
	dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", 0);
	if (!dma_spec->np) {
		dev_err(&pdev->dev, "Can't get DMA master\n");
		return ERR_PTR(-EINVAL);
	}

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (!map) {
		of_node_put(dma_spec->np);
		return ERR_PTR(-ENOMEM);
	}

	mutex_lock(&xbar->mutex);
	map->xbar_out = find_first_zero_bit(xbar->dma_inuse,
					    xbar->dma_requests);
	if (map->xbar_out == xbar->dma_requests) {
		mutex_unlock(&xbar->mutex);
		dev_err(&pdev->dev, "Ran out of free DMA requests\n");
		kfree(map);
		return ERR_PTR(-ENOMEM);
	}
	set_bit(map->xbar_out, xbar->dma_inuse);
	mutex_unlock(&xbar->mutex);

	map->xbar_in = (u16)dma_spec->args[0];

	dma_spec->args[0] = map->xbar_out + xbar->dma_offset;

	dev_dbg(&pdev->dev, "Mapping XBAR%u to DMA%d\n",
		map->xbar_in, map->xbar_out);

	ti_dra7_xbar_write(xbar->iomem, map->xbar_out, map->xbar_in);

	return map;
}
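
/*
 * Illustrative usage only; the label and number are examples. The DRA7
 * crossbar is a dma-router whose consumers name just the crossbar input,
 * e.g.
 *
 *	dmas = <&sdma_xbar 49>;
 *
 * ti_dra7_xbar_route_allocate() above picks a free DMA request line from
 * the dma_inuse bitmap, programs the crossbar to feed the requested input
 * into it, and rewrites the specifier to that line plus dma_offset taken
 * from the matched master (see ti_dma_offset[] below).
 */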

#define TI_XBAR_EDMA_OFFSET	0
#define TI_XBAR_SDMA_OFFSET	1
static const u32 ti_dma_offset[] = {
	[TI_XBAR_EDMA_OFFSET] = 0,
	[TI_XBAR_SDMA_OFFSET] = 1,
};

static const struct of_device_id ti_dra7_master_match[] = {
	{
		.compatible = "ti,omap4430-sdma",
		.data = &ti_dma_offset[TI_XBAR_SDMA_OFFSET],
	},
	{
		.compatible = "ti,edma3",
		.data = &ti_dma_offset[TI_XBAR_EDMA_OFFSET],
	},
	{
		.compatible = "ti,edma3-tpcc",
		.data = &ti_dma_offset[TI_XBAR_EDMA_OFFSET],
	},
	{},
};

static inline void ti_dra7_xbar_reserve(int offset, int len, unsigned long *p)
{
	for (; len > 0; len--)
		set_bit(offset + (len - 1), p);
}

static int ti_dra7_xbar_probe(struct platform_device *pdev)
{
	struct device_node *node = pdev->dev.of_node;
	const struct of_device_id *match;
	struct device_node *dma_node;
	struct ti_dra7_xbar_data *xbar;
	struct property *prop;
	u32 safe_val;
	int sz;
	void __iomem *iomem;
	int i, ret;

	if (!node)
		return -ENODEV;

	xbar = devm_kzalloc(&pdev->dev, sizeof(*xbar), GFP_KERNEL);
	if (!xbar)
		return -ENOMEM;

	dma_node = of_parse_phandle(node, "dma-masters", 0);
	if (!dma_node) {
		dev_err(&pdev->dev, "Can't get DMA master node\n");
		return -ENODEV;
	}

	match = of_match_node(ti_dra7_master_match, dma_node);
	if (!match) {
		dev_err(&pdev->dev, "DMA master is not supported\n");
		of_node_put(dma_node);
		return -EINVAL;
	}

	if (of_property_read_u32(dma_node, "dma-requests",
				 &xbar->dma_requests)) {
		dev_info(&pdev->dev,
			 "Missing XBAR output information, using %u.\n",
			 TI_DRA7_XBAR_OUTPUTS);
		xbar->dma_requests = TI_DRA7_XBAR_OUTPUTS;
	}
	of_node_put(dma_node);

	xbar->dma_inuse = devm_kcalloc(&pdev->dev,
				       BITS_TO_LONGS(xbar->dma_requests),
				       sizeof(unsigned long), GFP_KERNEL);
	if (!xbar->dma_inuse)
		return -ENOMEM;

	if (of_property_read_u32(node, "dma-requests", &xbar->xbar_requests)) {
		dev_info(&pdev->dev,
			 "Missing XBAR input information, using %u.\n",
			 TI_DRA7_XBAR_INPUTS);
		xbar->xbar_requests = TI_DRA7_XBAR_INPUTS;
	}

	if (!of_property_read_u32(node, "ti,dma-safe-map", &safe_val))
		xbar->safe_val = (u16)safe_val;

	prop = of_find_property(node, "ti,reserved-dma-request-ranges", &sz);
	if (prop) {
		const char pname[] = "ti,reserved-dma-request-ranges";
		u32 (*rsv_events)[2];
		size_t nelm = sz / sizeof(*rsv_events);
		int i;

		if (!nelm)
			return -EINVAL;

		rsv_events = kcalloc(nelm, sizeof(*rsv_events), GFP_KERNEL);
		if (!rsv_events)
			return -ENOMEM;

		ret = of_property_read_u32_array(node, pname, (u32 *)rsv_events,
						 nelm * 2);
		if (ret) {
			kfree(rsv_events);
			return ret;
		}

		for (i = 0; i < nelm; i++) {
			ti_dra7_xbar_reserve(rsv_events[i][0], rsv_events[i][1],
					     xbar->dma_inuse);
		}
		kfree(rsv_events);
	}

	iomem = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(iomem))
		return PTR_ERR(iomem);

	xbar->iomem = iomem;

	xbar->dmarouter.dev = &pdev->dev;
	xbar->dmarouter.route_free = ti_dra7_xbar_free;
	xbar->dma_offset = *(u32 *)match->data;

	mutex_init(&xbar->mutex);
	platform_set_drvdata(pdev, xbar);

	/* Reset the crossbar */
	for (i = 0; i < xbar->dma_requests; i++) {
		if (!test_bit(i, xbar->dma_inuse))
			ti_dra7_xbar_write(xbar->iomem, i, xbar->safe_val);
	}

	ret = of_dma_router_register(node, ti_dra7_xbar_route_allocate,
				     &xbar->dmarouter);
	if (ret) {
		/* Restore the defaults for the crossbar */
		for (i = 0; i < xbar->dma_requests; i++) {
			if (!test_bit(i, xbar->dma_inuse))
				ti_dra7_xbar_write(xbar->iomem, i, i);
		}
	}

	return ret;
}
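
/*
 * Illustrative only, the property values are made up. A DRA7 crossbar node
 * may carry
 *
 *	ti,dma-safe-map = <0>;
 *	ti,reserved-dma-request-ranges = <6 1>, <50 4>;
 *
 * ti_dra7_xbar_probe() above writes the safe value to every output that is
 * not already in use and marks each <offset length> pair as set in the
 * dma_inuse bitmap, so those request lines are never handed out (or reset)
 * by the router.
 */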

static int ti_dma_xbar_probe(struct platform_device *pdev)
{
	const struct of_device_id *match;
	int ret;

	match = of_match_node(ti_dma_xbar_match, pdev->dev.of_node);
	if (unlikely(!match))
		return -EINVAL;

	switch (*(u32 *)match->data) {
	case TI_XBAR_DRA7:
		ret = ti_dra7_xbar_probe(pdev);
		break;
	case TI_XBAR_AM335X:
		ret = ti_am335x_xbar_probe(pdev);
		break;
	default:
		dev_err(&pdev->dev, "Unsupported crossbar\n");
		ret = -ENODEV;
		break;
	}

	return ret;
}

static struct platform_driver ti_dma_xbar_driver = {
	.driver = {
		.name = "ti-dma-crossbar",
		.of_match_table = of_match_ptr(ti_dma_xbar_match),
	},
	.probe	= ti_dma_xbar_probe,
};

static int omap_dmaxbar_init(void)
{
	return platform_driver_register(&ti_dma_xbar_driver);
}
arch_initcall(omap_dmaxbar_init);