/*
 * Qualcomm Technologies HIDMA DMA engine Management interface
 *
 * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/dmaengine.h>
#include <linux/acpi.h>
#include <linux/of.h>
#include <linux/property.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/bitops.h>
#include <linux/dma-mapping.h>

#include "hidma_mgmt.h"

#define HIDMA_QOS_N_OFFSET		0x700
#define HIDMA_CFG_OFFSET		0x400
#define HIDMA_MAX_BUS_REQ_LEN_OFFSET	0x41C
#define HIDMA_MAX_XACTIONS_OFFSET	0x420
#define HIDMA_HW_VERSION_OFFSET		0x424
#define HIDMA_CHRESET_TIMEOUT_OFFSET	0x418

#define HIDMA_MAX_WR_XACTIONS_MASK	GENMASK(4, 0)
#define HIDMA_MAX_RD_XACTIONS_MASK	GENMASK(4, 0)
#define HIDMA_WEIGHT_MASK		GENMASK(6, 0)
#define HIDMA_MAX_BUS_REQ_LEN_MASK	GENMASK(15, 0)
#define HIDMA_CHRESET_TIMEOUT_MASK	GENMASK(19, 0)

#define HIDMA_MAX_WR_XACTIONS_BIT_POS	16
#define HIDMA_MAX_BUS_WR_REQ_BIT_POS	16
#define HIDMA_WRR_BIT_POS		8
#define HIDMA_PRIORITY_BIT_POS		15

#define HIDMA_AUTOSUSPEND_TIMEOUT	2000
#define HIDMA_MAX_CHANNEL_WEIGHT	15

static unsigned int max_write_request;
module_param(max_write_request, uint, 0644);
MODULE_PARM_DESC(max_write_request,
		 "maximum write burst (default: ACPI/DT value)");

static unsigned int max_read_request;
module_param(max_read_request, uint, 0644);
MODULE_PARM_DESC(max_read_request,
		 "maximum read burst (default: ACPI/DT value)");

static unsigned int max_wr_xactions;
module_param(max_wr_xactions, uint, 0644);
MODULE_PARM_DESC(max_wr_xactions,
		 "maximum number of write transactions (default: ACPI/DT value)");

static unsigned int max_rd_xactions;
module_param(max_rd_xactions, uint, 0644);
MODULE_PARM_DESC(max_rd_xactions,
		 "maximum number of read transactions (default: ACPI/DT value)");

int hidma_mgmt_setup(struct hidma_mgmt_dev *mgmtdev)
{
	unsigned int i;
	u32 val;

	if (!is_power_of_2(mgmtdev->max_write_request) ||
	    (mgmtdev->max_write_request < 128) ||
	    (mgmtdev->max_write_request > 1024)) {
		dev_err(&mgmtdev->pdev->dev, "invalid write request %d\n",
			mgmtdev->max_write_request);
		return -EINVAL;
	}

	if (!is_power_of_2(mgmtdev->max_read_request) ||
	    (mgmtdev->max_read_request < 128) ||
	    (mgmtdev->max_read_request > 1024)) {
		dev_err(&mgmtdev->pdev->dev, "invalid read request %d\n",
			mgmtdev->max_read_request);
		return -EINVAL;
	}

	if (mgmtdev->max_wr_xactions > HIDMA_MAX_WR_XACTIONS_MASK) {
		dev_err(&mgmtdev->pdev->dev,
			"max_wr_xactions cannot be bigger than %ld\n",
			HIDMA_MAX_WR_XACTIONS_MASK);
		return -EINVAL;
	}

	if (mgmtdev->max_rd_xactions > HIDMA_MAX_RD_XACTIONS_MASK) {
		dev_err(&mgmtdev->pdev->dev,
			"max_rd_xactions cannot be bigger than %ld\n",
			HIDMA_MAX_RD_XACTIONS_MASK);
		return -EINVAL;
	}

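	/*
	 * Per-channel QoS sanity checks: priority is a single bit, weight
	 * is capped at HIDMA_MAX_CHANNEL_WEIGHT, and a zero weight is
	 * rounded up to one so every channel gets a share of the bus.
	 */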
	for (i = 0; i < mgmtdev->dma_channels; i++) {
		if (mgmtdev->priority[i] > 1) {
			dev_err(&mgmtdev->pdev->dev,
				"priority can be 0 or 1\n");
			return -EINVAL;
		}

		if (mgmtdev->weight[i] > HIDMA_MAX_CHANNEL_WEIGHT) {
			dev_err(&mgmtdev->pdev->dev,
				"max value of weight can be %d.\n",
				HIDMA_MAX_CHANNEL_WEIGHT);
			return -EINVAL;
		}

		/* weight needs to be at least one */
		if (mgmtdev->weight[i] == 0)
			mgmtdev->weight[i] = 1;
	}

	pm_runtime_get_sync(&mgmtdev->pdev->dev);
	val = readl(mgmtdev->virtaddr + HIDMA_MAX_BUS_REQ_LEN_OFFSET);
	val &= ~(HIDMA_MAX_BUS_REQ_LEN_MASK << HIDMA_MAX_BUS_WR_REQ_BIT_POS);
	val |= mgmtdev->max_write_request << HIDMA_MAX_BUS_WR_REQ_BIT_POS;
	val &= ~HIDMA_MAX_BUS_REQ_LEN_MASK;
	val |= mgmtdev->max_read_request;
	writel(val, mgmtdev->virtaddr + HIDMA_MAX_BUS_REQ_LEN_OFFSET);

	val = readl(mgmtdev->virtaddr + HIDMA_MAX_XACTIONS_OFFSET);
	val &= ~(HIDMA_MAX_WR_XACTIONS_MASK << HIDMA_MAX_WR_XACTIONS_BIT_POS);
	val |= mgmtdev->max_wr_xactions << HIDMA_MAX_WR_XACTIONS_BIT_POS;
	val &= ~HIDMA_MAX_RD_XACTIONS_MASK;
	val |= mgmtdev->max_rd_xactions;
	writel(val, mgmtdev->virtaddr + HIDMA_MAX_XACTIONS_OFFSET);

	mgmtdev->hw_version =
	    readl(mgmtdev->virtaddr + HIDMA_HW_VERSION_OFFSET);
	mgmtdev->hw_version_major = (mgmtdev->hw_version >> 28) & 0xF;
	mgmtdev->hw_version_minor = (mgmtdev->hw_version >> 16) & 0xF;

	for (i = 0; i < mgmtdev->dma_channels; i++) {
		u32 weight = mgmtdev->weight[i];
		u32 priority = mgmtdev->priority[i];

		val = readl(mgmtdev->virtaddr + HIDMA_QOS_N_OFFSET + (4 * i));
		val &= ~(1 << HIDMA_PRIORITY_BIT_POS);
		val |= (priority & 0x1) << HIDMA_PRIORITY_BIT_POS;
		val &= ~(HIDMA_WEIGHT_MASK << HIDMA_WRR_BIT_POS);
		val |= (weight & HIDMA_WEIGHT_MASK) << HIDMA_WRR_BIT_POS;
		writel(val, mgmtdev->virtaddr + HIDMA_QOS_N_OFFSET + (4 * i));
	}

	val = readl(mgmtdev->virtaddr + HIDMA_CHRESET_TIMEOUT_OFFSET);
	val &= ~HIDMA_CHRESET_TIMEOUT_MASK;
	val |= mgmtdev->chreset_timeout_cycles & HIDMA_CHRESET_TIMEOUT_MASK;
	writel(val, mgmtdev->virtaddr + HIDMA_CHRESET_TIMEOUT_OFFSET);

	pm_runtime_mark_last_busy(&mgmtdev->pdev->dev);
	pm_runtime_put_autosuspend(&mgmtdev->pdev->dev);
	return 0;
}
EXPORT_SYMBOL_GPL(hidma_mgmt_setup);

static int hidma_mgmt_probe(struct platform_device *pdev)
{
	struct hidma_mgmt_dev *mgmtdev;
	struct resource *res;
	void __iomem *virtaddr;
	int irq;
	int rc;
	u32 val;

	pm_runtime_set_autosuspend_delay(&pdev->dev, HIDMA_AUTOSUSPEND_TIMEOUT);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_sync(&pdev->dev);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	virtaddr = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(virtaddr)) {
		rc = -ENOMEM;
		goto out;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "irq resources not found\n");
		rc = irq;
		goto out;
	}

	mgmtdev = devm_kzalloc(&pdev->dev, sizeof(*mgmtdev), GFP_KERNEL);
	if (!mgmtdev) {
		rc = -ENOMEM;
		goto out;
	}

	mgmtdev->pdev = pdev;
	mgmtdev->addrsize = resource_size(res);
	mgmtdev->virtaddr = virtaddr;

	rc = device_property_read_u32(&pdev->dev, "dma-channels",
				      &mgmtdev->dma_channels);
	if (rc) {
		dev_err(&pdev->dev, "number of channels missing\n");
		goto out;
	}

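	/*
	 * The remaining configuration comes from ACPI/DT. For the burst and
	 * transaction limits below, a non-zero module parameter overrides
	 * the firmware value; otherwise the module parameter is updated to
	 * report the value actually in use.
	 */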
	rc = device_property_read_u32(&pdev->dev,
				      "channel-reset-timeout-cycles",
				      &mgmtdev->chreset_timeout_cycles);
	if (rc) {
		dev_err(&pdev->dev, "channel reset timeout missing\n");
		goto out;
	}

	rc = device_property_read_u32(&pdev->dev, "max-write-burst-bytes",
				      &mgmtdev->max_write_request);
	if (rc) {
		dev_err(&pdev->dev, "max-write-burst-bytes missing\n");
		goto out;
	}

	if (max_write_request &&
	    (max_write_request != mgmtdev->max_write_request)) {
		dev_info(&pdev->dev, "overriding max-write-burst-bytes: %d\n",
			 max_write_request);
		mgmtdev->max_write_request = max_write_request;
	} else
		max_write_request = mgmtdev->max_write_request;

	rc = device_property_read_u32(&pdev->dev, "max-read-burst-bytes",
				      &mgmtdev->max_read_request);
	if (rc) {
		dev_err(&pdev->dev, "max-read-burst-bytes missing\n");
		goto out;
	}
	if (max_read_request &&
	    (max_read_request != mgmtdev->max_read_request)) {
		dev_info(&pdev->dev, "overriding max-read-burst-bytes: %d\n",
			 max_read_request);
		mgmtdev->max_read_request = max_read_request;
	} else
		max_read_request = mgmtdev->max_read_request;

	rc = device_property_read_u32(&pdev->dev, "max-write-transactions",
				      &mgmtdev->max_wr_xactions);
	if (rc) {
		dev_err(&pdev->dev, "max-write-transactions missing\n");
		goto out;
	}
	if (max_wr_xactions &&
	    (max_wr_xactions != mgmtdev->max_wr_xactions)) {
		dev_info(&pdev->dev, "overriding max-write-transactions: %d\n",
			 max_wr_xactions);
		mgmtdev->max_wr_xactions = max_wr_xactions;
	} else
		max_wr_xactions = mgmtdev->max_wr_xactions;

	rc = device_property_read_u32(&pdev->dev, "max-read-transactions",
				      &mgmtdev->max_rd_xactions);
	if (rc) {
		dev_err(&pdev->dev, "max-read-transactions missing\n");
		goto out;
	}
	if (max_rd_xactions &&
	    (max_rd_xactions != mgmtdev->max_rd_xactions)) {
		dev_info(&pdev->dev, "overriding max-read-transactions: %d\n",
			 max_rd_xactions);
		mgmtdev->max_rd_xactions = max_rd_xactions;
	} else
		max_rd_xactions = mgmtdev->max_rd_xactions;

	mgmtdev->priority = devm_kcalloc(&pdev->dev,
					 mgmtdev->dma_channels,
					 sizeof(*mgmtdev->priority),
					 GFP_KERNEL);
	if (!mgmtdev->priority) {
		rc = -ENOMEM;
		goto out;
	}

	mgmtdev->weight = devm_kcalloc(&pdev->dev,
				       mgmtdev->dma_channels,
				       sizeof(*mgmtdev->weight), GFP_KERNEL);
	if (!mgmtdev->weight) {
		rc = -ENOMEM;
		goto out;
	}

	rc = hidma_mgmt_setup(mgmtdev);
	if (rc) {
		dev_err(&pdev->dev, "setup failed\n");
		goto out;
	}

	/* start the HW */
	val = readl(mgmtdev->virtaddr + HIDMA_CFG_OFFSET);
	val |= 1;
	writel(val, mgmtdev->virtaddr + HIDMA_CFG_OFFSET);

	rc = hidma_mgmt_init_sys(mgmtdev);
	if (rc) {
		dev_err(&pdev->dev, "sysfs setup failed\n");
		goto out;
	}

	dev_info(&pdev->dev,
		 "HW rev: %d.%d @ %pa with %d physical channels\n",
		 mgmtdev->hw_version_major, mgmtdev->hw_version_minor,
		 &res->start, mgmtdev->dma_channels);

	platform_set_drvdata(pdev, mgmtdev);
	pm_runtime_mark_last_busy(&pdev->dev);
	pm_runtime_put_autosuspend(&pdev->dev);
	return 0;
out:
	pm_runtime_put_sync_suspend(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return rc;
}

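/*
 * The management device is enumerated either through ACPI ("QCOM8060")
 * or through the device tree ("qcom,hidma-mgmt-1.0").
 */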
#if IS_ENABLED(CONFIG_ACPI)
static const struct acpi_device_id hidma_mgmt_acpi_ids[] = {
	{"QCOM8060"},
	{},
};
MODULE_DEVICE_TABLE(acpi, hidma_mgmt_acpi_ids);
#endif

static const struct of_device_id hidma_mgmt_match[] = {
	{.compatible = "qcom,hidma-mgmt-1.0",},
	{},
};
MODULE_DEVICE_TABLE(of, hidma_mgmt_match);

static struct platform_driver hidma_mgmt_driver = {
	.probe = hidma_mgmt_probe,
	.driver = {
		   .name = "hidma-mgmt",
		   .of_match_table = hidma_mgmt_match,
		   .acpi_match_table = ACPI_PTR(hidma_mgmt_acpi_ids),
	},
};

#if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ)
static int object_counter;

/*
 * Each DMA channel is described as a child node of the management node.
 * Walk the available children, translate their "reg" and interrupt
 * properties into resources, and register one platform device per channel
 * so that the HIDMA channel driver can bind to it.
 */
static int __init hidma_mgmt_of_populate_channels(struct device_node *np)
{
	struct platform_device *pdev_parent = of_find_device_by_node(np);
	struct platform_device_info pdevinfo;
	struct of_phandle_args out_irq;
	struct device_node *child;
	struct resource *res = NULL;
	const __be32 *cell;
	int ret = 0, size, i, num;
	u64 addr, addr_size;

	for_each_available_child_of_node(np, child) {
		struct resource *res_iter;
		struct platform_device *new_pdev;

		cell = of_get_property(child, "reg", &size);
		if (!cell) {
			ret = -EINVAL;
			goto out;
		}

		size /= sizeof(*cell);
		num = size /
			(of_n_addr_cells(child) + of_n_size_cells(child)) + 1;

		/* allocate a resource array */
		res = kcalloc(num, sizeof(*res), GFP_KERNEL);
		if (!res) {
			ret = -ENOMEM;
			goto out;
		}

		/* read each reg value */
		i = 0;
		res_iter = res;
		while (i < size) {
			addr = of_read_number(&cell[i],
					      of_n_addr_cells(child));
			i += of_n_addr_cells(child);

			addr_size = of_read_number(&cell[i],
						   of_n_size_cells(child));
			i += of_n_size_cells(child);

			res_iter->start = addr;
			res_iter->end = res_iter->start + addr_size - 1;
			res_iter->flags = IORESOURCE_MEM;
			res_iter++;
		}

		ret = of_irq_parse_one(child, 0, &out_irq);
		if (ret)
			goto out;

		res_iter->start = irq_create_of_mapping(&out_irq);
		res_iter->name = "hidma event irq";
		res_iter->flags = IORESOURCE_IRQ;

		memset(&pdevinfo, 0, sizeof(pdevinfo));
		pdevinfo.fwnode = &child->fwnode;
		pdevinfo.parent = pdev_parent ? &pdev_parent->dev : NULL;
		pdevinfo.name = child->name;
		pdevinfo.id = object_counter++;
		pdevinfo.res = res;
		pdevinfo.num_res = num;
		pdevinfo.data = NULL;
		pdevinfo.size_data = 0;
		pdevinfo.dma_mask = DMA_BIT_MASK(64);
		new_pdev = platform_device_register_full(&pdevinfo);
		if (IS_ERR(new_pdev)) {
			ret = PTR_ERR(new_pdev);
			goto out;
		}
		of_node_get(child);
		new_pdev->dev.of_node = child;
		of_dma_configure(&new_pdev->dev, child);
		/*
		 * It is assumed that calling of_msi_configure is safe on
		 * platforms with or without MSI support.
		 */
		of_msi_configure(&new_pdev->dev, child);
		of_node_put(child);
		kfree(res);
		res = NULL;
	}
out:
	kfree(res);

	return ret;
}
#endif

static int __init hidma_mgmt_init(void)
{
#if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ)
	struct device_node *child;

	for_each_matching_node(child, hidma_mgmt_match) {
		/* device tree based firmware here */
		hidma_mgmt_of_populate_channels(child);
	}
#endif
	platform_driver_register(&hidma_mgmt_driver);

	return 0;
}
module_init(hidma_mgmt_init);
MODULE_LICENSE("GPL v2");