/*
 * Platform driver for the Synopsys DesignWare DMA Controller
 *
 * Copyright (C) 2007-2008 Atmel Corporation
 * Copyright (C) 2010-2011 ST Microelectronics
 * Copyright (C) 2013 Intel Corporation
 *
 * Some parts of this driver are derived from the original dw_dmac.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/device.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>
#include <linux/platform_device.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/acpi.h>
#include <linux/acpi_dma.h>

#include "internal.h"

#define DRV_NAME	"dw_dmac"

static struct dma_chan *dw_dma_of_xlate(struct of_phandle_args *dma_spec,
					struct of_dma *ofdma)
{
	struct dw_dma *dw = ofdma->of_dma_data;
	struct dw_dma_slave slave = {
		.dma_dev = dw->dma.dev,
	};
	dma_cap_mask_t cap;

	if (dma_spec->args_count != 3)
		return NULL;

	slave.src_id = dma_spec->args[0];
	slave.dst_id = dma_spec->args[0];
	slave.m_master = dma_spec->args[1];
	slave.p_master = dma_spec->args[2];

	if (WARN_ON(slave.src_id >= DW_DMA_MAX_NR_REQUESTS ||
		    slave.dst_id >= DW_DMA_MAX_NR_REQUESTS ||
		    slave.m_master >= dw->pdata->nr_masters ||
		    slave.p_master >= dw->pdata->nr_masters))
		return NULL;

	dma_cap_zero(cap);
	dma_cap_set(DMA_SLAVE, cap);

	/* TODO: there should be a simpler way to do this */
	return dma_request_channel(cap, dw_dma_filter, &slave);
}

#ifdef CONFIG_ACPI
static bool dw_dma_acpi_filter(struct dma_chan *chan, void *param)
{
	struct acpi_dma_spec *dma_spec = param;
	struct dw_dma_slave slave = {
		.dma_dev = dma_spec->dev,
		.src_id = dma_spec->slave_id,
		.dst_id = dma_spec->slave_id,
		.m_master = 0,
		.p_master = 1,
	};

	return dw_dma_filter(chan, &slave);
}

static void dw_dma_acpi_controller_register(struct dw_dma *dw)
{
	struct device *dev = dw->dma.dev;
	struct acpi_dma_filter_info *info;
	int ret;

	info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
	if (!info)
		return;

	dma_cap_zero(info->dma_cap);
	dma_cap_set(DMA_SLAVE, info->dma_cap);
	info->filter_fn = dw_dma_acpi_filter;

	ret = devm_acpi_dma_controller_register(dev, acpi_dma_simple_xlate,
						info);
	if (ret)
		dev_err(dev, "could not register acpi_dma_controller\n");
}
#else /* !CONFIG_ACPI */
static inline void dw_dma_acpi_controller_register(struct dw_dma *dw) {}
#endif /* !CONFIG_ACPI */
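
/*
 * Illustrative only: a device tree fragment of the kind dw_dma_parse_dt()
 * and dw_dma_of_xlate() below expect. Node names, addresses and values are
 * made up for the example. A client phandle carries three cells, matching
 * the args_count check in dw_dma_of_xlate(): the hardware request line, the
 * memory-side master and the peripheral-side master, in that order.
 *
 *	dmahost: dma@fc000000 {
 *		compatible = "snps,dma-spear1340";
 *		reg = <0xfc000000 0x1000>;
 *		interrupts = <12>;
 *		clocks = <&ahb_clk>;
 *		clock-names = "hclk";
 *
 *		dma-channels = <8>;
 *		dma-masters = <2>;
 *		#dma-cells = <3>;
 *		chan_allocation_order = <1>;
 *		chan_priority = <1>;
 *		block_size = <0xfff>;
 *		data-width = <8 8>;
 *	};
 *
 *	uart@e0000000 {
 *		...
 *		dmas = <&dmahost 12 0 1>;
 *		dma-names = "rx";
 *	};
 */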

#ifdef CONFIG_OF
static struct dw_dma_platform_data *
dw_dma_parse_dt(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct dw_dma_platform_data *pdata;
	u32 tmp, arr[DW_DMA_MAX_NR_MASTERS], mb[DW_DMA_MAX_NR_CHANNELS];
	u32 nr_masters;
	u32 nr_channels;

	if (!np) {
		dev_err(&pdev->dev, "Missing DT data\n");
		return NULL;
	}

	if (of_property_read_u32(np, "dma-masters", &nr_masters))
		return NULL;
	if (nr_masters < 1 || nr_masters > DW_DMA_MAX_NR_MASTERS)
		return NULL;

	if (of_property_read_u32(np, "dma-channels", &nr_channels))
		return NULL;
	if (nr_channels > DW_DMA_MAX_NR_CHANNELS)
		return NULL;

	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return NULL;

	pdata->nr_masters = nr_masters;
	pdata->nr_channels = nr_channels;

	if (of_property_read_bool(np, "is_private"))
		pdata->is_private = true;

	/*
	 * All known devices that use DT for configuration support
	 * memory-to-memory transfers, so enable them by default.
	 */
	pdata->is_memcpy = true;

	if (!of_property_read_u32(np, "chan_allocation_order", &tmp))
		pdata->chan_allocation_order = (unsigned char)tmp;

	if (!of_property_read_u32(np, "chan_priority", &tmp))
		pdata->chan_priority = tmp;

	if (!of_property_read_u32(np, "block_size", &tmp))
		pdata->block_size = tmp;

	if (!of_property_read_u32_array(np, "data-width", arr, nr_masters)) {
		for (tmp = 0; tmp < nr_masters; tmp++)
			pdata->data_width[tmp] = arr[tmp];
	} else if (!of_property_read_u32_array(np, "data_width", arr, nr_masters)) {
		/* Deprecated "data_width" encodes log2 of the width in bytes */
		for (tmp = 0; tmp < nr_masters; tmp++)
			pdata->data_width[tmp] = BIT(arr[tmp] & 0x07);
	}

	if (!of_property_read_u32_array(np, "multi-block", mb, nr_channels)) {
		for (tmp = 0; tmp < nr_channels; tmp++)
			pdata->multi_block[tmp] = mb[tmp];
	} else {
		/* Default to multi-block transfers supported on all channels */
		for (tmp = 0; tmp < nr_channels; tmp++)
			pdata->multi_block[tmp] = 1;
	}

	/* Optional AHB protection control (HPROT[3:1]) value */
	if (!of_property_read_u32(np, "snps,dma-protection-control", &tmp)) {
		if (tmp > CHAN_PROTCTL_MASK)
			return NULL;
		pdata->protctl = tmp;
	}

	return pdata;
}
#else
static inline struct dw_dma_platform_data *
dw_dma_parse_dt(struct platform_device *pdev)
{
	return NULL;
}
#endif

static int dw_probe(struct platform_device *pdev)
{
	struct dw_dma_chip *chip;
	struct device *dev = &pdev->dev;
	struct resource *mem;
	const struct dw_dma_platform_data *pdata;
	int err;

	chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	chip->irq = platform_get_irq(pdev, 0);
	if (chip->irq < 0)
		return chip->irq;

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	chip->regs = devm_ioremap_resource(dev, mem);
	if (IS_ERR(chip->regs))
		return PTR_ERR(chip->regs);

	err = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (err)
		return err;

	/* Prefer board-supplied platform data, then fall back to DT */
	pdata = dev_get_platdata(dev);
	if (!pdata)
		pdata = dw_dma_parse_dt(pdev);

	chip->dev = dev;
	chip->id = pdev->id;
	chip->pdata = pdata;

	chip->clk = devm_clk_get(chip->dev, "hclk");
	if (IS_ERR(chip->clk))
		return PTR_ERR(chip->clk);
	err = clk_prepare_enable(chip->clk);
	if (err)
		return err;

	pm_runtime_enable(&pdev->dev);

	err = dw_dma_probe(chip);
	if (err)
		goto err_dw_dma_probe;

	platform_set_drvdata(pdev, chip);

	if (pdev->dev.of_node) {
		err = of_dma_controller_register(pdev->dev.of_node,
						 dw_dma_of_xlate, chip->dw);
		if (err)
			dev_err(&pdev->dev,
				"could not register of_dma_controller\n");
	}

	if (ACPI_HANDLE(&pdev->dev))
		dw_dma_acpi_controller_register(chip->dw);

	return 0;

err_dw_dma_probe:
	pm_runtime_disable(&pdev->dev);
	clk_disable_unprepare(chip->clk);
	return err;
}

static int dw_remove(struct platform_device *pdev)
{
	struct dw_dma_chip *chip = platform_get_drvdata(pdev);

	if (pdev->dev.of_node)
		of_dma_controller_free(pdev->dev.of_node);

	dw_dma_remove(chip);
	pm_runtime_disable(&pdev->dev);
	clk_disable_unprepare(chip->clk);

	return 0;
}
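
/*
 * The ->shutdown() hook runs on system shutdown and reboot; the point here
 * is to quiesce the controller so that no transfer is still in flight when
 * the hardware is handed over (e.g. to a kexec'd kernel).
 */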
static void dw_shutdown(struct platform_device *pdev)
{
	struct dw_dma_chip *chip = platform_get_drvdata(pdev);

	/*
	 * We have to call dw_dma_disable() to stop any ongoing transfer. On
	 * some platforms we can't do that since the DMA device is already
	 * powered off, and we have no way to check whether the platform is
	 * affected or not. That's why we call pm_runtime_get_sync() /
	 * pm_runtime_put_sync_suspend() unconditionally. On the other hand
	 * we can't use pm_runtime_suspended(), because the runtime PM
	 * framework is not fully used by this driver.
	 */
	pm_runtime_get_sync(chip->dev);
	dw_dma_disable(chip);
	pm_runtime_put_sync_suspend(chip->dev);

	clk_disable_unprepare(chip->clk);
}

#ifdef CONFIG_OF
static const struct of_device_id dw_dma_of_id_table[] = {
	{ .compatible = "snps,dma-spear1340" },
	{}
};
MODULE_DEVICE_TABLE(of, dw_dma_of_id_table);
#endif

#ifdef CONFIG_ACPI
static const struct acpi_device_id dw_dma_acpi_id_table[] = {
	{ "INTL9C60", 0 },
	{ "80862286", 0 },
	{ "808622C0", 0 },
	{ }
};
MODULE_DEVICE_TABLE(acpi, dw_dma_acpi_id_table);
#endif

#ifdef CONFIG_PM_SLEEP

static int dw_suspend_late(struct device *dev)
{
	struct dw_dma_chip *chip = dev_get_drvdata(dev);

	dw_dma_disable(chip);
	clk_disable_unprepare(chip->clk);

	return 0;
}

static int dw_resume_early(struct device *dev)
{
	struct dw_dma_chip *chip = dev_get_drvdata(dev);
	int ret;

	ret = clk_prepare_enable(chip->clk);
	if (ret)
		return ret;

	return dw_dma_enable(chip);
}

#endif /* CONFIG_PM_SLEEP */

static const struct dev_pm_ops dw_dev_pm_ops = {
	SET_LATE_SYSTEM_SLEEP_PM_OPS(dw_suspend_late, dw_resume_early)
};

static struct platform_driver dw_driver = {
	.probe		= dw_probe,
	.remove		= dw_remove,
	.shutdown	= dw_shutdown,
	.driver = {
		.name	= DRV_NAME,
		.pm	= &dw_dev_pm_ops,
		.of_match_table = of_match_ptr(dw_dma_of_id_table),
		.acpi_match_table = ACPI_PTR(dw_dma_acpi_id_table),
	},
};

static int __init dw_init(void)
{
	return platform_driver_register(&dw_driver);
}
subsys_initcall(dw_init);

static void __exit dw_exit(void)
{
	platform_driver_unregister(&dw_driver);
}
module_exit(dw_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller platform driver");
MODULE_ALIAS("platform:" DRV_NAME);
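
/*
 * Illustrative only: on boards with neither DT nor ACPI, the same
 * configuration can be supplied as platform data from board code (the
 * dev_get_platdata() path in dw_probe() above). Field values here are made
 * up for the example; data_width is given in bytes per AHB master.
 *
 *	static struct dw_dma_platform_data dw_pdata = {
 *		.nr_channels		= 8,
 *		.nr_masters		= 2,
 *		.chan_allocation_order	= CHAN_ALLOCATION_ASCENDING,
 *		.chan_priority		= CHAN_PRIORITY_ASCENDING,
 *		.block_size		= 0xfff,
 *		.data_width		= {4, 4},
 *	};
 *
 * The structure is then passed via the .platform_data member of a platform
 * device registered under the "dw_dmac" name.
 */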