// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright (C) STMicroelectronics SA 2017
 * Author(s): M'boumba Cedric Madianga <cedric.madianga@gmail.com>
 *            Pierre-Yves Mordret <pierre-yves.mordret@st.com>
 *
 * DMA Router driver for STM32 DMA MUX
 *
 * Based on TI DMA Crossbar driver
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#define STM32_DMAMUX_CCR(x)		(0x4 * (x))
#define STM32_DMAMUX_MAX_DMA_REQUESTS	32
#define STM32_DMAMUX_MAX_REQUESTS	255

struct stm32_dmamux {
	u32 master;
	u32 request;
	u32 chan_id;
};

struct stm32_dmamux_data {
	struct dma_router dmarouter;
	struct clk *clk;
	struct reset_control *rst;
	void __iomem *iomem;
	u32 dma_requests; /* Number of DMA requests connected to DMAMUX */
	u32 dmamux_requests; /* Number of DMA requests routed toward DMAs */
	spinlock_t lock; /* Protects register access */
	unsigned long *dma_inuse; /* Bitmap of used DMA channels */
	u32 dma_reqs[]; /* Number of DMA Requests per DMA master.
			 * [0] holds the number of DMA masters.
			 * To be kept at the very end of this structure.
			 */
};

static inline u32 stm32_dmamux_read(void __iomem *iomem, u32 reg)
{
	return readl_relaxed(iomem + reg);
}

static inline void stm32_dmamux_write(void __iomem *iomem, u32 reg, u32 val)
{
	writel_relaxed(val, iomem + reg);
}

static void stm32_dmamux_free(struct device *dev, void *route_data)
{
	struct stm32_dmamux_data *dmamux = dev_get_drvdata(dev);
	struct stm32_dmamux *mux = route_data;
	unsigned long flags;

	/* Clear dma request */
	spin_lock_irqsave(&dmamux->lock, flags);

	stm32_dmamux_write(dmamux->iomem, STM32_DMAMUX_CCR(mux->chan_id), 0);
	clear_bit(mux->chan_id, dmamux->dma_inuse);

	pm_runtime_put_sync(dev);

	spin_unlock_irqrestore(&dmamux->lock, flags);

	dev_dbg(dev, "Unmapping DMAMUX(%u) to DMA%u(%u)\n",
		mux->request, mux->master, mux->chan_id);

	kfree(mux);
}

static void *stm32_dmamux_route_allocate(struct of_phandle_args *dma_spec,
					 struct of_dma *ofdma)
{
	struct platform_device *pdev = of_find_device_by_node(ofdma->of_node);
	struct stm32_dmamux_data *dmamux = platform_get_drvdata(pdev);
	struct stm32_dmamux *mux;
	u32 i, min, max;
	int ret;
	unsigned long flags;

	if (dma_spec->args_count != 3) {
		dev_err(&pdev->dev, "invalid number of dma mux args\n");
		return ERR_PTR(-EINVAL);
	}

	if (dma_spec->args[0] > dmamux->dmamux_requests) {
		dev_err(&pdev->dev, "invalid mux request number: %d\n",
			dma_spec->args[0]);
		return ERR_PTR(-EINVAL);
	}

	mux = kzalloc(sizeof(*mux), GFP_KERNEL);
	if (!mux)
		return ERR_PTR(-ENOMEM);

	spin_lock_irqsave(&dmamux->lock, flags);
	mux->chan_id = find_first_zero_bit(dmamux->dma_inuse,
					   dmamux->dma_requests);

	if (mux->chan_id == dmamux->dma_requests) {
		spin_unlock_irqrestore(&dmamux->lock, flags);
		dev_err(&pdev->dev, "Run out of free DMA requests\n");
		ret = -ENOMEM;
		goto error_chan_id;
	}
	set_bit(mux->chan_id, dmamux->dma_inuse);
	spin_unlock_irqrestore(&dmamux->lock, flags);

	/* Look for DMA Master */
	for (i = 1, min = 0, max = dmamux->dma_reqs[i];
	     i <= dmamux->dma_reqs[0];
	     min += dmamux->dma_reqs[i], max += dmamux->dma_reqs[++i])
		if (mux->chan_id < max)
			break;
	mux->master = i - 1;
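	/*
	 * Illustrative example of the lookup above (values assumed, not
	 * taken from real hardware): with two masters of 8 requests each,
	 * dma_reqs = { 2, 8, 8 }. For chan_id = 10, the first window is
	 * [0, 8) (miss) and the second is [8, 16) (hit), so the loop
	 * breaks with i = 2 and min = 8: master 1 is selected and the
	 * channel local to it is chan_id - min = 10 - 8 = 2, which is
	 * used below when crafting the DMA spec.
	 */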
	/* The of_node_put() will be done in of_dma_router_xlate function */
	dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", i - 1);
	if (!dma_spec->np) {
		dev_err(&pdev->dev, "can't get dma master\n");
		ret = -EINVAL;
		goto error;
	}

	/* Set dma request */
	spin_lock_irqsave(&dmamux->lock, flags);
	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0) {
		spin_unlock_irqrestore(&dmamux->lock, flags);
		/* pm_runtime_get_sync() raises the usage count even on failure */
		pm_runtime_put_noidle(&pdev->dev);
		goto error;
	}
	spin_unlock_irqrestore(&dmamux->lock, flags);

	mux->request = dma_spec->args[0];

	/* Craft the 4-cell DMA spec consumed by the selected DMA master */
	dma_spec->args[3] = dma_spec->args[2];
	dma_spec->args[2] = dma_spec->args[1];
	dma_spec->args[1] = 0;
	dma_spec->args[0] = mux->chan_id - min;
	dma_spec->args_count = 4;

	stm32_dmamux_write(dmamux->iomem, STM32_DMAMUX_CCR(mux->chan_id),
			   mux->request);
	dev_dbg(&pdev->dev, "Mapping DMAMUX(%u) to DMA%u(%u)\n",
		mux->request, mux->master, mux->chan_id);

	return mux;

error:
	clear_bit(mux->chan_id, dmamux->dma_inuse);

error_chan_id:
	kfree(mux);
	return ERR_PTR(ret);
}

static const struct of_device_id stm32_stm32dma_master_match[] = {
	{ .compatible = "st,stm32-dma", },
	{},
};

static int stm32_dmamux_probe(struct platform_device *pdev)
{
	struct device_node *node = pdev->dev.of_node;
	const struct of_device_id *match;
	struct device_node *dma_node;
	struct stm32_dmamux_data *stm32_dmamux;
	struct resource *res;
	void __iomem *iomem;
	int i, count, ret;
	u32 dma_req;

	if (!node)
		return -ENODEV;

	count = device_property_read_u32_array(&pdev->dev, "dma-masters",
					       NULL, 0);
	if (count < 0) {
		dev_err(&pdev->dev, "Can't get DMA master(s) node\n");
		return -ENODEV;
	}

	stm32_dmamux = devm_kzalloc(&pdev->dev, sizeof(*stm32_dmamux) +
				    sizeof(u32) * (count + 1), GFP_KERNEL);
	if (!stm32_dmamux)
		return -ENOMEM;

	dma_req = 0;
	for (i = 1; i <= count; i++) {
		dma_node = of_parse_phandle(node, "dma-masters", i - 1);

		match = of_match_node(stm32_stm32dma_master_match, dma_node);
		if (!match) {
			dev_err(&pdev->dev, "DMA master is not supported\n");
			of_node_put(dma_node);
			return -EINVAL;
		}

		if (of_property_read_u32(dma_node, "dma-requests",
					 &stm32_dmamux->dma_reqs[i])) {
			dev_info(&pdev->dev,
				 "Missing MUX output information, using %u.\n",
				 STM32_DMAMUX_MAX_DMA_REQUESTS);
			stm32_dmamux->dma_reqs[i] =
				STM32_DMAMUX_MAX_DMA_REQUESTS;
		}
		dma_req += stm32_dmamux->dma_reqs[i];
		of_node_put(dma_node);
	}
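	/*
	 * Illustrative example (values assumed): two "st,stm32-dma"
	 * masters each advertising "dma-requests = <8>" leave dma_reqs[1]
	 * and dma_reqs[2] at 8 and dma_req at 16; dma_reqs[0] is set to
	 * the master count right after the range check below.
	 */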
	if (dma_req > STM32_DMAMUX_MAX_DMA_REQUESTS) {
		dev_err(&pdev->dev, "Too many DMA Master Requests to manage\n");
		return -ENODEV;
	}

	stm32_dmamux->dma_requests = dma_req;
	stm32_dmamux->dma_reqs[0] = count;
	stm32_dmamux->dma_inuse = devm_kcalloc(&pdev->dev,
					       BITS_TO_LONGS(dma_req),
					       sizeof(unsigned long),
					       GFP_KERNEL);
	if (!stm32_dmamux->dma_inuse)
		return -ENOMEM;

	if (device_property_read_u32(&pdev->dev, "dma-requests",
				     &stm32_dmamux->dmamux_requests)) {
		stm32_dmamux->dmamux_requests = STM32_DMAMUX_MAX_REQUESTS;
		dev_warn(&pdev->dev, "DMAMUX defaulting on %u requests\n",
			 stm32_dmamux->dmamux_requests);
	}
	pm_runtime_get_noresume(&pdev->dev);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	iomem = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(iomem))
		return PTR_ERR(iomem);

	spin_lock_init(&stm32_dmamux->lock);

	stm32_dmamux->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(stm32_dmamux->clk)) {
		ret = PTR_ERR(stm32_dmamux->clk);
		if (ret == -EPROBE_DEFER)
			dev_info(&pdev->dev, "Missing controller clock\n");
		return ret;
	}

	stm32_dmamux->rst = devm_reset_control_get(&pdev->dev, NULL);
	if (!IS_ERR(stm32_dmamux->rst)) {
		reset_control_assert(stm32_dmamux->rst);
		udelay(2);
		reset_control_deassert(stm32_dmamux->rst);
	}

	stm32_dmamux->iomem = iomem;
	stm32_dmamux->dmarouter.dev = &pdev->dev;
	stm32_dmamux->dmarouter.route_free = stm32_dmamux_free;

	platform_set_drvdata(pdev, stm32_dmamux);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	ret = clk_prepare_enable(stm32_dmamux->clk);
	if (ret < 0) {
		dev_err(&pdev->dev, "clk_prep_enable error: %d\n", ret);
		return ret;
	}

	pm_runtime_get_noresume(&pdev->dev);

	/* Reset the dmamux */
	for (i = 0; i < stm32_dmamux->dma_requests; i++)
		stm32_dmamux_write(stm32_dmamux->iomem, STM32_DMAMUX_CCR(i), 0);

	pm_runtime_put(&pdev->dev);

	return of_dma_router_register(node, stm32_dmamux_route_allocate,
				      &stm32_dmamux->dmarouter);
}

#ifdef CONFIG_PM
static int stm32_dmamux_runtime_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct stm32_dmamux_data *stm32_dmamux = platform_get_drvdata(pdev);

	clk_disable_unprepare(stm32_dmamux->clk);

	return 0;
}

static int stm32_dmamux_runtime_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct stm32_dmamux_data *stm32_dmamux = platform_get_drvdata(pdev);
	int ret;

	ret = clk_prepare_enable(stm32_dmamux->clk);
	if (ret) {
		dev_err(&pdev->dev, "failed to prepare_enable clock\n");
		return ret;
	}

	return 0;
}
#endif

static const struct dev_pm_ops stm32_dmamux_pm_ops = {
	SET_RUNTIME_PM_OPS(stm32_dmamux_runtime_suspend,
			   stm32_dmamux_runtime_resume, NULL)
};

static const struct of_device_id stm32_dmamux_match[] = {
	{ .compatible = "st,stm32h7-dmamux" },
	{},
};

static struct platform_driver stm32_dmamux_driver = {
	.probe	= stm32_dmamux_probe,
	.driver = {
		.name = "stm32-dmamux",
		.of_match_table = stm32_dmamux_match,
		.pm = &stm32_dmamux_pm_ops,
	},
};

static int __init stm32_dmamux_init(void)
{
	return platform_driver_register(&stm32_dmamux_driver);
}
arch_initcall(stm32_dmamux_init);

MODULE_DESCRIPTION("DMA Router driver for STM32 DMA MUX");
MODULE_AUTHOR("M'boumba Cedric Madianga <cedric.madianga@gmail.com>");
MODULE_AUTHOR("Pierre-Yves Mordret <pierre-yves.mordret@st.com>");
MODULE_LICENSE("GPL v2");