/*
 *
 * Copyright (C) STMicroelectronics SA 2017
 * Author(s): M'boumba Cedric Madianga <cedric.madianga@gmail.com>
 *            Pierre-Yves Mordret <pierre-yves.mordret@st.com>
 *
 * License terms: GPL V2.0.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
 * details.
 *
 * DMA Router driver for STM32 DMA MUX
 *
 * Based on TI DMA Crossbar driver
 *
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#define STM32_DMAMUX_CCR(x)		(0x4 * (x))
#define STM32_DMAMUX_MAX_DMA_REQUESTS	32
#define STM32_DMAMUX_MAX_REQUESTS	255

struct stm32_dmamux {
	u32 master;
	u32 request;
	u32 chan_id;
};

struct stm32_dmamux_data {
	struct dma_router dmarouter;
	struct clk *clk;
	struct reset_control *rst;
	void __iomem *iomem;
	u32 dma_requests; /* Number of DMA requests connected to DMAMUX */
	u32 dmamux_requests; /* Number of DMA requests routed toward DMAs */
	spinlock_t lock; /* Protects register access */
	unsigned long *dma_inuse; /* Used DMA channels */
	u32 dma_reqs[]; /* Number of DMA requests per DMA master.
			 * [0] holds the number of DMA masters.
			 * To be kept at the very end of this structure.
			 */
};

static inline u32 stm32_dmamux_read(void __iomem *iomem, u32 reg)
{
	return readl_relaxed(iomem + reg);
}

static inline void stm32_dmamux_write(void __iomem *iomem, u32 reg, u32 val)
{
	writel_relaxed(val, iomem + reg);
}

static void stm32_dmamux_free(struct device *dev, void *route_data)
{
	struct stm32_dmamux_data *dmamux = dev_get_drvdata(dev);
	struct stm32_dmamux *mux = route_data;
	unsigned long flags;

	/* Clear dma request */
	spin_lock_irqsave(&dmamux->lock, flags);

	stm32_dmamux_write(dmamux->iomem, STM32_DMAMUX_CCR(mux->chan_id), 0);
	clear_bit(mux->chan_id, dmamux->dma_inuse);

	if (!IS_ERR(dmamux->clk))
		clk_disable(dmamux->clk);

	spin_unlock_irqrestore(&dmamux->lock, flags);

	dev_dbg(dev, "Unmapping DMAMUX(%u) to DMA%u(%u)\n",
		mux->request, mux->master, mux->chan_id);

	kfree(mux);
}

static void *stm32_dmamux_route_allocate(struct of_phandle_args *dma_spec,
					 struct of_dma *ofdma)
{
	struct platform_device *pdev = of_find_device_by_node(ofdma->of_node);
	struct stm32_dmamux_data *dmamux = platform_get_drvdata(pdev);
	struct stm32_dmamux *mux;
	u32 i, min, max;
	int ret;
	unsigned long flags;

	if (dma_spec->args_count != 3) {
		dev_err(&pdev->dev, "invalid number of dma mux args\n");
		return ERR_PTR(-EINVAL);
	}

	if (dma_spec->args[0] > dmamux->dmamux_requests) {
		dev_err(&pdev->dev, "invalid mux request number: %d\n",
			dma_spec->args[0]);
		return ERR_PTR(-EINVAL);
	}

	mux = kzalloc(sizeof(*mux), GFP_KERNEL);
	if (!mux)
		return ERR_PTR(-ENOMEM);

	spin_lock_irqsave(&dmamux->lock, flags);
	mux->chan_id = find_first_zero_bit(dmamux->dma_inuse,
					   dmamux->dma_requests);

	if (mux->chan_id == dmamux->dma_requests) {
		spin_unlock_irqrestore(&dmamux->lock, flags);
		dev_err(&pdev->dev, "Run out of free DMA requests\n");
		ret = -ENOMEM;
		goto error_chan_id;
	}
	set_bit(mux->chan_id, dmamux->dma_inuse);
	spin_unlock_irqrestore(&dmamux->lock, flags);

	/*
	 * Look for the DMA master owning this channel: walk the
	 * per-master request counts so that, on exit, chan_id lies in
	 * [min, max) and i - 1 is the index of the matching master.
	 */
	for (i = 1, min = 0, max = dmamux->dma_reqs[i];
	     i <= dmamux->dma_reqs[0];
	     min += dmamux->dma_reqs[i], max += dmamux->dma_reqs[++i])
		if (mux->chan_id < max)
			break;
	mux->master = i - 1;

	/* The of_node_put() will be done in of_dma_router_xlate function */
	dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", i - 1);
	if (!dma_spec->np) {
		dev_err(&pdev->dev, "can't get dma master\n");
		ret = -EINVAL;
		goto error;
	}

	/* Set dma request */
	spin_lock_irqsave(&dmamux->lock, flags);
	if (!IS_ERR(dmamux->clk)) {
		ret = clk_enable(dmamux->clk);
		if (ret < 0) {
			spin_unlock_irqrestore(&dmamux->lock, flags);
			dev_err(&pdev->dev, "clk_enable issue: %d\n", ret);
			goto error;
		}
	}
	spin_unlock_irqrestore(&dmamux->lock, flags);

	mux->request = dma_spec->args[0];

	/*
	 * Craft the 4-cell DMA spec expected by the "st,stm32-dma"
	 * master: local channel id, request line 0 (routing is now done
	 * by the DMAMUX), then the original config and feature cells.
	 */
	dma_spec->args[3] = dma_spec->args[2];
	dma_spec->args[2] = dma_spec->args[1];
	dma_spec->args[1] = 0;
	dma_spec->args[0] = mux->chan_id - min;
	dma_spec->args_count = 4;

	stm32_dmamux_write(dmamux->iomem, STM32_DMAMUX_CCR(mux->chan_id),
			   mux->request);
	dev_dbg(&pdev->dev, "Mapping DMAMUX(%u) to DMA%u(%u)\n",
		mux->request, mux->master, mux->chan_id);

	return mux;

error:
	clear_bit(mux->chan_id, dmamux->dma_inuse);

error_chan_id:
	kfree(mux);
	return ERR_PTR(ret);
}

static const struct of_device_id stm32_stm32dma_master_match[] = {
	{ .compatible = "st,stm32-dma", },
	{},
};

static int stm32_dmamux_probe(struct platform_device *pdev)
{
	struct device_node *node = pdev->dev.of_node;
	const struct of_device_id *match;
	struct device_node *dma_node;
	struct stm32_dmamux_data *stm32_dmamux;
	struct resource *res;
	void __iomem *iomem;
	int i, count, ret;
	u32 dma_req;

	if (!node)
		return -ENODEV;

	count = device_property_read_u32_array(&pdev->dev, "dma-masters",
					       NULL, 0);
	if (count < 0) {
		dev_err(&pdev->dev, "Can't get DMA master(s) node\n");
		return -ENODEV;
	}

	stm32_dmamux = devm_kzalloc(&pdev->dev, sizeof(*stm32_dmamux) +
				    sizeof(u32) * (count + 1), GFP_KERNEL);
	if (!stm32_dmamux)
		return -ENOMEM;

	dma_req = 0;
	for (i = 1; i <= count; i++) {
		dma_node = of_parse_phandle(node, "dma-masters", i - 1);

		match = of_match_node(stm32_stm32dma_master_match, dma_node);
		if (!match) {
			dev_err(&pdev->dev, "DMA master is not supported\n");
			of_node_put(dma_node);
			return -EINVAL;
		}

		if (of_property_read_u32(dma_node, "dma-requests",
					 &stm32_dmamux->dma_reqs[i])) {
			dev_info(&pdev->dev,
				 "Missing MUX output information, using %u.\n",
				 STM32_DMAMUX_MAX_DMA_REQUESTS);
			stm32_dmamux->dma_reqs[i] =
				STM32_DMAMUX_MAX_DMA_REQUESTS;
		}
		dma_req += stm32_dmamux->dma_reqs[i];
		of_node_put(dma_node);
	}

	if (dma_req > STM32_DMAMUX_MAX_DMA_REQUESTS) {
		dev_err(&pdev->dev, "Too many DMA Master Requests to manage\n");
		return -ENODEV;
	}

	stm32_dmamux->dma_requests = dma_req;
	stm32_dmamux->dma_reqs[0] = count;
	stm32_dmamux->dma_inuse = devm_kcalloc(&pdev->dev,
					       BITS_TO_LONGS(dma_req),
					       sizeof(unsigned long),
					       GFP_KERNEL);
	if (!stm32_dmamux->dma_inuse)
		return -ENOMEM;

	if (device_property_read_u32(&pdev->dev, "dma-requests",
				     &stm32_dmamux->dmamux_requests)) {
		stm32_dmamux->dmamux_requests = STM32_DMAMUX_MAX_REQUESTS;
		dev_warn(&pdev->dev, "DMAMUX defaulting on %u requests\n",
			 stm32_dmamux->dmamux_requests);
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	iomem = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(iomem))
		return PTR_ERR(iomem);

	spin_lock_init(&stm32_dmamux->lock);

	stm32_dmamux->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(stm32_dmamux->clk)) {
		ret = PTR_ERR(stm32_dmamux->clk);
		if (ret == -EPROBE_DEFER)
			dev_info(&pdev->dev, "Missing controller clock\n");
		return ret;
	}

	stm32_dmamux->rst = devm_reset_control_get(&pdev->dev, NULL);
	if (!IS_ERR(stm32_dmamux->rst)) {
		reset_control_assert(stm32_dmamux->rst);
		udelay(2);
		reset_control_deassert(stm32_dmamux->rst);
	}

	stm32_dmamux->iomem = iomem;
	stm32_dmamux->dmarouter.dev = &pdev->dev;
	stm32_dmamux->dmarouter.route_free = stm32_dmamux_free;

	platform_set_drvdata(pdev, stm32_dmamux);

	if (!IS_ERR(stm32_dmamux->clk)) {
		ret = clk_prepare_enable(stm32_dmamux->clk);
		if (ret < 0) {
			dev_err(&pdev->dev, "clk_prep_enable error: %d\n", ret);
			return ret;
		}
	}

	/* Reset the dmamux */
	for (i = 0; i < stm32_dmamux->dma_requests; i++)
		stm32_dmamux_write(stm32_dmamux->iomem, STM32_DMAMUX_CCR(i), 0);

	if (!IS_ERR(stm32_dmamux->clk))
		clk_disable(stm32_dmamux->clk);

	return of_dma_router_register(node, stm32_dmamux_route_allocate,
				      &stm32_dmamux->dmarouter);
}

static const struct of_device_id stm32_dmamux_match[] = {
	{ .compatible = "st,stm32h7-dmamux" },
	{},
};

static struct platform_driver stm32_dmamux_driver = {
	.probe	= stm32_dmamux_probe,
	.driver = {
		.name = "stm32-dmamux",
		.of_match_table = stm32_dmamux_match,
	},
};

static int __init stm32_dmamux_init(void)
{
	return platform_driver_register(&stm32_dmamux_driver);
}
arch_initcall(stm32_dmamux_init);

MODULE_DESCRIPTION("DMA Router driver for STM32 DMA MUX");
MODULE_AUTHOR("M'boumba Cedric Madianga <cedric.madianga@gmail.com>");
MODULE_AUTHOR("Pierre-Yves Mordret <pierre-yves.mordret@st.com>");
MODULE_LICENSE("GPL v2");
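
/*
 * Device-tree usage, for reference. This is a sketch based on the
 * "st,stm32h7-dmamux" binding: node names, the register address/size
 * and the client cells below are illustrative, not taken from this
 * file.
 *
 *	dmamux1: dma-router@40020800 {
 *		compatible = "st,stm32h7-dmamux";
 *		reg = <0x40020800 0x1c>;
 *		#dma-cells = <3>;
 *		dma-requests = <128>;
 *		dma-masters = <&dma1 &dma2>;
 *	};
 *
 * A client references a mux input with three cells (request number,
 * channel config, features); stm32_dmamux_route_allocate() rewrites
 * this into the 4-cell spec of the selected "st,stm32-dma" master:
 *
 *	usart1: serial@40011000 {
 *		dmas = <&dmamux1 41 0x400 0x0>, <&dmamux1 42 0x400 0x0>;
 *		dma-names = "rx", "tx";
 *	};
 */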