xref: /openbmc/linux/drivers/dma/qcom/hidma_mgmt.c (revision a977d045)
1 /*
2  * Qualcomm Technologies HIDMA DMA engine Management interface
3  *
4  * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 and
8  * only version 2 as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful,
11  * but WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13  * GNU General Public License for more details.
14  */
15 
16 #include <linux/dmaengine.h>
17 #include <linux/acpi.h>
18 #include <linux/of.h>
19 #include <linux/property.h>
20 #include <linux/of_irq.h>
21 #include <linux/of_platform.h>
22 #include <linux/module.h>
23 #include <linux/uaccess.h>
24 #include <linux/slab.h>
25 #include <linux/pm_runtime.h>
26 #include <linux/bitops.h>
27 #include <linux/dma-mapping.h>
28 
29 #include "hidma_mgmt.h"
30 
/* Register offsets within the management MMIO region */
#define HIDMA_QOS_N_OFFSET		0x300
#define HIDMA_CFG_OFFSET		0x400
#define HIDMA_MAX_BUS_REQ_LEN_OFFSET	0x41C
#define HIDMA_MAX_XACTIONS_OFFSET	0x420
#define HIDMA_HW_VERSION_OFFSET	0x424
#define HIDMA_CHRESET_TIMEOUT_OFFSET	0x418

/* Field masks for the registers above */
#define HIDMA_MAX_WR_XACTIONS_MASK	GENMASK(4, 0)
#define HIDMA_MAX_RD_XACTIONS_MASK	GENMASK(4, 0)
#define HIDMA_WEIGHT_MASK		GENMASK(6, 0)
#define HIDMA_MAX_BUS_REQ_LEN_MASK	GENMASK(15, 0)
#define HIDMA_CHRESET_TIMEOUT_MASK	GENMASK(19, 0)

/* Bit positions of the write-side fields (read-side fields start at bit 0) */
#define HIDMA_MAX_WR_XACTIONS_BIT_POS	16
#define HIDMA_MAX_BUS_WR_REQ_BIT_POS	16
#define HIDMA_WRR_BIT_POS		8
#define HIDMA_PRIORITY_BIT_POS		15

/* Runtime PM autosuspend delay in milliseconds */
#define HIDMA_AUTOSUSPEND_TIMEOUT	2000
/* Largest per-channel weighted-round-robin weight accepted by setup */
#define HIDMA_MAX_CHANNEL_WEIGHT	15
51 
/*
 * Module parameters.  A non-zero value overrides the corresponding
 * ACPI/DT-provided setting at probe time; zero (the default) means "use
 * the firmware value".  After a successful probe the parameter is written
 * back so it reflects the value actually in use.
 */
static unsigned int max_write_request;
module_param(max_write_request, uint, 0644);
MODULE_PARM_DESC(max_write_request,
		"maximum write burst (default: ACPI/DT value)");

static unsigned int max_read_request;
module_param(max_read_request, uint, 0644);
MODULE_PARM_DESC(max_read_request,
		"maximum read burst (default: ACPI/DT value)");

static unsigned int max_wr_xactions;
module_param(max_wr_xactions, uint, 0644);
MODULE_PARM_DESC(max_wr_xactions,
	"maximum number of write transactions (default: ACPI/DT value)");

static unsigned int max_rd_xactions;
module_param(max_rd_xactions, uint, 0644);
MODULE_PARM_DESC(max_rd_xactions,
	"maximum number of read transactions (default: ACPI/DT value)");
71 
72 int hidma_mgmt_setup(struct hidma_mgmt_dev *mgmtdev)
73 {
74 	unsigned int i;
75 	u32 val;
76 
77 	if (!is_power_of_2(mgmtdev->max_write_request) ||
78 	    (mgmtdev->max_write_request < 128) ||
79 	    (mgmtdev->max_write_request > 1024)) {
80 		dev_err(&mgmtdev->pdev->dev, "invalid write request %d\n",
81 			mgmtdev->max_write_request);
82 		return -EINVAL;
83 	}
84 
85 	if (!is_power_of_2(mgmtdev->max_read_request) ||
86 	    (mgmtdev->max_read_request < 128) ||
87 	    (mgmtdev->max_read_request > 1024)) {
88 		dev_err(&mgmtdev->pdev->dev, "invalid read request %d\n",
89 			mgmtdev->max_read_request);
90 		return -EINVAL;
91 	}
92 
93 	if (mgmtdev->max_wr_xactions > HIDMA_MAX_WR_XACTIONS_MASK) {
94 		dev_err(&mgmtdev->pdev->dev,
95 			"max_wr_xactions cannot be bigger than %ld\n",
96 			HIDMA_MAX_WR_XACTIONS_MASK);
97 		return -EINVAL;
98 	}
99 
100 	if (mgmtdev->max_rd_xactions > HIDMA_MAX_RD_XACTIONS_MASK) {
101 		dev_err(&mgmtdev->pdev->dev,
102 			"max_rd_xactions cannot be bigger than %ld\n",
103 			HIDMA_MAX_RD_XACTIONS_MASK);
104 		return -EINVAL;
105 	}
106 
107 	for (i = 0; i < mgmtdev->dma_channels; i++) {
108 		if (mgmtdev->priority[i] > 1) {
109 			dev_err(&mgmtdev->pdev->dev,
110 				"priority can be 0 or 1\n");
111 			return -EINVAL;
112 		}
113 
114 		if (mgmtdev->weight[i] > HIDMA_MAX_CHANNEL_WEIGHT) {
115 			dev_err(&mgmtdev->pdev->dev,
116 				"max value of weight can be %d.\n",
117 				HIDMA_MAX_CHANNEL_WEIGHT);
118 			return -EINVAL;
119 		}
120 
121 		/* weight needs to be at least one */
122 		if (mgmtdev->weight[i] == 0)
123 			mgmtdev->weight[i] = 1;
124 	}
125 
126 	pm_runtime_get_sync(&mgmtdev->pdev->dev);
127 	val = readl(mgmtdev->virtaddr + HIDMA_MAX_BUS_REQ_LEN_OFFSET);
128 	val &= ~(HIDMA_MAX_BUS_REQ_LEN_MASK << HIDMA_MAX_BUS_WR_REQ_BIT_POS);
129 	val |= mgmtdev->max_write_request << HIDMA_MAX_BUS_WR_REQ_BIT_POS;
130 	val &= ~HIDMA_MAX_BUS_REQ_LEN_MASK;
131 	val |= mgmtdev->max_read_request;
132 	writel(val, mgmtdev->virtaddr + HIDMA_MAX_BUS_REQ_LEN_OFFSET);
133 
134 	val = readl(mgmtdev->virtaddr + HIDMA_MAX_XACTIONS_OFFSET);
135 	val &= ~(HIDMA_MAX_WR_XACTIONS_MASK << HIDMA_MAX_WR_XACTIONS_BIT_POS);
136 	val |= mgmtdev->max_wr_xactions << HIDMA_MAX_WR_XACTIONS_BIT_POS;
137 	val &= ~HIDMA_MAX_RD_XACTIONS_MASK;
138 	val |= mgmtdev->max_rd_xactions;
139 	writel(val, mgmtdev->virtaddr + HIDMA_MAX_XACTIONS_OFFSET);
140 
141 	mgmtdev->hw_version =
142 	    readl(mgmtdev->virtaddr + HIDMA_HW_VERSION_OFFSET);
143 	mgmtdev->hw_version_major = (mgmtdev->hw_version >> 28) & 0xF;
144 	mgmtdev->hw_version_minor = (mgmtdev->hw_version >> 16) & 0xF;
145 
146 	for (i = 0; i < mgmtdev->dma_channels; i++) {
147 		u32 weight = mgmtdev->weight[i];
148 		u32 priority = mgmtdev->priority[i];
149 
150 		val = readl(mgmtdev->virtaddr + HIDMA_QOS_N_OFFSET + (4 * i));
151 		val &= ~(1 << HIDMA_PRIORITY_BIT_POS);
152 		val |= (priority & 0x1) << HIDMA_PRIORITY_BIT_POS;
153 		val &= ~(HIDMA_WEIGHT_MASK << HIDMA_WRR_BIT_POS);
154 		val |= (weight & HIDMA_WEIGHT_MASK) << HIDMA_WRR_BIT_POS;
155 		writel(val, mgmtdev->virtaddr + HIDMA_QOS_N_OFFSET + (4 * i));
156 	}
157 
158 	val = readl(mgmtdev->virtaddr + HIDMA_CHRESET_TIMEOUT_OFFSET);
159 	val &= ~HIDMA_CHRESET_TIMEOUT_MASK;
160 	val |= mgmtdev->chreset_timeout_cycles & HIDMA_CHRESET_TIMEOUT_MASK;
161 	writel(val, mgmtdev->virtaddr + HIDMA_CHRESET_TIMEOUT_OFFSET);
162 
163 	pm_runtime_mark_last_busy(&mgmtdev->pdev->dev);
164 	pm_runtime_put_autosuspend(&mgmtdev->pdev->dev);
165 	return 0;
166 }
167 EXPORT_SYMBOL_GPL(hidma_mgmt_setup);
168 
169 static int hidma_mgmt_probe(struct platform_device *pdev)
170 {
171 	struct hidma_mgmt_dev *mgmtdev;
172 	struct resource *res;
173 	void __iomem *virtaddr;
174 	int irq;
175 	int rc;
176 	u32 val;
177 
178 	pm_runtime_set_autosuspend_delay(&pdev->dev, HIDMA_AUTOSUSPEND_TIMEOUT);
179 	pm_runtime_use_autosuspend(&pdev->dev);
180 	pm_runtime_set_active(&pdev->dev);
181 	pm_runtime_enable(&pdev->dev);
182 	pm_runtime_get_sync(&pdev->dev);
183 
184 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
185 	virtaddr = devm_ioremap_resource(&pdev->dev, res);
186 	if (IS_ERR(virtaddr)) {
187 		rc = -ENOMEM;
188 		goto out;
189 	}
190 
191 	irq = platform_get_irq(pdev, 0);
192 	if (irq < 0) {
193 		dev_err(&pdev->dev, "irq resources not found\n");
194 		rc = irq;
195 		goto out;
196 	}
197 
198 	mgmtdev = devm_kzalloc(&pdev->dev, sizeof(*mgmtdev), GFP_KERNEL);
199 	if (!mgmtdev) {
200 		rc = -ENOMEM;
201 		goto out;
202 	}
203 
204 	mgmtdev->pdev = pdev;
205 	mgmtdev->addrsize = resource_size(res);
206 	mgmtdev->virtaddr = virtaddr;
207 
208 	rc = device_property_read_u32(&pdev->dev, "dma-channels",
209 				      &mgmtdev->dma_channels);
210 	if (rc) {
211 		dev_err(&pdev->dev, "number of channels missing\n");
212 		goto out;
213 	}
214 
215 	rc = device_property_read_u32(&pdev->dev,
216 				      "channel-reset-timeout-cycles",
217 				      &mgmtdev->chreset_timeout_cycles);
218 	if (rc) {
219 		dev_err(&pdev->dev, "channel reset timeout missing\n");
220 		goto out;
221 	}
222 
223 	rc = device_property_read_u32(&pdev->dev, "max-write-burst-bytes",
224 				      &mgmtdev->max_write_request);
225 	if (rc) {
226 		dev_err(&pdev->dev, "max-write-burst-bytes missing\n");
227 		goto out;
228 	}
229 
230 	if (max_write_request) {
231 		dev_info(&pdev->dev, "overriding max-write-burst-bytes: %d\n",
232 			max_write_request);
233 		mgmtdev->max_write_request = max_write_request;
234 	} else
235 		max_write_request = mgmtdev->max_write_request;
236 
237 	rc = device_property_read_u32(&pdev->dev, "max-read-burst-bytes",
238 				      &mgmtdev->max_read_request);
239 	if (rc) {
240 		dev_err(&pdev->dev, "max-read-burst-bytes missing\n");
241 		goto out;
242 	}
243 	if (max_read_request) {
244 		dev_info(&pdev->dev, "overriding max-read-burst-bytes: %d\n",
245 			max_read_request);
246 		mgmtdev->max_read_request = max_read_request;
247 	} else
248 		max_read_request = mgmtdev->max_read_request;
249 
250 	rc = device_property_read_u32(&pdev->dev, "max-write-transactions",
251 				      &mgmtdev->max_wr_xactions);
252 	if (rc) {
253 		dev_err(&pdev->dev, "max-write-transactions missing\n");
254 		goto out;
255 	}
256 	if (max_wr_xactions) {
257 		dev_info(&pdev->dev, "overriding max-write-transactions: %d\n",
258 			max_wr_xactions);
259 		mgmtdev->max_wr_xactions = max_wr_xactions;
260 	} else
261 		max_wr_xactions = mgmtdev->max_wr_xactions;
262 
263 	rc = device_property_read_u32(&pdev->dev, "max-read-transactions",
264 				      &mgmtdev->max_rd_xactions);
265 	if (rc) {
266 		dev_err(&pdev->dev, "max-read-transactions missing\n");
267 		goto out;
268 	}
269 	if (max_rd_xactions) {
270 		dev_info(&pdev->dev, "overriding max-read-transactions: %d\n",
271 			max_rd_xactions);
272 		mgmtdev->max_rd_xactions = max_rd_xactions;
273 	} else
274 		max_rd_xactions = mgmtdev->max_rd_xactions;
275 
276 	mgmtdev->priority = devm_kcalloc(&pdev->dev,
277 					 mgmtdev->dma_channels,
278 					 sizeof(*mgmtdev->priority),
279 					 GFP_KERNEL);
280 	if (!mgmtdev->priority) {
281 		rc = -ENOMEM;
282 		goto out;
283 	}
284 
285 	mgmtdev->weight = devm_kcalloc(&pdev->dev,
286 				       mgmtdev->dma_channels,
287 				       sizeof(*mgmtdev->weight), GFP_KERNEL);
288 	if (!mgmtdev->weight) {
289 		rc = -ENOMEM;
290 		goto out;
291 	}
292 
293 	rc = hidma_mgmt_setup(mgmtdev);
294 	if (rc) {
295 		dev_err(&pdev->dev, "setup failed\n");
296 		goto out;
297 	}
298 
299 	/* start the HW */
300 	val = readl(mgmtdev->virtaddr + HIDMA_CFG_OFFSET);
301 	val |= 1;
302 	writel(val, mgmtdev->virtaddr + HIDMA_CFG_OFFSET);
303 
304 	rc = hidma_mgmt_init_sys(mgmtdev);
305 	if (rc) {
306 		dev_err(&pdev->dev, "sysfs setup failed\n");
307 		goto out;
308 	}
309 
310 	dev_info(&pdev->dev,
311 		 "HW rev: %d.%d @ %pa with %d physical channels\n",
312 		 mgmtdev->hw_version_major, mgmtdev->hw_version_minor,
313 		 &res->start, mgmtdev->dma_channels);
314 
315 	platform_set_drvdata(pdev, mgmtdev);
316 	pm_runtime_mark_last_busy(&pdev->dev);
317 	pm_runtime_put_autosuspend(&pdev->dev);
318 	return 0;
319 out:
320 	pm_runtime_put_sync_suspend(&pdev->dev);
321 	pm_runtime_disable(&pdev->dev);
322 	return rc;
323 }
324 
#if IS_ENABLED(CONFIG_ACPI)
/* ACPI _HID of the HIDMA management object on Qualcomm ACPI platforms */
static const struct acpi_device_id hidma_mgmt_acpi_ids[] = {
	{"QCOM8060"},
	{},
};
MODULE_DEVICE_TABLE(acpi, hidma_mgmt_acpi_ids);
#endif
332 
/* device tree compatible string for the management interface */
static const struct of_device_id hidma_mgmt_match[] = {
	{.compatible = "qcom,hidma-mgmt-1.0",},
	{},
};
MODULE_DEVICE_TABLE(of, hidma_mgmt_match);
338 
/*
 * Platform driver binding via either device tree or ACPI; no .remove
 * callback is provided here, so the device stays bound for module life.
 */
static struct platform_driver hidma_mgmt_driver = {
	.probe = hidma_mgmt_probe,
	.driver = {
		   .name = "hidma-mgmt",
		   .of_match_table = hidma_mgmt_match,
		   .acpi_match_table = ACPI_PTR(hidma_mgmt_acpi_ids),
	},
};
347 
#if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ)
/* monotonically increasing id for the child platform devices created below */
static int object_counter;

/*
 * hidma_mgmt_of_populate_channels() - create a platform device per DMA
 * channel child node of the management node @np.
 *
 * For each available child: parse its "reg" property into IORESOURCE_MEM
 * entries, map its interrupt into a trailing IORESOURCE_IRQ entry, and
 * register a platform device carrying those resources so the channel
 * driver can bind to it.
 *
 * for_each_available_child_of_node() holds a reference on @child for the
 * duration of each iteration; every early exit from the loop must drop
 * that reference explicitly or the node refcount leaks.
 *
 * Return: 0 on success, a negative errno on the first failing child.
 */
static int __init hidma_mgmt_of_populate_channels(struct device_node *np)
{
	struct platform_device *pdev_parent = of_find_device_by_node(np);
	struct platform_device_info pdevinfo;
	struct of_phandle_args out_irq;
	struct device_node *child;
	struct resource *res;
	const __be32 *cell;
	int ret = 0, size, i, num;
	u64 addr, addr_size;

	for_each_available_child_of_node(np, child) {
		struct resource *res_iter;
		struct platform_device *new_pdev;

		cell = of_get_property(child, "reg", &size);
		if (!cell) {
			ret = -EINVAL;
			of_node_put(child);
			goto out;
		}

		/* number of "reg" tuples, plus one slot for the IRQ */
		size /= sizeof(*cell);
		num = size /
			(of_n_addr_cells(child) + of_n_size_cells(child)) + 1;

		/* allocate a resource array */
		res = kcalloc(num, sizeof(*res), GFP_KERNEL);
		if (!res) {
			ret = -ENOMEM;
			of_node_put(child);
			goto out;
		}

		/* read each reg value */
		i = 0;
		res_iter = res;
		while (i < size) {
			addr = of_read_number(&cell[i],
					      of_n_addr_cells(child));
			i += of_n_addr_cells(child);

			addr_size = of_read_number(&cell[i],
						   of_n_size_cells(child));
			i += of_n_size_cells(child);

			res_iter->start = addr;
			res_iter->end = res_iter->start + addr_size - 1;
			res_iter->flags = IORESOURCE_MEM;
			res_iter++;
		}

		ret = of_irq_parse_one(child, 0, &out_irq);
		if (ret) {
			of_node_put(child);
			goto out;
		}

		/*
		 * NOTE(review): irq_create_of_mapping() returns 0 on failure;
		 * a zero IRQ resource is left for the child's probe to reject
		 * via platform_get_irq() — confirm this is the intent.
		 */
		res_iter->start = irq_create_of_mapping(&out_irq);
		res_iter->name = "hidma event irq";
		res_iter->flags = IORESOURCE_IRQ;

		memset(&pdevinfo, 0, sizeof(pdevinfo));
		pdevinfo.fwnode = &child->fwnode;
		pdevinfo.parent = pdev_parent ? &pdev_parent->dev : NULL;
		pdevinfo.name = child->name;
		pdevinfo.id = object_counter++;
		pdevinfo.res = res;
		pdevinfo.num_res = num;
		pdevinfo.data = NULL;
		pdevinfo.size_data = 0;
		pdevinfo.dma_mask = DMA_BIT_MASK(64);
		new_pdev = platform_device_register_full(&pdevinfo);
		if (IS_ERR(new_pdev)) {
			ret = PTR_ERR(new_pdev);
			of_node_put(child);
			goto out;
		}
		/* the new device keeps its own reference on the child node */
		of_node_get(child);
		new_pdev->dev.of_node = child;
		of_dma_configure(&new_pdev->dev, child);
		/*
		 * It is assumed that calling of_msi_configure is safe on
		 * platforms with or without MSI support.
		 */
		of_msi_configure(&new_pdev->dev, child);
		of_node_put(child);
		kfree(res);
		res = NULL;
	}
out:
	kfree(res);

	return ret;
}
#endif
442 
443 static int __init hidma_mgmt_init(void)
444 {
445 #if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ)
446 	struct device_node *child;
447 
448 	for_each_matching_node(child, hidma_mgmt_match) {
449 		/* device tree based firmware here */
450 		hidma_mgmt_of_populate_channels(child);
451 	}
452 #endif
453 	platform_driver_register(&hidma_mgmt_driver);
454 
455 	return 0;
456 }
457 module_init(hidma_mgmt_init);
458 MODULE_LICENSE("GPL v2");
459