// SPDX-License-Identifier: GPL-2.0
//
// Copyright (C) 2019 Texas Instruments Incorporated - https://www.ti.com/
// Author: Vignesh Raghavendra <vigneshr@ti.com>

#include <linux/completion.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mtd/cfi.h>
#include <linux/mtd/hyperbus.h>
#include <linux/mtd/mtd.h>
#include <linux/mux/consumer.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/sched/task_stack.h>
#include <linux/types.h>

#define AM654_HBMC_CALIB_COUNT 25

struct am654_hbmc_device_priv {
	struct completion rx_dma_complete;
	phys_addr_t device_base;
	struct hyperbus_ctlr *ctlr;
	struct dma_chan *rx_chan;
};

struct am654_hbmc_priv {
	struct hyperbus_ctlr ctlr;
	struct hyperbus_device hbdev;
	struct mux_control *mux_ctrl;
};

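/*
 * Calibrate by checking that the CFI Query Identification String ("QRY")
 * can be read back reliably: it must be seen on five consecutive attempts
 * (within AM654_HBMC_CALIB_COUNT tries) for calibration to be treated as
 * successful.
 */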
static int am654_hbmc_calibrate(struct hyperbus_device *hbdev)
{
	struct map_info *map = &hbdev->map;
	struct cfi_private cfi;
	int count = AM654_HBMC_CALIB_COUNT;
	int pass_count = 0;
	int ret;

	cfi.interleave = 1;
	cfi.device_type = CFI_DEVICETYPE_X16;
	cfi_send_gen_cmd(0xF0, 0, 0, map, &cfi, cfi.device_type, NULL);
	cfi_send_gen_cmd(0x98, 0x55, 0, map, &cfi, cfi.device_type, NULL);

	while (count--) {
		ret = cfi_qry_present(map, 0, &cfi);
		if (ret)
			pass_count++;
		else
			pass_count = 0;
		if (pass_count == 5)
			break;
	}

	cfi_qry_mode_off(0, map, &cfi);

	return ret;
}

static void am654_hbmc_dma_callback(void *param)
{
	struct am654_hbmc_device_priv *priv = param;

	complete(&priv->rx_dma_complete);
}

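/*
 * Read @len bytes from the memory-mapped flash at offset @from into @to
 * using a dmaengine MEMCPY channel. Buffers that cannot be DMA-mapped
 * safely (not in the kernel linear map, or located on the stack) are
 * rejected so the caller can fall back to a PIO copy.
 */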
static int am654_hbmc_dma_read(struct am654_hbmc_device_priv *priv, void *to,
			       unsigned long from, ssize_t len)
{
	enum dma_ctrl_flags flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
	struct dma_chan *rx_chan = priv->rx_chan;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dst, dma_src;
	dma_cookie_t cookie;
	int ret;

	if (!priv->rx_chan || !virt_addr_valid(to) || object_is_on_stack(to))
		return -EINVAL;

	dma_dst = dma_map_single(rx_chan->device->dev, to, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(rx_chan->device->dev, dma_dst)) {
		dev_dbg(priv->ctlr->dev, "DMA mapping failed\n");
		return -EIO;
	}

	dma_src = priv->device_base + from;
	tx = dmaengine_prep_dma_memcpy(rx_chan, dma_dst, dma_src, len, flags);
	if (!tx) {
		dev_err(priv->ctlr->dev, "device_prep_dma_memcpy error\n");
		ret = -EIO;
		goto unmap_dma;
	}

	reinit_completion(&priv->rx_dma_complete);
	tx->callback = am654_hbmc_dma_callback;
	tx->callback_param = priv;
	cookie = dmaengine_submit(tx);

	ret = dma_submit_error(cookie);
	if (ret) {
		dev_err(priv->ctlr->dev, "dma_submit_error %d\n", cookie);
		goto unmap_dma;
	}

	dma_async_issue_pending(rx_chan);
	if (!wait_for_completion_timeout(&priv->rx_dma_complete, msecs_to_jiffies(len + 1000))) {
		dmaengine_terminate_sync(rx_chan);
		dev_err(priv->ctlr->dev, "DMA wait_for_completion_timeout\n");
		ret = -ETIMEDOUT;
	}

unmap_dma:
	dma_unmap_single(rx_chan->device->dev, dma_dst, len, DMA_FROM_DEVICE);
	return ret;
}

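/*
 * copy_from() hook: use the DMA fast path for reads of SZ_1K or more and
 * fall back to memcpy_fromio() for short reads or whenever the DMA read
 * fails.
 */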
static void am654_hbmc_read(struct hyperbus_device *hbdev, void *to,
			    unsigned long from, ssize_t len)
{
	struct am654_hbmc_device_priv *priv = hbdev->priv;

	if (len < SZ_1K || am654_hbmc_dma_read(priv, to, from, len))
		memcpy_fromio(to, hbdev->map.virt + from, len);
}

static const struct hyperbus_ops am654_hbmc_ops = {
	.calibrate = am654_hbmc_calibrate,
	.copy_from = am654_hbmc_read,
};

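/*
 * Request any MEMCPY-capable dmaengine channel for read acceleration.
 * Apart from probe deferral, failure to get a channel is not fatal: reads
 * then go through memcpy_fromio() only.
 */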
static int am654_hbmc_request_mmap_dma(struct am654_hbmc_device_priv *priv)
{
	struct dma_chan *rx_chan;
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	rx_chan = dma_request_chan_by_mask(&mask);
	if (IS_ERR(rx_chan)) {
		if (PTR_ERR(rx_chan) == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		dev_dbg(priv->ctlr->dev, "No DMA channel available\n");
		return 0;
	}
	priv->rx_chan = rx_chan;
	init_completion(&priv->rx_dma_complete);

	return 0;
}

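/*
 * Probe: map the child device node's memory-mapped window, optionally
 * select the HBMC mux, request an optional DMA channel and register the
 * HyperBus device with the core.
 */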
static int am654_hbmc_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct am654_hbmc_device_priv *dev_priv;
	struct device *dev = &pdev->dev;
	struct am654_hbmc_priv *priv;
	struct resource res;
	int ret;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	platform_set_drvdata(pdev, priv);

	priv->hbdev.np = of_get_next_child(np, NULL);
	ret = of_address_to_resource(priv->hbdev.np, 0, &res);
	if (ret)
		return ret;

	if (of_property_read_bool(dev->of_node, "mux-controls")) {
		struct mux_control *control = devm_mux_control_get(dev, NULL);

		if (IS_ERR(control))
			return PTR_ERR(control);

		ret = mux_control_select(control, 1);
		if (ret) {
			dev_err(dev, "Failed to select HBMC mux\n");
			return ret;
		}
		priv->mux_ctrl = control;
	}

	priv->hbdev.map.size = resource_size(&res);
	priv->hbdev.map.virt = devm_ioremap_resource(dev, &res);
	if (IS_ERR(priv->hbdev.map.virt))
		return PTR_ERR(priv->hbdev.map.virt);

	priv->ctlr.dev = dev;
	priv->ctlr.ops = &am654_hbmc_ops;
	priv->hbdev.ctlr = &priv->ctlr;

	dev_priv = devm_kzalloc(dev, sizeof(*dev_priv), GFP_KERNEL);
	if (!dev_priv) {
		ret = -ENOMEM;
		goto disable_mux;
	}

	priv->hbdev.priv = dev_priv;
	dev_priv->device_base = res.start;
	dev_priv->ctlr = &priv->ctlr;

	ret = am654_hbmc_request_mmap_dma(dev_priv);
	if (ret)
		goto disable_mux;

	ret = hyperbus_register_device(&priv->hbdev);
	if (ret) {
		dev_err(dev, "failed to register controller\n");
		goto release_dma;
	}

	return 0;
release_dma:
	if (dev_priv->rx_chan)
		dma_release_channel(dev_priv->rx_chan);
disable_mux:
	if (priv->mux_ctrl)
		mux_control_deselect(priv->mux_ctrl);
	return ret;
}

static int am654_hbmc_remove(struct platform_device *pdev)
{
	struct am654_hbmc_priv *priv = platform_get_drvdata(pdev);
	struct am654_hbmc_device_priv *dev_priv = priv->hbdev.priv;
	int ret;

	ret = hyperbus_unregister_device(&priv->hbdev);
	if (priv->mux_ctrl)
		mux_control_deselect(priv->mux_ctrl);

	if (dev_priv->rx_chan)
		dma_release_channel(dev_priv->rx_chan);

	return ret;
}

static const struct of_device_id am654_hbmc_dt_ids[] = {
	{
		.compatible = "ti,am654-hbmc",
	},
	{ /* end of table */ }
};

MODULE_DEVICE_TABLE(of, am654_hbmc_dt_ids);

static struct platform_driver am654_hbmc_platform_driver = {
	.probe = am654_hbmc_probe,
	.remove = am654_hbmc_remove,
	.driver = {
		.name = "hbmc-am654",
		.of_match_table = am654_hbmc_dt_ids,
	},
};

module_platform_driver(am654_hbmc_platform_driver);

MODULE_DESCRIPTION("HBMC driver for AM654 SoC");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:hbmc-am654");
MODULE_AUTHOR("Vignesh Raghavendra <vigneshr@ti.com>");