/*
 * Core driver for the High Speed UART DMA
 *
 * Copyright (C) 2015 Intel Corporation
 * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
 *
 * Partially based on the bits found in drivers/tty/serial/mfd.c.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

/*
 * DMA channel allocation:
 * 1. Even-numbered channels are used for DMA Read (UART TX), odd-numbered
 *    channels for DMA Write (UART RX).
 * 2. Channels 0/1 are assigned to port 0, channels 2/3 to port 1,
 *    channels 4/5 to port 2, and so on.
 */
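
/*
 * For example, with six channels the mapping is:
 *
 *	UART port 0: chan 0 (TX, mem-to-dev), chan 1 (RX, dev-to-mem)
 *	UART port 1: chan 2 (TX),             chan 3 (RX)
 *	UART port 2: chan 4 (TX),             chan 5 (RX)
 *
 * i.e. for channel i: port = i / 2, and the direction is chosen by (i & 1),
 * which matches the per-channel setup done in hsu_dma_probe().
 */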

#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>

#include "hsu.h"

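/*
 * Bus widths advertised to the dmaengine core via the src_addr_widths and
 * dst_addr_widths capabilities set up in hsu_dma_probe().
 */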
#define HSU_DMA_BUSWIDTHS				\
	BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED)	|	\
	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE)		|	\
	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES)		|	\
	BIT(DMA_SLAVE_BUSWIDTH_3_BYTES)		|	\
	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)		|	\
	BIT(DMA_SLAVE_BUSWIDTH_8_BYTES)		|	\
	BIT(DMA_SLAVE_BUSWIDTH_16_BYTES)

static inline void hsu_chan_disable(struct hsu_dma_chan *hsuc)
{
	hsu_chan_writel(hsuc, HSU_CH_CR, 0);
}

static inline void hsu_chan_enable(struct hsu_dma_chan *hsuc)
{
	u32 cr = HSU_CH_CR_CHA;

	if (hsuc->direction == DMA_MEM_TO_DEV)
		cr &= ~HSU_CH_CR_CHD;
	else if (hsuc->direction == DMA_DEV_TO_MEM)
		cr |= HSU_CH_CR_CHD;

	hsu_chan_writel(hsuc, HSU_CH_CR, cr);
}

static void hsu_dma_chan_start(struct hsu_dma_chan *hsuc)
{
	struct dma_slave_config *config = &hsuc->config;
	struct hsu_dma_desc *desc = hsuc->desc;
	u32 bsr, mtsr;
	u32 dcr = HSU_CH_DCR_CHSOE | HSU_CH_DCR_CHEI;
	unsigned int i, count;

	if (hsuc->direction == DMA_MEM_TO_DEV) {
		bsr = config->dst_maxburst;
		mtsr = config->dst_addr_width;
	} else if (hsuc->direction == DMA_DEV_TO_MEM) {
		bsr = config->src_maxburst;
		mtsr = config->src_addr_width;
	} else {
		/* Unsupported direction */
		return;
	}

	hsu_chan_disable(hsuc);

	hsu_chan_writel(hsuc, HSU_CH_DCR, 0);
	hsu_chan_writel(hsuc, HSU_CH_BSR, bsr);
	hsu_chan_writel(hsuc, HSU_CH_MTSR, mtsr);

	/* Set up the hardware descriptors */
	count = (desc->nents - desc->active) % HSU_DMA_CHAN_NR_DESC;
	for (i = 0; i < count; i++) {
		hsu_chan_writel(hsuc, HSU_CH_DxSAR(i), desc->sg[i].addr);
		hsu_chan_writel(hsuc, HSU_CH_DxTSR(i), desc->sg[i].len);

		/* Prepare value for DCR */
		dcr |= HSU_CH_DCR_DESCA(i);
		dcr |= HSU_CH_DCR_CHTOI(i);	/* timeout bit, see HSU Errata 1 */

		desc->active++;
	}
	/* Only for the last descriptor in the chain */
	dcr |= HSU_CH_DCR_CHSOD(count - 1);
	dcr |= HSU_CH_DCR_CHDI(count - 1);

	hsu_chan_writel(hsuc, HSU_CH_DCR, dcr);

	hsu_chan_enable(hsuc);
}

static void hsu_dma_stop_channel(struct hsu_dma_chan *hsuc)
{
	unsigned long flags;

	spin_lock_irqsave(&hsuc->lock, flags);
	hsu_chan_disable(hsuc);
	hsu_chan_writel(hsuc, HSU_CH_DCR, 0);
	spin_unlock_irqrestore(&hsuc->lock, flags);
}

static void hsu_dma_start_channel(struct hsu_dma_chan *hsuc)
{
	unsigned long flags;

	spin_lock_irqsave(&hsuc->lock, flags);
	hsu_dma_chan_start(hsuc);
	spin_unlock_irqrestore(&hsuc->lock, flags);
}

static void hsu_dma_start_transfer(struct hsu_dma_chan *hsuc)
{
	struct virt_dma_desc *vdesc;

	/* Get the next descriptor */
	vdesc = vchan_next_desc(&hsuc->vchan);
	if (!vdesc) {
		hsuc->desc = NULL;
		return;
	}

	list_del(&vdesc->node);
	hsuc->desc = to_hsu_dma_desc(vdesc);

	/* Start the channel with a new descriptor */
	hsu_dma_start_channel(hsuc);
}

static u32 hsu_dma_chan_get_sr(struct hsu_dma_chan *hsuc)
{
	unsigned long flags;
	u32 sr;

	spin_lock_irqsave(&hsuc->lock, flags);
	sr = hsu_chan_readl(hsuc, HSU_CH_SR);
	spin_unlock_irqrestore(&hsuc->lock, flags);

	return sr;
}

irqreturn_t hsu_dma_irq(struct hsu_dma_chip *chip, unsigned short nr)
{
	struct hsu_dma_chan *hsuc;
	struct hsu_dma_desc *desc;
	unsigned long flags;
	u32 sr;

	/* Sanity check */
	if (nr >= chip->pdata->nr_channels)
		return IRQ_NONE;

	hsuc = &chip->hsu->chan[nr];

	/*
	 * The IRQ status must always be read to clear it, no matter what.
	 * There is a hardware bug, see Errata 5, HSD 2900918.
	 */
	sr = hsu_dma_chan_get_sr(hsuc);
	if (!sr)
		return IRQ_NONE;

	/* Timeout IRQ: we need to wait a short while, see Errata 2 */
	if (hsuc->direction == DMA_DEV_TO_MEM && (sr & HSU_CH_SR_DESCTO_ANY))
		udelay(2);

	sr &= ~HSU_CH_SR_DESCTO_ANY;
	if (!sr)
		return IRQ_HANDLED;

	spin_lock_irqsave(&hsuc->vchan.lock, flags);
	desc = hsuc->desc;
	if (desc) {
		if (sr & HSU_CH_SR_CHE) {
			desc->status = DMA_ERROR;
		} else if (desc->active < desc->nents) {
			hsu_dma_start_channel(hsuc);
		} else {
			vchan_cookie_complete(&desc->vdesc);
			desc->status = DMA_COMPLETE;
			hsu_dma_start_transfer(hsuc);
		}
	}
	spin_unlock_irqrestore(&hsuc->vchan.lock, flags);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(hsu_dma_irq);
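
/*
 * A host driver is expected to dispatch its interrupt to hsu_dma_irq() per
 * channel.  A minimal, purely illustrative sketch (foo_hsu_irq is a
 * hypothetical name; a real host would typically consult its own interrupt
 * status register before looping over all channels):
 *
 *	static irqreturn_t foo_hsu_irq(int irq, void *dev)
 *	{
 *		struct hsu_dma_chip *chip = dev;
 *		irqreturn_t ret = IRQ_NONE;
 *		unsigned short i;
 *
 *		for (i = 0; i < chip->pdata->nr_channels; i++)
 *			ret |= hsu_dma_irq(chip, i);
 *
 *		return ret;
 *	}
 */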

static struct hsu_dma_desc *hsu_dma_alloc_desc(unsigned int nents)
{
	struct hsu_dma_desc *desc;

	desc = kzalloc(sizeof(*desc), GFP_ATOMIC);
	if (!desc)
		return NULL;

	desc->sg = kcalloc(nents, sizeof(*desc->sg), GFP_ATOMIC);
	if (!desc->sg) {
		kfree(desc);
		return NULL;
	}

	return desc;
}

static void hsu_dma_desc_free(struct virt_dma_desc *vdesc)
{
	struct hsu_dma_desc *desc = to_hsu_dma_desc(vdesc);

	kfree(desc->sg);
	kfree(desc);
}

static struct dma_async_tx_descriptor *hsu_dma_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
	struct hsu_dma_desc *desc;
	struct scatterlist *sg;
	unsigned int i;

	desc = hsu_dma_alloc_desc(sg_len);
	if (!desc)
		return NULL;

	for_each_sg(sgl, sg, sg_len, i) {
		desc->sg[i].addr = sg_dma_address(sg);
		desc->sg[i].len = sg_dma_len(sg);
	}

	desc->nents = sg_len;
	desc->direction = direction;
	desc->active = 0;
	desc->status = DMA_IN_PROGRESS;

	return vchan_tx_prep(&hsuc->vchan, &desc->vdesc, flags);
}

static void hsu_dma_issue_pending(struct dma_chan *chan)
{
	struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&hsuc->vchan.lock, flags);
	if (vchan_issue_pending(&hsuc->vchan) && !hsuc->desc)
		hsu_dma_start_transfer(hsuc);
	spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
}

static size_t hsu_dma_desc_size(struct hsu_dma_desc *desc)
{
	size_t bytes = 0;
	unsigned int i;

	for (i = desc->active; i < desc->nents; i++)
		bytes += desc->sg[i].len;

	return bytes;
}

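/*
 * Residue of the descriptor currently in flight: the sg entries that have
 * not been programmed into hardware yet, plus whatever the hardware still
 * reports in the transfer size registers of the programmed descriptors.
 */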
static size_t hsu_dma_active_desc_size(struct hsu_dma_chan *hsuc)
{
	struct hsu_dma_desc *desc = hsuc->desc;
	size_t bytes = hsu_dma_desc_size(desc);
	int i;
	unsigned long flags;

	spin_lock_irqsave(&hsuc->lock, flags);
	i = desc->active % HSU_DMA_CHAN_NR_DESC;
	do {
		bytes += hsu_chan_readl(hsuc, HSU_CH_DxTSR(i));
	} while (--i >= 0);
	spin_unlock_irqrestore(&hsuc->lock, flags);

	return bytes;
}

static enum dma_status hsu_dma_tx_status(struct dma_chan *chan,
	dma_cookie_t cookie, struct dma_tx_state *state)
{
	struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
	struct virt_dma_desc *vdesc;
	enum dma_status status;
	size_t bytes;
	unsigned long flags;

	status = dma_cookie_status(chan, cookie, state);
	if (status == DMA_COMPLETE)
		return status;

	spin_lock_irqsave(&hsuc->vchan.lock, flags);
	vdesc = vchan_find_desc(&hsuc->vchan, cookie);
	if (hsuc->desc && cookie == hsuc->desc->vdesc.tx.cookie) {
		bytes = hsu_dma_active_desc_size(hsuc);
		dma_set_residue(state, bytes);
		status = hsuc->desc->status;
	} else if (vdesc) {
		bytes = hsu_dma_desc_size(to_hsu_dma_desc(vdesc));
		dma_set_residue(state, bytes);
	}
	spin_unlock_irqrestore(&hsuc->vchan.lock, flags);

	return status;
}

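/*
 * The configuration copied here is consumed by hsu_dma_chan_start():
 * dst_maxburst/dst_addr_width for mem-to-dev (UART TX) and
 * src_maxburst/src_addr_width for dev-to-mem (UART RX) end up in the
 * channel BSR and MTSR registers.
 */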
static int hsu_dma_slave_config(struct dma_chan *chan,
				struct dma_slave_config *config)
{
	struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);

	/* Check if chan will be configured for slave transfers */
	if (!is_slave_direction(config->direction))
		return -EINVAL;

	memcpy(&hsuc->config, config, sizeof(hsuc->config));

	return 0;
}

static void hsu_dma_chan_deactivate(struct hsu_dma_chan *hsuc)
{
	unsigned long flags;

	spin_lock_irqsave(&hsuc->lock, flags);
	hsu_chan_disable(hsuc);
	spin_unlock_irqrestore(&hsuc->lock, flags);
}

static void hsu_dma_chan_activate(struct hsu_dma_chan *hsuc)
{
	unsigned long flags;

	spin_lock_irqsave(&hsuc->lock, flags);
	hsu_chan_enable(hsuc);
	spin_unlock_irqrestore(&hsuc->lock, flags);
}

static int hsu_dma_pause(struct dma_chan *chan)
{
	struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&hsuc->vchan.lock, flags);
	if (hsuc->desc && hsuc->desc->status == DMA_IN_PROGRESS) {
		hsu_dma_chan_deactivate(hsuc);
		hsuc->desc->status = DMA_PAUSED;
	}
	spin_unlock_irqrestore(&hsuc->vchan.lock, flags);

	return 0;
}

static int hsu_dma_resume(struct dma_chan *chan)
{
	struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&hsuc->vchan.lock, flags);
	if (hsuc->desc && hsuc->desc->status == DMA_PAUSED) {
		hsuc->desc->status = DMA_IN_PROGRESS;
		hsu_dma_chan_activate(hsuc);
	}
	spin_unlock_irqrestore(&hsuc->vchan.lock, flags);

	return 0;
}

static int hsu_dma_terminate_all(struct dma_chan *chan)
{
	struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&hsuc->vchan.lock, flags);

	hsu_dma_stop_channel(hsuc);
	hsuc->desc = NULL;

	vchan_get_all_descriptors(&hsuc->vchan, &head);
	spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
	vchan_dma_desc_free_list(&hsuc->vchan, &head);

	return 0;
}

static int hsu_dma_alloc_chan_resources(struct dma_chan *chan)
{
	return 0;
}

static void hsu_dma_free_chan_resources(struct dma_chan *chan)
{
	vchan_free_chan_resources(to_virt_chan(chan));
}

int hsu_dma_probe(struct hsu_dma_chip *chip)
{
	struct hsu_dma *hsu;
	struct hsu_dma_platform_data *pdata = chip->pdata;
	void __iomem *addr = chip->regs + chip->offset;
	unsigned short i;
	int ret;

	hsu = devm_kzalloc(chip->dev, sizeof(*hsu), GFP_KERNEL);
	if (!hsu)
		return -ENOMEM;

	chip->hsu = hsu;

	if (!pdata) {
		pdata = devm_kzalloc(chip->dev, sizeof(*pdata), GFP_KERNEL);
		if (!pdata)
			return -ENOMEM;

		chip->pdata = pdata;

		/* Guess nr_channels from the IO space length */
		pdata->nr_channels = (chip->length - chip->offset) /
				     HSU_DMA_CHAN_LENGTH;
	}

	hsu->chan = devm_kcalloc(chip->dev, pdata->nr_channels,
				 sizeof(*hsu->chan), GFP_KERNEL);
	if (!hsu->chan)
		return -ENOMEM;

	INIT_LIST_HEAD(&hsu->dma.channels);
	for (i = 0; i < pdata->nr_channels; i++) {
		struct hsu_dma_chan *hsuc = &hsu->chan[i];

		hsuc->vchan.desc_free = hsu_dma_desc_free;
		vchan_init(&hsuc->vchan, &hsu->dma);

		hsuc->direction = (i & 0x1) ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;
		hsuc->reg = addr + i * HSU_DMA_CHAN_LENGTH;

		spin_lock_init(&hsuc->lock);
	}

	dma_cap_set(DMA_SLAVE, hsu->dma.cap_mask);
	dma_cap_set(DMA_PRIVATE, hsu->dma.cap_mask);

	hsu->dma.device_alloc_chan_resources = hsu_dma_alloc_chan_resources;
	hsu->dma.device_free_chan_resources = hsu_dma_free_chan_resources;

	hsu->dma.device_prep_slave_sg = hsu_dma_prep_slave_sg;

	hsu->dma.device_issue_pending = hsu_dma_issue_pending;
	hsu->dma.device_tx_status = hsu_dma_tx_status;

	hsu->dma.device_config = hsu_dma_slave_config;
	hsu->dma.device_pause = hsu_dma_pause;
	hsu->dma.device_resume = hsu_dma_resume;
	hsu->dma.device_terminate_all = hsu_dma_terminate_all;

	hsu->dma.src_addr_widths = HSU_DMA_BUSWIDTHS;
	hsu->dma.dst_addr_widths = HSU_DMA_BUSWIDTHS;
	hsu->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	hsu->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

	hsu->dma.dev = chip->dev;

	ret = dma_async_device_register(&hsu->dma);
	if (ret)
		return ret;

	dev_info(chip->dev, "Found HSU DMA, %d channels\n", pdata->nr_channels);
	return 0;
}
EXPORT_SYMBOL_GPL(hsu_dma_probe);
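
/*
 * A host driver (e.g. PCI or platform glue) fills in the chip structure and
 * then calls hsu_dma_probe().  A minimal, purely illustrative sketch (pdev,
 * iomem_base and iomem_length are placeholders; error handling omitted):
 *
 *	chip->dev = &pdev->dev;
 *	chip->regs = iomem_base;
 *	chip->length = iomem_length;
 *	chip->offset = 0;	// start of the channel registers, host specific
 *	chip->pdata = NULL;	// let hsu_dma_probe() guess nr_channels
 *
 *	ret = hsu_dma_probe(chip);
 *
 * hsu_dma_remove() below reverses the registration at teardown.
 */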

int hsu_dma_remove(struct hsu_dma_chip *chip)
{
	struct hsu_dma *hsu = chip->hsu;
	unsigned short i;

	dma_async_device_unregister(&hsu->dma);

	for (i = 0; i < chip->pdata->nr_channels; i++) {
		struct hsu_dma_chan *hsuc = &hsu->chan[i];

		tasklet_kill(&hsuc->vchan.task);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(hsu_dma_remove);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("High Speed UART DMA core driver");
MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");