// SPDX-License-Identifier: GPL-2.0-only
/*
 * Core driver for the High Speed UART DMA
 *
 * Copyright (C) 2015 Intel Corporation
 * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
 *
 * Partially based on the bits found in drivers/tty/serial/mfd.c.
 */

/*
 * DMA channel allocation:
 * 1. Even numbered channels are used for DMA Read (UART TX), odd channels
 *    for DMA Write (UART RX).
 * 2. Channels 0/1 are assigned to port 0, channels 2/3 to port 1, channels
 *    4/5 to port 2, and so on.
 */
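
/*
 * For illustration, that mapping could be written as the following
 * helpers (hypothetical, shown for clarity only; they are not defined
 * by this driver):
 *
 *	#define HSU_UART_TX_CHAN(port)	((port) * 2)
 *	#define HSU_UART_RX_CHAN(port)	((port) * 2 + 1)
 */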

#include <linux/bits.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/percpu-defs.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/spinlock.h>

#include "hsu.h"

#define HSU_DMA_BUSWIDTHS				\
	BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED)	|	\
	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE)		|	\
	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES)		|	\
	BIT(DMA_SLAVE_BUSWIDTH_3_BYTES)		|	\
	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)		|	\
	BIT(DMA_SLAVE_BUSWIDTH_8_BYTES)		|	\
	BIT(DMA_SLAVE_BUSWIDTH_16_BYTES)

static inline void hsu_chan_disable(struct hsu_dma_chan *hsuc)
{
	hsu_chan_writel(hsuc, HSU_CH_CR, 0);
}

static inline void hsu_chan_enable(struct hsu_dma_chan *hsuc)
{
	u32 cr = HSU_CH_CR_CHA;

	if (hsuc->direction == DMA_MEM_TO_DEV)
		cr &= ~HSU_CH_CR_CHD;
	else if (hsuc->direction == DMA_DEV_TO_MEM)
		cr |= HSU_CH_CR_CHD;

	hsu_chan_writel(hsuc, HSU_CH_CR, cr);
}

static void hsu_dma_chan_start(struct hsu_dma_chan *hsuc)
{
	struct dma_slave_config *config = &hsuc->config;
	struct hsu_dma_desc *desc = hsuc->desc;
	u32 bsr = 0, mtsr = 0;	/* to shut the compiler up */
	u32 dcr = HSU_CH_DCR_CHSOE | HSU_CH_DCR_CHEI;
	unsigned int i, count;

	if (hsuc->direction == DMA_MEM_TO_DEV) {
		bsr = config->dst_maxburst;
		mtsr = config->dst_addr_width;
	} else if (hsuc->direction == DMA_DEV_TO_MEM) {
		bsr = config->src_maxburst;
		mtsr = config->src_addr_width;
	}

	hsu_chan_disable(hsuc);

	hsu_chan_writel(hsuc, HSU_CH_DCR, 0);
	hsu_chan_writel(hsuc, HSU_CH_BSR, bsr);
	hsu_chan_writel(hsuc, HSU_CH_MTSR, mtsr);

	/* Set descriptors */
	count = desc->nents - desc->active;
	for (i = 0; i < count && i < HSU_DMA_CHAN_NR_DESC; i++) {
		hsu_chan_writel(hsuc, HSU_CH_DxSAR(i), desc->sg[i].addr);
		hsu_chan_writel(hsuc, HSU_CH_DxTSR(i), desc->sg[i].len);

		/* Prepare value for DCR */
		dcr |= HSU_CH_DCR_DESCA(i);
		dcr |= HSU_CH_DCR_CHTOI(i);	/* timeout bit, see HSU Errata 1 */

		desc->active++;
	}
	/* Only for the last descriptor in the chain */
	dcr |= HSU_CH_DCR_CHSOD(count - 1);
	dcr |= HSU_CH_DCR_CHDI(count - 1);

	hsu_chan_writel(hsuc, HSU_CH_DCR, dcr);

	hsu_chan_enable(hsuc);
}

static void hsu_dma_stop_channel(struct hsu_dma_chan *hsuc)
{
	hsu_chan_disable(hsuc);
	hsu_chan_writel(hsuc, HSU_CH_DCR, 0);
}

static void hsu_dma_start_channel(struct hsu_dma_chan *hsuc)
{
	hsu_dma_chan_start(hsuc);
}

static void hsu_dma_start_transfer(struct hsu_dma_chan *hsuc)
{
	struct virt_dma_desc *vdesc;

	/* Get the next descriptor */
	vdesc = vchan_next_desc(&hsuc->vchan);
	if (!vdesc) {
		hsuc->desc = NULL;
		return;
	}

	list_del(&vdesc->node);
	hsuc->desc = to_hsu_dma_desc(vdesc);

	/* Start the channel with a new descriptor */
	hsu_dma_start_channel(hsuc);
}

/*
 * hsu_dma_get_status() - get DMA channel status
 * @chip: HSUART DMA chip
 * @nr: DMA channel number
 * @status: pointer for DMA Channel Status Register value
 *
 * Description:
 * The function reads and clears the DMA Channel Status Register, checks
 * if it was a timeout interrupt and returns a corresponding value.
 *
 * Caller should provide a valid pointer for the DMA Channel Status
 * Register value that will be returned in @status.
 *
 * Return:
 * 1 for DMA timeout status, 0 for other DMA status, or error code for
 * invalid parameters or no interrupt pending.
 */
int hsu_dma_get_status(struct hsu_dma_chip *chip, unsigned short nr,
		       u32 *status)
{
	struct hsu_dma_chan *hsuc;
	unsigned long flags;
	u32 sr;

	/* Sanity check */
	if (nr >= chip->hsu->nr_channels)
		return -EINVAL;

	hsuc = &chip->hsu->chan[nr];

	/*
	 * No matter the situation, we need to read-clear the IRQ status:
	 * there is a bug, see Errata 5, HSD 2900918.
	 */
	spin_lock_irqsave(&hsuc->vchan.lock, flags);
	sr = hsu_chan_readl(hsuc, HSU_CH_SR);
	spin_unlock_irqrestore(&hsuc->vchan.lock, flags);

	/* Check if any interrupt is pending */
	sr &= ~(HSU_CH_SR_DESCE_ANY | HSU_CH_SR_CDESC_ANY);
	if (!sr)
		return -EIO;

	/* Timeout IRQ: we need to wait some time, see Errata 2 */
	if (sr & HSU_CH_SR_DESCTO_ANY)
		udelay(2);

	/*
	 * At this point, at least one of Descriptor Time Out, Channel Error
	 * or Descriptor Done bits must be set. Clear the Descriptor Time Out
	 * bits and if sr is still non-zero, it must be channel error or
	 * descriptor done which are higher priority than timeout and handled
	 * in hsu_dma_do_irq(). Else, it must be a timeout.
	 */
	sr &= ~HSU_CH_SR_DESCTO_ANY;

	*status = sr;

	return sr ? 0 : 1;
}
EXPORT_SYMBOL_GPL(hsu_dma_get_status);

/*
 * hsu_dma_do_irq() - DMA interrupt handler
 * @chip: HSUART DMA chip
 * @nr: DMA channel number
 * @status: Channel Status Register value
 *
 * Description:
 * This function handles Channel Error and Descriptor Done interrupts.
 * This function should be called after determining that the DMA interrupt
 * is not a normal timeout interrupt, i.e. hsu_dma_get_status() returned 0.
 *
 * Return:
 * 0 for invalid channel number, 1 otherwise.
 */
int hsu_dma_do_irq(struct hsu_dma_chip *chip, unsigned short nr, u32 status)
{
	struct dma_chan_percpu *stat;
	struct hsu_dma_chan *hsuc;
	struct hsu_dma_desc *desc;
	unsigned long flags;

	/* Sanity check */
	if (nr >= chip->hsu->nr_channels)
		return 0;

	hsuc = &chip->hsu->chan[nr];
	stat = this_cpu_ptr(hsuc->vchan.chan.local);

	spin_lock_irqsave(&hsuc->vchan.lock, flags);
	desc = hsuc->desc;
	if (desc) {
		if (status & HSU_CH_SR_CHE) {
			desc->status = DMA_ERROR;
		} else if (desc->active < desc->nents) {
			hsu_dma_start_channel(hsuc);
		} else {
			vchan_cookie_complete(&desc->vdesc);
			desc->status = DMA_COMPLETE;
			stat->bytes_transferred += desc->length;
			hsu_dma_start_transfer(hsuc);
		}
	}
	spin_unlock_irqrestore(&hsuc->vchan.lock, flags);

	return 1;
}
EXPORT_SYMBOL_GPL(hsu_dma_do_irq);
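
/*
 * Typical interrupt flow in a glue driver (a minimal sketch, loosely
 * modelled on the PCI glue; the handler name and the way pending
 * channels are discovered are assumptions of this example, not part of
 * the core API):
 *
 *	static irqreturn_t hsu_glue_irq(int irq, void *dev)
 *	{
 *		struct hsu_dma_chip *chip = dev;
 *		unsigned short i;
 *		u32 status;
 *		int ret = 0, err;
 *
 *		for (i = 0; i < chip->hsu->nr_channels; i++) {
 *			err = hsu_dma_get_status(chip, i, &status);
 *			if (err > 0)
 *				ret |= 1;
 *			else if (err == 0)
 *				ret |= hsu_dma_do_irq(chip, i, status);
 *		}
 *
 *		return IRQ_RETVAL(ret);
 *	}
 */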

static struct hsu_dma_desc *hsu_dma_alloc_desc(unsigned int nents)
{
	struct hsu_dma_desc *desc;

	desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
	if (!desc)
		return NULL;

	desc->sg = kcalloc(nents, sizeof(*desc->sg), GFP_NOWAIT);
	if (!desc->sg) {
		kfree(desc);
		return NULL;
	}

	return desc;
}

static void hsu_dma_desc_free(struct virt_dma_desc *vdesc)
{
	struct hsu_dma_desc *desc = to_hsu_dma_desc(vdesc);

	kfree(desc->sg);
	kfree(desc);
}

static struct dma_async_tx_descriptor *hsu_dma_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
	struct hsu_dma_desc *desc;
	struct scatterlist *sg;
	unsigned int i;

	desc = hsu_dma_alloc_desc(sg_len);
	if (!desc)
		return NULL;

	for_each_sg(sgl, sg, sg_len, i) {
		desc->sg[i].addr = sg_dma_address(sg);
		desc->sg[i].len = sg_dma_len(sg);

		desc->length += sg_dma_len(sg);
	}

	desc->nents = sg_len;
	desc->direction = direction;
	/* desc->active = 0 by kzalloc */
	desc->status = DMA_IN_PROGRESS;

	return vchan_tx_prep(&hsuc->vchan, &desc->vdesc, flags);
}
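
/*
 * From the client side, feeding work to this engine follows the generic
 * dmaengine pattern (a minimal sketch; the FIFO address, bus width and
 * burst size below are assumptions of this example, not values mandated
 * by the driver):
 *
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_MEM_TO_DEV,
 *		.dst_addr = fifo_dma_addr,
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
 *		.dst_maxburst = 32,
 *	};
 *	struct dma_async_tx_descriptor *tx;
 *
 *	dmaengine_slave_config(chan, &cfg);
 *	tx = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_MEM_TO_DEV,
 *				     DMA_PREP_INTERRUPT);
 *	if (tx) {
 *		dmaengine_submit(tx);
 *		dma_async_issue_pending(chan);
 *	}
 */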

static void hsu_dma_issue_pending(struct dma_chan *chan)
{
	struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&hsuc->vchan.lock, flags);
	if (vchan_issue_pending(&hsuc->vchan) && !hsuc->desc)
		hsu_dma_start_transfer(hsuc);
	spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
}
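
/*
 * Residue of the active descriptor: sum the scatter-gather entries that
 * have not been submitted to the hardware yet, then add whatever is
 * left in the hardware sub-descriptors (read back from the DxTSR
 * registers).
 */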
static size_t hsu_dma_active_desc_size(struct hsu_dma_chan *hsuc)
{
	struct hsu_dma_desc *desc = hsuc->desc;
	size_t bytes = 0;
	int i;

	for (i = desc->active; i < desc->nents; i++)
		bytes += desc->sg[i].len;

	i = HSU_DMA_CHAN_NR_DESC - 1;
	do {
		bytes += hsu_chan_readl(hsuc, HSU_CH_DxTSR(i));
	} while (--i >= 0);

	return bytes;
}

static enum dma_status hsu_dma_tx_status(struct dma_chan *chan,
	dma_cookie_t cookie, struct dma_tx_state *state)
{
	struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
	struct virt_dma_desc *vdesc;
	enum dma_status status;
	size_t bytes;
	unsigned long flags;

	status = dma_cookie_status(chan, cookie, state);
	if (status == DMA_COMPLETE)
		return status;

	spin_lock_irqsave(&hsuc->vchan.lock, flags);
	vdesc = vchan_find_desc(&hsuc->vchan, cookie);
	if (hsuc->desc && cookie == hsuc->desc->vdesc.tx.cookie) {
		bytes = hsu_dma_active_desc_size(hsuc);
		dma_set_residue(state, bytes);
		status = hsuc->desc->status;
	} else if (vdesc) {
		bytes = to_hsu_dma_desc(vdesc)->length;
		dma_set_residue(state, bytes);
	}
	spin_unlock_irqrestore(&hsuc->vchan.lock, flags);

	return status;
}

static int hsu_dma_slave_config(struct dma_chan *chan,
				struct dma_slave_config *config)
{
	struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);

	memcpy(&hsuc->config, config, sizeof(hsuc->config));

	return 0;
}

static int hsu_dma_pause(struct dma_chan *chan)
{
	struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&hsuc->vchan.lock, flags);
	if (hsuc->desc && hsuc->desc->status == DMA_IN_PROGRESS) {
		hsu_chan_disable(hsuc);
		hsuc->desc->status = DMA_PAUSED;
	}
	spin_unlock_irqrestore(&hsuc->vchan.lock, flags);

	return 0;
}

static int hsu_dma_resume(struct dma_chan *chan)
{
	struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&hsuc->vchan.lock, flags);
	if (hsuc->desc && hsuc->desc->status == DMA_PAUSED) {
		hsuc->desc->status = DMA_IN_PROGRESS;
		hsu_chan_enable(hsuc);
	}
	spin_unlock_irqrestore(&hsuc->vchan.lock, flags);

	return 0;
}

static int hsu_dma_terminate_all(struct dma_chan *chan)
{
	struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&hsuc->vchan.lock, flags);

	hsu_dma_stop_channel(hsuc);
	if (hsuc->desc) {
		hsu_dma_desc_free(&hsuc->desc->vdesc);
		hsuc->desc = NULL;
	}

	vchan_get_all_descriptors(&hsuc->vchan, &head);
	spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
	vchan_dma_desc_free_list(&hsuc->vchan, &head);

	return 0;
}

static void hsu_dma_free_chan_resources(struct dma_chan *chan)
{
	vchan_free_chan_resources(to_virt_chan(chan));
}

static void hsu_dma_synchronize(struct dma_chan *chan)
{
	struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);

	vchan_synchronize(&hsuc->vchan);
}
4262abc66cdSAndy Shevchenko 
hsu_dma_probe(struct hsu_dma_chip * chip)4272b49e0c5SAndy Shevchenko int hsu_dma_probe(struct hsu_dma_chip *chip)
4282b49e0c5SAndy Shevchenko {
4292b49e0c5SAndy Shevchenko 	struct hsu_dma *hsu;
4302b49e0c5SAndy Shevchenko 	void __iomem *addr = chip->regs + chip->offset;
4312b49e0c5SAndy Shevchenko 	unsigned short i;
4322b49e0c5SAndy Shevchenko 	int ret;
4332b49e0c5SAndy Shevchenko 
4342b49e0c5SAndy Shevchenko 	hsu = devm_kzalloc(chip->dev, sizeof(*hsu), GFP_KERNEL);
4352b49e0c5SAndy Shevchenko 	if (!hsu)
4362b49e0c5SAndy Shevchenko 		return -ENOMEM;
4372b49e0c5SAndy Shevchenko 
4382b49e0c5SAndy Shevchenko 	chip->hsu = hsu;
4392b49e0c5SAndy Shevchenko 
4404c97ad99SHeikki Krogerus 	/* Calculate nr_channels from the IO space length */
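	/*
	 * For example (illustrative numbers only, assuming the 0x100-byte
	 * per-channel register block from hsu.h): a 0x800-byte region with
	 * the DMA block at offset 0x400 yields (0x800 - 0x400) / 0x100 = 4
	 * channels, i.e. two UART ports.
	 */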
	hsu->nr_channels = (chip->length - chip->offset) / HSU_DMA_CHAN_LENGTH;

	hsu->chan = devm_kcalloc(chip->dev, hsu->nr_channels,
				 sizeof(*hsu->chan), GFP_KERNEL);
	if (!hsu->chan)
		return -ENOMEM;

	INIT_LIST_HEAD(&hsu->dma.channels);
	for (i = 0; i < hsu->nr_channels; i++) {
		struct hsu_dma_chan *hsuc = &hsu->chan[i];

		hsuc->vchan.desc_free = hsu_dma_desc_free;
		vchan_init(&hsuc->vchan, &hsu->dma);

		hsuc->direction = (i & 0x1) ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;
		hsuc->reg = addr + i * HSU_DMA_CHAN_LENGTH;
	}

	dma_cap_set(DMA_SLAVE, hsu->dma.cap_mask);
	dma_cap_set(DMA_PRIVATE, hsu->dma.cap_mask);

	hsu->dma.device_free_chan_resources = hsu_dma_free_chan_resources;

	hsu->dma.device_prep_slave_sg = hsu_dma_prep_slave_sg;

	hsu->dma.device_issue_pending = hsu_dma_issue_pending;
	hsu->dma.device_tx_status = hsu_dma_tx_status;

	hsu->dma.device_config = hsu_dma_slave_config;
	hsu->dma.device_pause = hsu_dma_pause;
	hsu->dma.device_resume = hsu_dma_resume;
	hsu->dma.device_terminate_all = hsu_dma_terminate_all;
	hsu->dma.device_synchronize = hsu_dma_synchronize;

	hsu->dma.src_addr_widths = HSU_DMA_BUSWIDTHS;
	hsu->dma.dst_addr_widths = HSU_DMA_BUSWIDTHS;
	hsu->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	hsu->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

	hsu->dma.dev = chip->dev;

	dma_set_max_seg_size(hsu->dma.dev, HSU_CH_DxTSR_MASK);

	ret = dma_async_device_register(&hsu->dma);
	if (ret)
		return ret;

	dev_info(chip->dev, "Found HSU DMA, %d channels\n", hsu->nr_channels);
	return 0;
}
EXPORT_SYMBOL_GPL(hsu_dma_probe);

int hsu_dma_remove(struct hsu_dma_chip *chip)
{
	struct hsu_dma *hsu = chip->hsu;
	unsigned short i;

	dma_async_device_unregister(&hsu->dma);

	for (i = 0; i < hsu->nr_channels; i++) {
		struct hsu_dma_chan *hsuc = &hsu->chan[i];

		tasklet_kill(&hsuc->vchan.task);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(hsu_dma_remove);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("High Speed UART DMA core driver");
MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");