xref: /openbmc/linux/drivers/net/wan/fsl_ucc_hdlc.c (revision f291209eca5eba0b4704fa0832af57b12dbc1a02)
12874c5fdSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-or-later
2c19b6d24SZhao Qiang /* Freescale QUICC Engine HDLC Device Driver
3c19b6d24SZhao Qiang  *
4c19b6d24SZhao Qiang  * Copyright 2016 Freescale Semiconductor Inc.
5c19b6d24SZhao Qiang  */
6c19b6d24SZhao Qiang 
7c19b6d24SZhao Qiang #include <linux/delay.h>
8c19b6d24SZhao Qiang #include <linux/dma-mapping.h>
9c19b6d24SZhao Qiang #include <linux/hdlc.h>
10c19b6d24SZhao Qiang #include <linux/init.h>
11c19b6d24SZhao Qiang #include <linux/interrupt.h>
12c19b6d24SZhao Qiang #include <linux/io.h>
13c19b6d24SZhao Qiang #include <linux/irq.h>
14c19b6d24SZhao Qiang #include <linux/kernel.h>
15c19b6d24SZhao Qiang #include <linux/module.h>
16c19b6d24SZhao Qiang #include <linux/netdevice.h>
17c19b6d24SZhao Qiang #include <linux/of_address.h>
18c19b6d24SZhao Qiang #include <linux/of_irq.h>
19c19b6d24SZhao Qiang #include <linux/of_platform.h>
20c19b6d24SZhao Qiang #include <linux/platform_device.h>
21c19b6d24SZhao Qiang #include <linux/sched.h>
22c19b6d24SZhao Qiang #include <linux/skbuff.h>
23c19b6d24SZhao Qiang #include <linux/slab.h>
24c19b6d24SZhao Qiang #include <linux/spinlock.h>
25c19b6d24SZhao Qiang #include <linux/stddef.h>
26c19b6d24SZhao Qiang #include <soc/fsl/qe/qe_tdm.h>
27c19b6d24SZhao Qiang #include <uapi/linux/if_arp.h>
28c19b6d24SZhao Qiang 
29c19b6d24SZhao Qiang #include "fsl_ucc_hdlc.h"
30c19b6d24SZhao Qiang 
31c19b6d24SZhao Qiang #define DRV_DESC "Freescale QE UCC HDLC Driver"
32c19b6d24SZhao Qiang #define DRV_NAME "ucc_hdlc"
33c19b6d24SZhao Qiang 
34c19b6d24SZhao Qiang #define TDM_PPPOHT_SLIC_MAXIN
35ba59d570SMathias Thore #define RX_BD_ERRORS (R_CD_S | R_OV_S | R_CR_S | R_AB_S | R_NO_S | R_LG_S)
36c19b6d24SZhao Qiang 
37*a59addacSAlexandra Diupina static int uhdlc_close(struct net_device *dev);
38*a59addacSAlexandra Diupina 
/* Default UCC/TDM configuration template: HDLC protocol mode, NRZ
 * encoding both directions, 16-bit CRC, 256-byte Rx/Tx FIFOs.  Used as
 * the initial contents of the per-UCC utdm_info[] entries below.
 */
static struct ucc_tdm_info utdm_primary_info = {
	.uf_info = {
		.tsa = 0,
		.cdp = 0,
		.cds = 1,
		.ctsp = 1,
		.ctss = 1,
		.revd = 0,
		/* FIFO sizes and thresholds (bytes) */
		.urfs = 256,
		.utfs = 256,
		.urfet = 128,
		.urfset = 192,
		.utfet = 128,
		.utftt = 0x40,
		.ufpt = 256,
		.mode = UCC_FAST_PROTOCOL_MODE_HDLC,
		.ttx_trx = UCC_FAST_GUMR_TRANSPARENT_TTX_TRX_NORMAL,
		.tenc = UCC_FAST_TX_ENCODING_NRZ,
		.renc = UCC_FAST_RX_ENCODING_NRZ,
		.tcrc = UCC_FAST_16_BIT_CRC,
		.synl = UCC_FAST_SYNC_LEN_NOT_USED,
	},

	.si_info = {
	/* TDM_PPPOHT_SLIC_MAXIN is defined above, so the first branch is
	 * the active one: Rx/Tx frame-sync delays of 1 and 2 clocks.
	 */
#ifdef TDM_PPPOHT_SLIC_MAXIN
		.simr_rfsd = 1,
		.simr_tfsd = 2,
#else
		.simr_rfsd = 0,
		.simr_tfsd = 0,
#endif
		.simr_crt = 0,
		.simr_sl = 0,
		.simr_ce = 1,
		.simr_fe = 1,
		.simr_gm = 0,
	},
};
77c19b6d24SZhao Qiang 
/* Per-UCC configuration slots, one per possible UCC instance;
 * presumably initialized from utdm_primary_info at probe time
 * (probe code is outside this chunk — verify against caller).
 */
static struct ucc_tdm_info utdm_info[UCC_MAX_NUM];
79c19b6d24SZhao Qiang 
/* One-time hardware and queue initialization for a UCC HDLC device.
 *
 * Sets up the UCC fast controller (optionally TSA, loopback or
 * open-drain hdlc-bus mode), allocates the Rx/Tx buffer-descriptor
 * rings and the shared data buffer from DMA-coherent memory, the
 * parameter RAM and the Rx/Tx internal temp-data pointers from MURAM,
 * programs the parameter RAM, and primes both BD rings.
 *
 * Returns 0 on success or a negative errno; on failure every resource
 * acquired so far is released via the goto-unwind chain at the end.
 */
static int uhdlc_init(struct ucc_hdlc_private *priv)
{
	struct ucc_tdm_info *ut_info;
	struct ucc_fast_info *uf_info;
	u32 cecr_subblock;
	u16 bd_status;
	int ret, i;
	void *bd_buffer;
	dma_addr_t bd_dma_addr;
	s32 riptr;	/* MURAM offsets; negative means alloc failure */
	s32 tiptr;
	u32 gumr;

	ut_info = priv->ut_info;
	uf_info = &ut_info->uf_info;

	/* TSA mode needs CDS/CTSP/CTSS asserted; clear them otherwise */
	if (priv->tsa) {
		uf_info->tsa = 1;
		uf_info->ctsp = 1;
		uf_info->cds = 1;
		uf_info->ctss = 1;
	} else {
		uf_info->cds = 0;
		uf_info->ctsp = 0;
		uf_info->ctss = 0;
	}

	/* This sets HPM register in CMXUCR register which configures a
	 * open drain connected HDLC bus
	 */
	if (priv->hdlc_bus)
		uf_info->brkpt_support = 1;

	/* Interrupt sources we care about: Rx buffer/frame and Tx buffer */
	uf_info->uccm_mask = ((UCC_HDLC_UCCE_RXB | UCC_HDLC_UCCE_RXF |
				UCC_HDLC_UCCE_TXB) << 16);

	ret = ucc_fast_init(uf_info, &priv->uccf);
	if (ret) {
		dev_err(priv->dev, "Failed to init uccf.");
		return ret;
	}

	priv->uf_regs = priv->uccf->uf_regs;
	/* keep both directions disabled until open() re-enables them */
	ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);

	/* Loopback mode */
	if (priv->loopback) {
		dev_info(priv->dev, "Loopback Mode\n");
		/* use the same clock when work in loopback */
		qe_setbrg(ut_info->uf_info.rx_clock, 20000000, 1);

		gumr = ioread32be(&priv->uf_regs->gumr);
		gumr |= (UCC_FAST_GUMR_LOOPBACK | UCC_FAST_GUMR_CDS |
			 UCC_FAST_GUMR_TCI);
		gumr &= ~(UCC_FAST_GUMR_CTSP | UCC_FAST_GUMR_RSYN);
		iowrite32be(gumr, &priv->uf_regs->gumr);
	}

	/* Initialize SI */
	if (priv->tsa)
		ucc_tdm_init(priv->utdm, priv->ut_info);

	/* Write to QE CECR, UCCx channel to Stop Transmission */
	cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
	/* NOTE(review): this ret is never checked and is overwritten by
	 * later assignments — confirm that ignoring a QE_STOP_TX failure
	 * here is intentional.
	 */
	ret = qe_issue_cmd(QE_STOP_TX, cecr_subblock,
			   QE_CR_PROTOCOL_UNSPECIFIED, 0);

	/* Set UPSMR normal mode (need fixed)*/
	iowrite32be(0, &priv->uf_regs->upsmr);

	/* hdlc_bus mode */
	if (priv->hdlc_bus) {
		u32 upsmr;

		dev_info(priv->dev, "HDLC bus Mode\n");
		upsmr = ioread32be(&priv->uf_regs->upsmr);

		/* bus mode and retransmit enable, with collision window
		 * set to 8 bytes
		 */
		upsmr |= UCC_HDLC_UPSMR_RTE | UCC_HDLC_UPSMR_BUS |
				UCC_HDLC_UPSMR_CW8;
		iowrite32be(upsmr, &priv->uf_regs->upsmr);

		/* explicitly disable CDS & CTSP */
		gumr = ioread32be(&priv->uf_regs->gumr);
		gumr &= ~(UCC_FAST_GUMR_CDS | UCC_FAST_GUMR_CTSP);
		/* set automatic sync to explicitly ignore CD signal */
		gumr |= UCC_FAST_GUMR_SYNL_AUTO;
		iowrite32be(gumr, &priv->uf_regs->gumr);
	}

	priv->rx_ring_size = RX_BD_RING_LEN;
	priv->tx_ring_size = TX_BD_RING_LEN;
	/* Alloc Rx BD */
	priv->rx_bd_base = dma_alloc_coherent(priv->dev,
			RX_BD_RING_LEN * sizeof(struct qe_bd),
			&priv->dma_rx_bd, GFP_KERNEL);

	if (!priv->rx_bd_base) {
		dev_err(priv->dev, "Cannot allocate MURAM memory for RxBDs\n");
		ret = -ENOMEM;
		goto free_uccf;
	}

	/* Alloc Tx BD */
	priv->tx_bd_base = dma_alloc_coherent(priv->dev,
			TX_BD_RING_LEN * sizeof(struct qe_bd),
			&priv->dma_tx_bd, GFP_KERNEL);

	if (!priv->tx_bd_base) {
		dev_err(priv->dev, "Cannot allocate MURAM memory for TxBDs\n");
		ret = -ENOMEM;
		goto free_rx_bd;
	}

	/* Alloc parameter ram for ucc hdlc */
	priv->ucc_pram_offset = qe_muram_alloc(sizeof(struct ucc_hdlc_param),
				ALIGNMENT_OF_UCC_HDLC_PRAM);

	if (priv->ucc_pram_offset < 0) {
		dev_err(priv->dev, "Can not allocate MURAM for hdlc parameter.\n");
		ret = -ENOMEM;
		goto free_tx_bd;
	}

	/* skb pointer arrays mirroring the BD rings */
	priv->rx_skbuff = kcalloc(priv->rx_ring_size,
				  sizeof(*priv->rx_skbuff),
				  GFP_KERNEL);
	if (!priv->rx_skbuff) {
		ret = -ENOMEM;
		goto free_ucc_pram;
	}

	priv->tx_skbuff = kcalloc(priv->tx_ring_size,
				  sizeof(*priv->tx_skbuff),
				  GFP_KERNEL);
	if (!priv->tx_skbuff) {
		ret = -ENOMEM;
		goto free_rx_skbuff;
	}

	/* start both rings at slot 0 */
	priv->skb_curtx = 0;
	priv->skb_dirtytx = 0;
	priv->curtx_bd = priv->tx_bd_base;
	priv->dirty_tx = priv->tx_bd_base;
	priv->currx_bd = priv->rx_bd_base;
	priv->currx_bdnum = 0;

	/* init parameter base */
	cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
	ret = qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, cecr_subblock,
			   QE_CR_PROTOCOL_UNSPECIFIED, priv->ucc_pram_offset);

	priv->ucc_pram = (struct ucc_hdlc_param __iomem *)
					qe_muram_addr(priv->ucc_pram_offset);

	/* Zero out parameter ram */
	memset_io(priv->ucc_pram, 0, sizeof(struct ucc_hdlc_param));

	/* Alloc riptr, tiptr */
	riptr = qe_muram_alloc(32, 32);
	if (riptr < 0) {
		dev_err(priv->dev, "Cannot allocate MURAM mem for Receive internal temp data pointer\n");
		ret = -ENOMEM;
		goto free_tx_skbuff;
	}

	tiptr = qe_muram_alloc(32, 32);
	if (tiptr < 0) {
		dev_err(priv->dev, "Cannot allocate MURAM mem for Transmit internal temp data pointer\n");
		ret = -ENOMEM;
		goto free_riptr;
	}
	/* riptr/tiptr are written into 16-bit registers below, so the
	 * MURAM offsets must fit in 16 bits.
	 */
	if (riptr != (u16)riptr || tiptr != (u16)tiptr) {
		dev_err(priv->dev, "MURAM allocation out of addressable range\n");
		ret = -ENOMEM;
		goto free_tiptr;
	}

	/* Set RIPTR, TIPTR */
	iowrite16be(riptr, &priv->ucc_pram->riptr);
	iowrite16be(tiptr, &priv->ucc_pram->tiptr);

	/* Set MRBLR */
	iowrite16be(MAX_RX_BUF_LENGTH, &priv->ucc_pram->mrblr);

	/* Set RBASE, TBASE */
	iowrite32be(priv->dma_rx_bd, &priv->ucc_pram->rbase);
	iowrite32be(priv->dma_tx_bd, &priv->ucc_pram->tbase);

	/* Set RSTATE, TSTATE */
	iowrite32be(BMR_GBL | BMR_BIG_ENDIAN, &priv->ucc_pram->rstate);
	iowrite32be(BMR_GBL | BMR_BIG_ENDIAN, &priv->ucc_pram->tstate);

	/* Set C_MASK, C_PRES for 16bit CRC */
	iowrite32be(CRC_16BIT_MASK, &priv->ucc_pram->c_mask);
	iowrite32be(CRC_16BIT_PRES, &priv->ucc_pram->c_pres);

	iowrite16be(MAX_FRAME_LENGTH, &priv->ucc_pram->mflr);
	iowrite16be(DEFAULT_RFTHR, &priv->ucc_pram->rfthr);
	iowrite16be(DEFAULT_RFTHR, &priv->ucc_pram->rfcnt);
	iowrite16be(priv->hmask, &priv->ucc_pram->hmask);
	iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr1);
	iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr2);
	iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr3);
	iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr4);

	/* Get BD buffer: one contiguous DMA area holding the Rx buffers
	 * followed by the Tx buffers, MAX_RX_BUF_LENGTH bytes per BD.
	 */
	bd_buffer = dma_alloc_coherent(priv->dev,
				       (RX_BD_RING_LEN + TX_BD_RING_LEN) * MAX_RX_BUF_LENGTH,
				       &bd_dma_addr, GFP_KERNEL);

	if (!bd_buffer) {
		dev_err(priv->dev, "Could not allocate buffer descriptors\n");
		ret = -ENOMEM;
		goto free_tiptr;
	}

	priv->rx_buffer = bd_buffer;
	priv->tx_buffer = bd_buffer + RX_BD_RING_LEN * MAX_RX_BUF_LENGTH;

	priv->dma_rx_addr = bd_dma_addr;
	priv->dma_tx_addr = bd_dma_addr + RX_BD_RING_LEN * MAX_RX_BUF_LENGTH;

	/* Prime the Rx ring: all BDs empty (R_E), interrupt on receive
	 * (R_I), last BD wraps (R_W).
	 */
	for (i = 0; i < RX_BD_RING_LEN; i++) {
		if (i < (RX_BD_RING_LEN - 1))
			bd_status = R_E_S | R_I_S;
		else
			bd_status = R_E_S | R_I_S | R_W_S;

		priv->rx_bd_base[i].status = cpu_to_be16(bd_status);
		priv->rx_bd_base[i].buf = cpu_to_be32(priv->dma_rx_addr + i * MAX_RX_BUF_LENGTH);
	}

	/* Prime the Tx ring: interrupt (T_I) and CRC append (T_TC) on
	 * every BD, wrap bit on the last; T_R stays clear (host-owned).
	 */
	for (i = 0; i < TX_BD_RING_LEN; i++) {
		if (i < (TX_BD_RING_LEN - 1))
			bd_status =  T_I_S | T_TC_S;
		else
			bd_status =  T_I_S | T_TC_S | T_W_S;

		priv->tx_bd_base[i].status = cpu_to_be16(bd_status);
		priv->tx_bd_base[i].buf = cpu_to_be32(priv->dma_tx_addr + i * MAX_RX_BUF_LENGTH);
	}
	/* make BD writes visible to the QE before any use */
	dma_wmb();

	return 0;

free_tiptr:
	qe_muram_free(tiptr);
free_riptr:
	qe_muram_free(riptr);
free_tx_skbuff:
	kfree(priv->tx_skbuff);
free_rx_skbuff:
	kfree(priv->rx_skbuff);
free_ucc_pram:
	qe_muram_free(priv->ucc_pram_offset);
free_tx_bd:
	dma_free_coherent(priv->dev,
			  TX_BD_RING_LEN * sizeof(struct qe_bd),
			  priv->tx_bd_base, priv->dma_tx_bd);
free_rx_bd:
	dma_free_coherent(priv->dev,
			  RX_BD_RING_LEN * sizeof(struct qe_bd),
			  priv->rx_bd_base, priv->dma_rx_bd);
free_uccf:
	ucc_fast_free(priv->uccf);

	return ret;
}
351c19b6d24SZhao Qiang 
ucc_hdlc_tx(struct sk_buff * skb,struct net_device * dev)352c19b6d24SZhao Qiang static netdev_tx_t ucc_hdlc_tx(struct sk_buff *skb, struct net_device *dev)
353c19b6d24SZhao Qiang {
354c19b6d24SZhao Qiang 	hdlc_device *hdlc = dev_to_hdlc(dev);
355c19b6d24SZhao Qiang 	struct ucc_hdlc_private *priv = (struct ucc_hdlc_private *)hdlc->priv;
3565cf46d8eSChristophe Leroy 	struct qe_bd *bd;
357c19b6d24SZhao Qiang 	u16 bd_status;
358c19b6d24SZhao Qiang 	unsigned long flags;
3595cf46d8eSChristophe Leroy 	__be16 *proto_head;
360c19b6d24SZhao Qiang 
361c19b6d24SZhao Qiang 	switch (dev->type) {
362c19b6d24SZhao Qiang 	case ARPHRD_RAWHDLC:
363c19b6d24SZhao Qiang 		if (skb_headroom(skb) < HDLC_HEAD_LEN) {
364c19b6d24SZhao Qiang 			dev->stats.tx_dropped++;
365c19b6d24SZhao Qiang 			dev_kfree_skb(skb);
366c19b6d24SZhao Qiang 			netdev_err(dev, "No enough space for hdlc head\n");
367c19b6d24SZhao Qiang 			return -ENOMEM;
368c19b6d24SZhao Qiang 		}
369c19b6d24SZhao Qiang 
370c19b6d24SZhao Qiang 		skb_push(skb, HDLC_HEAD_LEN);
371c19b6d24SZhao Qiang 
3725cf46d8eSChristophe Leroy 		proto_head = (__be16 *)skb->data;
373c19b6d24SZhao Qiang 		*proto_head = htons(DEFAULT_HDLC_HEAD);
374c19b6d24SZhao Qiang 
375c19b6d24SZhao Qiang 		dev->stats.tx_bytes += skb->len;
376c19b6d24SZhao Qiang 		break;
377c19b6d24SZhao Qiang 
378c19b6d24SZhao Qiang 	case ARPHRD_PPP:
3795cf46d8eSChristophe Leroy 		proto_head = (__be16 *)skb->data;
380c19b6d24SZhao Qiang 		if (*proto_head != htons(DEFAULT_PPP_HEAD)) {
381c19b6d24SZhao Qiang 			dev->stats.tx_dropped++;
382c19b6d24SZhao Qiang 			dev_kfree_skb(skb);
383c19b6d24SZhao Qiang 			netdev_err(dev, "Wrong ppp header\n");
384c19b6d24SZhao Qiang 			return -ENOMEM;
385c19b6d24SZhao Qiang 		}
386c19b6d24SZhao Qiang 
387c19b6d24SZhao Qiang 		dev->stats.tx_bytes += skb->len;
388c19b6d24SZhao Qiang 		break;
389c19b6d24SZhao Qiang 
3908978ca7cSDavid Gounaris 	case ARPHRD_ETHER:
3918978ca7cSDavid Gounaris 		dev->stats.tx_bytes += skb->len;
3928978ca7cSDavid Gounaris 		break;
3938978ca7cSDavid Gounaris 
394c19b6d24SZhao Qiang 	default:
395c19b6d24SZhao Qiang 		dev->stats.tx_dropped++;
396c19b6d24SZhao Qiang 		dev_kfree_skb(skb);
397c19b6d24SZhao Qiang 		return -ENOMEM;
398c19b6d24SZhao Qiang 	}
3992e7ad56aSMathias Thore 	netdev_sent_queue(dev, skb->len);
400c19b6d24SZhao Qiang 	spin_lock_irqsave(&priv->lock, flags);
401c19b6d24SZhao Qiang 
4025cf46d8eSChristophe Leroy 	dma_rmb();
403c19b6d24SZhao Qiang 	/* Start from the next BD that should be filled */
404c19b6d24SZhao Qiang 	bd = priv->curtx_bd;
4055cf46d8eSChristophe Leroy 	bd_status = be16_to_cpu(bd->status);
406c19b6d24SZhao Qiang 	/* Save the skb pointer so we can free it later */
407c19b6d24SZhao Qiang 	priv->tx_skbuff[priv->skb_curtx] = skb;
408c19b6d24SZhao Qiang 
409c19b6d24SZhao Qiang 	/* Update the current skb pointer (wrapping if this was the last) */
410c19b6d24SZhao Qiang 	priv->skb_curtx =
411c19b6d24SZhao Qiang 	    (priv->skb_curtx + 1) & TX_RING_MOD_MASK(TX_BD_RING_LEN);
412c19b6d24SZhao Qiang 
413c19b6d24SZhao Qiang 	/* copy skb data to tx buffer for sdma processing */
414c19b6d24SZhao Qiang 	memcpy(priv->tx_buffer + (be32_to_cpu(bd->buf) - priv->dma_tx_addr),
415c19b6d24SZhao Qiang 	       skb->data, skb->len);
416c19b6d24SZhao Qiang 
417c19b6d24SZhao Qiang 	/* set bd status and length */
418c19b6d24SZhao Qiang 	bd_status = (bd_status & T_W_S) | T_R_S | T_I_S | T_L_S | T_TC_S;
419c19b6d24SZhao Qiang 
4205cf46d8eSChristophe Leroy 	bd->length = cpu_to_be16(skb->len);
4215cf46d8eSChristophe Leroy 	bd->status = cpu_to_be16(bd_status);
422c19b6d24SZhao Qiang 
423c19b6d24SZhao Qiang 	/* Move to next BD in the ring */
424c19b6d24SZhao Qiang 	if (!(bd_status & T_W_S))
425c19b6d24SZhao Qiang 		bd += 1;
426c19b6d24SZhao Qiang 	else
427c19b6d24SZhao Qiang 		bd = priv->tx_bd_base;
428c19b6d24SZhao Qiang 
429c19b6d24SZhao Qiang 	if (bd == priv->dirty_tx) {
430c19b6d24SZhao Qiang 		if (!netif_queue_stopped(dev))
431c19b6d24SZhao Qiang 			netif_stop_queue(dev);
432c19b6d24SZhao Qiang 	}
433c19b6d24SZhao Qiang 
434c19b6d24SZhao Qiang 	priv->curtx_bd = bd;
435c19b6d24SZhao Qiang 
436c19b6d24SZhao Qiang 	spin_unlock_irqrestore(&priv->lock, flags);
437c19b6d24SZhao Qiang 
438c19b6d24SZhao Qiang 	return NETDEV_TX_OK;
439c19b6d24SZhao Qiang }
440c19b6d24SZhao Qiang 
hdlc_tx_restart(struct ucc_hdlc_private * priv)441ba59d570SMathias Thore static int hdlc_tx_restart(struct ucc_hdlc_private *priv)
442ba59d570SMathias Thore {
443ba59d570SMathias Thore 	u32 cecr_subblock;
444ba59d570SMathias Thore 
445ba59d570SMathias Thore 	cecr_subblock =
446ba59d570SMathias Thore 		ucc_fast_get_qe_cr_subblock(priv->ut_info->uf_info.ucc_num);
447ba59d570SMathias Thore 
448ba59d570SMathias Thore 	qe_issue_cmd(QE_RESTART_TX, cecr_subblock,
449ba59d570SMathias Thore 		     QE_CR_PROTOCOL_UNSPECIFIED, 0);
450ba59d570SMathias Thore 	return 0;
451ba59d570SMathias Thore }
452ba59d570SMathias Thore 
/* Reclaim Tx BDs the QE has finished with (T_R clear): account errors,
 * free the corresponding skbs, advance the dirty pointer and wake the
 * queue.  Called from ucc_hdlc_poll() with priv->lock held.
 *
 * Returns 0.
 */
static int hdlc_tx_done(struct ucc_hdlc_private *priv)
{
	/* Start from the next BD that should be filled */
	struct net_device *dev = priv->ndev;
	unsigned int bytes_sent = 0;
	int howmany = 0;
	struct qe_bd *bd;		/* BD pointer */
	u16 bd_status;
	int tx_restart = 0;

	/* see the QE's latest BD status writes */
	dma_rmb();
	bd = priv->dirty_tx;
	bd_status = be16_to_cpu(bd->status);

	/* Normal processing. */
	while ((bd_status & T_R_S) == 0) {
		struct sk_buff *skb;

		if (bd_status & T_UN_S) { /* Underrun */
			dev->stats.tx_fifo_errors++;
			tx_restart = 1;
		}
		if (bd_status & T_CT_S) { /* Carrier lost */
			dev->stats.tx_carrier_errors++;
			tx_restart = 1;
		}

		/* BD contains already transmitted buffer.   */
		/* Handle the transmitted buffer and release */
		/* the BD to be used with the current frame  */

		skb = priv->tx_skbuff[priv->skb_dirtytx];
		if (!skb)
			break;
		howmany++;
		bytes_sent += skb->len;
		dev->stats.tx_packets++;
		/* scrub the DMA buffer before the BD is reused */
		memset(priv->tx_buffer +
		       (be32_to_cpu(bd->buf) - priv->dma_tx_addr),
		       0, skb->len);
		dev_consume_skb_irq(skb);

		priv->tx_skbuff[priv->skb_dirtytx] = NULL;
		priv->skb_dirtytx =
		    (priv->skb_dirtytx +
		     1) & TX_RING_MOD_MASK(TX_BD_RING_LEN);

		/* We freed a buffer, so now we can restart transmission */
		if (netif_queue_stopped(dev))
			netif_wake_queue(dev);

		/* Advance the confirmation BD pointer */
		if (!(bd_status & T_W_S))
			bd += 1;
		else
			bd = priv->tx_bd_base;
		bd_status = be16_to_cpu(bd->status);
	}
	priv->dirty_tx = bd;

	/* underrun / carrier loss stops the transmitter; kick it again */
	if (tx_restart)
		hdlc_tx_restart(priv);

	netdev_completed_queue(dev, howmany, bytes_sent);
	return 0;
}
519c19b6d24SZhao Qiang 
hdlc_rx_done(struct ucc_hdlc_private * priv,int rx_work_limit)520c19b6d24SZhao Qiang static int hdlc_rx_done(struct ucc_hdlc_private *priv, int rx_work_limit)
521c19b6d24SZhao Qiang {
522c19b6d24SZhao Qiang 	struct net_device *dev = priv->ndev;
52366bb144bSHolger Brunck 	struct sk_buff *skb = NULL;
524c19b6d24SZhao Qiang 	hdlc_device *hdlc = dev_to_hdlc(dev);
525c19b6d24SZhao Qiang 	struct qe_bd *bd;
52602bb56ddSZhao Qiang 	u16 bd_status;
527c19b6d24SZhao Qiang 	u16 length, howmany = 0;
528c19b6d24SZhao Qiang 	u8 *bdbuffer;
529c19b6d24SZhao Qiang 
5305cf46d8eSChristophe Leroy 	dma_rmb();
531c19b6d24SZhao Qiang 	bd = priv->currx_bd;
5325cf46d8eSChristophe Leroy 	bd_status = be16_to_cpu(bd->status);
533c19b6d24SZhao Qiang 
534c19b6d24SZhao Qiang 	/* while there are received buffers and BD is full (~R_E) */
535c19b6d24SZhao Qiang 	while (!((bd_status & (R_E_S)) || (--rx_work_limit < 0))) {
536ba59d570SMathias Thore 		if (bd_status & (RX_BD_ERRORS)) {
537ba59d570SMathias Thore 			dev->stats.rx_errors++;
538ba59d570SMathias Thore 
539ba59d570SMathias Thore 			if (bd_status & R_CD_S)
540ba59d570SMathias Thore 				dev->stats.collisions++;
541c19b6d24SZhao Qiang 			if (bd_status & R_OV_S)
542ba59d570SMathias Thore 				dev->stats.rx_fifo_errors++;
543ba59d570SMathias Thore 			if (bd_status & R_CR_S)
544c19b6d24SZhao Qiang 				dev->stats.rx_crc_errors++;
545ba59d570SMathias Thore 			if (bd_status & R_AB_S)
546ba59d570SMathias Thore 				dev->stats.rx_over_errors++;
547ba59d570SMathias Thore 			if (bd_status & R_NO_S)
548ba59d570SMathias Thore 				dev->stats.rx_frame_errors++;
549ba59d570SMathias Thore 			if (bd_status & R_LG_S)
550ba59d570SMathias Thore 				dev->stats.rx_length_errors++;
551ba59d570SMathias Thore 
552c19b6d24SZhao Qiang 			goto recycle;
553c19b6d24SZhao Qiang 		}
554c19b6d24SZhao Qiang 		bdbuffer = priv->rx_buffer +
555c19b6d24SZhao Qiang 			(priv->currx_bdnum * MAX_RX_BUF_LENGTH);
5565cf46d8eSChristophe Leroy 		length = be16_to_cpu(bd->length);
557c19b6d24SZhao Qiang 
558c19b6d24SZhao Qiang 		switch (dev->type) {
559c19b6d24SZhao Qiang 		case ARPHRD_RAWHDLC:
560c19b6d24SZhao Qiang 			bdbuffer += HDLC_HEAD_LEN;
561c19b6d24SZhao Qiang 			length -= (HDLC_HEAD_LEN + HDLC_CRC_SIZE);
562c19b6d24SZhao Qiang 
563c19b6d24SZhao Qiang 			skb = dev_alloc_skb(length);
564c19b6d24SZhao Qiang 			if (!skb) {
565c19b6d24SZhao Qiang 				dev->stats.rx_dropped++;
566c19b6d24SZhao Qiang 				return -ENOMEM;
567c19b6d24SZhao Qiang 			}
568c19b6d24SZhao Qiang 
569c19b6d24SZhao Qiang 			skb_put(skb, length);
570c19b6d24SZhao Qiang 			skb->len = length;
571c19b6d24SZhao Qiang 			skb->dev = dev;
572c19b6d24SZhao Qiang 			memcpy(skb->data, bdbuffer, length);
573c19b6d24SZhao Qiang 			break;
574c19b6d24SZhao Qiang 
575c19b6d24SZhao Qiang 		case ARPHRD_PPP:
5768978ca7cSDavid Gounaris 		case ARPHRD_ETHER:
577c19b6d24SZhao Qiang 			length -= HDLC_CRC_SIZE;
578c19b6d24SZhao Qiang 
579c19b6d24SZhao Qiang 			skb = dev_alloc_skb(length);
580c19b6d24SZhao Qiang 			if (!skb) {
581c19b6d24SZhao Qiang 				dev->stats.rx_dropped++;
582c19b6d24SZhao Qiang 				return -ENOMEM;
583c19b6d24SZhao Qiang 			}
584c19b6d24SZhao Qiang 
585c19b6d24SZhao Qiang 			skb_put(skb, length);
586c19b6d24SZhao Qiang 			skb->len = length;
587c19b6d24SZhao Qiang 			skb->dev = dev;
588c19b6d24SZhao Qiang 			memcpy(skb->data, bdbuffer, length);
589c19b6d24SZhao Qiang 			break;
590c19b6d24SZhao Qiang 		}
591c19b6d24SZhao Qiang 
592c19b6d24SZhao Qiang 		dev->stats.rx_packets++;
593c19b6d24SZhao Qiang 		dev->stats.rx_bytes += skb->len;
594c19b6d24SZhao Qiang 		howmany++;
595c19b6d24SZhao Qiang 		if (hdlc->proto)
596c19b6d24SZhao Qiang 			skb->protocol = hdlc_type_trans(skb, dev);
597c19b6d24SZhao Qiang 		netif_receive_skb(skb);
598c19b6d24SZhao Qiang 
599c19b6d24SZhao Qiang recycle:
6005cf46d8eSChristophe Leroy 		bd->status = cpu_to_be16((bd_status & R_W_S) | R_E_S | R_I_S);
601c19b6d24SZhao Qiang 
602c19b6d24SZhao Qiang 		/* update to point at the next bd */
603c19b6d24SZhao Qiang 		if (bd_status & R_W_S) {
604c19b6d24SZhao Qiang 			priv->currx_bdnum = 0;
605c19b6d24SZhao Qiang 			bd = priv->rx_bd_base;
606c19b6d24SZhao Qiang 		} else {
607c19b6d24SZhao Qiang 			if (priv->currx_bdnum < (RX_BD_RING_LEN - 1))
608c19b6d24SZhao Qiang 				priv->currx_bdnum += 1;
609c19b6d24SZhao Qiang 			else
610c19b6d24SZhao Qiang 				priv->currx_bdnum = RX_BD_RING_LEN - 1;
611c19b6d24SZhao Qiang 
612c19b6d24SZhao Qiang 			bd += 1;
613c19b6d24SZhao Qiang 		}
614c19b6d24SZhao Qiang 
6155cf46d8eSChristophe Leroy 		bd_status = be16_to_cpu(bd->status);
616c19b6d24SZhao Qiang 	}
6175cf46d8eSChristophe Leroy 	dma_rmb();
618c19b6d24SZhao Qiang 
619c19b6d24SZhao Qiang 	priv->currx_bd = bd;
620c19b6d24SZhao Qiang 	return howmany;
621c19b6d24SZhao Qiang }
622c19b6d24SZhao Qiang 
ucc_hdlc_poll(struct napi_struct * napi,int budget)623c19b6d24SZhao Qiang static int ucc_hdlc_poll(struct napi_struct *napi, int budget)
624c19b6d24SZhao Qiang {
625c19b6d24SZhao Qiang 	struct ucc_hdlc_private *priv = container_of(napi,
626c19b6d24SZhao Qiang 						     struct ucc_hdlc_private,
627c19b6d24SZhao Qiang 						     napi);
628c19b6d24SZhao Qiang 	int howmany;
629c19b6d24SZhao Qiang 
630c19b6d24SZhao Qiang 	/* Tx event processing */
631c19b6d24SZhao Qiang 	spin_lock(&priv->lock);
632c19b6d24SZhao Qiang 	hdlc_tx_done(priv);
633c19b6d24SZhao Qiang 	spin_unlock(&priv->lock);
634c19b6d24SZhao Qiang 
635c19b6d24SZhao Qiang 	howmany = 0;
636c19b6d24SZhao Qiang 	howmany += hdlc_rx_done(priv, budget - howmany);
637c19b6d24SZhao Qiang 
638c19b6d24SZhao Qiang 	if (howmany < budget) {
6396ad20165SEric Dumazet 		napi_complete_done(napi, howmany);
640d9d95bcaSRasmus Villemoes 		qe_setbits_be32(priv->uccf->p_uccm,
641c19b6d24SZhao Qiang 				(UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS) << 16);
642c19b6d24SZhao Qiang 	}
643c19b6d24SZhao Qiang 
644c19b6d24SZhao Qiang 	return howmany;
645c19b6d24SZhao Qiang }
646c19b6d24SZhao Qiang 
/* UCC interrupt handler: acknowledge the pending, unmasked events,
 * defer Rx/Tx frame processing to NAPI (masking those events until the
 * poll routine re-enables them), and account error events directly.
 */
static irqreturn_t ucc_hdlc_irq_handler(int irq, void *dev_id)
{
	struct ucc_hdlc_private *priv = (struct ucc_hdlc_private *)dev_id;
	struct net_device *dev = priv->ndev;
	struct ucc_fast_private *uccf;
	u32 ucce;
	u32 uccm;

	uccf = priv->uccf;

	/* only consider events that are currently unmasked */
	ucce = ioread32be(uccf->p_ucce);
	uccm = ioread32be(uccf->p_uccm);
	ucce &= uccm;
	/* acknowledge (clear) exactly the events we will handle */
	iowrite32be(ucce, uccf->p_ucce);
	if (!ucce)
		return IRQ_NONE;	/* not ours / spurious */

	if ((ucce >> 16) & (UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS)) {
		if (napi_schedule_prep(&priv->napi)) {
			/* mask Rx/Tx events until the poll completes;
			 * ucc_hdlc_poll() re-enables them
			 */
			uccm &= ~((UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS)
				  << 16);
			iowrite32be(uccm, uccf->p_uccm);
			__napi_schedule(&priv->napi);
		}
	}

	/* Errors and other events */
	if (ucce >> 16 & UCC_HDLC_UCCE_BSY)	/* busy: no empty Rx BD */
		dev->stats.rx_missed_errors++;
	if (ucce >> 16 & UCC_HDLC_UCCE_TXE)	/* Tx error */
		dev->stats.tx_errors++;

	return IRQ_HANDLED;
}
681c19b6d24SZhao Qiang 
uhdlc_ioctl(struct net_device * dev,struct if_settings * ifs)682ad7eab2aSArnd Bergmann static int uhdlc_ioctl(struct net_device *dev, struct if_settings *ifs)
683c19b6d24SZhao Qiang {
684c19b6d24SZhao Qiang 	const size_t size = sizeof(te1_settings);
685c19b6d24SZhao Qiang 	te1_settings line;
686c19b6d24SZhao Qiang 	struct ucc_hdlc_private *priv = netdev_priv(dev);
687c19b6d24SZhao Qiang 
688ad7eab2aSArnd Bergmann 	switch (ifs->type) {
689c19b6d24SZhao Qiang 	case IF_GET_IFACE:
690ad7eab2aSArnd Bergmann 		ifs->type = IF_IFACE_E1;
691ad7eab2aSArnd Bergmann 		if (ifs->size < size) {
692ad7eab2aSArnd Bergmann 			ifs->size = size; /* data size wanted */
693c19b6d24SZhao Qiang 			return -ENOBUFS;
694c19b6d24SZhao Qiang 		}
6952f43b9beSDan Carpenter 		memset(&line, 0, sizeof(line));
696c19b6d24SZhao Qiang 		line.clock_type = priv->clocking;
697c19b6d24SZhao Qiang 
698ad7eab2aSArnd Bergmann 		if (copy_to_user(ifs->ifs_ifsu.sync, &line, size))
699c19b6d24SZhao Qiang 			return -EFAULT;
700c19b6d24SZhao Qiang 		return 0;
701c19b6d24SZhao Qiang 
702c19b6d24SZhao Qiang 	default:
703ad7eab2aSArnd Bergmann 		return hdlc_ioctl(dev, ifs);
704c19b6d24SZhao Qiang 	}
705c19b6d24SZhao Qiang }
706c19b6d24SZhao Qiang 
uhdlc_open(struct net_device * dev)707c19b6d24SZhao Qiang static int uhdlc_open(struct net_device *dev)
708c19b6d24SZhao Qiang {
709c19b6d24SZhao Qiang 	u32 cecr_subblock;
710c19b6d24SZhao Qiang 	hdlc_device *hdlc = dev_to_hdlc(dev);
711c19b6d24SZhao Qiang 	struct ucc_hdlc_private *priv = hdlc->priv;
712c19b6d24SZhao Qiang 	struct ucc_tdm *utdm = priv->utdm;
713*a59addacSAlexandra Diupina 	int rc = 0;
714c19b6d24SZhao Qiang 
715c19b6d24SZhao Qiang 	if (priv->hdlc_busy != 1) {
716c19b6d24SZhao Qiang 		if (request_irq(priv->ut_info->uf_info.irq,
717c19b6d24SZhao Qiang 				ucc_hdlc_irq_handler, 0, "hdlc", priv))
718c19b6d24SZhao Qiang 			return -ENODEV;
719c19b6d24SZhao Qiang 
720c19b6d24SZhao Qiang 		cecr_subblock = ucc_fast_get_qe_cr_subblock(
721c19b6d24SZhao Qiang 					priv->ut_info->uf_info.ucc_num);
722c19b6d24SZhao Qiang 
723c19b6d24SZhao Qiang 		qe_issue_cmd(QE_INIT_TX_RX, cecr_subblock,
724c19b6d24SZhao Qiang 			     QE_CR_PROTOCOL_UNSPECIFIED, 0);
725c19b6d24SZhao Qiang 
726c19b6d24SZhao Qiang 		ucc_fast_enable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);
727c19b6d24SZhao Qiang 
728c19b6d24SZhao Qiang 		/* Enable the TDM port */
729c19b6d24SZhao Qiang 		if (priv->tsa)
7305cf46d8eSChristophe Leroy 			qe_setbits_8(&utdm->si_regs->siglmr1_h, 0x1 << utdm->tdm_port);
731c19b6d24SZhao Qiang 
732c19b6d24SZhao Qiang 		priv->hdlc_busy = 1;
733c19b6d24SZhao Qiang 		netif_device_attach(priv->ndev);
734c19b6d24SZhao Qiang 		napi_enable(&priv->napi);
7352e7ad56aSMathias Thore 		netdev_reset_queue(dev);
736c19b6d24SZhao Qiang 		netif_start_queue(dev);
737*a59addacSAlexandra Diupina 
738*a59addacSAlexandra Diupina 		rc = hdlc_open(dev);
739*a59addacSAlexandra Diupina 		if (rc)
740*a59addacSAlexandra Diupina 			uhdlc_close(dev);
741c19b6d24SZhao Qiang 	}
742c19b6d24SZhao Qiang 
743*a59addacSAlexandra Diupina 	return rc;
744c19b6d24SZhao Qiang }
745c19b6d24SZhao Qiang 
/* Release every resource allocated by uhdlc_init(): the Rx/Tx internal
 * pointers held in MURAM, the BD rings and data buffers (coherent DMA),
 * the parameter RAM, the skb bookkeeping arrays, the register mapping
 * and the ucc_fast instance.  Safe to call with partially-initialized
 * state: each pointer is checked and cleared after freeing.
 */
static void uhdlc_memclean(struct ucc_hdlc_private *priv)
{
	/* riptr/tiptr are MURAM offsets stored (big-endian) in the
	 * parameter RAM; read them back to free the MURAM areas.
	 */
	qe_muram_free(ioread16be(&priv->ucc_pram->riptr));
	qe_muram_free(ioread16be(&priv->ucc_pram->tiptr));

	if (priv->rx_bd_base) {
		dma_free_coherent(priv->dev,
				  RX_BD_RING_LEN * sizeof(struct qe_bd),
				  priv->rx_bd_base, priv->dma_rx_bd);

		priv->rx_bd_base = NULL;
		priv->dma_rx_bd = 0;
	}

	if (priv->tx_bd_base) {
		dma_free_coherent(priv->dev,
				  TX_BD_RING_LEN * sizeof(struct qe_bd),
				  priv->tx_bd_base, priv->dma_tx_bd);

		priv->tx_bd_base = NULL;
		priv->dma_tx_bd = 0;
	}

	if (priv->ucc_pram) {
		qe_muram_free(priv->ucc_pram_offset);
		priv->ucc_pram = NULL;
		priv->ucc_pram_offset = 0;
	 }

	/* kfree(NULL) is a no-op, so no checks needed here */
	kfree(priv->rx_skbuff);
	priv->rx_skbuff = NULL;

	kfree(priv->tx_skbuff);
	priv->tx_skbuff = NULL;

	if (priv->uf_regs) {
		iounmap(priv->uf_regs);
		priv->uf_regs = NULL;
	}

	if (priv->uccf) {
		ucc_fast_free(priv->uccf);
		priv->uccf = NULL;
	}

	if (priv->rx_buffer) {
		dma_free_coherent(priv->dev,
				  RX_BD_RING_LEN * MAX_RX_BUF_LENGTH,
				  priv->rx_buffer, priv->dma_rx_addr);
		priv->rx_buffer = NULL;
		priv->dma_rx_addr = 0;
	}

	if (priv->tx_buffer) {
		dma_free_coherent(priv->dev,
				  TX_BD_RING_LEN * MAX_RX_BUF_LENGTH,
				  priv->tx_buffer, priv->dma_tx_addr);
		priv->tx_buffer = NULL;
		priv->dma_tx_addr = 0;
	}
}
807c19b6d24SZhao Qiang 
/* net_device stop callback - mirror of uhdlc_open().
 *
 * Disables NAPI, gracefully stops the QE Tx channel and closes the Rx
 * BDs, disables the TDM port and the fast controller, releases the IRQ
 * and marks the device idle.  Also called from uhdlc_open()'s error
 * path when hdlc_open() fails.
 */
static int uhdlc_close(struct net_device *dev)
{
	struct ucc_hdlc_private *priv = dev_to_hdlc(dev)->priv;
	struct ucc_tdm *utdm = priv->utdm;
	u32 cecr_subblock;

	napi_disable(&priv->napi);
	cecr_subblock = ucc_fast_get_qe_cr_subblock(
				priv->ut_info->uf_info.ucc_num);

	/* Let in-flight transmission finish before stopping */
	qe_issue_cmd(QE_GRACEFUL_STOP_TX, cecr_subblock,
		     (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);
	qe_issue_cmd(QE_CLOSE_RX_BD, cecr_subblock,
		     (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);

	/* Disable the TDM port (counterpart of the enable in open) */
	if (priv->tsa)
		qe_clrbits_8(&utdm->si_regs->siglmr1_h, 0x1 << utdm->tdm_port);

	ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);

	free_irq(priv->ut_info->uf_info.irq, priv);
	netif_stop_queue(dev);
	netdev_reset_queue(dev);
	priv->hdlc_busy = 0;

	hdlc_close(dev);

	return 0;
}
837c19b6d24SZhao Qiang 
ucc_hdlc_attach(struct net_device * dev,unsigned short encoding,unsigned short parity)838c19b6d24SZhao Qiang static int ucc_hdlc_attach(struct net_device *dev, unsigned short encoding,
839c19b6d24SZhao Qiang 			   unsigned short parity)
840c19b6d24SZhao Qiang {
841c19b6d24SZhao Qiang 	struct ucc_hdlc_private *priv = dev_to_hdlc(dev)->priv;
842c19b6d24SZhao Qiang 
843c19b6d24SZhao Qiang 	if (encoding != ENCODING_NRZ &&
844c19b6d24SZhao Qiang 	    encoding != ENCODING_NRZI)
845c19b6d24SZhao Qiang 		return -EINVAL;
846c19b6d24SZhao Qiang 
847c19b6d24SZhao Qiang 	if (parity != PARITY_NONE &&
848c19b6d24SZhao Qiang 	    parity != PARITY_CRC32_PR1_CCITT &&
84943a78e0eSDavid Gounaris 	    parity != PARITY_CRC16_PR0_CCITT &&
850c19b6d24SZhao Qiang 	    parity != PARITY_CRC16_PR1_CCITT)
851c19b6d24SZhao Qiang 		return -EINVAL;
852c19b6d24SZhao Qiang 
853c19b6d24SZhao Qiang 	priv->encoding = encoding;
854c19b6d24SZhao Qiang 	priv->parity = parity;
855c19b6d24SZhao Qiang 
856c19b6d24SZhao Qiang 	return 0;
857c19b6d24SZhao Qiang }
858c19b6d24SZhao Qiang 
859c19b6d24SZhao Qiang #ifdef CONFIG_PM
/* Snapshot the QE mux clock routing registers into priv so that
 * resume_clk_config() can restore them after a suspend cycle.
 */
static void store_clk_config(struct ucc_hdlc_private *priv)
{
	struct qe_mux __iomem *qe_mux_reg = &qe_immr->qmx;

	/* store si clk */
	priv->cmxsi1cr_h = ioread32be(&qe_mux_reg->cmxsi1cr_h);
	priv->cmxsi1cr_l = ioread32be(&qe_mux_reg->cmxsi1cr_l);

	/* store si sync */
	priv->cmxsi1syr = ioread32be(&qe_mux_reg->cmxsi1syr);

	/* store ucc clk */
	memcpy_fromio(priv->cmxucr, qe_mux_reg->cmxucr, 4 * sizeof(u32));
}
874c19b6d24SZhao Qiang 
/* Write back the clock routing saved by store_clk_config(): UCC clock
 * mux first, then SI clock and sync registers.
 */
static void resume_clk_config(struct ucc_hdlc_private *priv)
{
	struct qe_mux __iomem *qe_mux_reg = &qe_immr->qmx;

	memcpy_toio(qe_mux_reg->cmxucr, priv->cmxucr, 4 * sizeof(u32));

	iowrite32be(priv->cmxsi1cr_h, &qe_mux_reg->cmxsi1cr_h);
	iowrite32be(priv->cmxsi1cr_l, &qe_mux_reg->cmxsi1cr_l);

	iowrite32be(priv->cmxsi1syr, &qe_mux_reg->cmxsi1syr);
}
886c19b6d24SZhao Qiang 
uhdlc_suspend(struct device * dev)887c19b6d24SZhao Qiang static int uhdlc_suspend(struct device *dev)
888c19b6d24SZhao Qiang {
889c19b6d24SZhao Qiang 	struct ucc_hdlc_private *priv = dev_get_drvdata(dev);
890c19b6d24SZhao Qiang 	struct ucc_fast __iomem *uf_regs;
891c19b6d24SZhao Qiang 
892c19b6d24SZhao Qiang 	if (!priv)
893c19b6d24SZhao Qiang 		return -EINVAL;
894c19b6d24SZhao Qiang 
895c19b6d24SZhao Qiang 	if (!netif_running(priv->ndev))
896c19b6d24SZhao Qiang 		return 0;
897c19b6d24SZhao Qiang 
898c19b6d24SZhao Qiang 	netif_device_detach(priv->ndev);
899c19b6d24SZhao Qiang 	napi_disable(&priv->napi);
900c19b6d24SZhao Qiang 
901c19b6d24SZhao Qiang 	uf_regs = priv->uf_regs;
902c19b6d24SZhao Qiang 
903c19b6d24SZhao Qiang 	/* backup gumr guemr*/
904c19b6d24SZhao Qiang 	priv->gumr = ioread32be(&uf_regs->gumr);
905c19b6d24SZhao Qiang 	priv->guemr = ioread8(&uf_regs->guemr);
906c19b6d24SZhao Qiang 
907c19b6d24SZhao Qiang 	priv->ucc_pram_bak = kmalloc(sizeof(*priv->ucc_pram_bak),
908c19b6d24SZhao Qiang 					GFP_KERNEL);
909c19b6d24SZhao Qiang 	if (!priv->ucc_pram_bak)
910c19b6d24SZhao Qiang 		return -ENOMEM;
911c19b6d24SZhao Qiang 
912c19b6d24SZhao Qiang 	/* backup HDLC parameter */
913c19b6d24SZhao Qiang 	memcpy_fromio(priv->ucc_pram_bak, priv->ucc_pram,
914c19b6d24SZhao Qiang 		      sizeof(struct ucc_hdlc_param));
915c19b6d24SZhao Qiang 
916c19b6d24SZhao Qiang 	/* store the clk configuration */
917c19b6d24SZhao Qiang 	store_clk_config(priv);
918c19b6d24SZhao Qiang 
919c19b6d24SZhao Qiang 	/* save power */
920c19b6d24SZhao Qiang 	ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);
921c19b6d24SZhao Qiang 
922c19b6d24SZhao Qiang 	return 0;
923c19b6d24SZhao Qiang }
924c19b6d24SZhao Qiang 
/* PM resume callback (also used for thaw) - rebuilds the full hardware
 * state saved by uhdlc_suspend(): GUMR/GUEMR, virtual FIFOs, clock
 * routing, SIRAM, parameter RAM and both BD rings, then re-enables the
 * controller if the interface was up (hdlc_busy) when suspended.
 */
static int uhdlc_resume(struct device *dev)
{
	struct ucc_hdlc_private *priv = dev_get_drvdata(dev);
	struct ucc_tdm *utdm;
	struct ucc_tdm_info *ut_info;
	struct ucc_fast __iomem *uf_regs;
	struct ucc_fast_private *uccf;
	struct ucc_fast_info *uf_info;
	int i;
	u32 cecr_subblock;
	u16 bd_status;

	if (!priv)
		return -EINVAL;

	if (!netif_running(priv->ndev))
		return 0;

	utdm = priv->utdm;
	ut_info = priv->ut_info;
	uf_info = &ut_info->uf_info;
	uf_regs = priv->uf_regs;
	uccf = priv->uccf;

	/* restore gumr guemr */
	iowrite8(priv->guemr, &uf_regs->guemr);
	iowrite32be(priv->gumr, &uf_regs->gumr);

	/* Set Virtual Fifo registers */
	iowrite16be(uf_info->urfs, &uf_regs->urfs);
	iowrite16be(uf_info->urfet, &uf_regs->urfet);
	iowrite16be(uf_info->urfset, &uf_regs->urfset);
	iowrite16be(uf_info->utfs, &uf_regs->utfs);
	iowrite16be(uf_info->utfet, &uf_regs->utfet);
	iowrite16be(uf_info->utftt, &uf_regs->utftt);
	/* utfb, urfb are offsets from MURAM base */
	iowrite32be(uccf->ucc_fast_tx_virtual_fifo_base_offset, &uf_regs->utfb);
	iowrite32be(uccf->ucc_fast_rx_virtual_fifo_base_offset, &uf_regs->urfb);

	/* Rx Tx and sync clock routing */
	resume_clk_config(priv);

	/* restore interrupt mask, then clear all pending events */
	iowrite32be(uf_info->uccm_mask, &uf_regs->uccm);
	iowrite32be(0xffffffff, &uf_regs->ucce);

	/* keep the controller disabled while state is rebuilt */
	ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);

	/* rebuild SIRAM */
	if (priv->tsa)
		ucc_tdm_init(priv->utdm, priv->ut_info);

	/* Write to QE CECR, UCCx channel to Stop Transmission */
	cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
	qe_issue_cmd(QE_STOP_TX, cecr_subblock,
		     (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);

	/* Set UPSMR normal mode */
	iowrite32be(0, &uf_regs->upsmr);

	/* init parameter base */
	cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
	qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, cecr_subblock,
		     QE_CR_PROTOCOL_UNSPECIFIED, priv->ucc_pram_offset);

	priv->ucc_pram = (struct ucc_hdlc_param __iomem *)
				qe_muram_addr(priv->ucc_pram_offset);

	/* restore ucc parameter (backup was taken in uhdlc_suspend()) */
	memcpy_toio(priv->ucc_pram, priv->ucc_pram_bak,
		    sizeof(struct ucc_hdlc_param));
	kfree(priv->ucc_pram_bak);

	/* rebuild BD entry: last descriptor of each ring gets the WRAP bit */
	for (i = 0; i < RX_BD_RING_LEN; i++) {
		if (i < (RX_BD_RING_LEN - 1))
			bd_status = R_E_S | R_I_S;
		else
			bd_status = R_E_S | R_I_S | R_W_S;

		priv->rx_bd_base[i].status = cpu_to_be16(bd_status);
		priv->rx_bd_base[i].buf = cpu_to_be32(priv->dma_rx_addr + i * MAX_RX_BUF_LENGTH);
	}

	for (i = 0; i < TX_BD_RING_LEN; i++) {
		if (i < (TX_BD_RING_LEN - 1))
			bd_status =  T_I_S | T_TC_S;
		else
			bd_status =  T_I_S | T_TC_S | T_W_S;

		priv->tx_bd_base[i].status = cpu_to_be16(bd_status);
		priv->tx_bd_base[i].buf = cpu_to_be32(priv->dma_tx_addr + i * MAX_RX_BUF_LENGTH);
	}
	/* ensure BD writes are visible to the QE before enabling it */
	dma_wmb();

	/* if hdlc is busy enable TX and RX */
	if (priv->hdlc_busy == 1) {
		cecr_subblock = ucc_fast_get_qe_cr_subblock(
					priv->ut_info->uf_info.ucc_num);

		qe_issue_cmd(QE_INIT_TX_RX, cecr_subblock,
			     (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);

		ucc_fast_enable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);

		/* Enable the TDM port */
		if (priv->tsa)
			qe_setbits_8(&utdm->si_regs->siglmr1_h, 0x1 << utdm->tdm_port);
	}

	napi_enable(&priv->napi);
	netif_device_attach(priv->ndev);

	return 0;
}
1039c19b6d24SZhao Qiang 
/* Suspend/resume double as freeze/thaw for hibernation */
static const struct dev_pm_ops uhdlc_pm_ops = {
	.suspend = uhdlc_suspend,
	.resume = uhdlc_resume,
	.freeze = uhdlc_suspend,
	.thaw = uhdlc_resume,
};
1046c19b6d24SZhao Qiang 
1047c19b6d24SZhao Qiang #define HDLC_PM_OPS (&uhdlc_pm_ops)
1048c19b6d24SZhao Qiang 
1049c19b6d24SZhao Qiang #else
1050c19b6d24SZhao Qiang 
1051c19b6d24SZhao Qiang #define HDLC_PM_OPS NULL
1052c19b6d24SZhao Qiang 
1053c19b6d24SZhao Qiang #endif
/* Tx watchdog callback (fires after dev->watchdog_timeo with the queue
 * stalled).  Only logs the event; no hardware recovery is attempted.
 */
static void uhdlc_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
	netdev_err(ndev, "%s\n", __func__);
}
1058ccb7bc0eSDavid Gounaris 
/* net_device operations; xmit goes through the generic HDLC layer,
 * which dispatches to ucc_hdlc_tx via hdlc->xmit (set in probe).
 */
static const struct net_device_ops uhdlc_ops = {
	.ndo_open       = uhdlc_open,
	.ndo_stop       = uhdlc_close,
	.ndo_start_xmit = hdlc_start_xmit,
	.ndo_siocwandev = uhdlc_ioctl,
	.ndo_tx_timeout	= uhdlc_tx_timeout,
};
1066c19b6d24SZhao Qiang 
/* Look up the platform device matching the @name compatible string and
 * ioremap its first MMIO resource into *@ptr.
 *
 * @init_flag: when non-zero, zero the mapped region - but only the
 *             first time any caller asks for it (tracked by the static
 *             siram_init_flag), so the shared SIRAM is cleared once.
 *
 * Returns 0 on success or a negative errno.  The caller owns the
 * mapping and must iounmap() it.
 */
static int hdlc_map_iomem(char *name, int init_flag, void __iomem **ptr)
{
	struct device_node *np;
	struct platform_device *pdev;
	struct resource *res;
	static int siram_init_flag;
	int ret = 0;

	np = of_find_compatible_node(NULL, NULL, name);
	if (!np)
		return -EINVAL;

	pdev = of_find_device_by_node(np);
	if (!pdev) {
		pr_err("%pOFn: failed to lookup pdev\n", np);
		of_node_put(np);
		return -EINVAL;
	}

	of_node_put(np);
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		ret = -EINVAL;
		goto error_put_device;
	}
	*ptr = ioremap(res->start, resource_size(res));
	if (!*ptr) {
		ret = -ENOMEM;
		goto error_put_device;
	}

	/* We've remapped the addresses, and we don't need the device any
	 * more, so we should release it.
	 */
	put_device(&pdev->dev);

	if (init_flag && siram_init_flag == 0) {
		memset_io(*ptr, 0, resource_size(res));
		siram_init_flag = 1;
	}
	return  0;

error_put_device:
	put_device(&pdev->dev);

	return ret;
}
11148d68100aSWen Yang 
/* Platform probe: parse the device-tree node (UCC number, Rx/Tx clock
 * sources, optional TDM interface), map the SI/SIRAM blocks when TSA is
 * used, initialize the UCC hardware via uhdlc_init(), then allocate and
 * register the generic HDLC net_device.
 *
 * Returns 0 on success or a negative errno; the error path unwinds in
 * reverse order via the goto chain at the bottom.
 */
static int ucc_hdlc_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct ucc_hdlc_private *uhdlc_priv = NULL;
	struct ucc_tdm_info *ut_info;
	struct ucc_tdm *utdm = NULL;
	struct resource res;
	struct net_device *dev;
	hdlc_device *hdlc;
	int ucc_num;
	const char *sprop;
	int ret;
	u32 val;

	/* "cell-index" is the 1-based UCC number */
	ret = of_property_read_u32_index(np, "cell-index", 0, &val);
	if (ret) {
		dev_err(&pdev->dev, "Invalid ucc property\n");
		return -ENODEV;
	}

	ucc_num = val - 1;
	if (ucc_num > (UCC_MAX_NUM - 1) || ucc_num < 0) {
		dev_err(&pdev->dev, ": Invalid UCC num\n");
		return -EINVAL;
	}

	/* start from the template and specialize it for this UCC */
	memcpy(&utdm_info[ucc_num], &utdm_primary_info,
	       sizeof(utdm_primary_info));

	ut_info = &utdm_info[ucc_num];
	ut_info->uf_info.ucc_num = ucc_num;

	sprop = of_get_property(np, "rx-clock-name", NULL);
	if (sprop) {
		ut_info->uf_info.rx_clock = qe_clock_source(sprop);
		if ((ut_info->uf_info.rx_clock < QE_CLK_NONE) ||
		    (ut_info->uf_info.rx_clock > QE_CLK24)) {
			dev_err(&pdev->dev, "Invalid rx-clock-name property\n");
			return -EINVAL;
		}
	} else {
		dev_err(&pdev->dev, "Invalid rx-clock-name property\n");
		return -EINVAL;
	}

	sprop = of_get_property(np, "tx-clock-name", NULL);
	if (sprop) {
		ut_info->uf_info.tx_clock = qe_clock_source(sprop);
		if ((ut_info->uf_info.tx_clock < QE_CLK_NONE) ||
		    (ut_info->uf_info.tx_clock > QE_CLK24)) {
			dev_err(&pdev->dev, "Invalid tx-clock-name property\n");
			return -EINVAL;
		}
	} else {
		dev_err(&pdev->dev, "Invalid tx-clock-name property\n");
		return -EINVAL;
	}

	ret = of_address_to_resource(np, 0, &res);
	if (ret)
		return -EINVAL;

	ut_info->uf_info.regs = res.start;
	ut_info->uf_info.irq = irq_of_parse_and_map(np, 0);

	uhdlc_priv = kzalloc(sizeof(*uhdlc_priv), GFP_KERNEL);
	if (!uhdlc_priv)
		return -ENOMEM;

	dev_set_drvdata(&pdev->dev, uhdlc_priv);
	uhdlc_priv->dev = &pdev->dev;
	uhdlc_priv->ut_info = ut_info;

	/* optional operating modes */
	uhdlc_priv->tsa = of_property_read_bool(np, "fsl,tdm-interface");
	uhdlc_priv->loopback = of_property_read_bool(np, "fsl,ucc-internal-loopback");
	uhdlc_priv->hdlc_bus = of_property_read_bool(np, "fsl,hdlc-bus");

	/* TSA mode needs the TDM description plus the SI/SIRAM mappings */
	if (uhdlc_priv->tsa == 1) {
		utdm = kzalloc(sizeof(*utdm), GFP_KERNEL);
		if (!utdm) {
			ret = -ENOMEM;
			dev_err(&pdev->dev, "No mem to alloc ucc tdm data\n");
			goto free_uhdlc_priv;
		}
		uhdlc_priv->utdm = utdm;
		ret = ucc_of_parse_tdm(np, utdm, ut_info);
		if (ret)
			goto free_utdm;

		ret = hdlc_map_iomem("fsl,t1040-qe-si", 0,
				     (void __iomem **)&utdm->si_regs);
		if (ret)
			goto free_utdm;
		ret = hdlc_map_iomem("fsl,t1040-qe-siram", 1,
				     (void __iomem **)&utdm->siram);
		if (ret)
			goto unmap_si_regs;
	}

	if (of_property_read_u16(np, "fsl,hmask", &uhdlc_priv->hmask))
		uhdlc_priv->hmask = DEFAULT_ADDR_MASK;

	ret = uhdlc_init(uhdlc_priv);
	if (ret) {
		dev_err(&pdev->dev, "Failed to init uhdlc\n");
		goto undo_uhdlc_init;
	}

	dev = alloc_hdlcdev(uhdlc_priv);
	if (!dev) {
		ret = -ENOMEM;
		pr_err("ucc_hdlc: unable to allocate memory\n");
		goto undo_uhdlc_init;
	}

	uhdlc_priv->ndev = dev;
	hdlc = dev_to_hdlc(dev);
	dev->tx_queue_len = 16;
	dev->netdev_ops = &uhdlc_ops;
	dev->watchdog_timeo = 2 * HZ;
	hdlc->attach = ucc_hdlc_attach;
	hdlc->xmit = ucc_hdlc_tx;
	netif_napi_add_weight(dev, &uhdlc_priv->napi, ucc_hdlc_poll, 32);
	if (register_hdlc_device(dev)) {
		ret = -ENOBUFS;
		pr_err("ucc_hdlc: unable to register hdlc device\n");
		goto free_dev;
	}

	return 0;

free_dev:
	free_netdev(dev);
undo_uhdlc_init:
	/* utdm stays NULL (and its mappings unset) in non-TSA mode */
	if (utdm)
		iounmap(utdm->siram);
unmap_si_regs:
	if (utdm)
		iounmap(utdm->si_regs);
free_utdm:
	if (uhdlc_priv->tsa)
		kfree(utdm);
free_uhdlc_priv:
	kfree(uhdlc_priv);
	return ret;
}
1261c19b6d24SZhao Qiang 
ucc_hdlc_remove(struct platform_device * pdev)1262c19b6d24SZhao Qiang static int ucc_hdlc_remove(struct platform_device *pdev)
1263c19b6d24SZhao Qiang {
1264c19b6d24SZhao Qiang 	struct ucc_hdlc_private *priv = dev_get_drvdata(&pdev->dev);
1265c19b6d24SZhao Qiang 
1266c19b6d24SZhao Qiang 	uhdlc_memclean(priv);
1267c19b6d24SZhao Qiang 
1268c19b6d24SZhao Qiang 	if (priv->utdm->si_regs) {
1269c19b6d24SZhao Qiang 		iounmap(priv->utdm->si_regs);
1270c19b6d24SZhao Qiang 		priv->utdm->si_regs = NULL;
1271c19b6d24SZhao Qiang 	}
1272c19b6d24SZhao Qiang 
1273c19b6d24SZhao Qiang 	if (priv->utdm->siram) {
1274c19b6d24SZhao Qiang 		iounmap(priv->utdm->siram);
1275c19b6d24SZhao Qiang 		priv->utdm->siram = NULL;
1276c19b6d24SZhao Qiang 	}
1277c19b6d24SZhao Qiang 	kfree(priv);
1278c19b6d24SZhao Qiang 
1279c19b6d24SZhao Qiang 	dev_info(&pdev->dev, "UCC based hdlc module removed\n");
1280c19b6d24SZhao Qiang 
1281c19b6d24SZhao Qiang 	return 0;
1282c19b6d24SZhao Qiang }
1283c19b6d24SZhao Qiang 
/* Device-tree match table; exported for module autoloading */
static const struct of_device_id fsl_ucc_hdlc_of_match[] = {
	{
	.compatible = "fsl,ucc-hdlc",
	},
	{},
};

MODULE_DEVICE_TABLE(of, fsl_ucc_hdlc_of_match);
1292c19b6d24SZhao Qiang 
/* Platform driver glue; HDLC_PM_OPS is NULL when CONFIG_PM is off */
static struct platform_driver ucc_hdlc_driver = {
	.probe	= ucc_hdlc_probe,
	.remove	= ucc_hdlc_remove,
	.driver	= {
		.name		= DRV_NAME,
		.pm		= HDLC_PM_OPS,
		.of_match_table	= fsl_ucc_hdlc_of_match,
	},
};

module_platform_driver(ucc_hdlc_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(DRV_DESC);
1305d6043299SYueHaibing MODULE_DESCRIPTION(DRV_DESC);
1306