xref: /openbmc/linux/drivers/net/wan/fsl_ucc_hdlc.c (revision 13dd8710)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* Freescale QUICC Engine HDLC Device Driver
3  *
4  * Copyright 2016 Freescale Semiconductor Inc.
5  */
6 
7 #include <linux/delay.h>
8 #include <linux/dma-mapping.h>
9 #include <linux/hdlc.h>
10 #include <linux/init.h>
11 #include <linux/interrupt.h>
12 #include <linux/io.h>
13 #include <linux/irq.h>
14 #include <linux/kernel.h>
15 #include <linux/module.h>
16 #include <linux/netdevice.h>
17 #include <linux/of_address.h>
18 #include <linux/of_irq.h>
19 #include <linux/of_platform.h>
20 #include <linux/platform_device.h>
21 #include <linux/sched.h>
22 #include <linux/skbuff.h>
23 #include <linux/slab.h>
24 #include <linux/spinlock.h>
25 #include <linux/stddef.h>
26 #include <soc/fsl/qe/qe_tdm.h>
27 #include <uapi/linux/if_arp.h>
28 
29 #include "fsl_ucc_hdlc.h"
30 
31 #define DRV_DESC "Freescale QE UCC HDLC Driver"
32 #define DRV_NAME "ucc_hdlc"
33 
34 #define TDM_PPPOHT_SLIC_MAXIN
35 #define RX_BD_ERRORS (R_CD_S | R_OV_S | R_CR_S | R_AB_S | R_NO_S | R_LG_S)
36 
37 static struct ucc_tdm_info utdm_primary_info = {
38 	.uf_info = {
39 		.tsa = 0,
40 		.cdp = 0,
41 		.cds = 1,
42 		.ctsp = 1,
43 		.ctss = 1,
44 		.revd = 0,
45 		.urfs = 256,
46 		.utfs = 256,
47 		.urfet = 128,
48 		.urfset = 192,
49 		.utfet = 128,
50 		.utftt = 0x40,
51 		.ufpt = 256,
52 		.mode = UCC_FAST_PROTOCOL_MODE_HDLC,
53 		.ttx_trx = UCC_FAST_GUMR_TRANSPARENT_TTX_TRX_NORMAL,
54 		.tenc = UCC_FAST_TX_ENCODING_NRZ,
55 		.renc = UCC_FAST_RX_ENCODING_NRZ,
56 		.tcrc = UCC_FAST_16_BIT_CRC,
57 		.synl = UCC_FAST_SYNC_LEN_NOT_USED,
58 	},
59 
60 	.si_info = {
61 #ifdef TDM_PPPOHT_SLIC_MAXIN
62 		.simr_rfsd = 1,
63 		.simr_tfsd = 2,
64 #else
65 		.simr_rfsd = 0,
66 		.simr_tfsd = 0,
67 #endif
68 		.simr_crt = 0,
69 		.simr_sl = 0,
70 		.simr_ce = 1,
71 		.simr_fe = 1,
72 		.simr_gm = 0,
73 	},
74 };
75 
76 static struct ucc_tdm_info utdm_info[UCC_MAX_NUM];
77 
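/* uhdlc_init - one-time hardware setup for a UCC HDLC channel.
 *
 * Programs the ucc_fast controller according to the TSA/HDLC-bus settings,
 * allocates the RX/TX buffer descriptor rings and their data buffers from
 * DMA-coherent memory, allocates the HDLC parameter RAM in MURAM and fills
 * in RIPTR/TIPTR, MRBLR, the CRC preset/mask and the address filter, then
 * initializes every BD status word.  On failure all allocations are undone
 * in reverse order via the goto labels at the end of the function.
 */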
78 static int uhdlc_init(struct ucc_hdlc_private *priv)
79 {
80 	struct ucc_tdm_info *ut_info;
81 	struct ucc_fast_info *uf_info;
82 	u32 cecr_subblock;
83 	u16 bd_status;
84 	int ret, i;
85 	void *bd_buffer;
86 	dma_addr_t bd_dma_addr;
87 	s32 riptr;
88 	s32 tiptr;
89 	u32 gumr;
90 
91 	ut_info = priv->ut_info;
92 	uf_info = &ut_info->uf_info;
93 
94 	if (priv->tsa) {
95 		uf_info->tsa = 1;
96 		uf_info->ctsp = 1;
97 		uf_info->cds = 1;
98 		uf_info->ctss = 1;
99 	} else {
100 		uf_info->cds = 0;
101 		uf_info->ctsp = 0;
102 		uf_info->ctss = 0;
103 	}
104 
105 	/* This sets HPM in the CMXUCR register, which configures an
106 	 * open-drain connected HDLC bus
107 	 */
108 	if (priv->hdlc_bus)
109 		uf_info->brkpt_support = 1;
110 
111 	uf_info->uccm_mask = ((UCC_HDLC_UCCE_RXB | UCC_HDLC_UCCE_RXF |
112 				UCC_HDLC_UCCE_TXB) << 16);
113 
114 	ret = ucc_fast_init(uf_info, &priv->uccf);
115 	if (ret) {
116 		dev_err(priv->dev, "Failed to init uccf\n");
117 		return ret;
118 	}
119 
120 	priv->uf_regs = priv->uccf->uf_regs;
121 	ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);
122 
123 	/* Loopback mode */
124 	if (priv->loopback) {
125 		dev_info(priv->dev, "Loopback Mode\n");
126 		/* use the same clock when working in loopback mode */
127 		qe_setbrg(ut_info->uf_info.rx_clock, 20000000, 1);
128 
129 		gumr = ioread32be(&priv->uf_regs->gumr);
130 		gumr |= (UCC_FAST_GUMR_LOOPBACK | UCC_FAST_GUMR_CDS |
131 			 UCC_FAST_GUMR_TCI);
132 		gumr &= ~(UCC_FAST_GUMR_CTSP | UCC_FAST_GUMR_RSYN);
133 		iowrite32be(gumr, &priv->uf_regs->gumr);
134 	}
135 
136 	/* Initialize SI */
137 	if (priv->tsa)
138 		ucc_tdm_init(priv->utdm, priv->ut_info);
139 
140 	/* Write to QE CECR, UCCx channel to Stop Transmission */
141 	cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
142 	ret = qe_issue_cmd(QE_STOP_TX, cecr_subblock,
143 			   QE_CR_PROTOCOL_UNSPECIFIED, 0);
144 
145 	/* Set UPSMR normal mode (needs fixing) */
146 	iowrite32be(0, &priv->uf_regs->upsmr);
147 
148 	/* hdlc_bus mode */
149 	if (priv->hdlc_bus) {
150 		u32 upsmr;
151 
152 		dev_info(priv->dev, "HDLC bus Mode\n");
153 		upsmr = ioread32be(&priv->uf_regs->upsmr);
154 
155 		/* bus mode and retransmit enable, with collision window
156 		 * set to 8 bytes
157 		 */
158 		upsmr |= UCC_HDLC_UPSMR_RTE | UCC_HDLC_UPSMR_BUS |
159 				UCC_HDLC_UPSMR_CW8;
160 		iowrite32be(upsmr, &priv->uf_regs->upsmr);
161 
162 		/* explicitly disable CDS & CTSP */
163 		gumr = ioread32be(&priv->uf_regs->gumr);
164 		gumr &= ~(UCC_FAST_GUMR_CDS | UCC_FAST_GUMR_CTSP);
165 		/* set automatic sync to explicitly ignore CD signal */
166 		gumr |= UCC_FAST_GUMR_SYNL_AUTO;
167 		iowrite32be(gumr, &priv->uf_regs->gumr);
168 	}
169 
170 	priv->rx_ring_size = RX_BD_RING_LEN;
171 	priv->tx_ring_size = TX_BD_RING_LEN;
172 	/* Alloc Rx BD */
173 	priv->rx_bd_base = dma_alloc_coherent(priv->dev,
174 			RX_BD_RING_LEN * sizeof(struct qe_bd),
175 			&priv->dma_rx_bd, GFP_KERNEL);
176 
177 	if (!priv->rx_bd_base) {
178 		dev_err(priv->dev, "Cannot allocate DMA memory for RxBDs\n");
179 		ret = -ENOMEM;
180 		goto free_uccf;
181 	}
182 
183 	/* Alloc Tx BD */
184 	priv->tx_bd_base = dma_alloc_coherent(priv->dev,
185 			TX_BD_RING_LEN * sizeof(struct qe_bd),
186 			&priv->dma_tx_bd, GFP_KERNEL);
187 
188 	if (!priv->tx_bd_base) {
189 		dev_err(priv->dev, "Cannot allocate DMA memory for TxBDs\n");
190 		ret = -ENOMEM;
191 		goto free_rx_bd;
192 	}
193 
194 	/* Alloc parameter ram for ucc hdlc */
195 	priv->ucc_pram_offset = qe_muram_alloc(sizeof(struct ucc_hdlc_param),
196 				ALIGNMENT_OF_UCC_HDLC_PRAM);
197 
198 	if (priv->ucc_pram_offset < 0) {
199 		dev_err(priv->dev, "Cannot allocate MURAM for hdlc parameter\n");
200 		ret = -ENOMEM;
201 		goto free_tx_bd;
202 	}
203 
204 	priv->rx_skbuff = kcalloc(priv->rx_ring_size,
205 				  sizeof(*priv->rx_skbuff), GFP_KERNEL);
206 	ret = -ENOMEM;	/* in case the allocation above failed */
207 	if (!priv->rx_skbuff)
208 		goto free_ucc_pram;
209 
210 	priv->tx_skbuff = kcalloc(priv->tx_ring_size,
211 				  sizeof(*priv->tx_skbuff), GFP_KERNEL);
212 	ret = -ENOMEM;
213 	if (!priv->tx_skbuff)
214 		goto free_rx_skbuff;
215 
216 	priv->skb_curtx = 0;
217 	priv->skb_dirtytx = 0;
218 	priv->curtx_bd = priv->tx_bd_base;
219 	priv->dirty_tx = priv->tx_bd_base;
220 	priv->currx_bd = priv->rx_bd_base;
221 	priv->currx_bdnum = 0;
222 
223 	/* init parameter base */
224 	cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
225 	ret = qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, cecr_subblock,
226 			   QE_CR_PROTOCOL_UNSPECIFIED, priv->ucc_pram_offset);
227 
228 	priv->ucc_pram = (struct ucc_hdlc_param __iomem *)
229 					qe_muram_addr(priv->ucc_pram_offset);
230 
231 	/* Zero out parameter ram */
232 	memset_io(priv->ucc_pram, 0, sizeof(struct ucc_hdlc_param));
233 
234 	/* Alloc riptr, tiptr */
235 	riptr = qe_muram_alloc(32, 32);
236 	if (riptr < 0) {
237 		dev_err(priv->dev, "Cannot allocate MURAM mem for Receive internal temp data pointer\n");
238 		ret = -ENOMEM;
239 		goto free_tx_skbuff;
240 	}
241 
242 	tiptr = qe_muram_alloc(32, 32);
243 	if (tiptr < 0) {
244 		dev_err(priv->dev, "Cannot allocate MURAM mem for Transmit internal temp data pointer\n");
245 		ret = -ENOMEM;
246 		goto free_riptr;
247 	}
248 	if (riptr != (u16)riptr || tiptr != (u16)tiptr) {
249 		dev_err(priv->dev, "MURAM allocation out of addressable range\n");
250 		ret = -ENOMEM;
251 		goto free_tiptr;
252 	}
253 
254 	/* Set RIPTR, TIPTR */
255 	iowrite16be(riptr, &priv->ucc_pram->riptr);
256 	iowrite16be(tiptr, &priv->ucc_pram->tiptr);
257 
258 	/* Set MRBLR */
259 	iowrite16be(MAX_RX_BUF_LENGTH, &priv->ucc_pram->mrblr);
260 
261 	/* Set RBASE, TBASE */
262 	iowrite32be(priv->dma_rx_bd, &priv->ucc_pram->rbase);
263 	iowrite32be(priv->dma_tx_bd, &priv->ucc_pram->tbase);
264 
265 	/* Set RSTATE, TSTATE */
266 	iowrite32be(BMR_GBL | BMR_BIG_ENDIAN, &priv->ucc_pram->rstate);
267 	iowrite32be(BMR_GBL | BMR_BIG_ENDIAN, &priv->ucc_pram->tstate);
268 
269 	/* Set C_MASK, C_PRES for 16bit CRC */
270 	iowrite32be(CRC_16BIT_MASK, &priv->ucc_pram->c_mask);
271 	iowrite32be(CRC_16BIT_PRES, &priv->ucc_pram->c_pres);
272 
273 	iowrite16be(MAX_FRAME_LENGTH, &priv->ucc_pram->mflr);
274 	iowrite16be(DEFAULT_RFTHR, &priv->ucc_pram->rfthr);
275 	iowrite16be(DEFAULT_RFTHR, &priv->ucc_pram->rfcnt);
276 	iowrite16be(priv->hmask, &priv->ucc_pram->hmask);
277 	iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr1);
278 	iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr2);
279 	iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr3);
280 	iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr4);
281 
282 	/* Get BD buffer */
283 	bd_buffer = dma_alloc_coherent(priv->dev,
284 				       (RX_BD_RING_LEN + TX_BD_RING_LEN) * MAX_RX_BUF_LENGTH,
285 				       &bd_dma_addr, GFP_KERNEL);
286 
287 	if (!bd_buffer) {
288 		dev_err(priv->dev, "Could not allocate DMA memory for BD data buffers\n");
289 		ret = -ENOMEM;
290 		goto free_tiptr;
291 	}
292 
293 	priv->rx_buffer = bd_buffer;
294 	priv->tx_buffer = bd_buffer + RX_BD_RING_LEN * MAX_RX_BUF_LENGTH;
295 
296 	priv->dma_rx_addr = bd_dma_addr;
297 	priv->dma_tx_addr = bd_dma_addr + RX_BD_RING_LEN * MAX_RX_BUF_LENGTH;
298 
299 	for (i = 0; i < RX_BD_RING_LEN; i++) {
300 		if (i < (RX_BD_RING_LEN - 1))
301 			bd_status = R_E_S | R_I_S;
302 		else
303 			bd_status = R_E_S | R_I_S | R_W_S;
304 
305 		iowrite16be(bd_status, &priv->rx_bd_base[i].status);
306 		iowrite32be(priv->dma_rx_addr + i * MAX_RX_BUF_LENGTH,
307 			    &priv->rx_bd_base[i].buf);
308 	}
309 
310 	for (i = 0; i < TX_BD_RING_LEN; i++) {
311 		if (i < (TX_BD_RING_LEN - 1))
312 			bd_status =  T_I_S | T_TC_S;
313 		else
314 			bd_status =  T_I_S | T_TC_S | T_W_S;
315 
316 		iowrite16be(bd_status, &priv->tx_bd_base[i].status);
317 		iowrite32be(priv->dma_tx_addr + i * MAX_RX_BUF_LENGTH,
318 			    &priv->tx_bd_base[i].buf);
319 	}
320 
321 	return 0;
322 
323 free_tiptr:
324 	qe_muram_free(tiptr);
325 free_riptr:
326 	qe_muram_free(riptr);
327 free_tx_skbuff:
328 	kfree(priv->tx_skbuff);
329 free_rx_skbuff:
330 	kfree(priv->rx_skbuff);
331 free_ucc_pram:
332 	qe_muram_free(priv->ucc_pram_offset);
333 free_tx_bd:
334 	dma_free_coherent(priv->dev,
335 			  TX_BD_RING_LEN * sizeof(struct qe_bd),
336 			  priv->tx_bd_base, priv->dma_tx_bd);
337 free_rx_bd:
338 	dma_free_coherent(priv->dev,
339 			  RX_BD_RING_LEN * sizeof(struct qe_bd),
340 			  priv->rx_bd_base, priv->dma_rx_bd);
341 free_uccf:
342 	ucc_fast_free(priv->uccf);
343 
344 	return ret;
345 }
346 
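/* ucc_hdlc_tx - hdlc->xmit hook, called via hdlc_start_xmit().
 *
 * Depending on dev->type the frame either gets a default HDLC header
 * prepended (ARPHRD_RAWHDLC) or its PPP header validated (ARPHRD_PPP).
 * The data is then copied into the coherent TX buffer backing the next
 * free BD and the BD is handed to the QE by setting the Ready bit; the
 * queue is stopped when the ring catches up with dirty_tx.
 */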
347 static netdev_tx_t ucc_hdlc_tx(struct sk_buff *skb, struct net_device *dev)
348 {
349 	hdlc_device *hdlc = dev_to_hdlc(dev);
350 	struct ucc_hdlc_private *priv = (struct ucc_hdlc_private *)hdlc->priv;
351 	struct qe_bd __iomem *bd;
352 	u16 bd_status;
353 	unsigned long flags;
354 	u16 *proto_head;
355 
356 	switch (dev->type) {
357 	case ARPHRD_RAWHDLC:
358 		if (skb_headroom(skb) < HDLC_HEAD_LEN) {
359 			dev->stats.tx_dropped++;
360 			dev_kfree_skb(skb);
361 			netdev_err(dev, "Not enough headroom for the HDLC header\n");
362 			return NETDEV_TX_OK;
363 		}
364 
365 		skb_push(skb, HDLC_HEAD_LEN);
366 
367 		proto_head = (u16 *)skb->data;
368 		*proto_head = htons(DEFAULT_HDLC_HEAD);
369 
370 		dev->stats.tx_bytes += skb->len;
371 		break;
372 
373 	case ARPHRD_PPP:
374 		proto_head = (u16 *)skb->data;
375 		if (*proto_head != htons(DEFAULT_PPP_HEAD)) {
376 			dev->stats.tx_dropped++;
377 			dev_kfree_skb(skb);
378 			netdev_err(dev, "Wrong PPP header\n");
379 			return NETDEV_TX_OK;
380 		}
381 
382 		dev->stats.tx_bytes += skb->len;
383 		break;
384 
385 	case ARPHRD_ETHER:
386 		dev->stats.tx_bytes += skb->len;
387 		break;
388 
389 	default:
390 		dev->stats.tx_dropped++;
391 		dev_kfree_skb(skb);
392 		return NETDEV_TX_OK;
393 	}
394 	netdev_sent_queue(dev, skb->len);
395 	spin_lock_irqsave(&priv->lock, flags);
396 
397 	/* Start from the next BD that should be filled */
398 	bd = priv->curtx_bd;
399 	bd_status = ioread16be(&bd->status);
400 	/* Save the skb pointer so we can free it later */
401 	priv->tx_skbuff[priv->skb_curtx] = skb;
402 
403 	/* Update the current skb pointer (wrapping if this was the last) */
404 	priv->skb_curtx =
405 	    (priv->skb_curtx + 1) & TX_RING_MOD_MASK(TX_BD_RING_LEN);
406 
407 	/* copy skb data to tx buffer for sdma processing */
408 	memcpy(priv->tx_buffer + (be32_to_cpu(bd->buf) - priv->dma_tx_addr),
409 	       skb->data, skb->len);
410 
411 	/* set bd status and length */
412 	bd_status = (bd_status & T_W_S) | T_R_S | T_I_S | T_L_S | T_TC_S;
413 
414 	iowrite16be(skb->len, &bd->length);
415 	iowrite16be(bd_status, &bd->status);
416 
417 	/* Move to next BD in the ring */
418 	if (!(bd_status & T_W_S))
419 		bd += 1;
420 	else
421 		bd = priv->tx_bd_base;
422 
423 	if (bd == priv->dirty_tx) {
424 		if (!netif_queue_stopped(dev))
425 			netif_stop_queue(dev);
426 	}
427 
428 	priv->curtx_bd = bd;
429 
430 	spin_unlock_irqrestore(&priv->lock, flags);
431 
432 	return NETDEV_TX_OK;
433 }
434 
435 static int hdlc_tx_restart(struct ucc_hdlc_private *priv)
436 {
437 	u32 cecr_subblock;
438 
439 	cecr_subblock =
440 		ucc_fast_get_qe_cr_subblock(priv->ut_info->uf_info.ucc_num);
441 
442 	qe_issue_cmd(QE_RESTART_TX, cecr_subblock,
443 		     QE_CR_PROTOCOL_UNSPECIFIED, 0);
444 	return 0;
445 }
446 
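/* hdlc_tx_done - reclaim transmitted BDs; runs from NAPI poll under
 * priv->lock.  Walks the ring from dirty_tx while the Ready bit is clear,
 * records underrun/carrier-lost errors, frees the associated skbs, wakes
 * the queue and requests a QE_RESTART_TX if an error stopped the
 * transmitter.
 */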
447 static int hdlc_tx_done(struct ucc_hdlc_private *priv)
448 {
449 	/* Reclaim BDs the hardware has finished transmitting */
450 	struct net_device *dev = priv->ndev;
451 	unsigned int bytes_sent = 0;
452 	int howmany = 0;
453 	struct qe_bd *bd;		/* BD pointer */
454 	u16 bd_status;
455 	int tx_restart = 0;
456 
457 	bd = priv->dirty_tx;
458 	bd_status = ioread16be(&bd->status);
459 
460 	/* Normal processing. */
461 	while ((bd_status & T_R_S) == 0) {
462 		struct sk_buff *skb;
463 
464 		if (bd_status & T_UN_S) { /* Underrun */
465 			dev->stats.tx_fifo_errors++;
466 			tx_restart = 1;
467 		}
468 		if (bd_status & T_CT_S) { /* Carrier lost */
469 			dev->stats.tx_carrier_errors++;
470 			tx_restart = 1;
471 		}
472 
473 		/* The BD holds an already transmitted buffer: handle it
474 		 * and release the BD so it can be reused for a new frame.
475 		 */
476 
477 		skb = priv->tx_skbuff[priv->skb_dirtytx];
478 		if (!skb)
479 			break;
480 		howmany++;
481 		bytes_sent += skb->len;
482 		dev->stats.tx_packets++;
483 		memset(priv->tx_buffer +
484 		       (be32_to_cpu(bd->buf) - priv->dma_tx_addr),
485 		       0, skb->len);
486 		dev_consume_skb_irq(skb);
487 
488 		priv->tx_skbuff[priv->skb_dirtytx] = NULL;
489 		priv->skb_dirtytx =
490 		    (priv->skb_dirtytx +
491 		     1) & TX_RING_MOD_MASK(TX_BD_RING_LEN);
492 
493 		/* We freed a buffer, so now we can restart transmission */
494 		if (netif_queue_stopped(dev))
495 			netif_wake_queue(dev);
496 
497 		/* Advance the confirmation BD pointer */
498 		if (!(bd_status & T_W_S))
499 			bd += 1;
500 		else
501 			bd = priv->tx_bd_base;
502 		bd_status = ioread16be(&bd->status);
503 	}
504 	priv->dirty_tx = bd;
505 
506 	if (tx_restart)
507 		hdlc_tx_restart(priv);
508 
509 	netdev_completed_queue(dev, howmany, bytes_sent);
510 	return 0;
511 }
512 
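/* hdlc_rx_done - NAPI receive processing.
 *
 * Consumes up to rx_work_limit BDs whose Empty bit is clear.  Each frame is
 * copied out of the coherent RX buffer into a freshly allocated skb, with
 * the HDLC header and CRC stripped as appropriate for the device type, and
 * handed to the stack with netif_receive_skb().  The BD is then recycled by
 * setting R_E (and R_I) again.  Returns the number of frames processed.
 */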
513 static int hdlc_rx_done(struct ucc_hdlc_private *priv, int rx_work_limit)
514 {
515 	struct net_device *dev = priv->ndev;
516 	struct sk_buff *skb = NULL;
517 	hdlc_device *hdlc = dev_to_hdlc(dev);
518 	struct qe_bd *bd;
519 	u16 bd_status;
520 	u16 length, howmany = 0;
521 	u8 *bdbuffer;
522 
523 	bd = priv->currx_bd;
524 	bd_status = ioread16be(&bd->status);
525 
526 	/* loop while the BD holds received data (R_E clear) and budget remains */
527 	while (!((bd_status & (R_E_S)) || (--rx_work_limit < 0))) {
528 		if (bd_status & (RX_BD_ERRORS)) {
529 			dev->stats.rx_errors++;
530 
531 			if (bd_status & R_CD_S)
532 				dev->stats.collisions++;
533 			if (bd_status & R_OV_S)
534 				dev->stats.rx_fifo_errors++;
535 			if (bd_status & R_CR_S)
536 				dev->stats.rx_crc_errors++;
537 			if (bd_status & R_AB_S)
538 				dev->stats.rx_over_errors++;
539 			if (bd_status & R_NO_S)
540 				dev->stats.rx_frame_errors++;
541 			if (bd_status & R_LG_S)
542 				dev->stats.rx_length_errors++;
543 
544 			goto recycle;
545 		}
546 		bdbuffer = priv->rx_buffer +
547 			(priv->currx_bdnum * MAX_RX_BUF_LENGTH);
548 		length = ioread16be(&bd->length);
549 
550 		switch (dev->type) {
551 		case ARPHRD_RAWHDLC:
552 			bdbuffer += HDLC_HEAD_LEN;
553 			length -= (HDLC_HEAD_LEN + HDLC_CRC_SIZE);
554 
555 			skb = dev_alloc_skb(length);
556 			if (!skb) {
557 				dev->stats.rx_dropped++;
558 				return -ENOMEM;
559 			}
560 
561 			skb_put(skb, length);
562 			skb->len = length;
563 			skb->dev = dev;
564 			memcpy(skb->data, bdbuffer, length);
565 			break;
566 
567 		case ARPHRD_PPP:
568 		case ARPHRD_ETHER:
569 			length -= HDLC_CRC_SIZE;
570 
571 			skb = dev_alloc_skb(length);
572 			if (!skb) {
573 				dev->stats.rx_dropped++;
574 				return -ENOMEM;
575 			}
576 
577 			skb_put(skb, length);
578 			skb->len = length;
579 			skb->dev = dev;
580 			memcpy(skb->data, bdbuffer, length);
581 			break;
582 		}
583 
584 		dev->stats.rx_packets++;
585 		dev->stats.rx_bytes += skb->len;
586 		howmany++;
587 		if (hdlc->proto)
588 			skb->protocol = hdlc_type_trans(skb, dev);
589 		netif_receive_skb(skb);
590 
591 recycle:
592 		iowrite16be((bd_status & R_W_S) | R_E_S | R_I_S, &bd->status);
593 
594 		/* update to point at the next bd */
595 		if (bd_status & R_W_S) {
596 			priv->currx_bdnum = 0;
597 			bd = priv->rx_bd_base;
598 		} else {
599 			if (priv->currx_bdnum < (RX_BD_RING_LEN - 1))
600 				priv->currx_bdnum += 1;
601 			else
602 				priv->currx_bdnum = RX_BD_RING_LEN - 1;
603 
604 			bd += 1;
605 		}
606 
607 		bd_status = ioread16be(&bd->status);
608 	}
609 
610 	priv->currx_bd = bd;
611 	return howmany;
612 }
613 
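/* NAPI poll: first reclaim completed TX BDs, then receive up to the budget.
 * When less than the budget was used, NAPI is completed and the RX/TX event
 * interrupts are unmasked again in UCCM.
 */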
614 static int ucc_hdlc_poll(struct napi_struct *napi, int budget)
615 {
616 	struct ucc_hdlc_private *priv = container_of(napi,
617 						     struct ucc_hdlc_private,
618 						     napi);
619 	int howmany;
620 
621 	/* Tx event processing */
622 	spin_lock(&priv->lock);
623 	hdlc_tx_done(priv);
624 	spin_unlock(&priv->lock);
625 
626 	howmany = 0;
627 	howmany += hdlc_rx_done(priv, budget - howmany);
628 
629 	if (howmany < budget) {
630 		napi_complete_done(napi, howmany);
631 		qe_setbits_be32(priv->uccf->p_uccm,
632 				(UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS) << 16);
633 	}
634 
635 	return howmany;
636 }
637 
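/* Interrupt handler: reads and acknowledges the pending UCCE events.  RX/TX
 * events are masked and deferred to NAPI; busy (BSY) and TX error (TXE)
 * events are only counted in the interface statistics.
 */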
638 static irqreturn_t ucc_hdlc_irq_handler(int irq, void *dev_id)
639 {
640 	struct ucc_hdlc_private *priv = (struct ucc_hdlc_private *)dev_id;
641 	struct net_device *dev = priv->ndev;
642 	struct ucc_fast_private *uccf;
643 	u32 ucce;
644 	u32 uccm;
645 
646 	uccf = priv->uccf;
647 
648 	ucce = ioread32be(uccf->p_ucce);
649 	uccm = ioread32be(uccf->p_uccm);
650 	ucce &= uccm;
651 	iowrite32be(ucce, uccf->p_ucce);
652 	if (!ucce)
653 		return IRQ_NONE;
654 
655 	if ((ucce >> 16) & (UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS)) {
656 		if (napi_schedule_prep(&priv->napi)) {
657 			uccm &= ~((UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS)
658 				  << 16);
659 			iowrite32be(uccm, uccf->p_uccm);
660 			__napi_schedule(&priv->napi);
661 		}
662 	}
663 
664 	/* Errors and other events */
665 	if (ucce >> 16 & UCC_HDLC_UCCE_BSY)
666 		dev->stats.rx_missed_errors++;
667 	if (ucce >> 16 & UCC_HDLC_UCCE_TXE)
668 		dev->stats.tx_errors++;
669 
670 	return IRQ_HANDLED;
671 }
672 
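/* SIOCWANDEV handler: IF_GET_IFACE reports an E1 interface together with
 * the stored clocking mode; every other request is passed to hdlc_ioctl().
 */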
673 static int uhdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
674 {
675 	const size_t size = sizeof(te1_settings);
676 	te1_settings line;
677 	struct ucc_hdlc_private *priv = netdev_priv(dev);
678 
679 	if (cmd != SIOCWANDEV)
680 		return hdlc_ioctl(dev, ifr, cmd);
681 
682 	switch (ifr->ifr_settings.type) {
683 	case IF_GET_IFACE:
684 		ifr->ifr_settings.type = IF_IFACE_E1;
685 		if (ifr->ifr_settings.size < size) {
686 			ifr->ifr_settings.size = size; /* data size wanted */
687 			return -ENOBUFS;
688 		}
689 		memset(&line, 0, sizeof(line));
690 		line.clock_type = priv->clocking;
691 
692 		if (copy_to_user(ifr->ifr_settings.ifs_ifsu.sync, &line, size))
693 			return -EFAULT;
694 		return 0;
695 
696 	default:
697 		return hdlc_ioctl(dev, ifr, cmd);
698 	}
699 }
700 
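/* ndo_open: request the UCC interrupt, issue QE_INIT_TX_RX, enable the fast
 * controller (and the TDM port when a TSA is used) and start NAPI and the
 * TX queue.  The hdlc_busy flag keeps this from running twice.
 */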
701 static int uhdlc_open(struct net_device *dev)
702 {
703 	u32 cecr_subblock;
704 	hdlc_device *hdlc = dev_to_hdlc(dev);
705 	struct ucc_hdlc_private *priv = hdlc->priv;
706 	struct ucc_tdm *utdm = priv->utdm;
707 
708 	if (priv->hdlc_busy != 1) {
709 		if (request_irq(priv->ut_info->uf_info.irq,
710 				ucc_hdlc_irq_handler, 0, "hdlc", priv))
711 			return -ENODEV;
712 
713 		cecr_subblock = ucc_fast_get_qe_cr_subblock(
714 					priv->ut_info->uf_info.ucc_num);
715 
716 		qe_issue_cmd(QE_INIT_TX_RX, cecr_subblock,
717 			     QE_CR_PROTOCOL_UNSPECIFIED, 0);
718 
719 		ucc_fast_enable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);
720 
721 		/* Enable the TDM port */
722 		if (priv->tsa)
723 			utdm->si_regs->siglmr1_h |= (0x1 << utdm->tdm_port);
724 
725 		priv->hdlc_busy = 1;
726 		netif_device_attach(priv->ndev);
727 		napi_enable(&priv->napi);
728 		netdev_reset_queue(dev);
729 		netif_start_queue(dev);
730 		hdlc_open(dev);
731 	}
732 
733 	return 0;
734 }
735 
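/* Free everything uhdlc_init() set up: the MURAM areas (riptr/tiptr and the
 * parameter RAM), the BD rings, the coherent data buffers, the skb pointer
 * arrays and finally the ucc_fast instance itself.
 */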
736 static void uhdlc_memclean(struct ucc_hdlc_private *priv)
737 {
738 	qe_muram_free(ioread16be(&priv->ucc_pram->riptr));
739 	qe_muram_free(ioread16be(&priv->ucc_pram->tiptr));
740 
741 	if (priv->rx_bd_base) {
742 		dma_free_coherent(priv->dev,
743 				  RX_BD_RING_LEN * sizeof(struct qe_bd),
744 				  priv->rx_bd_base, priv->dma_rx_bd);
745 
746 		priv->rx_bd_base = NULL;
747 		priv->dma_rx_bd = 0;
748 	}
749 
750 	if (priv->tx_bd_base) {
751 		dma_free_coherent(priv->dev,
752 				  TX_BD_RING_LEN * sizeof(struct qe_bd),
753 				  priv->tx_bd_base, priv->dma_tx_bd);
754 
755 		priv->tx_bd_base = NULL;
756 		priv->dma_tx_bd = 0;
757 	}
758 
759 	if (priv->ucc_pram) {
760 		qe_muram_free(priv->ucc_pram_offset);
761 		priv->ucc_pram = NULL;
762 		priv->ucc_pram_offset = 0;
763 	}
764 
765 	kfree(priv->rx_skbuff);
766 	priv->rx_skbuff = NULL;
767 
768 	kfree(priv->tx_skbuff);
769 	priv->tx_skbuff = NULL;
770 
771 	if (priv->uf_regs) {
772 		iounmap(priv->uf_regs);
773 		priv->uf_regs = NULL;
774 	}
775 
776 	if (priv->uccf) {
777 		ucc_fast_free(priv->uccf);
778 		priv->uccf = NULL;
779 	}
780 
781 	if (priv->rx_buffer) {
782 		dma_free_coherent(priv->dev,
783 				  RX_BD_RING_LEN * MAX_RX_BUF_LENGTH,
784 				  priv->rx_buffer, priv->dma_rx_addr);
785 		priv->rx_buffer = NULL;
786 		priv->dma_rx_addr = 0;
787 	}
788 
789 	if (priv->tx_buffer) {
790 		dma_free_coherent(priv->dev,
791 				  TX_BD_RING_LEN * MAX_RX_BUF_LENGTH,
792 				  priv->tx_buffer, priv->dma_tx_addr);
793 		priv->tx_buffer = NULL;
794 		priv->dma_tx_addr = 0;
795 	}
796 }
797 
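/* ndo_stop: the counterpart of uhdlc_open().  Gracefully stops TX, closes
 * the RX BDs, disables the TDM port and the fast controller, frees the IRQ
 * and stops the queue.
 */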
798 static int uhdlc_close(struct net_device *dev)
799 {
800 	struct ucc_hdlc_private *priv = dev_to_hdlc(dev)->priv;
801 	struct ucc_tdm *utdm = priv->utdm;
802 	u32 cecr_subblock;
803 
804 	napi_disable(&priv->napi);
805 	cecr_subblock = ucc_fast_get_qe_cr_subblock(
806 				priv->ut_info->uf_info.ucc_num);
807 
808 	qe_issue_cmd(QE_GRACEFUL_STOP_TX, cecr_subblock,
809 		     (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);
810 	qe_issue_cmd(QE_CLOSE_RX_BD, cecr_subblock,
811 		     (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);
812 
813 	if (priv->tsa)
814 		utdm->si_regs->siglmr1_h &= ~(0x1 << utdm->tdm_port);
815 
816 	ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);
817 
818 	free_irq(priv->ut_info->uf_info.irq, priv);
819 	netif_stop_queue(dev);
820 	netdev_reset_queue(dev);
821 	priv->hdlc_busy = 0;
822 
823 	return 0;
824 }
825 
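/* hdlc attach callback: only NRZ/NRZI encodings and none/CRC16/CRC32
 * parities are accepted; the chosen values are stored for later use.
 */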
826 static int ucc_hdlc_attach(struct net_device *dev, unsigned short encoding,
827 			   unsigned short parity)
828 {
829 	struct ucc_hdlc_private *priv = dev_to_hdlc(dev)->priv;
830 
831 	if (encoding != ENCODING_NRZ &&
832 	    encoding != ENCODING_NRZI)
833 		return -EINVAL;
834 
835 	if (parity != PARITY_NONE &&
836 	    parity != PARITY_CRC32_PR1_CCITT &&
837 	    parity != PARITY_CRC16_PR0_CCITT &&
838 	    parity != PARITY_CRC16_PR1_CCITT)
839 		return -EINVAL;
840 
841 	priv->encoding = encoding;
842 	priv->parity = parity;
843 
844 	return 0;
845 }
846 
847 #ifdef CONFIG_PM
848 static void store_clk_config(struct ucc_hdlc_private *priv)
849 {
850 	struct qe_mux *qe_mux_reg = &qe_immr->qmx;
851 
852 	/* store si clk */
853 	priv->cmxsi1cr_h = ioread32be(&qe_mux_reg->cmxsi1cr_h);
854 	priv->cmxsi1cr_l = ioread32be(&qe_mux_reg->cmxsi1cr_l);
855 
856 	/* store si sync */
857 	priv->cmxsi1syr = ioread32be(&qe_mux_reg->cmxsi1syr);
858 
859 	/* store ucc clk */
860 	memcpy_fromio(priv->cmxucr, qe_mux_reg->cmxucr, 4 * sizeof(u32));
861 }
862 
863 static void resume_clk_config(struct ucc_hdlc_private *priv)
864 {
865 	struct qe_mux *qe_mux_reg = &qe_immr->qmx;
866 
867 	memcpy_toio(qe_mux_reg->cmxucr, priv->cmxucr, 4 * sizeof(u32));
868 
869 	iowrite32be(priv->cmxsi1cr_h, &qe_mux_reg->cmxsi1cr_h);
870 	iowrite32be(priv->cmxsi1cr_l, &qe_mux_reg->cmxsi1cr_l);
871 
872 	iowrite32be(priv->cmxsi1syr, &qe_mux_reg->cmxsi1syr);
873 }
874 
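/* System suspend: detach the interface, save GUMR/GUEMR, the HDLC parameter
 * RAM and the QE MUX clock routing, then disable the controller.
 * uhdlc_resume() below restores this state and rebuilds the SI RAM and the
 * BD rings before re-enabling the channel.
 */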
875 static int uhdlc_suspend(struct device *dev)
876 {
877 	struct ucc_hdlc_private *priv = dev_get_drvdata(dev);
878 	struct ucc_fast __iomem *uf_regs;
879 
880 	if (!priv)
881 		return -EINVAL;
882 
883 	if (!netif_running(priv->ndev))
884 		return 0;
885 
886 	netif_device_detach(priv->ndev);
887 	napi_disable(&priv->napi);
888 
889 	uf_regs = priv->uf_regs;
890 
891 	/* back up gumr and guemr */
892 	priv->gumr = ioread32be(&uf_regs->gumr);
893 	priv->guemr = ioread8(&uf_regs->guemr);
894 
895 	priv->ucc_pram_bak = kmalloc(sizeof(*priv->ucc_pram_bak),
896 					GFP_KERNEL);
897 	if (!priv->ucc_pram_bak)
898 		return -ENOMEM;
899 
900 	/* backup HDLC parameter */
901 	memcpy_fromio(priv->ucc_pram_bak, priv->ucc_pram,
902 		      sizeof(struct ucc_hdlc_param));
903 
904 	/* store the clk configuration */
905 	store_clk_config(priv);
906 
907 	/* save power */
908 	ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);
909 
910 	return 0;
911 }
912 
913 static int uhdlc_resume(struct device *dev)
914 {
915 	struct ucc_hdlc_private *priv = dev_get_drvdata(dev);
916 	struct ucc_tdm *utdm;
917 	struct ucc_tdm_info *ut_info;
918 	struct ucc_fast __iomem *uf_regs;
919 	struct ucc_fast_private *uccf;
920 	struct ucc_fast_info *uf_info;
921 	int i;
922 	u32 cecr_subblock;
923 	u16 bd_status;
924 
925 	if (!priv)
926 		return -EINVAL;
927 
928 	if (!netif_running(priv->ndev))
929 		return 0;
930 
931 	utdm = priv->utdm;
932 	ut_info = priv->ut_info;
933 	uf_info = &ut_info->uf_info;
934 	uf_regs = priv->uf_regs;
935 	uccf = priv->uccf;
936 
937 	/* restore gumr guemr */
938 	iowrite8(priv->guemr, &uf_regs->guemr);
939 	iowrite32be(priv->gumr, &uf_regs->gumr);
940 
941 	/* Set Virtual Fifo registers */
942 	iowrite16be(uf_info->urfs, &uf_regs->urfs);
943 	iowrite16be(uf_info->urfet, &uf_regs->urfet);
944 	iowrite16be(uf_info->urfset, &uf_regs->urfset);
945 	iowrite16be(uf_info->utfs, &uf_regs->utfs);
946 	iowrite16be(uf_info->utfet, &uf_regs->utfet);
947 	iowrite16be(uf_info->utftt, &uf_regs->utftt);
948 	/* utfb, urfb are offsets from MURAM base */
949 	iowrite32be(uccf->ucc_fast_tx_virtual_fifo_base_offset, &uf_regs->utfb);
950 	iowrite32be(uccf->ucc_fast_rx_virtual_fifo_base_offset, &uf_regs->urfb);
951 
952 	/* Rx Tx and sync clock routing */
953 	resume_clk_config(priv);
954 
955 	iowrite32be(uf_info->uccm_mask, &uf_regs->uccm);
956 	iowrite32be(0xffffffff, &uf_regs->ucce);
957 
958 	ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);
959 
960 	/* rebuild SIRAM */
961 	if (priv->tsa)
962 		ucc_tdm_init(priv->utdm, priv->ut_info);
963 
964 	/* Write to QE CECR, UCCx channel to Stop Transmission */
965 	cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
966 	qe_issue_cmd(QE_STOP_TX, cecr_subblock,
967 		     (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);
968 
969 	/* Set UPSMR normal mode */
970 	iowrite32be(0, &uf_regs->upsmr);
971 
972 	/* init parameter base */
973 	cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
974 	qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, cecr_subblock,
975 		     QE_CR_PROTOCOL_UNSPECIFIED, priv->ucc_pram_offset);
976 
977 	priv->ucc_pram = (struct ucc_hdlc_param __iomem *)
978 				qe_muram_addr(priv->ucc_pram_offset);
979 
980 	/* restore ucc parameter */
981 	memcpy_toio(priv->ucc_pram, priv->ucc_pram_bak,
982 		    sizeof(struct ucc_hdlc_param));
983 	kfree(priv->ucc_pram_bak);
984 
985 	/* rebuild BD entry */
986 	for (i = 0; i < RX_BD_RING_LEN; i++) {
987 		if (i < (RX_BD_RING_LEN - 1))
988 			bd_status = R_E_S | R_I_S;
989 		else
990 			bd_status = R_E_S | R_I_S | R_W_S;
991 
992 		iowrite16be(bd_status, &priv->rx_bd_base[i].status);
993 		iowrite32be(priv->dma_rx_addr + i * MAX_RX_BUF_LENGTH,
994 			    &priv->rx_bd_base[i].buf);
995 	}
996 
997 	for (i = 0; i < TX_BD_RING_LEN; i++) {
998 		if (i < (TX_BD_RING_LEN - 1))
999 			bd_status =  T_I_S | T_TC_S;
1000 		else
1001 			bd_status =  T_I_S | T_TC_S | T_W_S;
1002 
1003 		iowrite16be(bd_status, &priv->tx_bd_base[i].status);
1004 		iowrite32be(priv->dma_tx_addr + i * MAX_RX_BUF_LENGTH,
1005 			    &priv->tx_bd_base[i].buf);
1006 	}
1007 
1008 	/* if hdlc is busy enable TX and RX */
1009 	if (priv->hdlc_busy == 1) {
1010 		cecr_subblock = ucc_fast_get_qe_cr_subblock(
1011 					priv->ut_info->uf_info.ucc_num);
1012 
1013 		qe_issue_cmd(QE_INIT_TX_RX, cecr_subblock,
1014 			     (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);
1015 
1016 		ucc_fast_enable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);
1017 
1018 		/* Enable the TDM port */
1019 		if (priv->tsa)
1020 			utdm->si_regs->siglmr1_h |= (0x1 << utdm->tdm_port);
1021 	}
1022 
1023 	napi_enable(&priv->napi);
1024 	netif_device_attach(priv->ndev);
1025 
1026 	return 0;
1027 }
1028 
1029 static const struct dev_pm_ops uhdlc_pm_ops = {
1030 	.suspend = uhdlc_suspend,
1031 	.resume = uhdlc_resume,
1032 	.freeze = uhdlc_suspend,
1033 	.thaw = uhdlc_resume,
1034 };
1035 
1036 #define HDLC_PM_OPS (&uhdlc_pm_ops)
1037 
1038 #else
1039 
1040 #define HDLC_PM_OPS NULL
1041 
1042 #endif
1043 static void uhdlc_tx_timeout(struct net_device *ndev, unsigned int txqueue)
1044 {
1045 	netdev_err(ndev, "%s\n", __func__);
1046 }
1047 
1048 static const struct net_device_ops uhdlc_ops = {
1049 	.ndo_open       = uhdlc_open,
1050 	.ndo_stop       = uhdlc_close,
1051 	.ndo_start_xmit = hdlc_start_xmit,
1052 	.ndo_do_ioctl   = uhdlc_ioctl,
1053 	.ndo_tx_timeout	= uhdlc_tx_timeout,
1054 };
1055 
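/* Look up a platform device by compatible string, ioremap its first MEM
 * resource into *ptr and, when init_flag is set, zero the SI RAM exactly
 * once across all probed channels.
 */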
1056 static int hdlc_map_iomem(char *name, int init_flag, void __iomem **ptr)
1057 {
1058 	struct device_node *np;
1059 	struct platform_device *pdev;
1060 	struct resource *res;
1061 	static int siram_init_flag;
1062 	int ret = 0;
1063 
1064 	np = of_find_compatible_node(NULL, NULL, name);
1065 	if (!np)
1066 		return -EINVAL;
1067 
1068 	pdev = of_find_device_by_node(np);
1069 	if (!pdev) {
1070 		pr_err("%pOFn: failed to lookup pdev\n", np);
1071 		of_node_put(np);
1072 		return -EINVAL;
1073 	}
1074 
1075 	of_node_put(np);
1076 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1077 	if (!res) {
1078 		ret = -EINVAL;
1079 		goto error_put_device;
1080 	}
1081 	*ptr = ioremap(res->start, resource_size(res));
1082 	if (!*ptr) {
1083 		ret = -ENOMEM;
1084 		goto error_put_device;
1085 	}
1086 
1087 	/* We've remapped the addresses, and we don't need the device any
1088 	 * more, so we should release it.
1089 	 */
1090 	put_device(&pdev->dev);
1091 
1092 	if (init_flag && siram_init_flag == 0) {
1093 		memset_io(*ptr, 0, resource_size(res));
1094 		siram_init_flag = 1;
1095 	}
1096 	return  0;
1097 
1098 error_put_device:
1099 	put_device(&pdev->dev);
1100 
1101 	return ret;
1102 }
1103 
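/* Probe: parse the device tree node (cell-index, rx/tx clock names, the
 * fsl,tdm-interface / fsl,ucc-internal-loopback / fsl,hdlc-bus flags and
 * fsl,hmask), map the SI and SI RAM blocks when a TDM interface is used,
 * run uhdlc_init() and register the HDLC network device.
 *
 * A minimal illustrative node (property values are examples only, not taken
 * from a real board file) might look like:
 *
 *	ucc@2000 {
 *		compatible = "fsl,ucc-hdlc";
 *		cell-index = <1>;
 *		rx-clock-name = "brg1";
 *		tx-clock-name = "brg1";
 *		fsl,hdlc-bus;
 *	};
 */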
1104 static int ucc_hdlc_probe(struct platform_device *pdev)
1105 {
1106 	struct device_node *np = pdev->dev.of_node;
1107 	struct ucc_hdlc_private *uhdlc_priv = NULL;
1108 	struct ucc_tdm_info *ut_info;
1109 	struct ucc_tdm *utdm = NULL;
1110 	struct resource res;
1111 	struct net_device *dev;
1112 	hdlc_device *hdlc;
1113 	int ucc_num;
1114 	const char *sprop;
1115 	int ret;
1116 	u32 val;
1117 
1118 	ret = of_property_read_u32_index(np, "cell-index", 0, &val);
1119 	if (ret) {
1120 		dev_err(&pdev->dev, "Invalid ucc property\n");
1121 		return -ENODEV;
1122 	}
1123 
1124 	ucc_num = val - 1;
1125 	if (ucc_num > (UCC_MAX_NUM - 1) || ucc_num < 0) {
1126 		dev_err(&pdev->dev, "Invalid UCC number\n");
1127 		return -EINVAL;
1128 	}
1129 
1130 	memcpy(&utdm_info[ucc_num], &utdm_primary_info,
1131 	       sizeof(utdm_primary_info));
1132 
1133 	ut_info = &utdm_info[ucc_num];
1134 	ut_info->uf_info.ucc_num = ucc_num;
1135 
1136 	sprop = of_get_property(np, "rx-clock-name", NULL);
1137 	if (sprop) {
1138 		ut_info->uf_info.rx_clock = qe_clock_source(sprop);
1139 		if ((ut_info->uf_info.rx_clock < QE_CLK_NONE) ||
1140 		    (ut_info->uf_info.rx_clock > QE_CLK24)) {
1141 			dev_err(&pdev->dev, "Invalid rx-clock-name property\n");
1142 			return -EINVAL;
1143 		}
1144 	} else {
1145 		dev_err(&pdev->dev, "Invalid rx-clock-name property\n");
1146 		return -EINVAL;
1147 	}
1148 
1149 	sprop = of_get_property(np, "tx-clock-name", NULL);
1150 	if (sprop) {
1151 		ut_info->uf_info.tx_clock = qe_clock_source(sprop);
1152 		if ((ut_info->uf_info.tx_clock < QE_CLK_NONE) ||
1153 		    (ut_info->uf_info.tx_clock > QE_CLK24)) {
1154 			dev_err(&pdev->dev, "Invalid tx-clock-name property\n");
1155 			return -EINVAL;
1156 		}
1157 	} else {
1158 		dev_err(&pdev->dev, "Invalid tx-clock-name property\n");
1159 		return -EINVAL;
1160 	}
1161 
1162 	ret = of_address_to_resource(np, 0, &res);
1163 	if (ret)
1164 		return -EINVAL;
1165 
1166 	ut_info->uf_info.regs = res.start;
1167 	ut_info->uf_info.irq = irq_of_parse_and_map(np, 0);
1168 
1169 	uhdlc_priv = kzalloc(sizeof(*uhdlc_priv), GFP_KERNEL);
1170 	if (!uhdlc_priv) {
1171 		return -ENOMEM;
1172 	}
1173 
1174 	dev_set_drvdata(&pdev->dev, uhdlc_priv);
1175 	uhdlc_priv->dev = &pdev->dev;
1176 	uhdlc_priv->ut_info = ut_info;
1177 
1178 	if (of_get_property(np, "fsl,tdm-interface", NULL))
1179 		uhdlc_priv->tsa = 1;
1180 
1181 	if (of_get_property(np, "fsl,ucc-internal-loopback", NULL))
1182 		uhdlc_priv->loopback = 1;
1183 
1184 	if (of_get_property(np, "fsl,hdlc-bus", NULL))
1185 		uhdlc_priv->hdlc_bus = 1;
1186 
1187 	if (uhdlc_priv->tsa == 1) {
1188 		utdm = kzalloc(sizeof(*utdm), GFP_KERNEL);
1189 		if (!utdm) {
1190 			ret = -ENOMEM;
1191 			dev_err(&pdev->dev, "No mem to alloc ucc tdm data\n");
1192 			goto free_uhdlc_priv;
1193 		}
1194 		uhdlc_priv->utdm = utdm;
1195 		ret = ucc_of_parse_tdm(np, utdm, ut_info);
1196 		if (ret)
1197 			goto free_utdm;
1198 
1199 		ret = hdlc_map_iomem("fsl,t1040-qe-si", 0,
1200 				     (void __iomem **)&utdm->si_regs);
1201 		if (ret)
1202 			goto free_utdm;
1203 		ret = hdlc_map_iomem("fsl,t1040-qe-siram", 1,
1204 				     (void __iomem **)&utdm->siram);
1205 		if (ret)
1206 			goto unmap_si_regs;
1207 	}
1208 
1209 	if (of_property_read_u16(np, "fsl,hmask", &uhdlc_priv->hmask))
1210 		uhdlc_priv->hmask = DEFAULT_ADDR_MASK;
1211 
1212 	ret = uhdlc_init(uhdlc_priv);
1213 	if (ret) {
1214 		dev_err(&pdev->dev, "Failed to init uhdlc\n");
1215 		goto undo_uhdlc_init;
1216 	}
1217 
1218 	dev = alloc_hdlcdev(uhdlc_priv);
1219 	if (!dev) {
1220 		ret = -ENOMEM;
1221 		pr_err("ucc_hdlc: unable to allocate memory\n");
1222 		goto undo_uhdlc_init;
1223 	}
1224 
1225 	uhdlc_priv->ndev = dev;
1226 	hdlc = dev_to_hdlc(dev);
1227 	dev->tx_queue_len = 16;
1228 	dev->netdev_ops = &uhdlc_ops;
1229 	dev->watchdog_timeo = 2 * HZ;
1230 	hdlc->attach = ucc_hdlc_attach;
1231 	hdlc->xmit = ucc_hdlc_tx;
1232 	netif_napi_add(dev, &uhdlc_priv->napi, ucc_hdlc_poll, 32);
1233 	if (register_hdlc_device(dev)) {
1234 		ret = -ENOBUFS;
1235 		pr_err("ucc_hdlc: unable to register hdlc device\n");
1236 		goto free_dev;
1237 	}
1238 
1239 	return 0;
1240 
1241 free_dev:
1242 	free_netdev(dev);
1243 undo_uhdlc_init:
1244 	if (utdm) iounmap(utdm->siram);
1245 unmap_si_regs:
1246 	if (utdm) iounmap(utdm->si_regs);
1247 free_utdm:
1248 	if (uhdlc_priv->tsa)
1249 		kfree(utdm);
1250 free_uhdlc_priv:
1251 	kfree(uhdlc_priv);
1252 	return ret;
1253 }
1254 
1255 static int ucc_hdlc_remove(struct platform_device *pdev)
1256 {
1257 	struct ucc_hdlc_private *priv = dev_get_drvdata(&pdev->dev);
1258 
1259 	uhdlc_memclean(priv);
1260 
1261 	if (priv->utdm && priv->utdm->si_regs) {
1262 		iounmap(priv->utdm->si_regs);
1263 		priv->utdm->si_regs = NULL;
1264 	}
1265 
1266 	if (priv->utdm && priv->utdm->siram) {
1267 		iounmap(priv->utdm->siram);
1268 		priv->utdm->siram = NULL;
1269 	}
1270 	kfree(priv);
1271 
1272 	dev_info(&pdev->dev, "UCC based hdlc module removed\n");
1273 
1274 	return 0;
1275 }
1276 
1277 static const struct of_device_id fsl_ucc_hdlc_of_match[] = {
1278 	{
1279 	.compatible = "fsl,ucc-hdlc",
1280 	},
1281 	{},
1282 };
1283 
1284 MODULE_DEVICE_TABLE(of, fsl_ucc_hdlc_of_match);
1285 
1286 static struct platform_driver ucc_hdlc_driver = {
1287 	.probe	= ucc_hdlc_probe,
1288 	.remove	= ucc_hdlc_remove,
1289 	.driver	= {
1290 		.name		= DRV_NAME,
1291 		.pm		= HDLC_PM_OPS,
1292 		.of_match_table	= fsl_ucc_hdlc_of_match,
1293 	},
1294 };
1295 
1296 module_platform_driver(ucc_hdlc_driver);
1297 MODULE_LICENSE("GPL");
1298 MODULE_DESCRIPTION(DRV_DESC);
1299