xref: /openbmc/linux/drivers/net/wan/fsl_ucc_hdlc.c (revision dd2934a95701576203b2f61e8ded4e4a2f9183ea)
1 /* Freescale QUICC Engine HDLC Device Driver
2  *
3  * Copyright 2016 Freescale Semiconductor Inc.
4  *
5  * This program is free software; you can redistribute  it and/or modify it
6  * under  the terms of  the GNU General  Public License as published by the
7  * Free Software Foundation;  either version 2 of the  License, or (at your
8  * option) any later version.
9  */
10 
11 #include <linux/delay.h>
12 #include <linux/dma-mapping.h>
13 #include <linux/hdlc.h>
14 #include <linux/init.h>
15 #include <linux/interrupt.h>
16 #include <linux/io.h>
17 #include <linux/irq.h>
18 #include <linux/kernel.h>
19 #include <linux/module.h>
20 #include <linux/netdevice.h>
21 #include <linux/of_address.h>
22 #include <linux/of_irq.h>
23 #include <linux/of_platform.h>
24 #include <linux/platform_device.h>
25 #include <linux/sched.h>
26 #include <linux/skbuff.h>
27 #include <linux/slab.h>
28 #include <linux/spinlock.h>
29 #include <linux/stddef.h>
30 #include <soc/fsl/qe/qe_tdm.h>
31 #include <uapi/linux/if_arp.h>
32 
33 #include "fsl_ucc_hdlc.h"
34 
35 #define DRV_DESC "Freescale QE UCC HDLC Driver"
36 #define DRV_NAME "ucc_hdlc"
37 
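/*
 * Defining TDM_PPPOHT_SLIC_MAXIN selects the non-zero rx/tx frame sync
 * delays (simr_rfsd/simr_tfsd) in the SI configuration below.
 */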
38 #define TDM_PPPOHT_SLIC_MAXIN
39 
40 static struct ucc_tdm_info utdm_primary_info = {
41 	.uf_info = {
42 		.tsa = 0,
43 		.cdp = 0,
44 		.cds = 1,
45 		.ctsp = 1,
46 		.ctss = 1,
47 		.revd = 0,
48 		.urfs = 256,
49 		.utfs = 256,
50 		.urfet = 128,
51 		.urfset = 192,
52 		.utfet = 128,
53 		.utftt = 0x40,
54 		.ufpt = 256,
55 		.mode = UCC_FAST_PROTOCOL_MODE_HDLC,
56 		.ttx_trx = UCC_FAST_GUMR_TRANSPARENT_TTX_TRX_NORMAL,
57 		.tenc = UCC_FAST_TX_ENCODING_NRZ,
58 		.renc = UCC_FAST_RX_ENCODING_NRZ,
59 		.tcrc = UCC_FAST_16_BIT_CRC,
60 		.synl = UCC_FAST_SYNC_LEN_NOT_USED,
61 	},
62 
63 	.si_info = {
64 #ifdef TDM_PPPOHT_SLIC_MAXIN
65 		.simr_rfsd = 1,
66 		.simr_tfsd = 2,
67 #else
68 		.simr_rfsd = 0,
69 		.simr_tfsd = 0,
70 #endif
71 		.simr_crt = 0,
72 		.simr_sl = 0,
73 		.simr_ce = 1,
74 		.simr_fe = 1,
75 		.simr_gm = 0,
76 	},
77 };
78 
79 static struct ucc_tdm_info utdm_info[MAX_HDLC_NUM];
80 
81 static int uhdlc_init(struct ucc_hdlc_private *priv)
82 {
83 	struct ucc_tdm_info *ut_info;
84 	struct ucc_fast_info *uf_info;
85 	u32 cecr_subblock;
86 	u16 bd_status;
87 	int ret, i;
88 	void *bd_buffer;
89 	dma_addr_t bd_dma_addr;
90 	u32 riptr;
91 	u32 tiptr;
92 	u32 gumr;
93 
94 	ut_info = priv->ut_info;
95 	uf_info = &ut_info->uf_info;
96 
97 	if (priv->tsa) {
98 		uf_info->tsa = 1;
99 		uf_info->ctsp = 1;
100 		uf_info->cds = 1;
101 		uf_info->ctss = 1;
102 	} else {
103 		uf_info->cds = 0;
104 		uf_info->ctsp = 0;
105 		uf_info->ctss = 0;
106 	}
107 
108 	/* This sets the HPM register in the CMXUCR register, which configures
109 	 * an open-drain connected HDLC bus
110 	 */
111 	if (priv->hdlc_bus)
112 		uf_info->brkpt_support = 1;
113 
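	/*
	 * Enable only the Rx buffer/frame and Tx buffer events; the HDLC
	 * event bits live in the upper 16 bits of UCCE/UCCM, hence the shift.
	 */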
114 	uf_info->uccm_mask = ((UCC_HDLC_UCCE_RXB | UCC_HDLC_UCCE_RXF |
115 				UCC_HDLC_UCCE_TXB) << 16);
116 
117 	ret = ucc_fast_init(uf_info, &priv->uccf);
118 	if (ret) {
119 		dev_err(priv->dev, "Failed to init uccf\n");
120 		return ret;
121 	}
122 
123 	priv->uf_regs = priv->uccf->uf_regs;
124 	ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);
125 
126 	/* Loopback mode */
127 	if (priv->loopback) {
128 		dev_info(priv->dev, "Loopback Mode\n");
129 		/* use the same clock when working in loopback */
130 		qe_setbrg(ut_info->uf_info.rx_clock, 20000000, 1);
131 
132 		gumr = ioread32be(&priv->uf_regs->gumr);
133 		gumr |= (UCC_FAST_GUMR_LOOPBACK | UCC_FAST_GUMR_CDS |
134 			 UCC_FAST_GUMR_TCI);
135 		gumr &= ~(UCC_FAST_GUMR_CTSP | UCC_FAST_GUMR_RSYN);
136 		iowrite32be(gumr, &priv->uf_regs->gumr);
137 	}
138 
139 	/* Initialize SI */
140 	if (priv->tsa)
141 		ucc_tdm_init(priv->utdm, priv->ut_info);
142 
143 	/* Write to QE CECR, UCCx channel to Stop Transmission */
144 	cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
145 	ret = qe_issue_cmd(QE_STOP_TX, cecr_subblock,
146 			   QE_CR_PROTOCOL_UNSPECIFIED, 0);
147 
148 	/* Set UPSMR normal mode (needs fixing) */
149 	iowrite32be(0, &priv->uf_regs->upsmr);
150 
151 	/* hdlc_bus mode */
152 	if (priv->hdlc_bus) {
153 		u32 upsmr;
154 
155 		dev_info(priv->dev, "HDLC bus Mode\n");
156 		upsmr = ioread32be(&priv->uf_regs->upsmr);
157 
158 		/* bus mode and retransmit enable, with collision window
159 		 * set to 8 bytes
160 		 */
161 		upsmr |= UCC_HDLC_UPSMR_RTE | UCC_HDLC_UPSMR_BUS |
162 				UCC_HDLC_UPSMR_CW8;
163 		iowrite32be(upsmr, &priv->uf_regs->upsmr);
164 
165 		/* explicitly disable CDS & CTSP */
166 		gumr = ioread32be(&priv->uf_regs->gumr);
167 		gumr &= ~(UCC_FAST_GUMR_CDS | UCC_FAST_GUMR_CTSP);
168 		/* set automatic sync to explicitly ignore CD signal */
169 		gumr |= UCC_FAST_GUMR_SYNL_AUTO;
170 		iowrite32be(gumr, &priv->uf_regs->gumr);
171 	}
172 
173 	priv->rx_ring_size = RX_BD_RING_LEN;
174 	priv->tx_ring_size = TX_BD_RING_LEN;
175 	/* Alloc Rx BD */
176 	priv->rx_bd_base = dma_alloc_coherent(priv->dev,
177 			RX_BD_RING_LEN * sizeof(struct qe_bd),
178 			&priv->dma_rx_bd, GFP_KERNEL);
179 
180 	if (!priv->rx_bd_base) {
181 		dev_err(priv->dev, "Cannot allocate MURAM memory for RxBDs\n");
182 		ret = -ENOMEM;
183 		goto free_uccf;
184 	}
185 
186 	/* Alloc Tx BD */
187 	priv->tx_bd_base = dma_alloc_coherent(priv->dev,
188 			TX_BD_RING_LEN * sizeof(struct qe_bd),
189 			&priv->dma_tx_bd, GFP_KERNEL);
190 
191 	if (!priv->tx_bd_base) {
192 		dev_err(priv->dev, "Cannot allocate MURAM memory for TxBDs\n");
193 		ret = -ENOMEM;
194 		goto free_rx_bd;
195 	}
196 
197 	/* Alloc parameter ram for ucc hdlc */
198 	priv->ucc_pram_offset = qe_muram_alloc(sizeof(struct ucc_hdlc_param),
199 				ALIGNMENT_OF_UCC_HDLC_PRAM);
200 
201 	if (IS_ERR_VALUE(priv->ucc_pram_offset)) {
202 		dev_err(priv->dev, "Cannot allocate MURAM for hdlc parameter.\n");
203 		ret = -ENOMEM;
204 		goto free_tx_bd;
205 	}
206 
207 	priv->rx_skbuff = kcalloc(priv->rx_ring_size,
208 				  sizeof(*priv->rx_skbuff),
209 				  GFP_KERNEL);
	if (!priv->rx_skbuff) {
		ret = -ENOMEM;
		goto free_ucc_pram;
	}
212 
213 	priv->tx_skbuff = kcalloc(priv->tx_ring_size,
214 				  sizeof(*priv->tx_skbuff),
215 				  GFP_KERNEL);
	if (!priv->tx_skbuff) {
		ret = -ENOMEM;
		goto free_rx_skbuff;
	}
218 
219 	priv->skb_curtx = 0;
220 	priv->skb_dirtytx = 0;
221 	priv->curtx_bd = priv->tx_bd_base;
222 	priv->dirty_tx = priv->tx_bd_base;
223 	priv->currx_bd = priv->rx_bd_base;
224 	priv->currx_bdnum = 0;
225 
226 	/* init parameter base */
227 	cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
228 	ret = qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, cecr_subblock,
229 			   QE_CR_PROTOCOL_UNSPECIFIED, priv->ucc_pram_offset);
230 
231 	priv->ucc_pram = (struct ucc_hdlc_param __iomem *)
232 					qe_muram_addr(priv->ucc_pram_offset);
233 
234 	/* Zero out parameter ram */
235 	memset_io(priv->ucc_pram, 0, sizeof(struct ucc_hdlc_param));
236 
237 	/* Alloc riptr, tiptr */
238 	riptr = qe_muram_alloc(32, 32);
239 	if (IS_ERR_VALUE(riptr)) {
240 		dev_err(priv->dev, "Cannot allocate MURAM mem for Receive internal temp data pointer\n");
241 		ret = -ENOMEM;
242 		goto free_tx_skbuff;
243 	}
244 
245 	tiptr = qe_muram_alloc(32, 32);
246 	if (IS_ERR_VALUE(tiptr)) {
247 		dev_err(priv->dev, "Cannot allocate MURAM mem for Transmit internal temp data pointer\n");
248 		ret = -ENOMEM;
249 		goto free_riptr;
250 	}
251 
252 	/* Set RIPTR, TIPTR */
253 	iowrite16be(riptr, &priv->ucc_pram->riptr);
254 	iowrite16be(tiptr, &priv->ucc_pram->tiptr);
255 
256 	/* Set MRBLR */
257 	iowrite16be(MAX_RX_BUF_LENGTH, &priv->ucc_pram->mrblr);
258 
259 	/* Set RBASE, TBASE */
260 	iowrite32be(priv->dma_rx_bd, &priv->ucc_pram->rbase);
261 	iowrite32be(priv->dma_tx_bd, &priv->ucc_pram->tbase);
262 
263 	/* Set RSTATE, TSTATE */
264 	iowrite32be(BMR_GBL | BMR_BIG_ENDIAN, &priv->ucc_pram->rstate);
265 	iowrite32be(BMR_GBL | BMR_BIG_ENDIAN, &priv->ucc_pram->tstate);
266 
267 	/* Set C_MASK, C_PRES for 16bit CRC */
268 	iowrite32be(CRC_16BIT_MASK, &priv->ucc_pram->c_mask);
269 	iowrite32be(CRC_16BIT_PRES, &priv->ucc_pram->c_pres);
270 
271 	iowrite16be(MAX_FRAME_LENGTH, &priv->ucc_pram->mflr);
272 	iowrite16be(DEFAULT_RFTHR, &priv->ucc_pram->rfthr);
273 	iowrite16be(DEFAULT_RFTHR, &priv->ucc_pram->rfcnt);
274 	iowrite16be(priv->hmask, &priv->ucc_pram->hmask);
275 	iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr1);
276 	iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr2);
277 	iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr3);
278 	iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr4);
279 
280 	/* Get BD buffer */
281 	bd_buffer = dma_zalloc_coherent(priv->dev,
282 					(RX_BD_RING_LEN + TX_BD_RING_LEN) *
283 					MAX_RX_BUF_LENGTH,
284 					&bd_dma_addr, GFP_KERNEL);
285 
286 	if (!bd_buffer) {
287 		dev_err(priv->dev, "Could not allocate buffer descriptors\n");
288 		ret = -ENOMEM;
289 		goto free_tiptr;
290 	}
291 
292 	priv->rx_buffer = bd_buffer;
293 	priv->tx_buffer = bd_buffer + RX_BD_RING_LEN * MAX_RX_BUF_LENGTH;
294 
295 	priv->dma_rx_addr = bd_dma_addr;
296 	priv->dma_tx_addr = bd_dma_addr + RX_BD_RING_LEN * MAX_RX_BUF_LENGTH;
297 
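	/*
	 * Rx BD ring: every descriptor starts out empty (R_E_S) with
	 * interrupt-on-completion (R_I_S); the last one also carries the
	 * wrap bit (R_W_S) so the controller cycles back to the ring base.
	 */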
298 	for (i = 0; i < RX_BD_RING_LEN; i++) {
299 		if (i < (RX_BD_RING_LEN - 1))
300 			bd_status = R_E_S | R_I_S;
301 		else
302 			bd_status = R_E_S | R_I_S | R_W_S;
303 
304 		iowrite16be(bd_status, &priv->rx_bd_base[i].status);
305 		iowrite32be(priv->dma_rx_addr + i * MAX_RX_BUF_LENGTH,
306 			    &priv->rx_bd_base[i].buf);
307 	}
308 
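	/*
	 * Tx BD ring: descriptors are left not-ready; the interrupt (T_I_S)
	 * and transmit-CRC (T_TC_S) bits are preset, and the last BD gets
	 * the wrap bit so the ring loops.
	 */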
309 	for (i = 0; i < TX_BD_RING_LEN; i++) {
310 		if (i < (TX_BD_RING_LEN - 1))
311 			bd_status =  T_I_S | T_TC_S;
312 		else
313 			bd_status =  T_I_S | T_TC_S | T_W_S;
314 
315 		iowrite16be(bd_status, &priv->tx_bd_base[i].status);
316 		iowrite32be(priv->dma_tx_addr + i * MAX_RX_BUF_LENGTH,
317 			    &priv->tx_bd_base[i].buf);
318 	}
319 
320 	return 0;
321 
322 free_tiptr:
323 	qe_muram_free(tiptr);
324 free_riptr:
325 	qe_muram_free(riptr);
326 free_tx_skbuff:
327 	kfree(priv->tx_skbuff);
328 free_rx_skbuff:
329 	kfree(priv->rx_skbuff);
330 free_ucc_pram:
331 	qe_muram_free(priv->ucc_pram_offset);
332 free_tx_bd:
333 	dma_free_coherent(priv->dev,
334 			  TX_BD_RING_LEN * sizeof(struct qe_bd),
335 			  priv->tx_bd_base, priv->dma_tx_bd);
336 free_rx_bd:
337 	dma_free_coherent(priv->dev,
338 			  RX_BD_RING_LEN * sizeof(struct qe_bd),
339 			  priv->rx_bd_base, priv->dma_rx_bd);
340 free_uccf:
341 	ucc_fast_free(priv->uccf);
342 
343 	return ret;
344 }
345 
346 static netdev_tx_t ucc_hdlc_tx(struct sk_buff *skb, struct net_device *dev)
347 {
348 	hdlc_device *hdlc = dev_to_hdlc(dev);
349 	struct ucc_hdlc_private *priv = (struct ucc_hdlc_private *)hdlc->priv;
350 	struct qe_bd __iomem *bd;
351 	u16 bd_status;
352 	unsigned long flags;
353 	u16 *proto_head;
354 
355 	switch (dev->type) {
356 	case ARPHRD_RAWHDLC:
357 		if (skb_headroom(skb) < HDLC_HEAD_LEN) {
358 			dev->stats.tx_dropped++;
359 			dev_kfree_skb(skb);
360 			netdev_err(dev, "Not enough space for hdlc head\n");
361 			return NETDEV_TX_OK;
362 		}
363 
364 		skb_push(skb, HDLC_HEAD_LEN);
365 
366 		proto_head = (u16 *)skb->data;
367 		*proto_head = htons(DEFAULT_HDLC_HEAD);
368 
369 		dev->stats.tx_bytes += skb->len;
370 		break;
371 
372 	case ARPHRD_PPP:
373 		proto_head = (u16 *)skb->data;
374 		if (*proto_head != htons(DEFAULT_PPP_HEAD)) {
375 			dev->stats.tx_dropped++;
376 			dev_kfree_skb(skb);
377 			netdev_err(dev, "Wrong ppp header\n");
378 			return NETDEV_TX_OK;
379 		}
380 
381 		dev->stats.tx_bytes += skb->len;
382 		break;
383 
384 	case ARPHRD_ETHER:
385 		dev->stats.tx_bytes += skb->len;
386 		break;
387 
388 	default:
389 		dev->stats.tx_dropped++;
390 		dev_kfree_skb(skb);
391 		return NETDEV_TX_OK;
392 	}
393 	spin_lock_irqsave(&priv->lock, flags);
394 
395 	/* Start from the next BD that should be filled */
396 	bd = priv->curtx_bd;
397 	bd_status = ioread16be(&bd->status);
398 	/* Save the skb pointer so we can free it later */
399 	priv->tx_skbuff[priv->skb_curtx] = skb;
400 
401 	/* Update the current skb pointer (wrapping if this was the last) */
402 	priv->skb_curtx =
403 	    (priv->skb_curtx + 1) & TX_RING_MOD_MASK(TX_BD_RING_LEN);
404 
405 	/* copy skb data to tx buffer for sdma processing */
406 	memcpy(priv->tx_buffer + (be32_to_cpu(bd->buf) - priv->dma_tx_addr),
407 	       skb->data, skb->len);
408 
409 	/* set bd status and length */
410 	bd_status = (bd_status & T_W_S) | T_R_S | T_I_S | T_L_S | T_TC_S;
411 
412 	iowrite16be(skb->len, &bd->length);
413 	iowrite16be(bd_status, &bd->status);
414 
415 	/* Move to next BD in the ring */
416 	if (!(bd_status & T_W_S))
417 		bd += 1;
418 	else
419 		bd = priv->tx_bd_base;
420 
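	/*
	 * If the next BD to use has caught up with the first unconfirmed
	 * one, every descriptor is in flight; stop the queue until
	 * hdlc_tx_done() reclaims a BD and wakes it again.
	 */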
421 	if (bd == priv->dirty_tx) {
422 		if (!netif_queue_stopped(dev))
423 			netif_stop_queue(dev);
424 	}
425 
426 	priv->curtx_bd = bd;
427 
428 	spin_unlock_irqrestore(&priv->lock, flags);
429 
430 	return NETDEV_TX_OK;
431 }
432 
433 static int hdlc_tx_done(struct ucc_hdlc_private *priv)
434 {
435 	/* Start from the next BD that should be filled */
436 	struct net_device *dev = priv->ndev;
437 	struct qe_bd *bd;		/* BD pointer */
438 	u16 bd_status;
439 
440 	bd = priv->dirty_tx;
441 	bd_status = ioread16be(&bd->status);
442 
443 	/* Normal processing. */
444 	while ((bd_status & T_R_S) == 0) {
445 		struct sk_buff *skb;
446 
447 		/* BD contains already transmitted buffer.   */
448 		/* Handle the transmitted buffer and release */
449 		/* the BD to be used with the current frame  */
450 
451 		skb = priv->tx_skbuff[priv->skb_dirtytx];
452 		if (!skb)
453 			break;
454 		dev->stats.tx_packets++;
455 		memset(priv->tx_buffer +
456 		       (be32_to_cpu(bd->buf) - priv->dma_tx_addr),
457 		       0, skb->len);
458 		dev_kfree_skb_irq(skb);
459 
460 		priv->tx_skbuff[priv->skb_dirtytx] = NULL;
461 		priv->skb_dirtytx =
462 		    (priv->skb_dirtytx +
463 		     1) & TX_RING_MOD_MASK(TX_BD_RING_LEN);
464 
465 		/* We freed a buffer, so now we can restart transmission */
466 		if (netif_queue_stopped(dev))
467 			netif_wake_queue(dev);
468 
469 		/* Advance the confirmation BD pointer */
470 		if (!(bd_status & T_W_S))
471 			bd += 1;
472 		else
473 			bd = priv->tx_bd_base;
474 		bd_status = ioread16be(&bd->status);
475 	}
476 	priv->dirty_tx = bd;
477 
478 	return 0;
479 }
480 
481 static int hdlc_rx_done(struct ucc_hdlc_private *priv, int rx_work_limit)
482 {
483 	struct net_device *dev = priv->ndev;
484 	struct sk_buff *skb = NULL;
485 	hdlc_device *hdlc = dev_to_hdlc(dev);
486 	struct qe_bd *bd;
487 	u16 bd_status;
488 	u16 length, howmany = 0;
489 	u8 *bdbuffer;
490 
491 	bd = priv->currx_bd;
492 	bd_status = ioread16be(&bd->status);
493 
494 	/* while there are received buffers and BD is full (~R_E) */
495 	while (!((bd_status & (R_E_S)) || (--rx_work_limit < 0))) {
496 		if (bd_status & R_OV_S)
497 			dev->stats.rx_over_errors++;
498 		if (bd_status & R_CR_S) {
499 			dev->stats.rx_crc_errors++;
500 			dev->stats.rx_dropped++;
501 			goto recycle;
502 		}
503 		bdbuffer = priv->rx_buffer +
504 			(priv->currx_bdnum * MAX_RX_BUF_LENGTH);
505 		length = ioread16be(&bd->length);
506 
507 		switch (dev->type) {
508 		case ARPHRD_RAWHDLC:
509 			bdbuffer += HDLC_HEAD_LEN;
510 			length -= (HDLC_HEAD_LEN + HDLC_CRC_SIZE);
511 
512 			skb = dev_alloc_skb(length);
513 			if (!skb) {
514 				dev->stats.rx_dropped++;
515 				return -ENOMEM;
516 			}
517 
518 			skb_put(skb, length);
519 			skb->len = length;
520 			skb->dev = dev;
521 			memcpy(skb->data, bdbuffer, length);
522 			break;
523 
524 		case ARPHRD_PPP:
525 		case ARPHRD_ETHER:
526 			length -= HDLC_CRC_SIZE;
527 
528 			skb = dev_alloc_skb(length);
529 			if (!skb) {
530 				dev->stats.rx_dropped++;
531 				return -ENOMEM;
532 			}
533 
534 			skb_put(skb, length);
535 			skb->len = length;
536 			skb->dev = dev;
537 			memcpy(skb->data, bdbuffer, length);
538 			break;
539 		}
540 
541 		dev->stats.rx_packets++;
542 		dev->stats.rx_bytes += skb->len;
543 		howmany++;
544 		if (hdlc->proto)
545 			skb->protocol = hdlc_type_trans(skb, dev);
546 		netif_receive_skb(skb);
547 
548 recycle:
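		/* Hand the BD back to the controller: mark it empty again and
		 * keep the interrupt bit; the wrap bit, if set, is preserved.
		 */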
549 		iowrite16be(bd_status | R_E_S | R_I_S, &bd->status);
550 
551 		/* update to point at the next bd */
552 		if (bd_status & R_W_S) {
553 			priv->currx_bdnum = 0;
554 			bd = priv->rx_bd_base;
555 		} else {
556 			if (priv->currx_bdnum < (RX_BD_RING_LEN - 1))
557 				priv->currx_bdnum += 1;
558 			else
559 				priv->currx_bdnum = RX_BD_RING_LEN - 1;
560 
561 			bd += 1;
562 		}
563 
564 		bd_status = ioread16be(&bd->status);
565 	}
566 
567 	priv->currx_bd = bd;
568 	return howmany;
569 }
570 
571 static int ucc_hdlc_poll(struct napi_struct *napi, int budget)
572 {
573 	struct ucc_hdlc_private *priv = container_of(napi,
574 						     struct ucc_hdlc_private,
575 						     napi);
576 	int howmany;
577 
578 	/* Tx event processing */
579 	spin_lock(&priv->lock);
580 	hdlc_tx_done(priv);
581 	spin_unlock(&priv->lock);
582 
583 	howmany = 0;
584 	howmany += hdlc_rx_done(priv, budget - howmany);
585 
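	/*
	 * Budget not exhausted: all pending work is done, so leave NAPI
	 * polling and unmask the RX/TX event interrupts in UCCM again.
	 */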
586 	if (howmany < budget) {
587 		napi_complete_done(napi, howmany);
588 		qe_setbits32(priv->uccf->p_uccm,
589 			     (UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS) << 16);
590 	}
591 
592 	return howmany;
593 }
594 
595 static irqreturn_t ucc_hdlc_irq_handler(int irq, void *dev_id)
596 {
597 	struct ucc_hdlc_private *priv = (struct ucc_hdlc_private *)dev_id;
598 	struct net_device *dev = priv->ndev;
599 	struct ucc_fast_private *uccf;
600 	struct ucc_tdm_info *ut_info;
601 	u32 ucce;
602 	u32 uccm;
603 
604 	ut_info = priv->ut_info;
605 	uccf = priv->uccf;
606 
607 	ucce = ioread32be(uccf->p_ucce);
608 	uccm = ioread32be(uccf->p_uccm);
609 	ucce &= uccm;
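	/* Acknowledge the enabled events by writing them back to UCCE
	 * (the event bits are write-one-to-clear).
	 */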
610 	iowrite32be(ucce, uccf->p_ucce);
611 	if (!ucce)
612 		return IRQ_NONE;
613 
614 	if ((ucce >> 16) & (UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS)) {
615 		if (napi_schedule_prep(&priv->napi)) {
616 			uccm &= ~((UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS)
617 				  << 16);
618 			iowrite32be(uccm, uccf->p_uccm);
619 			__napi_schedule(&priv->napi);
620 		}
621 	}
622 
623 	/* Errors and other events */
624 	if (ucce >> 16 & UCC_HDLC_UCCE_BSY)
625 		dev->stats.rx_errors++;
626 	if (ucce >> 16 & UCC_HDLC_UCCE_TXE)
627 		dev->stats.tx_errors++;
628 
629 	return IRQ_HANDLED;
630 }
631 
632 static int uhdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
633 {
634 	const size_t size = sizeof(te1_settings);
635 	te1_settings line;
636 	struct ucc_hdlc_private *priv = netdev_priv(dev);
637 
638 	if (cmd != SIOCWANDEV)
639 		return hdlc_ioctl(dev, ifr, cmd);
640 
641 	switch (ifr->ifr_settings.type) {
642 	case IF_GET_IFACE:
643 		ifr->ifr_settings.type = IF_IFACE_E1;
644 		if (ifr->ifr_settings.size < size) {
645 			ifr->ifr_settings.size = size; /* data size wanted */
646 			return -ENOBUFS;
647 		}
648 		memset(&line, 0, sizeof(line));
649 		line.clock_type = priv->clocking;
650 
651 		if (copy_to_user(ifr->ifr_settings.ifs_ifsu.sync, &line, size))
652 			return -EFAULT;
653 		return 0;
654 
655 	default:
656 		return hdlc_ioctl(dev, ifr, cmd);
657 	}
658 }
659 
660 static int uhdlc_open(struct net_device *dev)
661 {
662 	u32 cecr_subblock;
663 	hdlc_device *hdlc = dev_to_hdlc(dev);
664 	struct ucc_hdlc_private *priv = hdlc->priv;
665 	struct ucc_tdm *utdm = priv->utdm;
666 
667 	if (priv->hdlc_busy != 1) {
668 		if (request_irq(priv->ut_info->uf_info.irq,
669 				ucc_hdlc_irq_handler, 0, "hdlc", priv))
670 			return -ENODEV;
671 
672 		cecr_subblock = ucc_fast_get_qe_cr_subblock(
673 					priv->ut_info->uf_info.ucc_num);
674 
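		/*
		 * QE_INIT_TX_RX resets the channel's internal Tx/Rx state and
		 * reloads the BD pointers from the parameter RAM programmed
		 * in uhdlc_init().
		 */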
675 		qe_issue_cmd(QE_INIT_TX_RX, cecr_subblock,
676 			     QE_CR_PROTOCOL_UNSPECIFIED, 0);
677 
678 		ucc_fast_enable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);
679 
680 		/* Enable the TDM port */
681 		if (priv->tsa)
682 			utdm->si_regs->siglmr1_h |= (0x1 << utdm->tdm_port);
683 
684 		priv->hdlc_busy = 1;
685 		netif_device_attach(priv->ndev);
686 		napi_enable(&priv->napi);
687 		netif_start_queue(dev);
688 		hdlc_open(dev);
689 	}
690 
691 	return 0;
692 }
693 
694 static void uhdlc_memclean(struct ucc_hdlc_private *priv)
695 {
696 	qe_muram_free(ioread16be(&priv->ucc_pram->riptr));
697 	qe_muram_free(ioread16be(&priv->ucc_pram->tiptr));
698 
699 	if (priv->rx_bd_base) {
700 		dma_free_coherent(priv->dev,
701 				  RX_BD_RING_LEN * sizeof(struct qe_bd),
702 				  priv->rx_bd_base, priv->dma_rx_bd);
703 
704 		priv->rx_bd_base = NULL;
705 		priv->dma_rx_bd = 0;
706 	}
707 
708 	if (priv->tx_bd_base) {
709 		dma_free_coherent(priv->dev,
710 				  TX_BD_RING_LEN * sizeof(struct qe_bd),
711 				  priv->tx_bd_base, priv->dma_tx_bd);
712 
713 		priv->tx_bd_base = NULL;
714 		priv->dma_tx_bd = 0;
715 	}
716 
717 	if (priv->ucc_pram) {
718 		qe_muram_free(priv->ucc_pram_offset);
719 		priv->ucc_pram = NULL;
720 		priv->ucc_pram_offset = 0;
721 	}
722 
723 	kfree(priv->rx_skbuff);
724 	priv->rx_skbuff = NULL;
725 
726 	kfree(priv->tx_skbuff);
727 	priv->tx_skbuff = NULL;
728 
729 	if (priv->uf_regs) {
730 		iounmap(priv->uf_regs);
731 		priv->uf_regs = NULL;
732 	}
733 
734 	if (priv->uccf) {
735 		ucc_fast_free(priv->uccf);
736 		priv->uccf = NULL;
737 	}
738 
739 	if (priv->rx_buffer) {
740 		dma_free_coherent(priv->dev,
741 				  RX_BD_RING_LEN * MAX_RX_BUF_LENGTH,
742 				  priv->rx_buffer, priv->dma_rx_addr);
743 		priv->rx_buffer = NULL;
744 		priv->dma_rx_addr = 0;
745 	}
746 
747 	if (priv->tx_buffer) {
748 		dma_free_coherent(priv->dev,
749 				  TX_BD_RING_LEN * MAX_RX_BUF_LENGTH,
750 				  priv->tx_buffer, priv->dma_tx_addr);
751 		priv->tx_buffer = NULL;
752 		priv->dma_tx_addr = 0;
753 	}
754 }
755 
756 static int uhdlc_close(struct net_device *dev)
757 {
758 	struct ucc_hdlc_private *priv = dev_to_hdlc(dev)->priv;
759 	struct ucc_tdm *utdm = priv->utdm;
760 	u32 cecr_subblock;
761 
762 	napi_disable(&priv->napi);
763 	cecr_subblock = ucc_fast_get_qe_cr_subblock(
764 				priv->ut_info->uf_info.ucc_num);
765 
766 	qe_issue_cmd(QE_GRACEFUL_STOP_TX, cecr_subblock,
767 		     (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);
768 	qe_issue_cmd(QE_CLOSE_RX_BD, cecr_subblock,
769 		     (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);
770 
771 	if (priv->tsa)
772 		utdm->si_regs->siglmr1_h &= ~(0x1 << utdm->tdm_port);
773 
774 	ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);
775 
776 	free_irq(priv->ut_info->uf_info.irq, priv);
777 	netif_stop_queue(dev);
778 	priv->hdlc_busy = 0;
779 
780 	return 0;
781 }
782 
783 static int ucc_hdlc_attach(struct net_device *dev, unsigned short encoding,
784 			   unsigned short parity)
785 {
786 	struct ucc_hdlc_private *priv = dev_to_hdlc(dev)->priv;
787 
788 	if (encoding != ENCODING_NRZ &&
789 	    encoding != ENCODING_NRZI)
790 		return -EINVAL;
791 
792 	if (parity != PARITY_NONE &&
793 	    parity != PARITY_CRC32_PR1_CCITT &&
794 	    parity != PARITY_CRC16_PR0_CCITT &&
795 	    parity != PARITY_CRC16_PR1_CCITT)
796 		return -EINVAL;
797 
798 	priv->encoding = encoding;
799 	priv->parity = parity;
800 
801 	return 0;
802 }
803 
804 #ifdef CONFIG_PM
805 static void store_clk_config(struct ucc_hdlc_private *priv)
806 {
807 	struct qe_mux *qe_mux_reg = &qe_immr->qmx;
808 
809 	/* store si clk */
810 	priv->cmxsi1cr_h = ioread32be(&qe_mux_reg->cmxsi1cr_h);
811 	priv->cmxsi1cr_l = ioread32be(&qe_mux_reg->cmxsi1cr_l);
812 
813 	/* store si sync */
814 	priv->cmxsi1syr = ioread32be(&qe_mux_reg->cmxsi1syr);
815 
816 	/* store ucc clk */
817 	memcpy_fromio(priv->cmxucr, qe_mux_reg->cmxucr, 4 * sizeof(u32));
818 }
819 
820 static void resume_clk_config(struct ucc_hdlc_private *priv)
821 {
822 	struct qe_mux *qe_mux_reg = &qe_immr->qmx;
823 
824 	memcpy_toio(qe_mux_reg->cmxucr, priv->cmxucr, 4 * sizeof(u32));
825 
826 	iowrite32be(priv->cmxsi1cr_h, &qe_mux_reg->cmxsi1cr_h);
827 	iowrite32be(priv->cmxsi1cr_l, &qe_mux_reg->cmxsi1cr_l);
828 
829 	iowrite32be(priv->cmxsi1syr, &qe_mux_reg->cmxsi1syr);
830 }
831 
832 static int uhdlc_suspend(struct device *dev)
833 {
834 	struct ucc_hdlc_private *priv = dev_get_drvdata(dev);
835 	struct ucc_tdm_info *ut_info;
836 	struct ucc_fast __iomem *uf_regs;
837 
838 	if (!priv)
839 		return -EINVAL;
840 
841 	if (!netif_running(priv->ndev))
842 		return 0;
843 
844 	netif_device_detach(priv->ndev);
845 	napi_disable(&priv->napi);
846 
847 	ut_info = priv->ut_info;
848 	uf_regs = priv->uf_regs;
849 
850 	/* backup gumr and guemr */
851 	priv->gumr = ioread32be(&uf_regs->gumr);
852 	priv->guemr = ioread8(&uf_regs->guemr);
853 
854 	priv->ucc_pram_bak = kmalloc(sizeof(*priv->ucc_pram_bak),
855 					GFP_KERNEL);
856 	if (!priv->ucc_pram_bak)
857 		return -ENOMEM;
858 
859 	/* backup HDLC parameter */
860 	memcpy_fromio(priv->ucc_pram_bak, priv->ucc_pram,
861 		      sizeof(struct ucc_hdlc_param));
862 
863 	/* store the clk configuration */
864 	store_clk_config(priv);
865 
866 	/* save power */
867 	ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);
868 
869 	return 0;
870 }
871 
872 static int uhdlc_resume(struct device *dev)
873 {
874 	struct ucc_hdlc_private *priv = dev_get_drvdata(dev);
875 	struct ucc_tdm *utdm;
876 	struct ucc_tdm_info *ut_info;
877 	struct ucc_fast __iomem *uf_regs;
878 	struct ucc_fast_private *uccf;
879 	struct ucc_fast_info *uf_info;
880 	int ret, i;
881 	u32 cecr_subblock;
882 	u16 bd_status;
883 
884 	if (!priv)
885 		return -EINVAL;
886 
887 	if (!netif_running(priv->ndev))
888 		return 0;
889 
890 	utdm = priv->utdm;
891 	ut_info = priv->ut_info;
892 	uf_info = &ut_info->uf_info;
893 	uf_regs = priv->uf_regs;
894 	uccf = priv->uccf;
895 
896 	/* restore gumr guemr */
897 	iowrite8(priv->guemr, &uf_regs->guemr);
898 	iowrite32be(priv->gumr, &uf_regs->gumr);
899 
900 	/* Set Virtual Fifo registers */
901 	iowrite16be(uf_info->urfs, &uf_regs->urfs);
902 	iowrite16be(uf_info->urfet, &uf_regs->urfet);
903 	iowrite16be(uf_info->urfset, &uf_regs->urfset);
904 	iowrite16be(uf_info->utfs, &uf_regs->utfs);
905 	iowrite16be(uf_info->utfet, &uf_regs->utfet);
906 	iowrite16be(uf_info->utftt, &uf_regs->utftt);
907 	/* utfb, urfb are offsets from MURAM base */
908 	iowrite32be(uccf->ucc_fast_tx_virtual_fifo_base_offset, &uf_regs->utfb);
909 	iowrite32be(uccf->ucc_fast_rx_virtual_fifo_base_offset, &uf_regs->urfb);
910 
911 	/* Rx Tx and sync clock routing */
912 	resume_clk_config(priv);
913 
914 	iowrite32be(uf_info->uccm_mask, &uf_regs->uccm);
915 	iowrite32be(0xffffffff, &uf_regs->ucce);
916 
917 	ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);
918 
919 	/* rebuild SIRAM */
920 	if (priv->tsa)
921 		ucc_tdm_init(priv->utdm, priv->ut_info);
922 
923 	/* Write to QE CECR, UCCx channel to Stop Transmission */
924 	cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
925 	ret = qe_issue_cmd(QE_STOP_TX, cecr_subblock,
926 			   (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);
927 
928 	/* Set UPSMR normal mode */
929 	iowrite32be(0, &uf_regs->upsmr);
930 
931 	/* init parameter base */
932 	cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
933 	ret = qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, cecr_subblock,
934 			   QE_CR_PROTOCOL_UNSPECIFIED, priv->ucc_pram_offset);
935 
936 	priv->ucc_pram = (struct ucc_hdlc_param __iomem *)
937 				qe_muram_addr(priv->ucc_pram_offset);
938 
939 	/* restore ucc parameter */
940 	memcpy_toio(priv->ucc_pram, priv->ucc_pram_bak,
941 		    sizeof(struct ucc_hdlc_param));
942 	kfree(priv->ucc_pram_bak);
943 
944 	/* rebuild BD entry */
945 	for (i = 0; i < RX_BD_RING_LEN; i++) {
946 		if (i < (RX_BD_RING_LEN - 1))
947 			bd_status = R_E_S | R_I_S;
948 		else
949 			bd_status = R_E_S | R_I_S | R_W_S;
950 
951 		iowrite16be(bd_status, &priv->rx_bd_base[i].status);
952 		iowrite32be(priv->dma_rx_addr + i * MAX_RX_BUF_LENGTH,
953 			    &priv->rx_bd_base[i].buf);
954 	}
955 
956 	for (i = 0; i < TX_BD_RING_LEN; i++) {
957 		if (i < (TX_BD_RING_LEN - 1))
958 			bd_status =  T_I_S | T_TC_S;
959 		else
960 			bd_status =  T_I_S | T_TC_S | T_W_S;
961 
962 		iowrite16be(bd_status, &priv->tx_bd_base[i].status);
963 		iowrite32be(priv->dma_tx_addr + i * MAX_RX_BUF_LENGTH,
964 			    &priv->tx_bd_base[i].buf);
965 	}
966 
967 	/* if hdlc is busy enable TX and RX */
968 	if (priv->hdlc_busy == 1) {
969 		cecr_subblock = ucc_fast_get_qe_cr_subblock(
970 					priv->ut_info->uf_info.ucc_num);
971 
972 		qe_issue_cmd(QE_INIT_TX_RX, cecr_subblock,
973 			     (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);
974 
975 		ucc_fast_enable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);
976 
977 		/* Enable the TDM port */
978 		if (priv->tsa)
979 			utdm->si_regs->siglmr1_h |= (0x1 << utdm->tdm_port);
980 	}
981 
982 	napi_enable(&priv->napi);
983 	netif_device_attach(priv->ndev);
984 
985 	return 0;
986 }
987 
988 static const struct dev_pm_ops uhdlc_pm_ops = {
989 	.suspend = uhdlc_suspend,
990 	.resume = uhdlc_resume,
991 	.freeze = uhdlc_suspend,
992 	.thaw = uhdlc_resume,
993 };
994 
995 #define HDLC_PM_OPS (&uhdlc_pm_ops)
996 
997 #else
998 
999 #define HDLC_PM_OPS NULL
1000 
1001 #endif
1002 static void uhdlc_tx_timeout(struct net_device *ndev)
1003 {
1004 	netdev_err(ndev, "%s\n", __func__);
1005 }
1006 
1007 static const struct net_device_ops uhdlc_ops = {
1008 	.ndo_open       = uhdlc_open,
1009 	.ndo_stop       = uhdlc_close,
1010 	.ndo_start_xmit = hdlc_start_xmit,
1011 	.ndo_do_ioctl   = uhdlc_ioctl,
1012 	.ndo_tx_timeout	= uhdlc_tx_timeout,
1013 };
1014 
1015 static int ucc_hdlc_probe(struct platform_device *pdev)
1016 {
1017 	struct device_node *np = pdev->dev.of_node;
1018 	struct ucc_hdlc_private *uhdlc_priv = NULL;
1019 	struct ucc_tdm_info *ut_info;
1020 	struct ucc_tdm *utdm = NULL;
1021 	struct resource res;
1022 	struct net_device *dev;
1023 	hdlc_device *hdlc;
1024 	int ucc_num;
1025 	const char *sprop;
1026 	int ret;
1027 	u32 val;
1028 
1029 	ret = of_property_read_u32_index(np, "cell-index", 0, &val);
1030 	if (ret) {
1031 		dev_err(&pdev->dev, "Invalid ucc property\n");
1032 		return -ENODEV;
1033 	}
1034 
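	/* "cell-index" in the device tree is 1-based; UCC numbering here is 0-based. */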
1035 	ucc_num = val - 1;
1036 	if (ucc_num > (UCC_MAX_NUM - 1) || ucc_num < 0) {
1037 		dev_err(&pdev->dev, "Invalid UCC num\n");
1038 		return -EINVAL;
1039 	}
1040 
1041 	memcpy(&utdm_info[ucc_num], &utdm_primary_info,
1042 	       sizeof(utdm_primary_info));
1043 
1044 	ut_info = &utdm_info[ucc_num];
1045 	ut_info->uf_info.ucc_num = ucc_num;
1046 
1047 	sprop = of_get_property(np, "rx-clock-name", NULL);
1048 	if (sprop) {
1049 		ut_info->uf_info.rx_clock = qe_clock_source(sprop);
1050 		if ((ut_info->uf_info.rx_clock < QE_CLK_NONE) ||
1051 		    (ut_info->uf_info.rx_clock > QE_CLK24)) {
1052 			dev_err(&pdev->dev, "Invalid rx-clock-name property\n");
1053 			return -EINVAL;
1054 		}
1055 	} else {
1056 		dev_err(&pdev->dev, "Invalid rx-clock-name property\n");
1057 		return -EINVAL;
1058 	}
1059 
1060 	sprop = of_get_property(np, "tx-clock-name", NULL);
1061 	if (sprop) {
1062 		ut_info->uf_info.tx_clock = qe_clock_source(sprop);
1063 		if ((ut_info->uf_info.tx_clock < QE_CLK_NONE) ||
1064 		    (ut_info->uf_info.tx_clock > QE_CLK24)) {
1065 			dev_err(&pdev->dev, "Invalid tx-clock-name property\n");
1066 			return -EINVAL;
1067 		}
1068 	} else {
1069 		dev_err(&pdev->dev, "Invalid tx-clock-name property\n");
1070 		return -EINVAL;
1071 	}
1072 
1073 	ret = of_address_to_resource(np, 0, &res);
1074 	if (ret)
1075 		return -EINVAL;
1076 
1077 	ut_info->uf_info.regs = res.start;
1078 	ut_info->uf_info.irq = irq_of_parse_and_map(np, 0);
1079 
1080 	uhdlc_priv = kzalloc(sizeof(*uhdlc_priv), GFP_KERNEL);
1081 	if (!uhdlc_priv)
1082 		return -ENOMEM;
1084 
1085 	dev_set_drvdata(&pdev->dev, uhdlc_priv);
1086 	uhdlc_priv->dev = &pdev->dev;
1087 	uhdlc_priv->ut_info = ut_info;
1088 
1089 	if (of_get_property(np, "fsl,tdm-interface", NULL))
1090 		uhdlc_priv->tsa = 1;
1091 
1092 	if (of_get_property(np, "fsl,ucc-internal-loopback", NULL))
1093 		uhdlc_priv->loopback = 1;
1094 
1095 	if (of_get_property(np, "fsl,hdlc-bus", NULL))
1096 		uhdlc_priv->hdlc_bus = 1;
1097 
1098 	if (uhdlc_priv->tsa == 1) {
1099 		utdm = kzalloc(sizeof(*utdm), GFP_KERNEL);
1100 		if (!utdm) {
1101 			ret = -ENOMEM;
1102 			dev_err(&pdev->dev, "No mem to alloc ucc tdm data\n");
1103 			goto free_uhdlc_priv;
1104 		}
1105 		uhdlc_priv->utdm = utdm;
1106 		ret = ucc_of_parse_tdm(np, utdm, ut_info);
1107 		if (ret)
1108 			goto free_utdm;
1109 	}
1110 
1111 	if (of_property_read_u16(np, "fsl,hmask", &uhdlc_priv->hmask))
1112 		uhdlc_priv->hmask = DEFAULT_ADDR_MASK;
1113 
1114 	ret = uhdlc_init(uhdlc_priv);
1115 	if (ret) {
1116 		dev_err(&pdev->dev, "Failed to init uhdlc\n");
1117 		goto free_utdm;
1118 	}
1119 
1120 	dev = alloc_hdlcdev(uhdlc_priv);
1121 	if (!dev) {
1122 		ret = -ENOMEM;
1123 		pr_err("ucc_hdlc: unable to allocate memory\n");
1124 		goto undo_uhdlc_init;
1125 	}
1126 
1127 	uhdlc_priv->ndev = dev;
1128 	hdlc = dev_to_hdlc(dev);
1129 	dev->tx_queue_len = 16;
1130 	dev->netdev_ops = &uhdlc_ops;
1131 	dev->watchdog_timeo = 2 * HZ;
1132 	hdlc->attach = ucc_hdlc_attach;
1133 	hdlc->xmit = ucc_hdlc_tx;
1134 	netif_napi_add(dev, &uhdlc_priv->napi, ucc_hdlc_poll, 32);
1135 	if (register_hdlc_device(dev)) {
1136 		ret = -ENOBUFS;
1137 		pr_err("ucc_hdlc: unable to register hdlc device\n");
1139 		goto free_dev;
1140 	}
1141 
1142 	return 0;
1143 
1144 free_dev:
1145 	free_netdev(dev);
1146 undo_uhdlc_init:
1147 free_utdm:
1148 	if (uhdlc_priv->tsa)
1149 		kfree(utdm);
1150 free_uhdlc_priv:
1151 	kfree(uhdlc_priv);
1152 	return ret;
1153 }
1154 
1155 static int ucc_hdlc_remove(struct platform_device *pdev)
1156 {
1157 	struct ucc_hdlc_private *priv = dev_get_drvdata(&pdev->dev);
1158 
1159 	uhdlc_memclean(priv);
1160 
1161 	if (priv->utdm->si_regs) {
1162 		iounmap(priv->utdm->si_regs);
1163 		priv->utdm->si_regs = NULL;
1164 	}
1165 
1166 	if (priv->utdm->siram) {
1167 		iounmap(priv->utdm->siram);
1168 		priv->utdm->siram = NULL;
1169 	}
1170 	kfree(priv);
1171 
1172 	dev_info(&pdev->dev, "UCC based hdlc module removed\n");
1173 
1174 	return 0;
1175 }
1176 
1177 static const struct of_device_id fsl_ucc_hdlc_of_match[] = {
1178 	{
1179 	.compatible = "fsl,ucc-hdlc",
1180 	},
1181 	{},
1182 };
1183 
1184 MODULE_DEVICE_TABLE(of, fsl_ucc_hdlc_of_match);
1185 
1186 static struct platform_driver ucc_hdlc_driver = {
1187 	.probe	= ucc_hdlc_probe,
1188 	.remove	= ucc_hdlc_remove,
1189 	.driver	= {
1190 		.name		= DRV_NAME,
1191 		.pm		= HDLC_PM_OPS,
1192 		.of_match_table	= fsl_ucc_hdlc_of_match,
1193 	},
1194 };
1195 
1196 module_platform_driver(ucc_hdlc_driver);
1197 MODULE_LICENSE("GPL");
1198
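/* DRV_DESC is defined above but otherwise unused; export it as the module description. */
MODULE_DESCRIPTION(DRV_DESC);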