/* Freescale QUICC Engine HDLC Device Driver
 *
 * Copyright 2016 Freescale Semiconductor Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/hdlc.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>
#include <soc/fsl/qe/qe_tdm.h>
#include <uapi/linux/if_arp.h>

#include "fsl_ucc_hdlc.h"

#define DRV_DESC "Freescale QE UCC HDLC Driver"
#define DRV_NAME "ucc_hdlc"

#define TDM_PPPOHT_SLIC_MAXIN
#define BROKEN_FRAME_INFO

static struct ucc_tdm_info utdm_primary_info = {
	.uf_info = {
		.tsa = 0,
		.cdp = 0,
		.cds = 1,
		.ctsp = 1,
		.ctss = 1,
		.revd = 0,
		.urfs = 256,
		.utfs = 256,
		.urfet = 128,
		.urfset = 192,
		.utfet = 128,
		.utftt = 0x40,
		.ufpt = 256,
		.mode = UCC_FAST_PROTOCOL_MODE_HDLC,
		.ttx_trx = UCC_FAST_GUMR_TRANSPARENT_TTX_TRX_NORMAL,
		.tenc = UCC_FAST_TX_ENCODING_NRZ,
		.renc = UCC_FAST_RX_ENCODING_NRZ,
		.tcrc = UCC_FAST_16_BIT_CRC,
		.synl = UCC_FAST_SYNC_LEN_NOT_USED,
	},

	.si_info = {
#ifdef TDM_PPPOHT_SLIC_MAXIN
		.simr_rfsd = 1,
		.simr_tfsd = 2,
#else
		.simr_rfsd = 0,
		.simr_tfsd = 0,
#endif
		.simr_crt = 0,
		.simr_sl = 0,
		.simr_ce = 1,
		.simr_fe = 1,
		.simr_gm = 0,
	},
};

static struct ucc_tdm_info utdm_info[MAX_HDLC_NUM];

/* Set up the UCC fast controller, the SI, the MURAM parameter RAM and the
 * Rx/Tx buffer descriptor rings for one HDLC channel.
 */
static int uhdlc_init(struct ucc_hdlc_private *priv)
{
	struct ucc_tdm_info *ut_info;
	struct ucc_fast_info *uf_info;
	u32 cecr_subblock;
	u16 bd_status;
	int ret, i;
	void *bd_buffer;
	dma_addr_t bd_dma_addr;
	s32 riptr;
	s32 tiptr;
	u32 gumr;

	ut_info = priv->ut_info;
	uf_info = &ut_info->uf_info;

	if (priv->tsa) {
		uf_info->tsa = 1;
		uf_info->ctsp = 1;
	}
	uf_info->uccm_mask = ((UCC_HDLC_UCCE_RXB | UCC_HDLC_UCCE_RXF |
			       UCC_HDLC_UCCE_TXB) << 16);

	ret = ucc_fast_init(uf_info, &priv->uccf);
	if (ret) {
		dev_err(priv->dev, "Failed to init uccf.");
		return ret;
	}

	priv->uf_regs = priv->uccf->uf_regs;
	ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);

	/* Loopback mode */
	if (priv->loopback) {
		dev_info(priv->dev, "Loopback Mode\n");
		gumr = ioread32be(&priv->uf_regs->gumr);
		gumr |= (UCC_FAST_GUMR_LOOPBACK | UCC_FAST_GUMR_CDS |
			 UCC_FAST_GUMR_TCI);
		gumr &= ~(UCC_FAST_GUMR_CTSP | UCC_FAST_GUMR_RSYN);
		iowrite32be(gumr, &priv->uf_regs->gumr);
	}

	/* Initialize SI */
	if (priv->tsa)
		ucc_tdm_init(priv->utdm, priv->ut_info);

	/* Write to QE CECR, UCCx channel to Stop Transmission */
	cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
	ret = qe_issue_cmd(QE_STOP_TX, cecr_subblock,
			   QE_CR_PROTOCOL_UNSPECIFIED, 0);

	/* Set UPSMR normal mode (needs fixing) */
	iowrite32be(0, &priv->uf_regs->upsmr);

	priv->rx_ring_size = RX_BD_RING_LEN;
	priv->tx_ring_size = TX_BD_RING_LEN;
	/* Alloc Rx BD */
	priv->rx_bd_base = dma_alloc_coherent(priv->dev,
					      RX_BD_RING_LEN * sizeof(struct qe_bd *),
					      &priv->dma_rx_bd, GFP_KERNEL);

	if (!priv->rx_bd_base) {
		dev_err(priv->dev, "Cannot allocate MURAM memory for RxBDs\n");
		ret = -ENOMEM;
		goto free_uccf;
	}

	/* Alloc Tx BD */
	priv->tx_bd_base = dma_alloc_coherent(priv->dev,
					      TX_BD_RING_LEN * sizeof(struct qe_bd *),
					      &priv->dma_tx_bd, GFP_KERNEL);

	if (!priv->tx_bd_base) {
		dev_err(priv->dev, "Cannot allocate MURAM memory for TxBDs\n");
		ret = -ENOMEM;
		goto free_rx_bd;
	}

	/* Alloc parameter ram for ucc hdlc */
	priv->ucc_pram_offset = qe_muram_alloc(sizeof(struct ucc_hdlc_param),
					       ALIGNMENT_OF_UCC_HDLC_PRAM);

	if (priv->ucc_pram_offset < 0) {
		dev_err(priv->dev, "Can not allocate MURAM for hdlc parameter.\n");
		ret = -ENOMEM;
		goto free_tx_bd;
	}

	priv->rx_skbuff = kzalloc(priv->rx_ring_size * sizeof(*priv->rx_skbuff),
				  GFP_KERNEL);
	if (!priv->rx_skbuff) {
		ret = -ENOMEM;
		goto free_ucc_pram;
	}

	priv->tx_skbuff = kzalloc(priv->tx_ring_size * sizeof(*priv->tx_skbuff),
				  GFP_KERNEL);
	if (!priv->tx_skbuff) {
		ret = -ENOMEM;
		goto free_rx_skbuff;
	}

	priv->skb_curtx = 0;
	priv->skb_dirtytx = 0;
	priv->curtx_bd = priv->tx_bd_base;
	priv->dirty_tx = priv->tx_bd_base;
	priv->currx_bd = priv->rx_bd_base;
	priv->currx_bdnum = 0;

	/* init parameter base */
	cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
	ret = qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, cecr_subblock,
			   QE_CR_PROTOCOL_UNSPECIFIED, priv->ucc_pram_offset);

	priv->ucc_pram = (struct ucc_hdlc_param __iomem *)
				qe_muram_addr(priv->ucc_pram_offset);

	/* Zero out parameter ram */
	memset_io(priv->ucc_pram, 0, sizeof(struct ucc_hdlc_param));

	/* Alloc riptr, tiptr */
	riptr = qe_muram_alloc(32, 32);
	if (riptr < 0) {
		dev_err(priv->dev, "Cannot allocate MURAM mem for Receive internal temp data pointer\n");
		ret = -ENOMEM;
		goto free_tx_skbuff;
	}

	tiptr = qe_muram_alloc(32, 32);
	if (tiptr < 0) {
		dev_err(priv->dev, "Cannot allocate MURAM mem for Transmit internal temp data pointer\n");
		ret = -ENOMEM;
		goto free_riptr;
	}

	/* Set RIPTR, TIPTR */
	iowrite16be(riptr, &priv->ucc_pram->riptr);
	iowrite16be(tiptr, &priv->ucc_pram->tiptr);

	/* Set MRBLR */
	iowrite16be(MAX_RX_BUF_LENGTH, &priv->ucc_pram->mrblr);

	/* Set RBASE, TBASE */
	iowrite32be(priv->dma_rx_bd, &priv->ucc_pram->rbase);
	iowrite32be(priv->dma_tx_bd, &priv->ucc_pram->tbase);

	/* Set RSTATE, TSTATE */
	iowrite32be(BMR_GBL | BMR_BIG_ENDIAN, &priv->ucc_pram->rstate);
	iowrite32be(BMR_GBL | BMR_BIG_ENDIAN, &priv->ucc_pram->tstate);

	/* Set C_MASK, C_PRES for 16bit CRC */
	iowrite32be(CRC_16BIT_MASK, &priv->ucc_pram->c_mask);
	iowrite32be(CRC_16BIT_PRES, &priv->ucc_pram->c_pres);

	iowrite16be(MAX_FRAME_LENGTH, &priv->ucc_pram->mflr);
	iowrite16be(DEFAULT_RFTHR, &priv->ucc_pram->rfthr);
	iowrite16be(DEFAULT_RFTHR, &priv->ucc_pram->rfcnt);
	iowrite16be(DEFAULT_ADDR_MASK, &priv->ucc_pram->hmask);
	iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr1);
	iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr2);
	iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr3);
	iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr4);

	/* Get BD buffer */
	bd_buffer = dma_alloc_coherent(priv->dev,
				       (RX_BD_RING_LEN + TX_BD_RING_LEN) *
				       MAX_RX_BUF_LENGTH,
				       &bd_dma_addr, GFP_KERNEL);
	if (!bd_buffer) {
		dev_err(priv->dev, "Could not allocate buffer descriptors\n");
		ret = -ENOMEM;
		goto free_tiptr;
	}

	memset(bd_buffer, 0, (RX_BD_RING_LEN + TX_BD_RING_LEN)
			* MAX_RX_BUF_LENGTH);

	priv->rx_buffer = bd_buffer;
	priv->tx_buffer = bd_buffer + RX_BD_RING_LEN * MAX_RX_BUF_LENGTH;

	priv->dma_rx_addr = bd_dma_addr;
	priv->dma_tx_addr = bd_dma_addr + RX_BD_RING_LEN * MAX_RX_BUF_LENGTH;

	for (i = 0; i < RX_BD_RING_LEN; i++) {
		if (i < (RX_BD_RING_LEN - 1))
			bd_status = R_E_S | R_I_S;
		else
			bd_status = R_E_S | R_I_S | R_W_S;

		iowrite16be(bd_status, &priv->rx_bd_base[i].status);
		iowrite32be(priv->dma_rx_addr + i * MAX_RX_BUF_LENGTH,
			    &priv->rx_bd_base[i].buf);
	}

	for (i = 0; i < TX_BD_RING_LEN; i++) {
		if (i < (TX_BD_RING_LEN - 1))
			bd_status = T_I_S | T_TC_S;
		else
			bd_status = T_I_S | T_TC_S | T_W_S;

		iowrite16be(bd_status, &priv->tx_bd_base[i].status);
		iowrite32be(priv->dma_tx_addr + i * MAX_RX_BUF_LENGTH,
			    &priv->tx_bd_base[i].buf);
	}

	return 0;

free_tiptr:
	qe_muram_free(tiptr);
free_riptr:
	qe_muram_free(riptr);
free_tx_skbuff:
	kfree(priv->tx_skbuff);
free_rx_skbuff:
	kfree(priv->rx_skbuff);
free_ucc_pram:
	qe_muram_free(priv->ucc_pram_offset);
free_tx_bd:
	dma_free_coherent(priv->dev,
			  TX_BD_RING_LEN * sizeof(struct qe_bd *),
			  priv->tx_bd_base, priv->dma_tx_bd);
free_rx_bd:
	dma_free_coherent(priv->dev,
			  RX_BD_RING_LEN * sizeof(struct qe_bd *),
			  priv->rx_bd_base, priv->dma_rx_bd);
free_uccf:
	ucc_fast_free(priv->uccf);

	return ret;
}

static netdev_tx_t ucc_hdlc_tx(struct sk_buff *skb, struct net_device *dev)
{
	hdlc_device *hdlc = dev_to_hdlc(dev);
	struct ucc_hdlc_private *priv = (struct ucc_hdlc_private *)hdlc->priv;
	struct qe_bd __iomem *bd;
	u16 bd_status;
	unsigned long flags;
	u8 *send_buf;
	int i;
	u16 *proto_head;

	switch (dev->type) {
	case ARPHRD_RAWHDLC:
		if (skb_headroom(skb) < HDLC_HEAD_LEN) {
			dev->stats.tx_dropped++;
			dev_kfree_skb(skb);
			netdev_err(dev, "Not enough space for hdlc head\n");
			return NETDEV_TX_OK;
		}

		skb_push(skb, HDLC_HEAD_LEN);

		proto_head = (u16 *)skb->data;
		*proto_head = htons(DEFAULT_HDLC_HEAD);

		dev->stats.tx_bytes += skb->len;
		break;

	case ARPHRD_PPP:
		proto_head = (u16 *)skb->data;
		if (*proto_head != htons(DEFAULT_PPP_HEAD)) {
			dev->stats.tx_dropped++;
			dev_kfree_skb(skb);
			netdev_err(dev, "Wrong ppp header\n");
			return NETDEV_TX_OK;
		}

		dev->stats.tx_bytes += skb->len;
		break;

	default:
		dev->stats.tx_dropped++;
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	pr_info("Tx data skb->len:%d ", skb->len);
	send_buf = (u8 *)skb->data;
	pr_info("\nTransmitted data:\n");
	for (i = 0; i < 16; i++) {
		if (i == skb->len)
			pr_info("++++");
		else
			pr_info("%02x\n", send_buf[i]);
	}
	spin_lock_irqsave(&priv->lock, flags);

	/* Start from the next BD that should be filled */
	bd = priv->curtx_bd;
	bd_status = ioread16be(&bd->status);
	/* Save the skb pointer so we can free it later */
	priv->tx_skbuff[priv->skb_curtx] = skb;

	/* Update the current skb pointer (wrapping if this was the last) */
	priv->skb_curtx =
	    (priv->skb_curtx + 1) & TX_RING_MOD_MASK(TX_BD_RING_LEN);

	/* copy skb data to tx buffer for sdma processing */
	memcpy(priv->tx_buffer + (be32_to_cpu(bd->buf) - priv->dma_tx_addr),
	       skb->data, skb->len);

	/* set bd status and length */
	bd_status = (bd_status & T_W_S) | T_R_S | T_I_S | T_L_S | T_TC_S;

	iowrite16be(bd_status, &bd->status);
	iowrite16be(skb->len, &bd->length);

	/* Move to next BD in the ring */
	if (!(bd_status & T_W_S))
		bd += 1;
	else
		bd = priv->tx_bd_base;

	if (bd == priv->dirty_tx) {
		if (!netif_queue_stopped(dev))
			netif_stop_queue(dev);
	}

	priv->curtx_bd = bd;

	spin_unlock_irqrestore(&priv->lock, flags);

	return NETDEV_TX_OK;
}

/* Reclaim transmitted buffers: walk the dirty BDs, free the skbs and
 * restart the queue if it was stopped.
 */
static int hdlc_tx_done(struct ucc_hdlc_private *priv)
{
	/* Start from the next BD that should be filled */
	struct net_device *dev = priv->ndev;
	struct qe_bd *bd;		/* BD pointer */
	u16 bd_status;

	bd = priv->dirty_tx;
	bd_status = ioread16be(&bd->status);

	/* Normal processing. */
	while ((bd_status & T_R_S) == 0) {
		struct sk_buff *skb;

		/* BD contains already transmitted buffer.   */
		/* Handle the transmitted buffer and release */
		/* the BD to be used with the current frame  */

		skb = priv->tx_skbuff[priv->skb_dirtytx];
		if (!skb)
			break;
		pr_info("TxBD: %x\n", bd_status);
		dev->stats.tx_packets++;
		memset(priv->tx_buffer +
		       (be32_to_cpu(bd->buf) - priv->dma_tx_addr),
		       0, skb->len);
		dev_kfree_skb_irq(skb);

		priv->tx_skbuff[priv->skb_dirtytx] = NULL;
		priv->skb_dirtytx =
		    (priv->skb_dirtytx +
		     1) & TX_RING_MOD_MASK(TX_BD_RING_LEN);

		/* We freed a buffer, so now we can restart transmission */
		if (netif_queue_stopped(dev))
			netif_wake_queue(dev);

		/* Advance the confirmation BD pointer */
		if (!(bd_status & T_W_S))
			bd += 1;
		else
			bd = priv->tx_bd_base;
		bd_status = ioread16be(&bd->status);
	}
	priv->dirty_tx = bd;

	return 0;
}

/* Pull completed frames off the Rx BD ring, up to rx_work_limit,
 * and hand them to the HDLC stack.
 */
static int hdlc_rx_done(struct ucc_hdlc_private *priv, int rx_work_limit)
{
	struct net_device *dev = priv->ndev;
	struct sk_buff *skb;
	hdlc_device *hdlc = dev_to_hdlc(dev);
	struct qe_bd *bd;
	u32 bd_status;
	u16 length, howmany = 0;
	u8 *bdbuffer;
	int i;
	static int entry;

	bd = priv->currx_bd;
	bd_status = ioread16be(&bd->status);

	/* while there are received buffers and BD is full (~R_E) */
	while (!((bd_status & (R_E_S)) || (--rx_work_limit < 0))) {
		if (bd_status & R_OV_S)
			dev->stats.rx_over_errors++;
		if (bd_status & R_CR_S) {
#ifdef BROKEN_FRAME_INFO
			pr_info("Broken Frame with RxBD: %x\n", bd_status);
#endif
			dev->stats.rx_crc_errors++;
			dev->stats.rx_dropped++;
			goto recycle;
		}
		bdbuffer = priv->rx_buffer +
			(priv->currx_bdnum * MAX_RX_BUF_LENGTH);
		length = ioread16be(&bd->length);

		pr_info("Received data length:%d", length);
		pr_info("while entry times:%d", entry++);

		pr_info("\nReceived data:\n");
		for (i = 0; (i < 16); i++) {
			if (i == length)
				pr_info("++++");
			else
				pr_info("%02x\n", bdbuffer[i]);
		}

		switch (dev->type) {
		case ARPHRD_RAWHDLC:
			bdbuffer += HDLC_HEAD_LEN;
			length -= (HDLC_HEAD_LEN + HDLC_CRC_SIZE);

			skb = dev_alloc_skb(length);
			if (!skb) {
				dev->stats.rx_dropped++;
				return -ENOMEM;
			}

			skb_put(skb, length);
			skb->len = length;
			skb->dev = dev;
			memcpy(skb->data, bdbuffer, length);
			break;

		case ARPHRD_PPP:
			length -= HDLC_CRC_SIZE;

			skb = dev_alloc_skb(length);
			if (!skb) {
				dev->stats.rx_dropped++;
				return -ENOMEM;
			}

			skb_put(skb, length);
			skb->len = length;
			skb->dev = dev;
			memcpy(skb->data, bdbuffer, length);
			break;
		}

		dev->stats.rx_packets++;
		dev->stats.rx_bytes += skb->len;
		howmany++;
		if (hdlc->proto)
			skb->protocol = hdlc_type_trans(skb, dev);
		pr_info("skb->protocol:%x\n", skb->protocol);
		netif_receive_skb(skb);

recycle:
		iowrite16be(bd_status | R_E_S | R_I_S, &bd->status);

		/* update to point at the next bd */
		if (bd_status & R_W_S) {
			priv->currx_bdnum = 0;
			bd = priv->rx_bd_base;
		} else {
			if (priv->currx_bdnum < (RX_BD_RING_LEN - 1))
				priv->currx_bdnum += 1;
			else
				priv->currx_bdnum = RX_BD_RING_LEN - 1;

			bd += 1;
		}

		bd_status = ioread16be(&bd->status);
	}

	priv->currx_bd = bd;
	return howmany;
}

static int ucc_hdlc_poll(struct napi_struct *napi, int budget)
{
	struct ucc_hdlc_private *priv = container_of(napi,
						     struct ucc_hdlc_private,
						     napi);
	int howmany;

	/* Tx event processing */
	spin_lock(&priv->lock);
	hdlc_tx_done(priv);
	spin_unlock(&priv->lock);

	howmany = 0;
	howmany += hdlc_rx_done(priv, budget - howmany);

	if (howmany < budget) {
		napi_complete(napi);
		qe_setbits32(priv->uccf->p_uccm,
			     (UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS) << 16);
	}

	return howmany;
}

static irqreturn_t ucc_hdlc_irq_handler(int irq, void *dev_id)
{
	struct ucc_hdlc_private *priv = (struct ucc_hdlc_private *)dev_id;
	struct net_device *dev = priv->ndev;
	struct ucc_fast_private *uccf;
	struct ucc_tdm_info *ut_info;
	u32 ucce;
	u32 uccm;

	ut_info = priv->ut_info;
	uccf = priv->uccf;

	ucce = ioread32be(uccf->p_ucce);
	uccm = ioread32be(uccf->p_uccm);
	ucce &= uccm;
	iowrite32be(ucce, uccf->p_ucce);
	pr_info("irq ucce:%x\n", ucce);
	if (!ucce)
		return IRQ_NONE;

	if ((ucce >> 16) & (UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS)) {
		if (napi_schedule_prep(&priv->napi)) {
			uccm &= ~((UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS)
				  << 16);
			iowrite32be(uccm, uccf->p_uccm);
			__napi_schedule(&priv->napi);
		}
	}

	/* Errors and other events */
	if (ucce >> 16 & UCC_HDLC_UCCE_BSY)
		dev->stats.rx_errors++;
	if (ucce >> 16 & UCC_HDLC_UCCE_TXE)
		dev->stats.tx_errors++;

	return IRQ_HANDLED;
}

static int uhdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	const size_t size = sizeof(te1_settings);
	te1_settings line;
	struct ucc_hdlc_private *priv = netdev_priv(dev);

	if (cmd != SIOCWANDEV)
		return hdlc_ioctl(dev, ifr, cmd);

	switch (ifr->ifr_settings.type) {
	case IF_GET_IFACE:
		ifr->ifr_settings.type = IF_IFACE_E1;
		if (ifr->ifr_settings.size < size) {
			ifr->ifr_settings.size = size; /* data size wanted */
			return -ENOBUFS;
		}
		memset(&line, 0, sizeof(line));
		line.clock_type = priv->clocking;

		if (copy_to_user(ifr->ifr_settings.ifs_ifsu.sync, &line, size))
			return -EFAULT;
		return 0;

	default:
		return hdlc_ioctl(dev, ifr, cmd);
	}
}

static int uhdlc_open(struct net_device *dev)
{
	u32 cecr_subblock;
	hdlc_device *hdlc = dev_to_hdlc(dev);
	struct ucc_hdlc_private *priv = hdlc->priv;
	struct ucc_tdm *utdm = priv->utdm;

	if (priv->hdlc_busy != 1) {
		if (request_irq(priv->ut_info->uf_info.irq,
				ucc_hdlc_irq_handler, 0, "hdlc", priv))
			return -ENODEV;

		cecr_subblock = ucc_fast_get_qe_cr_subblock(
					priv->ut_info->uf_info.ucc_num);

		qe_issue_cmd(QE_INIT_TX_RX, cecr_subblock,
			     QE_CR_PROTOCOL_UNSPECIFIED, 0);

		ucc_fast_enable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);

		/* Enable the TDM port */
		if (priv->tsa)
			utdm->si_regs->siglmr1_h |= (0x1 << utdm->tdm_port);

		priv->hdlc_busy = 1;
		netif_device_attach(priv->ndev);
		napi_enable(&priv->napi);
		netif_start_queue(dev);
		hdlc_open(dev);
	}

	return 0;
}

static void uhdlc_memclean(struct ucc_hdlc_private *priv)
{
	qe_muram_free(priv->ucc_pram->riptr);
	qe_muram_free(priv->ucc_pram->tiptr);

	if (priv->rx_bd_base) {
		dma_free_coherent(priv->dev,
				  RX_BD_RING_LEN * sizeof(struct qe_bd *),
				  priv->rx_bd_base, priv->dma_rx_bd);

		priv->rx_bd_base = NULL;
		priv->dma_rx_bd = 0;
	}

	if (priv->tx_bd_base) {
		dma_free_coherent(priv->dev,
				  TX_BD_RING_LEN * sizeof(struct qe_bd *),
				  priv->tx_bd_base, priv->dma_tx_bd);

		priv->tx_bd_base = NULL;
		priv->dma_tx_bd = 0;
	}

	if (priv->ucc_pram) {
		qe_muram_free(priv->ucc_pram_offset);
		priv->ucc_pram = NULL;
		priv->ucc_pram_offset = 0;
	}

	kfree(priv->rx_skbuff);
	priv->rx_skbuff = NULL;

	kfree(priv->tx_skbuff);
	priv->tx_skbuff = NULL;

	if (priv->uf_regs) {
		iounmap(priv->uf_regs);
		priv->uf_regs = NULL;
	}

	if (priv->uccf) {
		ucc_fast_free(priv->uccf);
		priv->uccf = NULL;
	}

	if (priv->rx_buffer) {
		dma_free_coherent(priv->dev,
				  RX_BD_RING_LEN * MAX_RX_BUF_LENGTH,
				  priv->rx_buffer, priv->dma_rx_addr);
		priv->rx_buffer = NULL;
		priv->dma_rx_addr = 0;
	}

	if (priv->tx_buffer) {
		dma_free_coherent(priv->dev,
				  TX_BD_RING_LEN * MAX_RX_BUF_LENGTH,
				  priv->tx_buffer, priv->dma_tx_addr);
		priv->tx_buffer = NULL;
		priv->dma_tx_addr = 0;
	}
}

static int uhdlc_close(struct net_device *dev)
{
	struct ucc_hdlc_private *priv = dev_to_hdlc(dev)->priv;
	struct ucc_tdm *utdm = priv->utdm;
	u32 cecr_subblock;

	napi_disable(&priv->napi);
	cecr_subblock = ucc_fast_get_qe_cr_subblock(
				priv->ut_info->uf_info.ucc_num);

	qe_issue_cmd(QE_GRACEFUL_STOP_TX, cecr_subblock,
		     (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);
	qe_issue_cmd(QE_CLOSE_RX_BD, cecr_subblock,
		     (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);

	if (priv->tsa)
		utdm->si_regs->siglmr1_h &= ~(0x1 << utdm->tdm_port);

	ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);

	free_irq(priv->ut_info->uf_info.irq, priv);
	netif_stop_queue(dev);
	priv->hdlc_busy = 0;

	return 0;
}

static int ucc_hdlc_attach(struct net_device *dev, unsigned short encoding,
			   unsigned short parity)
{
	struct ucc_hdlc_private *priv = dev_to_hdlc(dev)->priv;

	if (encoding != ENCODING_NRZ &&
	    encoding != ENCODING_NRZI)
		return -EINVAL;

	if (parity != PARITY_NONE &&
	    parity != PARITY_CRC32_PR1_CCITT &&
	    parity != PARITY_CRC16_PR1_CCITT)
		return -EINVAL;

	priv->encoding = encoding;
	priv->parity = parity;

	return 0;
}

#ifdef CONFIG_PM
static void store_clk_config(struct ucc_hdlc_private *priv)
{
	struct qe_mux *qe_mux_reg = &qe_immr->qmx;

	/* store si clk */
	priv->cmxsi1cr_h = ioread32be(&qe_mux_reg->cmxsi1cr_h);
	priv->cmxsi1cr_l = ioread32be(&qe_mux_reg->cmxsi1cr_l);

	/* store si sync */
	priv->cmxsi1syr = ioread32be(&qe_mux_reg->cmxsi1syr);

	/* store ucc clk */
	memcpy_fromio(priv->cmxucr, qe_mux_reg->cmxucr, 4 * sizeof(u32));
}

static void resume_clk_config(struct ucc_hdlc_private *priv)
{
	struct qe_mux *qe_mux_reg = &qe_immr->qmx;

	memcpy_toio(qe_mux_reg->cmxucr, priv->cmxucr, 4 * sizeof(u32));

	iowrite32be(priv->cmxsi1cr_h, &qe_mux_reg->cmxsi1cr_h);
	iowrite32be(priv->cmxsi1cr_l, &qe_mux_reg->cmxsi1cr_l);

	iowrite32be(priv->cmxsi1syr, &qe_mux_reg->cmxsi1syr);
}

static int uhdlc_suspend(struct device *dev)
{
	struct ucc_hdlc_private *priv = dev_get_drvdata(dev);
	struct ucc_tdm_info *ut_info;
	struct ucc_fast __iomem *uf_regs;

	if (!priv)
		return -EINVAL;

	if (!netif_running(priv->ndev))
		return 0;

	netif_device_detach(priv->ndev);
	napi_disable(&priv->napi);

	ut_info = priv->ut_info;
	uf_regs = priv->uf_regs;

	/* backup gumr guemr */
	priv->gumr = ioread32be(&uf_regs->gumr);
	priv->guemr = ioread8(&uf_regs->guemr);

	priv->ucc_pram_bak = kmalloc(sizeof(*priv->ucc_pram_bak),
				     GFP_KERNEL);
	if (!priv->ucc_pram_bak)
		return -ENOMEM;

	/* backup HDLC parameter */
	memcpy_fromio(priv->ucc_pram_bak, priv->ucc_pram,
		      sizeof(struct ucc_hdlc_param));

	/* store the clk configuration */
	store_clk_config(priv);

	/* save power */
	ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);

	dev_dbg(dev, "ucc hdlc suspend\n");
	return 0;
}

static int uhdlc_resume(struct device *dev)
{
	struct ucc_hdlc_private *priv = dev_get_drvdata(dev);
	struct ucc_tdm *utdm;
	struct ucc_tdm_info *ut_info;
	struct ucc_fast __iomem *uf_regs;
	struct ucc_fast_private *uccf;
	struct ucc_fast_info *uf_info;
	int ret, i;
	u32 cecr_subblock;
	u16 bd_status;

	if (!priv)
		return -EINVAL;

	if (!netif_running(priv->ndev))
		return 0;

	utdm = priv->utdm;
	ut_info = priv->ut_info;
	uf_info = &ut_info->uf_info;
	uf_regs = priv->uf_regs;
	uccf = priv->uccf;

	/* restore gumr guemr */
	iowrite8(priv->guemr, &uf_regs->guemr);
	iowrite32be(priv->gumr, &uf_regs->gumr);

	/* Set Virtual Fifo registers */
	iowrite16be(uf_info->urfs, &uf_regs->urfs);
	iowrite16be(uf_info->urfet, &uf_regs->urfet);
	iowrite16be(uf_info->urfset, &uf_regs->urfset);
	iowrite16be(uf_info->utfs, &uf_regs->utfs);
	iowrite16be(uf_info->utfet, &uf_regs->utfet);
	iowrite16be(uf_info->utftt, &uf_regs->utftt);
	/* utfb, urfb are offsets from MURAM base */
	iowrite32be(uccf->ucc_fast_tx_virtual_fifo_base_offset, &uf_regs->utfb);
	iowrite32be(uccf->ucc_fast_rx_virtual_fifo_base_offset, &uf_regs->urfb);

	/* Rx Tx and sync clock routing */
	resume_clk_config(priv);

	iowrite32be(uf_info->uccm_mask, &uf_regs->uccm);
	iowrite32be(0xffffffff, &uf_regs->ucce);

	ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);

	/* rebuild SIRAM */
	if (priv->tsa)
		ucc_tdm_init(priv->utdm, priv->ut_info);

	/* Write to QE CECR, UCCx channel to Stop Transmission */
	cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
	ret = qe_issue_cmd(QE_STOP_TX, cecr_subblock,
			   (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);

	/* Set UPSMR normal mode */
	iowrite32be(0, &uf_regs->upsmr);

	/* init parameter base */
	cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
	ret = qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, cecr_subblock,
			   QE_CR_PROTOCOL_UNSPECIFIED, priv->ucc_pram_offset);

	priv->ucc_pram = (struct ucc_hdlc_param __iomem *)
				qe_muram_addr(priv->ucc_pram_offset);

	/* restore ucc parameter */
	memcpy_toio(priv->ucc_pram, priv->ucc_pram_bak,
		    sizeof(struct ucc_hdlc_param));
	kfree(priv->ucc_pram_bak);

	/* rebuild BD entry */
	for (i = 0; i < RX_BD_RING_LEN; i++) {
		if (i < (RX_BD_RING_LEN - 1))
			bd_status = R_E_S | R_I_S;
		else
			bd_status = R_E_S | R_I_S | R_W_S;

		iowrite16be(bd_status, &priv->rx_bd_base[i].status);
		iowrite32be(priv->dma_rx_addr + i * MAX_RX_BUF_LENGTH,
			    &priv->rx_bd_base[i].buf);
	}

	for (i = 0; i < TX_BD_RING_LEN; i++) {
		if (i < (TX_BD_RING_LEN - 1))
			bd_status = T_I_S | T_TC_S;
		else
			bd_status = T_I_S | T_TC_S | T_W_S;

		iowrite16be(bd_status, &priv->tx_bd_base[i].status);
		iowrite32be(priv->dma_tx_addr + i * MAX_RX_BUF_LENGTH,
			    &priv->tx_bd_base[i].buf);
	}

	/* if hdlc is busy enable TX and RX */
	if (priv->hdlc_busy == 1) {
		cecr_subblock = ucc_fast_get_qe_cr_subblock(
					priv->ut_info->uf_info.ucc_num);

		qe_issue_cmd(QE_INIT_TX_RX, cecr_subblock,
			     (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);

		ucc_fast_enable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);

		/* Enable the TDM port */
		if (priv->tsa)
			utdm->si_regs->siglmr1_h |= (0x1 << utdm->tdm_port);
	}

	napi_enable(&priv->napi);
	netif_device_attach(priv->ndev);

	return 0;
}

static const struct dev_pm_ops uhdlc_pm_ops = {
	.suspend = uhdlc_suspend,
	.resume = uhdlc_resume,
	.freeze = uhdlc_suspend,
	.thaw = uhdlc_resume,
};

#define HDLC_PM_OPS (&uhdlc_pm_ops)

#else

#define HDLC_PM_OPS NULL

#endif

static const struct net_device_ops uhdlc_ops = {
	.ndo_open	= uhdlc_open,
	.ndo_stop	= uhdlc_close,
	.ndo_change_mtu	= hdlc_change_mtu,
	.ndo_start_xmit	= hdlc_start_xmit,
	.ndo_do_ioctl	= uhdlc_ioctl,
};

static int ucc_hdlc_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct ucc_hdlc_private *uhdlc_priv = NULL;
	struct ucc_tdm_info *ut_info;
	struct ucc_tdm *utdm;
	struct resource res;
	struct net_device *dev;
	hdlc_device *hdlc;
	int ucc_num;
	const char *sprop;
	int ret;
	u32 val;

	ret = of_property_read_u32_index(np, "cell-index", 0, &val);
	if (ret) {
		dev_err(&pdev->dev, "Invalid ucc property\n");
		return -ENODEV;
	}

	ucc_num = val - 1;
	if ((ucc_num > 3) || (ucc_num < 0)) {
		dev_err(&pdev->dev, ": Invalid UCC num\n");
		return -EINVAL;
	}

	memcpy(&utdm_info[ucc_num], &utdm_primary_info,
	       sizeof(utdm_primary_info));

	ut_info = &utdm_info[ucc_num];
	ut_info->uf_info.ucc_num = ucc_num;

	sprop = of_get_property(np, "rx-clock-name", NULL);
	if (sprop) {
		ut_info->uf_info.rx_clock = qe_clock_source(sprop);
		if ((ut_info->uf_info.rx_clock < QE_CLK_NONE) ||
		    (ut_info->uf_info.rx_clock > QE_CLK24)) {
			dev_err(&pdev->dev, "Invalid rx-clock-name property\n");
			return -EINVAL;
		}
	} else {
		dev_err(&pdev->dev, "Invalid rx-clock-name property\n");
		return -EINVAL;
	}

	sprop = of_get_property(np, "tx-clock-name", NULL);
	if (sprop) {
		ut_info->uf_info.tx_clock = qe_clock_source(sprop);
		if ((ut_info->uf_info.tx_clock < QE_CLK_NONE) ||
		    (ut_info->uf_info.tx_clock > QE_CLK24)) {
			dev_err(&pdev->dev, "Invalid tx-clock-name property\n");
			return -EINVAL;
		}
	} else {
		dev_err(&pdev->dev, "Invalid tx-clock-name property\n");
		return -EINVAL;
	}

	/* use the same clock when work in loopback */
	if (ut_info->uf_info.rx_clock == ut_info->uf_info.tx_clock)
		qe_setbrg(ut_info->uf_info.rx_clock, 20000000, 1);

	ret = of_address_to_resource(np, 0, &res);
	if (ret)
		return -EINVAL;

	ut_info->uf_info.regs = res.start;
	ut_info->uf_info.irq = irq_of_parse_and_map(np, 0);

	uhdlc_priv = kzalloc(sizeof(*uhdlc_priv), GFP_KERNEL);
	if (!uhdlc_priv)
		return -ENOMEM;

	dev_set_drvdata(&pdev->dev, uhdlc_priv);
	uhdlc_priv->dev = &pdev->dev;
	uhdlc_priv->ut_info = ut_info;

	if (of_get_property(np, "fsl,tdm-interface", NULL))
		uhdlc_priv->tsa = 1;

	if (of_get_property(np, "fsl,ucc-internal-loopback", NULL))
		uhdlc_priv->loopback = 1;

	if (uhdlc_priv->tsa == 1) {
		utdm = kzalloc(sizeof(*utdm), GFP_KERNEL);
		if (!utdm) {
			ret = -ENOMEM;
			dev_err(&pdev->dev, "No mem to alloc ucc tdm data\n");
			goto free_uhdlc_priv;
		}
		uhdlc_priv->utdm = utdm;
		ret = ucc_of_parse_tdm(np, utdm, ut_info);
		if (ret)
			goto free_utdm;
	}

	ret = uhdlc_init(uhdlc_priv);
	if (ret) {
		dev_err(&pdev->dev, "Failed to init uhdlc\n");
		goto free_utdm;
	}

	dev = alloc_hdlcdev(uhdlc_priv);
	if (!dev) {
		ret = -ENOMEM;
		pr_err("ucc_hdlc: unable to allocate memory\n");
		goto undo_uhdlc_init;
	}

	uhdlc_priv->ndev = dev;
	hdlc = dev_to_hdlc(dev);
	dev->tx_queue_len = 16;
	dev->netdev_ops = &uhdlc_ops;
	hdlc->attach = ucc_hdlc_attach;
	hdlc->xmit = ucc_hdlc_tx;
	netif_napi_add(dev, &uhdlc_priv->napi, ucc_hdlc_poll, 32);
	if (register_hdlc_device(dev)) {
		ret = -ENOBUFS;
		pr_err("ucc_hdlc: unable to register hdlc device\n");
		goto free_dev;
	}

	return 0;

free_dev:
	free_netdev(dev);
undo_uhdlc_init:
free_utdm:
	if (uhdlc_priv->tsa)
		kfree(utdm);
free_uhdlc_priv:
	kfree(uhdlc_priv);
	return ret;
}

static int ucc_hdlc_remove(struct platform_device *pdev)
{
	struct ucc_hdlc_private *priv = dev_get_drvdata(&pdev->dev);

	uhdlc_memclean(priv);

	if (priv->utdm->si_regs) {
		iounmap(priv->utdm->si_regs);
		priv->utdm->si_regs = NULL;
	}

	if (priv->utdm->siram) {
		iounmap(priv->utdm->siram);
		priv->utdm->siram = NULL;
	}
	kfree(priv);

	dev_info(&pdev->dev, "UCC based hdlc module removed\n");

	return 0;
}

static const struct of_device_id fsl_ucc_hdlc_of_match[] = {
	{
	.compatible = "fsl,ucc-hdlc",
	},
	{},
};

MODULE_DEVICE_TABLE(of, fsl_ucc_hdlc_of_match);

static struct platform_driver ucc_hdlc_driver = {
	.probe	= ucc_hdlc_probe,
	.remove	= ucc_hdlc_remove,
	.driver	= {
		.name		= DRV_NAME,
		.pm		= HDLC_PM_OPS,
		.of_match_table	= fsl_ucc_hdlc_of_match,
	},
};

module_platform_driver(ucc_hdlc_driver);
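
/* Module metadata. The section as given ends at module_platform_driver();
 * adding the description and a GPL license tag is assumed to be intended,
 * matching the GPL notice in the file header and the unused DRV_DESC define.
 */
MODULE_DESCRIPTION(DRV_DESC);
MODULE_LICENSE("GPL");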