1 /* Freescale QUICC Engine HDLC Device Driver 2 * 3 * Copyright 2016 Freescale Semiconductor Inc. 4 * 5 * This program is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License as published by the 7 * Free Software Foundation; either version 2 of the License, or (at your 8 * option) any later version. 9 */ 10 11 #include <linux/delay.h> 12 #include <linux/dma-mapping.h> 13 #include <linux/hdlc.h> 14 #include <linux/init.h> 15 #include <linux/interrupt.h> 16 #include <linux/io.h> 17 #include <linux/irq.h> 18 #include <linux/kernel.h> 19 #include <linux/module.h> 20 #include <linux/netdevice.h> 21 #include <linux/of_address.h> 22 #include <linux/of_irq.h> 23 #include <linux/of_platform.h> 24 #include <linux/platform_device.h> 25 #include <linux/sched.h> 26 #include <linux/skbuff.h> 27 #include <linux/slab.h> 28 #include <linux/spinlock.h> 29 #include <linux/stddef.h> 30 #include <soc/fsl/qe/qe_tdm.h> 31 #include <uapi/linux/if_arp.h> 32 33 #include "fsl_ucc_hdlc.h" 34 35 #define DRV_DESC "Freescale QE UCC HDLC Driver" 36 #define DRV_NAME "ucc_hdlc" 37 38 #define TDM_PPPOHT_SLIC_MAXIN 39 40 static struct ucc_tdm_info utdm_primary_info = { 41 .uf_info = { 42 .tsa = 0, 43 .cdp = 0, 44 .cds = 1, 45 .ctsp = 1, 46 .ctss = 1, 47 .revd = 0, 48 .urfs = 256, 49 .utfs = 256, 50 .urfet = 128, 51 .urfset = 192, 52 .utfet = 128, 53 .utftt = 0x40, 54 .ufpt = 256, 55 .mode = UCC_FAST_PROTOCOL_MODE_HDLC, 56 .ttx_trx = UCC_FAST_GUMR_TRANSPARENT_TTX_TRX_NORMAL, 57 .tenc = UCC_FAST_TX_ENCODING_NRZ, 58 .renc = UCC_FAST_RX_ENCODING_NRZ, 59 .tcrc = UCC_FAST_16_BIT_CRC, 60 .synl = UCC_FAST_SYNC_LEN_NOT_USED, 61 }, 62 63 .si_info = { 64 #ifdef TDM_PPPOHT_SLIC_MAXIN 65 .simr_rfsd = 1, 66 .simr_tfsd = 2, 67 #else 68 .simr_rfsd = 0, 69 .simr_tfsd = 0, 70 #endif 71 .simr_crt = 0, 72 .simr_sl = 0, 73 .simr_ce = 1, 74 .simr_fe = 1, 75 .simr_gm = 0, 76 }, 77 }; 78 79 static struct ucc_tdm_info utdm_info[MAX_HDLC_NUM]; 80 81 static int uhdlc_init(struct ucc_hdlc_private *priv) 82 { 83 struct ucc_tdm_info *ut_info; 84 struct ucc_fast_info *uf_info; 85 u32 cecr_subblock; 86 u16 bd_status; 87 int ret, i; 88 void *bd_buffer; 89 dma_addr_t bd_dma_addr; 90 u32 riptr; 91 u32 tiptr; 92 u32 gumr; 93 94 ut_info = priv->ut_info; 95 uf_info = &ut_info->uf_info; 96 97 if (priv->tsa) { 98 uf_info->tsa = 1; 99 uf_info->ctsp = 1; 100 } 101 102 /* This sets HPM register in CMXUCR register which configures a 103 * open drain connected HDLC bus 104 */ 105 if (priv->hdlc_bus) 106 uf_info->brkpt_support = 1; 107 108 uf_info->uccm_mask = ((UCC_HDLC_UCCE_RXB | UCC_HDLC_UCCE_RXF | 109 UCC_HDLC_UCCE_TXB) << 16); 110 111 ret = ucc_fast_init(uf_info, &priv->uccf); 112 if (ret) { 113 dev_err(priv->dev, "Failed to init uccf."); 114 return ret; 115 } 116 117 priv->uf_regs = priv->uccf->uf_regs; 118 ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX); 119 120 /* Loopback mode */ 121 if (priv->loopback) { 122 dev_info(priv->dev, "Loopback Mode\n"); 123 /* use the same clock when work in loopback */ 124 qe_setbrg(ut_info->uf_info.rx_clock, 20000000, 1); 125 126 gumr = ioread32be(&priv->uf_regs->gumr); 127 gumr |= (UCC_FAST_GUMR_LOOPBACK | UCC_FAST_GUMR_CDS | 128 UCC_FAST_GUMR_TCI); 129 gumr &= ~(UCC_FAST_GUMR_CTSP | UCC_FAST_GUMR_RSYN); 130 iowrite32be(gumr, &priv->uf_regs->gumr); 131 } 132 133 /* Initialize SI */ 134 if (priv->tsa) 135 ucc_tdm_init(priv->utdm, priv->ut_info); 136 137 /* Write to QE CECR, UCCx channel to Stop Transmission */ 138 cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num); 139 ret = qe_issue_cmd(QE_STOP_TX, cecr_subblock, 140 QE_CR_PROTOCOL_UNSPECIFIED, 0); 141 142 /* Set UPSMR normal mode (need fixed)*/ 143 iowrite32be(0, &priv->uf_regs->upsmr); 144 145 /* hdlc_bus mode */ 146 if (priv->hdlc_bus) { 147 u32 upsmr; 148 149 dev_info(priv->dev, "HDLC bus Mode\n"); 150 upsmr = ioread32be(&priv->uf_regs->upsmr); 151 152 /* bus mode and retransmit enable, with collision window 153 * set to 8 bytes 154 */ 155 upsmr |= UCC_HDLC_UPSMR_RTE | UCC_HDLC_UPSMR_BUS | 156 UCC_HDLC_UPSMR_CW8; 157 iowrite32be(upsmr, &priv->uf_regs->upsmr); 158 159 /* explicitly disable CDS & CTSP */ 160 gumr = ioread32be(&priv->uf_regs->gumr); 161 gumr &= ~(UCC_FAST_GUMR_CDS | UCC_FAST_GUMR_CTSP); 162 /* set automatic sync to explicitly ignore CD signal */ 163 gumr |= UCC_FAST_GUMR_SYNL_AUTO; 164 iowrite32be(gumr, &priv->uf_regs->gumr); 165 } 166 167 priv->rx_ring_size = RX_BD_RING_LEN; 168 priv->tx_ring_size = TX_BD_RING_LEN; 169 /* Alloc Rx BD */ 170 priv->rx_bd_base = dma_alloc_coherent(priv->dev, 171 RX_BD_RING_LEN * sizeof(struct qe_bd), 172 &priv->dma_rx_bd, GFP_KERNEL); 173 174 if (!priv->rx_bd_base) { 175 dev_err(priv->dev, "Cannot allocate MURAM memory for RxBDs\n"); 176 ret = -ENOMEM; 177 goto free_uccf; 178 } 179 180 /* Alloc Tx BD */ 181 priv->tx_bd_base = dma_alloc_coherent(priv->dev, 182 TX_BD_RING_LEN * sizeof(struct qe_bd), 183 &priv->dma_tx_bd, GFP_KERNEL); 184 185 if (!priv->tx_bd_base) { 186 dev_err(priv->dev, "Cannot allocate MURAM memory for TxBDs\n"); 187 ret = -ENOMEM; 188 goto free_rx_bd; 189 } 190 191 /* Alloc parameter ram for ucc hdlc */ 192 priv->ucc_pram_offset = qe_muram_alloc(sizeof(struct ucc_hdlc_param), 193 ALIGNMENT_OF_UCC_HDLC_PRAM); 194 195 if (priv->ucc_pram_offset < 0) { 196 dev_err(priv->dev, "Can not allocate MURAM for hdlc parameter.\n"); 197 ret = -ENOMEM; 198 goto free_tx_bd; 199 } 200 201 priv->rx_skbuff = kcalloc(priv->rx_ring_size, 202 sizeof(*priv->rx_skbuff), 203 GFP_KERNEL); 204 if (!priv->rx_skbuff) 205 goto free_ucc_pram; 206 207 priv->tx_skbuff = kcalloc(priv->tx_ring_size, 208 sizeof(*priv->tx_skbuff), 209 GFP_KERNEL); 210 if (!priv->tx_skbuff) 211 goto free_rx_skbuff; 212 213 priv->skb_curtx = 0; 214 priv->skb_dirtytx = 0; 215 priv->curtx_bd = priv->tx_bd_base; 216 priv->dirty_tx = priv->tx_bd_base; 217 priv->currx_bd = priv->rx_bd_base; 218 priv->currx_bdnum = 0; 219 220 /* init parameter base */ 221 cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num); 222 ret = qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, cecr_subblock, 223 QE_CR_PROTOCOL_UNSPECIFIED, priv->ucc_pram_offset); 224 225 priv->ucc_pram = (struct ucc_hdlc_param __iomem *) 226 qe_muram_addr(priv->ucc_pram_offset); 227 228 /* Zero out parameter ram */ 229 memset_io(priv->ucc_pram, 0, sizeof(struct ucc_hdlc_param)); 230 231 /* Alloc riptr, tiptr */ 232 riptr = qe_muram_alloc(32, 32); 233 if (riptr < 0) { 234 dev_err(priv->dev, "Cannot allocate MURAM mem for Receive internal temp data pointer\n"); 235 ret = -ENOMEM; 236 goto free_tx_skbuff; 237 } 238 239 tiptr = qe_muram_alloc(32, 32); 240 if (tiptr < 0) { 241 dev_err(priv->dev, "Cannot allocate MURAM mem for Transmit internal temp data pointer\n"); 242 ret = -ENOMEM; 243 goto free_riptr; 244 } 245 246 /* Set RIPTR, TIPTR */ 247 iowrite16be(riptr, &priv->ucc_pram->riptr); 248 iowrite16be(tiptr, &priv->ucc_pram->tiptr); 249 250 /* Set MRBLR */ 251 iowrite16be(MAX_RX_BUF_LENGTH, &priv->ucc_pram->mrblr); 252 253 /* Set RBASE, TBASE */ 254 iowrite32be(priv->dma_rx_bd, &priv->ucc_pram->rbase); 255 iowrite32be(priv->dma_tx_bd, &priv->ucc_pram->tbase); 256 257 /* Set RSTATE, TSTATE */ 258 iowrite32be(BMR_GBL | BMR_BIG_ENDIAN, &priv->ucc_pram->rstate); 259 iowrite32be(BMR_GBL | BMR_BIG_ENDIAN, &priv->ucc_pram->tstate); 260 261 /* Set C_MASK, C_PRES for 16bit CRC */ 262 iowrite32be(CRC_16BIT_MASK, &priv->ucc_pram->c_mask); 263 iowrite32be(CRC_16BIT_PRES, &priv->ucc_pram->c_pres); 264 265 iowrite16be(MAX_FRAME_LENGTH, &priv->ucc_pram->mflr); 266 iowrite16be(DEFAULT_RFTHR, &priv->ucc_pram->rfthr); 267 iowrite16be(DEFAULT_RFTHR, &priv->ucc_pram->rfcnt); 268 iowrite16be(DEFAULT_ADDR_MASK, &priv->ucc_pram->hmask); 269 iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr1); 270 iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr2); 271 iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr3); 272 iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr4); 273 274 /* Get BD buffer */ 275 bd_buffer = dma_zalloc_coherent(priv->dev, 276 (RX_BD_RING_LEN + TX_BD_RING_LEN) * 277 MAX_RX_BUF_LENGTH, 278 &bd_dma_addr, GFP_KERNEL); 279 280 if (!bd_buffer) { 281 dev_err(priv->dev, "Could not allocate buffer descriptors\n"); 282 ret = -ENOMEM; 283 goto free_tiptr; 284 } 285 286 priv->rx_buffer = bd_buffer; 287 priv->tx_buffer = bd_buffer + RX_BD_RING_LEN * MAX_RX_BUF_LENGTH; 288 289 priv->dma_rx_addr = bd_dma_addr; 290 priv->dma_tx_addr = bd_dma_addr + RX_BD_RING_LEN * MAX_RX_BUF_LENGTH; 291 292 for (i = 0; i < RX_BD_RING_LEN; i++) { 293 if (i < (RX_BD_RING_LEN - 1)) 294 bd_status = R_E_S | R_I_S; 295 else 296 bd_status = R_E_S | R_I_S | R_W_S; 297 298 iowrite16be(bd_status, &priv->rx_bd_base[i].status); 299 iowrite32be(priv->dma_rx_addr + i * MAX_RX_BUF_LENGTH, 300 &priv->rx_bd_base[i].buf); 301 } 302 303 for (i = 0; i < TX_BD_RING_LEN; i++) { 304 if (i < (TX_BD_RING_LEN - 1)) 305 bd_status = T_I_S | T_TC_S; 306 else 307 bd_status = T_I_S | T_TC_S | T_W_S; 308 309 iowrite16be(bd_status, &priv->tx_bd_base[i].status); 310 iowrite32be(priv->dma_tx_addr + i * MAX_RX_BUF_LENGTH, 311 &priv->tx_bd_base[i].buf); 312 } 313 314 return 0; 315 316 free_tiptr: 317 qe_muram_free(tiptr); 318 free_riptr: 319 qe_muram_free(riptr); 320 free_tx_skbuff: 321 kfree(priv->tx_skbuff); 322 free_rx_skbuff: 323 kfree(priv->rx_skbuff); 324 free_ucc_pram: 325 qe_muram_free(priv->ucc_pram_offset); 326 free_tx_bd: 327 dma_free_coherent(priv->dev, 328 TX_BD_RING_LEN * sizeof(struct qe_bd), 329 priv->tx_bd_base, priv->dma_tx_bd); 330 free_rx_bd: 331 dma_free_coherent(priv->dev, 332 RX_BD_RING_LEN * sizeof(struct qe_bd), 333 priv->rx_bd_base, priv->dma_rx_bd); 334 free_uccf: 335 ucc_fast_free(priv->uccf); 336 337 return ret; 338 } 339 340 static netdev_tx_t ucc_hdlc_tx(struct sk_buff *skb, struct net_device *dev) 341 { 342 hdlc_device *hdlc = dev_to_hdlc(dev); 343 struct ucc_hdlc_private *priv = (struct ucc_hdlc_private *)hdlc->priv; 344 struct qe_bd __iomem *bd; 345 u16 bd_status; 346 unsigned long flags; 347 u16 *proto_head; 348 349 switch (dev->type) { 350 case ARPHRD_RAWHDLC: 351 if (skb_headroom(skb) < HDLC_HEAD_LEN) { 352 dev->stats.tx_dropped++; 353 dev_kfree_skb(skb); 354 netdev_err(dev, "No enough space for hdlc head\n"); 355 return -ENOMEM; 356 } 357 358 skb_push(skb, HDLC_HEAD_LEN); 359 360 proto_head = (u16 *)skb->data; 361 *proto_head = htons(DEFAULT_HDLC_HEAD); 362 363 dev->stats.tx_bytes += skb->len; 364 break; 365 366 case ARPHRD_PPP: 367 proto_head = (u16 *)skb->data; 368 if (*proto_head != htons(DEFAULT_PPP_HEAD)) { 369 dev->stats.tx_dropped++; 370 dev_kfree_skb(skb); 371 netdev_err(dev, "Wrong ppp header\n"); 372 return -ENOMEM; 373 } 374 375 dev->stats.tx_bytes += skb->len; 376 break; 377 378 default: 379 dev->stats.tx_dropped++; 380 dev_kfree_skb(skb); 381 return -ENOMEM; 382 } 383 spin_lock_irqsave(&priv->lock, flags); 384 385 /* Start from the next BD that should be filled */ 386 bd = priv->curtx_bd; 387 bd_status = ioread16be(&bd->status); 388 /* Save the skb pointer so we can free it later */ 389 priv->tx_skbuff[priv->skb_curtx] = skb; 390 391 /* Update the current skb pointer (wrapping if this was the last) */ 392 priv->skb_curtx = 393 (priv->skb_curtx + 1) & TX_RING_MOD_MASK(TX_BD_RING_LEN); 394 395 /* copy skb data to tx buffer for sdma processing */ 396 memcpy(priv->tx_buffer + (be32_to_cpu(bd->buf) - priv->dma_tx_addr), 397 skb->data, skb->len); 398 399 /* set bd status and length */ 400 bd_status = (bd_status & T_W_S) | T_R_S | T_I_S | T_L_S | T_TC_S; 401 402 iowrite16be(skb->len, &bd->length); 403 iowrite16be(bd_status, &bd->status); 404 405 /* Move to next BD in the ring */ 406 if (!(bd_status & T_W_S)) 407 bd += 1; 408 else 409 bd = priv->tx_bd_base; 410 411 if (bd == priv->dirty_tx) { 412 if (!netif_queue_stopped(dev)) 413 netif_stop_queue(dev); 414 } 415 416 priv->curtx_bd = bd; 417 418 spin_unlock_irqrestore(&priv->lock, flags); 419 420 return NETDEV_TX_OK; 421 } 422 423 static int hdlc_tx_done(struct ucc_hdlc_private *priv) 424 { 425 /* Start from the next BD that should be filled */ 426 struct net_device *dev = priv->ndev; 427 struct qe_bd *bd; /* BD pointer */ 428 u16 bd_status; 429 430 bd = priv->dirty_tx; 431 bd_status = ioread16be(&bd->status); 432 433 /* Normal processing. */ 434 while ((bd_status & T_R_S) == 0) { 435 struct sk_buff *skb; 436 437 /* BD contains already transmitted buffer. */ 438 /* Handle the transmitted buffer and release */ 439 /* the BD to be used with the current frame */ 440 441 skb = priv->tx_skbuff[priv->skb_dirtytx]; 442 if (!skb) 443 break; 444 dev->stats.tx_packets++; 445 memset(priv->tx_buffer + 446 (be32_to_cpu(bd->buf) - priv->dma_tx_addr), 447 0, skb->len); 448 dev_kfree_skb_irq(skb); 449 450 priv->tx_skbuff[priv->skb_dirtytx] = NULL; 451 priv->skb_dirtytx = 452 (priv->skb_dirtytx + 453 1) & TX_RING_MOD_MASK(TX_BD_RING_LEN); 454 455 /* We freed a buffer, so now we can restart transmission */ 456 if (netif_queue_stopped(dev)) 457 netif_wake_queue(dev); 458 459 /* Advance the confirmation BD pointer */ 460 if (!(bd_status & T_W_S)) 461 bd += 1; 462 else 463 bd = priv->tx_bd_base; 464 bd_status = ioread16be(&bd->status); 465 } 466 priv->dirty_tx = bd; 467 468 return 0; 469 } 470 471 static int hdlc_rx_done(struct ucc_hdlc_private *priv, int rx_work_limit) 472 { 473 struct net_device *dev = priv->ndev; 474 struct sk_buff *skb = NULL; 475 hdlc_device *hdlc = dev_to_hdlc(dev); 476 struct qe_bd *bd; 477 u16 bd_status; 478 u16 length, howmany = 0; 479 u8 *bdbuffer; 480 481 bd = priv->currx_bd; 482 bd_status = ioread16be(&bd->status); 483 484 /* while there are received buffers and BD is full (~R_E) */ 485 while (!((bd_status & (R_E_S)) || (--rx_work_limit < 0))) { 486 if (bd_status & R_OV_S) 487 dev->stats.rx_over_errors++; 488 if (bd_status & R_CR_S) { 489 dev->stats.rx_crc_errors++; 490 dev->stats.rx_dropped++; 491 goto recycle; 492 } 493 bdbuffer = priv->rx_buffer + 494 (priv->currx_bdnum * MAX_RX_BUF_LENGTH); 495 length = ioread16be(&bd->length); 496 497 switch (dev->type) { 498 case ARPHRD_RAWHDLC: 499 bdbuffer += HDLC_HEAD_LEN; 500 length -= (HDLC_HEAD_LEN + HDLC_CRC_SIZE); 501 502 skb = dev_alloc_skb(length); 503 if (!skb) { 504 dev->stats.rx_dropped++; 505 return -ENOMEM; 506 } 507 508 skb_put(skb, length); 509 skb->len = length; 510 skb->dev = dev; 511 memcpy(skb->data, bdbuffer, length); 512 break; 513 514 case ARPHRD_PPP: 515 length -= HDLC_CRC_SIZE; 516 517 skb = dev_alloc_skb(length); 518 if (!skb) { 519 dev->stats.rx_dropped++; 520 return -ENOMEM; 521 } 522 523 skb_put(skb, length); 524 skb->len = length; 525 skb->dev = dev; 526 memcpy(skb->data, bdbuffer, length); 527 break; 528 } 529 530 dev->stats.rx_packets++; 531 dev->stats.rx_bytes += skb->len; 532 howmany++; 533 if (hdlc->proto) 534 skb->protocol = hdlc_type_trans(skb, dev); 535 netif_receive_skb(skb); 536 537 recycle: 538 iowrite16be(bd_status | R_E_S | R_I_S, &bd->status); 539 540 /* update to point at the next bd */ 541 if (bd_status & R_W_S) { 542 priv->currx_bdnum = 0; 543 bd = priv->rx_bd_base; 544 } else { 545 if (priv->currx_bdnum < (RX_BD_RING_LEN - 1)) 546 priv->currx_bdnum += 1; 547 else 548 priv->currx_bdnum = RX_BD_RING_LEN - 1; 549 550 bd += 1; 551 } 552 553 bd_status = ioread16be(&bd->status); 554 } 555 556 priv->currx_bd = bd; 557 return howmany; 558 } 559 560 static int ucc_hdlc_poll(struct napi_struct *napi, int budget) 561 { 562 struct ucc_hdlc_private *priv = container_of(napi, 563 struct ucc_hdlc_private, 564 napi); 565 int howmany; 566 567 /* Tx event processing */ 568 spin_lock(&priv->lock); 569 hdlc_tx_done(priv); 570 spin_unlock(&priv->lock); 571 572 howmany = 0; 573 howmany += hdlc_rx_done(priv, budget - howmany); 574 575 if (howmany < budget) { 576 napi_complete_done(napi, howmany); 577 qe_setbits32(priv->uccf->p_uccm, 578 (UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS) << 16); 579 } 580 581 return howmany; 582 } 583 584 static irqreturn_t ucc_hdlc_irq_handler(int irq, void *dev_id) 585 { 586 struct ucc_hdlc_private *priv = (struct ucc_hdlc_private *)dev_id; 587 struct net_device *dev = priv->ndev; 588 struct ucc_fast_private *uccf; 589 struct ucc_tdm_info *ut_info; 590 u32 ucce; 591 u32 uccm; 592 593 ut_info = priv->ut_info; 594 uccf = priv->uccf; 595 596 ucce = ioread32be(uccf->p_ucce); 597 uccm = ioread32be(uccf->p_uccm); 598 ucce &= uccm; 599 iowrite32be(ucce, uccf->p_ucce); 600 if (!ucce) 601 return IRQ_NONE; 602 603 if ((ucce >> 16) & (UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS)) { 604 if (napi_schedule_prep(&priv->napi)) { 605 uccm &= ~((UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS) 606 << 16); 607 iowrite32be(uccm, uccf->p_uccm); 608 __napi_schedule(&priv->napi); 609 } 610 } 611 612 /* Errors and other events */ 613 if (ucce >> 16 & UCC_HDLC_UCCE_BSY) 614 dev->stats.rx_errors++; 615 if (ucce >> 16 & UCC_HDLC_UCCE_TXE) 616 dev->stats.tx_errors++; 617 618 return IRQ_HANDLED; 619 } 620 621 static int uhdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 622 { 623 const size_t size = sizeof(te1_settings); 624 te1_settings line; 625 struct ucc_hdlc_private *priv = netdev_priv(dev); 626 627 if (cmd != SIOCWANDEV) 628 return hdlc_ioctl(dev, ifr, cmd); 629 630 switch (ifr->ifr_settings.type) { 631 case IF_GET_IFACE: 632 ifr->ifr_settings.type = IF_IFACE_E1; 633 if (ifr->ifr_settings.size < size) { 634 ifr->ifr_settings.size = size; /* data size wanted */ 635 return -ENOBUFS; 636 } 637 memset(&line, 0, sizeof(line)); 638 line.clock_type = priv->clocking; 639 640 if (copy_to_user(ifr->ifr_settings.ifs_ifsu.sync, &line, size)) 641 return -EFAULT; 642 return 0; 643 644 default: 645 return hdlc_ioctl(dev, ifr, cmd); 646 } 647 } 648 649 static int uhdlc_open(struct net_device *dev) 650 { 651 u32 cecr_subblock; 652 hdlc_device *hdlc = dev_to_hdlc(dev); 653 struct ucc_hdlc_private *priv = hdlc->priv; 654 struct ucc_tdm *utdm = priv->utdm; 655 656 if (priv->hdlc_busy != 1) { 657 if (request_irq(priv->ut_info->uf_info.irq, 658 ucc_hdlc_irq_handler, 0, "hdlc", priv)) 659 return -ENODEV; 660 661 cecr_subblock = ucc_fast_get_qe_cr_subblock( 662 priv->ut_info->uf_info.ucc_num); 663 664 qe_issue_cmd(QE_INIT_TX_RX, cecr_subblock, 665 QE_CR_PROTOCOL_UNSPECIFIED, 0); 666 667 ucc_fast_enable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX); 668 669 /* Enable the TDM port */ 670 if (priv->tsa) 671 utdm->si_regs->siglmr1_h |= (0x1 << utdm->tdm_port); 672 673 priv->hdlc_busy = 1; 674 netif_device_attach(priv->ndev); 675 napi_enable(&priv->napi); 676 netif_start_queue(dev); 677 hdlc_open(dev); 678 } 679 680 return 0; 681 } 682 683 static void uhdlc_memclean(struct ucc_hdlc_private *priv) 684 { 685 qe_muram_free(priv->ucc_pram->riptr); 686 qe_muram_free(priv->ucc_pram->tiptr); 687 688 if (priv->rx_bd_base) { 689 dma_free_coherent(priv->dev, 690 RX_BD_RING_LEN * sizeof(struct qe_bd), 691 priv->rx_bd_base, priv->dma_rx_bd); 692 693 priv->rx_bd_base = NULL; 694 priv->dma_rx_bd = 0; 695 } 696 697 if (priv->tx_bd_base) { 698 dma_free_coherent(priv->dev, 699 TX_BD_RING_LEN * sizeof(struct qe_bd), 700 priv->tx_bd_base, priv->dma_tx_bd); 701 702 priv->tx_bd_base = NULL; 703 priv->dma_tx_bd = 0; 704 } 705 706 if (priv->ucc_pram) { 707 qe_muram_free(priv->ucc_pram_offset); 708 priv->ucc_pram = NULL; 709 priv->ucc_pram_offset = 0; 710 } 711 712 kfree(priv->rx_skbuff); 713 priv->rx_skbuff = NULL; 714 715 kfree(priv->tx_skbuff); 716 priv->tx_skbuff = NULL; 717 718 if (priv->uf_regs) { 719 iounmap(priv->uf_regs); 720 priv->uf_regs = NULL; 721 } 722 723 if (priv->uccf) { 724 ucc_fast_free(priv->uccf); 725 priv->uccf = NULL; 726 } 727 728 if (priv->rx_buffer) { 729 dma_free_coherent(priv->dev, 730 RX_BD_RING_LEN * MAX_RX_BUF_LENGTH, 731 priv->rx_buffer, priv->dma_rx_addr); 732 priv->rx_buffer = NULL; 733 priv->dma_rx_addr = 0; 734 } 735 736 if (priv->tx_buffer) { 737 dma_free_coherent(priv->dev, 738 TX_BD_RING_LEN * MAX_RX_BUF_LENGTH, 739 priv->tx_buffer, priv->dma_tx_addr); 740 priv->tx_buffer = NULL; 741 priv->dma_tx_addr = 0; 742 } 743 } 744 745 static int uhdlc_close(struct net_device *dev) 746 { 747 struct ucc_hdlc_private *priv = dev_to_hdlc(dev)->priv; 748 struct ucc_tdm *utdm = priv->utdm; 749 u32 cecr_subblock; 750 751 napi_disable(&priv->napi); 752 cecr_subblock = ucc_fast_get_qe_cr_subblock( 753 priv->ut_info->uf_info.ucc_num); 754 755 qe_issue_cmd(QE_GRACEFUL_STOP_TX, cecr_subblock, 756 (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0); 757 qe_issue_cmd(QE_CLOSE_RX_BD, cecr_subblock, 758 (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0); 759 760 if (priv->tsa) 761 utdm->si_regs->siglmr1_h &= ~(0x1 << utdm->tdm_port); 762 763 ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX); 764 765 free_irq(priv->ut_info->uf_info.irq, priv); 766 netif_stop_queue(dev); 767 priv->hdlc_busy = 0; 768 769 return 0; 770 } 771 772 static int ucc_hdlc_attach(struct net_device *dev, unsigned short encoding, 773 unsigned short parity) 774 { 775 struct ucc_hdlc_private *priv = dev_to_hdlc(dev)->priv; 776 777 if (encoding != ENCODING_NRZ && 778 encoding != ENCODING_NRZI) 779 return -EINVAL; 780 781 if (parity != PARITY_NONE && 782 parity != PARITY_CRC32_PR1_CCITT && 783 parity != PARITY_CRC16_PR1_CCITT) 784 return -EINVAL; 785 786 priv->encoding = encoding; 787 priv->parity = parity; 788 789 return 0; 790 } 791 792 #ifdef CONFIG_PM 793 static void store_clk_config(struct ucc_hdlc_private *priv) 794 { 795 struct qe_mux *qe_mux_reg = &qe_immr->qmx; 796 797 /* store si clk */ 798 priv->cmxsi1cr_h = ioread32be(&qe_mux_reg->cmxsi1cr_h); 799 priv->cmxsi1cr_l = ioread32be(&qe_mux_reg->cmxsi1cr_l); 800 801 /* store si sync */ 802 priv->cmxsi1syr = ioread32be(&qe_mux_reg->cmxsi1syr); 803 804 /* store ucc clk */ 805 memcpy_fromio(priv->cmxucr, qe_mux_reg->cmxucr, 4 * sizeof(u32)); 806 } 807 808 static void resume_clk_config(struct ucc_hdlc_private *priv) 809 { 810 struct qe_mux *qe_mux_reg = &qe_immr->qmx; 811 812 memcpy_toio(qe_mux_reg->cmxucr, priv->cmxucr, 4 * sizeof(u32)); 813 814 iowrite32be(priv->cmxsi1cr_h, &qe_mux_reg->cmxsi1cr_h); 815 iowrite32be(priv->cmxsi1cr_l, &qe_mux_reg->cmxsi1cr_l); 816 817 iowrite32be(priv->cmxsi1syr, &qe_mux_reg->cmxsi1syr); 818 } 819 820 static int uhdlc_suspend(struct device *dev) 821 { 822 struct ucc_hdlc_private *priv = dev_get_drvdata(dev); 823 struct ucc_tdm_info *ut_info; 824 struct ucc_fast __iomem *uf_regs; 825 826 if (!priv) 827 return -EINVAL; 828 829 if (!netif_running(priv->ndev)) 830 return 0; 831 832 netif_device_detach(priv->ndev); 833 napi_disable(&priv->napi); 834 835 ut_info = priv->ut_info; 836 uf_regs = priv->uf_regs; 837 838 /* backup gumr guemr*/ 839 priv->gumr = ioread32be(&uf_regs->gumr); 840 priv->guemr = ioread8(&uf_regs->guemr); 841 842 priv->ucc_pram_bak = kmalloc(sizeof(*priv->ucc_pram_bak), 843 GFP_KERNEL); 844 if (!priv->ucc_pram_bak) 845 return -ENOMEM; 846 847 /* backup HDLC parameter */ 848 memcpy_fromio(priv->ucc_pram_bak, priv->ucc_pram, 849 sizeof(struct ucc_hdlc_param)); 850 851 /* store the clk configuration */ 852 store_clk_config(priv); 853 854 /* save power */ 855 ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX); 856 857 return 0; 858 } 859 860 static int uhdlc_resume(struct device *dev) 861 { 862 struct ucc_hdlc_private *priv = dev_get_drvdata(dev); 863 struct ucc_tdm *utdm; 864 struct ucc_tdm_info *ut_info; 865 struct ucc_fast __iomem *uf_regs; 866 struct ucc_fast_private *uccf; 867 struct ucc_fast_info *uf_info; 868 int ret, i; 869 u32 cecr_subblock; 870 u16 bd_status; 871 872 if (!priv) 873 return -EINVAL; 874 875 if (!netif_running(priv->ndev)) 876 return 0; 877 878 utdm = priv->utdm; 879 ut_info = priv->ut_info; 880 uf_info = &ut_info->uf_info; 881 uf_regs = priv->uf_regs; 882 uccf = priv->uccf; 883 884 /* restore gumr guemr */ 885 iowrite8(priv->guemr, &uf_regs->guemr); 886 iowrite32be(priv->gumr, &uf_regs->gumr); 887 888 /* Set Virtual Fifo registers */ 889 iowrite16be(uf_info->urfs, &uf_regs->urfs); 890 iowrite16be(uf_info->urfet, &uf_regs->urfet); 891 iowrite16be(uf_info->urfset, &uf_regs->urfset); 892 iowrite16be(uf_info->utfs, &uf_regs->utfs); 893 iowrite16be(uf_info->utfet, &uf_regs->utfet); 894 iowrite16be(uf_info->utftt, &uf_regs->utftt); 895 /* utfb, urfb are offsets from MURAM base */ 896 iowrite32be(uccf->ucc_fast_tx_virtual_fifo_base_offset, &uf_regs->utfb); 897 iowrite32be(uccf->ucc_fast_rx_virtual_fifo_base_offset, &uf_regs->urfb); 898 899 /* Rx Tx and sync clock routing */ 900 resume_clk_config(priv); 901 902 iowrite32be(uf_info->uccm_mask, &uf_regs->uccm); 903 iowrite32be(0xffffffff, &uf_regs->ucce); 904 905 ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX); 906 907 /* rebuild SIRAM */ 908 if (priv->tsa) 909 ucc_tdm_init(priv->utdm, priv->ut_info); 910 911 /* Write to QE CECR, UCCx channel to Stop Transmission */ 912 cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num); 913 ret = qe_issue_cmd(QE_STOP_TX, cecr_subblock, 914 (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0); 915 916 /* Set UPSMR normal mode */ 917 iowrite32be(0, &uf_regs->upsmr); 918 919 /* init parameter base */ 920 cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num); 921 ret = qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, cecr_subblock, 922 QE_CR_PROTOCOL_UNSPECIFIED, priv->ucc_pram_offset); 923 924 priv->ucc_pram = (struct ucc_hdlc_param __iomem *) 925 qe_muram_addr(priv->ucc_pram_offset); 926 927 /* restore ucc parameter */ 928 memcpy_toio(priv->ucc_pram, priv->ucc_pram_bak, 929 sizeof(struct ucc_hdlc_param)); 930 kfree(priv->ucc_pram_bak); 931 932 /* rebuild BD entry */ 933 for (i = 0; i < RX_BD_RING_LEN; i++) { 934 if (i < (RX_BD_RING_LEN - 1)) 935 bd_status = R_E_S | R_I_S; 936 else 937 bd_status = R_E_S | R_I_S | R_W_S; 938 939 iowrite16be(bd_status, &priv->rx_bd_base[i].status); 940 iowrite32be(priv->dma_rx_addr + i * MAX_RX_BUF_LENGTH, 941 &priv->rx_bd_base[i].buf); 942 } 943 944 for (i = 0; i < TX_BD_RING_LEN; i++) { 945 if (i < (TX_BD_RING_LEN - 1)) 946 bd_status = T_I_S | T_TC_S; 947 else 948 bd_status = T_I_S | T_TC_S | T_W_S; 949 950 iowrite16be(bd_status, &priv->tx_bd_base[i].status); 951 iowrite32be(priv->dma_tx_addr + i * MAX_RX_BUF_LENGTH, 952 &priv->tx_bd_base[i].buf); 953 } 954 955 /* if hdlc is busy enable TX and RX */ 956 if (priv->hdlc_busy == 1) { 957 cecr_subblock = ucc_fast_get_qe_cr_subblock( 958 priv->ut_info->uf_info.ucc_num); 959 960 qe_issue_cmd(QE_INIT_TX_RX, cecr_subblock, 961 (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0); 962 963 ucc_fast_enable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX); 964 965 /* Enable the TDM port */ 966 if (priv->tsa) 967 utdm->si_regs->siglmr1_h |= (0x1 << utdm->tdm_port); 968 } 969 970 napi_enable(&priv->napi); 971 netif_device_attach(priv->ndev); 972 973 return 0; 974 } 975 976 static const struct dev_pm_ops uhdlc_pm_ops = { 977 .suspend = uhdlc_suspend, 978 .resume = uhdlc_resume, 979 .freeze = uhdlc_suspend, 980 .thaw = uhdlc_resume, 981 }; 982 983 #define HDLC_PM_OPS (&uhdlc_pm_ops) 984 985 #else 986 987 #define HDLC_PM_OPS NULL 988 989 #endif 990 static const struct net_device_ops uhdlc_ops = { 991 .ndo_open = uhdlc_open, 992 .ndo_stop = uhdlc_close, 993 .ndo_start_xmit = hdlc_start_xmit, 994 .ndo_do_ioctl = uhdlc_ioctl, 995 }; 996 997 static int ucc_hdlc_probe(struct platform_device *pdev) 998 { 999 struct device_node *np = pdev->dev.of_node; 1000 struct ucc_hdlc_private *uhdlc_priv = NULL; 1001 struct ucc_tdm_info *ut_info; 1002 struct ucc_tdm *utdm = NULL; 1003 struct resource res; 1004 struct net_device *dev; 1005 hdlc_device *hdlc; 1006 int ucc_num; 1007 const char *sprop; 1008 int ret; 1009 u32 val; 1010 1011 ret = of_property_read_u32_index(np, "cell-index", 0, &val); 1012 if (ret) { 1013 dev_err(&pdev->dev, "Invalid ucc property\n"); 1014 return -ENODEV; 1015 } 1016 1017 ucc_num = val - 1; 1018 if ((ucc_num > 3) || (ucc_num < 0)) { 1019 dev_err(&pdev->dev, ": Invalid UCC num\n"); 1020 return -EINVAL; 1021 } 1022 1023 memcpy(&utdm_info[ucc_num], &utdm_primary_info, 1024 sizeof(utdm_primary_info)); 1025 1026 ut_info = &utdm_info[ucc_num]; 1027 ut_info->uf_info.ucc_num = ucc_num; 1028 1029 sprop = of_get_property(np, "rx-clock-name", NULL); 1030 if (sprop) { 1031 ut_info->uf_info.rx_clock = qe_clock_source(sprop); 1032 if ((ut_info->uf_info.rx_clock < QE_CLK_NONE) || 1033 (ut_info->uf_info.rx_clock > QE_CLK24)) { 1034 dev_err(&pdev->dev, "Invalid rx-clock-name property\n"); 1035 return -EINVAL; 1036 } 1037 } else { 1038 dev_err(&pdev->dev, "Invalid rx-clock-name property\n"); 1039 return -EINVAL; 1040 } 1041 1042 sprop = of_get_property(np, "tx-clock-name", NULL); 1043 if (sprop) { 1044 ut_info->uf_info.tx_clock = qe_clock_source(sprop); 1045 if ((ut_info->uf_info.tx_clock < QE_CLK_NONE) || 1046 (ut_info->uf_info.tx_clock > QE_CLK24)) { 1047 dev_err(&pdev->dev, "Invalid tx-clock-name property\n"); 1048 return -EINVAL; 1049 } 1050 } else { 1051 dev_err(&pdev->dev, "Invalid tx-clock-name property\n"); 1052 return -EINVAL; 1053 } 1054 1055 ret = of_address_to_resource(np, 0, &res); 1056 if (ret) 1057 return -EINVAL; 1058 1059 ut_info->uf_info.regs = res.start; 1060 ut_info->uf_info.irq = irq_of_parse_and_map(np, 0); 1061 1062 uhdlc_priv = kzalloc(sizeof(*uhdlc_priv), GFP_KERNEL); 1063 if (!uhdlc_priv) { 1064 return -ENOMEM; 1065 } 1066 1067 dev_set_drvdata(&pdev->dev, uhdlc_priv); 1068 uhdlc_priv->dev = &pdev->dev; 1069 uhdlc_priv->ut_info = ut_info; 1070 1071 if (of_get_property(np, "fsl,tdm-interface", NULL)) 1072 uhdlc_priv->tsa = 1; 1073 1074 if (of_get_property(np, "fsl,ucc-internal-loopback", NULL)) 1075 uhdlc_priv->loopback = 1; 1076 1077 if (of_get_property(np, "fsl,hdlc-bus", NULL)) 1078 uhdlc_priv->hdlc_bus = 1; 1079 1080 if (uhdlc_priv->tsa == 1) { 1081 utdm = kzalloc(sizeof(*utdm), GFP_KERNEL); 1082 if (!utdm) { 1083 ret = -ENOMEM; 1084 dev_err(&pdev->dev, "No mem to alloc ucc tdm data\n"); 1085 goto free_uhdlc_priv; 1086 } 1087 uhdlc_priv->utdm = utdm; 1088 ret = ucc_of_parse_tdm(np, utdm, ut_info); 1089 if (ret) 1090 goto free_utdm; 1091 } 1092 1093 ret = uhdlc_init(uhdlc_priv); 1094 if (ret) { 1095 dev_err(&pdev->dev, "Failed to init uhdlc\n"); 1096 goto free_utdm; 1097 } 1098 1099 dev = alloc_hdlcdev(uhdlc_priv); 1100 if (!dev) { 1101 ret = -ENOMEM; 1102 pr_err("ucc_hdlc: unable to allocate memory\n"); 1103 goto undo_uhdlc_init; 1104 } 1105 1106 uhdlc_priv->ndev = dev; 1107 hdlc = dev_to_hdlc(dev); 1108 dev->tx_queue_len = 16; 1109 dev->netdev_ops = &uhdlc_ops; 1110 hdlc->attach = ucc_hdlc_attach; 1111 hdlc->xmit = ucc_hdlc_tx; 1112 netif_napi_add(dev, &uhdlc_priv->napi, ucc_hdlc_poll, 32); 1113 if (register_hdlc_device(dev)) { 1114 ret = -ENOBUFS; 1115 pr_err("ucc_hdlc: unable to register hdlc device\n"); 1116 free_netdev(dev); 1117 goto free_dev; 1118 } 1119 1120 return 0; 1121 1122 free_dev: 1123 free_netdev(dev); 1124 undo_uhdlc_init: 1125 free_utdm: 1126 if (uhdlc_priv->tsa) 1127 kfree(utdm); 1128 free_uhdlc_priv: 1129 kfree(uhdlc_priv); 1130 return ret; 1131 } 1132 1133 static int ucc_hdlc_remove(struct platform_device *pdev) 1134 { 1135 struct ucc_hdlc_private *priv = dev_get_drvdata(&pdev->dev); 1136 1137 uhdlc_memclean(priv); 1138 1139 if (priv->utdm->si_regs) { 1140 iounmap(priv->utdm->si_regs); 1141 priv->utdm->si_regs = NULL; 1142 } 1143 1144 if (priv->utdm->siram) { 1145 iounmap(priv->utdm->siram); 1146 priv->utdm->siram = NULL; 1147 } 1148 kfree(priv); 1149 1150 dev_info(&pdev->dev, "UCC based hdlc module removed\n"); 1151 1152 return 0; 1153 } 1154 1155 static const struct of_device_id fsl_ucc_hdlc_of_match[] = { 1156 { 1157 .compatible = "fsl,ucc-hdlc", 1158 }, 1159 {}, 1160 }; 1161 1162 MODULE_DEVICE_TABLE(of, fsl_ucc_hdlc_of_match); 1163 1164 static struct platform_driver ucc_hdlc_driver = { 1165 .probe = ucc_hdlc_probe, 1166 .remove = ucc_hdlc_remove, 1167 .driver = { 1168 .name = DRV_NAME, 1169 .pm = HDLC_PM_OPS, 1170 .of_match_table = fsl_ucc_hdlc_of_match, 1171 }, 1172 }; 1173 1174 module_platform_driver(ucc_hdlc_driver); 1175 MODULE_LICENSE("GPL"); 1176