1 /* 2 * Copyright (C) 2006-2009 Freescale Semicondutor, Inc. All rights reserved. 3 * 4 * Author: Shlomi Gridish <gridish@freescale.com> 5 * Li Yang <leoli@freescale.com> 6 * 7 * Description: 8 * QE UCC Gigabit Ethernet Driver 9 * 10 * This program is free software; you can redistribute it and/or modify it 11 * under the terms of the GNU General Public License as published by the 12 * Free Software Foundation; either version 2 of the License, or (at your 13 * option) any later version. 14 */ 15 #include <linux/kernel.h> 16 #include <linux/init.h> 17 #include <linux/errno.h> 18 #include <linux/slab.h> 19 #include <linux/stddef.h> 20 #include <linux/module.h> 21 #include <linux/interrupt.h> 22 #include <linux/netdevice.h> 23 #include <linux/etherdevice.h> 24 #include <linux/skbuff.h> 25 #include <linux/spinlock.h> 26 #include <linux/mm.h> 27 #include <linux/dma-mapping.h> 28 #include <linux/mii.h> 29 #include <linux/phy.h> 30 #include <linux/workqueue.h> 31 #include <linux/of_mdio.h> 32 #include <linux/of_net.h> 33 #include <linux/of_platform.h> 34 35 #include <asm/uaccess.h> 36 #include <asm/irq.h> 37 #include <asm/io.h> 38 #include <asm/immap_qe.h> 39 #include <asm/qe.h> 40 #include <asm/ucc.h> 41 #include <asm/ucc_fast.h> 42 #include <asm/machdep.h> 43 44 #include "ucc_geth.h" 45 46 #undef DEBUG 47 48 #define ugeth_printk(level, format, arg...) \ 49 printk(level format "\n", ## arg) 50 51 #define ugeth_dbg(format, arg...) \ 52 ugeth_printk(KERN_DEBUG , format , ## arg) 53 #define ugeth_err(format, arg...) \ 54 ugeth_printk(KERN_ERR , format , ## arg) 55 #define ugeth_info(format, arg...) \ 56 ugeth_printk(KERN_INFO , format , ## arg) 57 #define ugeth_warn(format, arg...) \ 58 ugeth_printk(KERN_WARNING , format , ## arg) 59 60 #ifdef UGETH_VERBOSE_DEBUG 61 #define ugeth_vdbg ugeth_dbg 62 #else 63 #define ugeth_vdbg(fmt, args...) do { } while (0) 64 #endif /* UGETH_VERBOSE_DEBUG */ 65 #define UGETH_MSG_DEFAULT (NETIF_MSG_IFUP << 1 ) - 1 66 67 68 static DEFINE_SPINLOCK(ugeth_lock); 69 70 static struct { 71 u32 msg_enable; 72 } debug = { -1 }; 73 74 module_param_named(debug, debug.msg_enable, int, 0); 75 MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 0xffff=all)"); 76 77 static struct ucc_geth_info ugeth_primary_info = { 78 .uf_info = { 79 .bd_mem_part = MEM_PART_SYSTEM, 80 .rtsm = UCC_FAST_SEND_IDLES_BETWEEN_FRAMES, 81 .max_rx_buf_length = 1536, 82 /* adjusted at startup if max-speed 1000 */ 83 .urfs = UCC_GETH_URFS_INIT, 84 .urfet = UCC_GETH_URFET_INIT, 85 .urfset = UCC_GETH_URFSET_INIT, 86 .utfs = UCC_GETH_UTFS_INIT, 87 .utfet = UCC_GETH_UTFET_INIT, 88 .utftt = UCC_GETH_UTFTT_INIT, 89 .ufpt = 256, 90 .mode = UCC_FAST_PROTOCOL_MODE_ETHERNET, 91 .ttx_trx = UCC_FAST_GUMR_TRANSPARENT_TTX_TRX_NORMAL, 92 .tenc = UCC_FAST_TX_ENCODING_NRZ, 93 .renc = UCC_FAST_RX_ENCODING_NRZ, 94 .tcrc = UCC_FAST_16_BIT_CRC, 95 .synl = UCC_FAST_SYNC_LEN_NOT_USED, 96 }, 97 .numQueuesTx = 1, 98 .numQueuesRx = 1, 99 .extendedFilteringChainPointer = ((uint32_t) NULL), 100 .typeorlen = 3072 /*1536 */ , 101 .nonBackToBackIfgPart1 = 0x40, 102 .nonBackToBackIfgPart2 = 0x60, 103 .miminumInterFrameGapEnforcement = 0x50, 104 .backToBackInterFrameGap = 0x60, 105 .mblinterval = 128, 106 .nortsrbytetime = 5, 107 .fracsiz = 1, 108 .strictpriorityq = 0xff, 109 .altBebTruncation = 0xa, 110 .excessDefer = 1, 111 .maxRetransmission = 0xf, 112 .collisionWindow = 0x37, 113 .receiveFlowControl = 1, 114 .transmitFlowControl = 1, 115 .maxGroupAddrInHash = 4, 116 .maxIndAddrInHash = 4, 117 .prel = 7, 118 .maxFrameLength = 1518+16, /* Add extra bytes for VLANs etc. */ 119 .minFrameLength = 64, 120 .maxD1Length = 1520+16, /* Add extra bytes for VLANs etc. */ 121 .maxD2Length = 1520+16, /* Add extra bytes for VLANs etc. */ 122 .vlantype = 0x8100, 123 .ecamptr = ((uint32_t) NULL), 124 .eventRegMask = UCCE_OTHER, 125 .pausePeriod = 0xf000, 126 .interruptcoalescingmaxvalue = {1, 1, 1, 1, 1, 1, 1, 1}, 127 .bdRingLenTx = { 128 TX_BD_RING_LEN, 129 TX_BD_RING_LEN, 130 TX_BD_RING_LEN, 131 TX_BD_RING_LEN, 132 TX_BD_RING_LEN, 133 TX_BD_RING_LEN, 134 TX_BD_RING_LEN, 135 TX_BD_RING_LEN}, 136 137 .bdRingLenRx = { 138 RX_BD_RING_LEN, 139 RX_BD_RING_LEN, 140 RX_BD_RING_LEN, 141 RX_BD_RING_LEN, 142 RX_BD_RING_LEN, 143 RX_BD_RING_LEN, 144 RX_BD_RING_LEN, 145 RX_BD_RING_LEN}, 146 147 .numStationAddresses = UCC_GETH_NUM_OF_STATION_ADDRESSES_1, 148 .largestexternallookupkeysize = 149 QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_NONE, 150 .statisticsMode = UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE | 151 UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX | 152 UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX, 153 .vlanOperationTagged = UCC_GETH_VLAN_OPERATION_TAGGED_NOP, 154 .vlanOperationNonTagged = UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP, 155 .rxQoSMode = UCC_GETH_QOS_MODE_DEFAULT, 156 .aufc = UPSMR_AUTOMATIC_FLOW_CONTROL_MODE_NONE, 157 .padAndCrc = MACCFG2_PAD_AND_CRC_MODE_PAD_AND_CRC, 158 .numThreadsTx = UCC_GETH_NUM_OF_THREADS_1, 159 .numThreadsRx = UCC_GETH_NUM_OF_THREADS_1, 160 .riscTx = QE_RISC_ALLOCATION_RISC1_AND_RISC2, 161 .riscRx = QE_RISC_ALLOCATION_RISC1_AND_RISC2, 162 }; 163 164 static struct ucc_geth_info ugeth_info[8]; 165 166 #ifdef DEBUG 167 static void mem_disp(u8 *addr, int size) 168 { 169 u8 *i; 170 int size16Aling = (size >> 4) << 4; 171 int size4Aling = (size >> 2) << 2; 172 int notAlign = 0; 173 if (size % 16) 174 notAlign = 1; 175 176 for (i = addr; (u32) i < (u32) addr + size16Aling; i += 16) 177 printk("0x%08x: %08x %08x %08x %08x\r\n", 178 (u32) i, 179 *((u32 *) (i)), 180 *((u32 *) (i + 4)), 181 *((u32 *) (i + 8)), *((u32 *) (i + 12))); 182 if (notAlign == 1) 183 printk("0x%08x: ", (u32) i); 184 for (; (u32) i < (u32) addr + size4Aling; i += 4) 185 printk("%08x ", *((u32 *) (i))); 186 for (; (u32) i < (u32) addr + size; i++) 187 printk("%02x", *((i))); 188 if (notAlign == 1) 189 printk("\r\n"); 190 } 191 #endif /* DEBUG */ 192 193 static struct list_head *dequeue(struct list_head *lh) 194 { 195 unsigned long flags; 196 197 spin_lock_irqsave(&ugeth_lock, flags); 198 if (!list_empty(lh)) { 199 struct list_head *node = lh->next; 200 list_del(node); 201 spin_unlock_irqrestore(&ugeth_lock, flags); 202 return node; 203 } else { 204 spin_unlock_irqrestore(&ugeth_lock, flags); 205 return NULL; 206 } 207 } 208 209 static struct sk_buff *get_new_skb(struct ucc_geth_private *ugeth, 210 u8 __iomem *bd) 211 { 212 struct sk_buff *skb; 213 214 skb = netdev_alloc_skb(ugeth->ndev, 215 ugeth->ug_info->uf_info.max_rx_buf_length + 216 UCC_GETH_RX_DATA_BUF_ALIGNMENT); 217 if (!skb) 218 return NULL; 219 220 /* We need the data buffer to be aligned properly. We will reserve 221 * as many bytes as needed to align the data properly 222 */ 223 skb_reserve(skb, 224 UCC_GETH_RX_DATA_BUF_ALIGNMENT - 225 (((unsigned)skb->data) & (UCC_GETH_RX_DATA_BUF_ALIGNMENT - 226 1))); 227 228 out_be32(&((struct qe_bd __iomem *)bd)->buf, 229 dma_map_single(ugeth->dev, 230 skb->data, 231 ugeth->ug_info->uf_info.max_rx_buf_length + 232 UCC_GETH_RX_DATA_BUF_ALIGNMENT, 233 DMA_FROM_DEVICE)); 234 235 out_be32((u32 __iomem *)bd, 236 (R_E | R_I | (in_be32((u32 __iomem*)bd) & R_W))); 237 238 return skb; 239 } 240 241 static int rx_bd_buffer_set(struct ucc_geth_private *ugeth, u8 rxQ) 242 { 243 u8 __iomem *bd; 244 u32 bd_status; 245 struct sk_buff *skb; 246 int i; 247 248 bd = ugeth->p_rx_bd_ring[rxQ]; 249 i = 0; 250 251 do { 252 bd_status = in_be32((u32 __iomem *)bd); 253 skb = get_new_skb(ugeth, bd); 254 255 if (!skb) /* If can not allocate data buffer, 256 abort. Cleanup will be elsewhere */ 257 return -ENOMEM; 258 259 ugeth->rx_skbuff[rxQ][i] = skb; 260 261 /* advance the BD pointer */ 262 bd += sizeof(struct qe_bd); 263 i++; 264 } while (!(bd_status & R_W)); 265 266 return 0; 267 } 268 269 static int fill_init_enet_entries(struct ucc_geth_private *ugeth, 270 u32 *p_start, 271 u8 num_entries, 272 u32 thread_size, 273 u32 thread_alignment, 274 unsigned int risc, 275 int skip_page_for_first_entry) 276 { 277 u32 init_enet_offset; 278 u8 i; 279 int snum; 280 281 for (i = 0; i < num_entries; i++) { 282 if ((snum = qe_get_snum()) < 0) { 283 if (netif_msg_ifup(ugeth)) 284 ugeth_err("fill_init_enet_entries: Can not get SNUM."); 285 return snum; 286 } 287 if ((i == 0) && skip_page_for_first_entry) 288 /* First entry of Rx does not have page */ 289 init_enet_offset = 0; 290 else { 291 init_enet_offset = 292 qe_muram_alloc(thread_size, thread_alignment); 293 if (IS_ERR_VALUE(init_enet_offset)) { 294 if (netif_msg_ifup(ugeth)) 295 ugeth_err("fill_init_enet_entries: Can not allocate DPRAM memory."); 296 qe_put_snum((u8) snum); 297 return -ENOMEM; 298 } 299 } 300 *(p_start++) = 301 ((u8) snum << ENET_INIT_PARAM_SNUM_SHIFT) | init_enet_offset 302 | risc; 303 } 304 305 return 0; 306 } 307 308 static int return_init_enet_entries(struct ucc_geth_private *ugeth, 309 u32 *p_start, 310 u8 num_entries, 311 unsigned int risc, 312 int skip_page_for_first_entry) 313 { 314 u32 init_enet_offset; 315 u8 i; 316 int snum; 317 318 for (i = 0; i < num_entries; i++) { 319 u32 val = *p_start; 320 321 /* Check that this entry was actually valid -- 322 needed in case failed in allocations */ 323 if ((val & ENET_INIT_PARAM_RISC_MASK) == risc) { 324 snum = 325 (u32) (val & ENET_INIT_PARAM_SNUM_MASK) >> 326 ENET_INIT_PARAM_SNUM_SHIFT; 327 qe_put_snum((u8) snum); 328 if (!((i == 0) && skip_page_for_first_entry)) { 329 /* First entry of Rx does not have page */ 330 init_enet_offset = 331 (val & ENET_INIT_PARAM_PTR_MASK); 332 qe_muram_free(init_enet_offset); 333 } 334 *p_start++ = 0; 335 } 336 } 337 338 return 0; 339 } 340 341 #ifdef DEBUG 342 static int dump_init_enet_entries(struct ucc_geth_private *ugeth, 343 u32 __iomem *p_start, 344 u8 num_entries, 345 u32 thread_size, 346 unsigned int risc, 347 int skip_page_for_first_entry) 348 { 349 u32 init_enet_offset; 350 u8 i; 351 int snum; 352 353 for (i = 0; i < num_entries; i++) { 354 u32 val = in_be32(p_start); 355 356 /* Check that this entry was actually valid -- 357 needed in case failed in allocations */ 358 if ((val & ENET_INIT_PARAM_RISC_MASK) == risc) { 359 snum = 360 (u32) (val & ENET_INIT_PARAM_SNUM_MASK) >> 361 ENET_INIT_PARAM_SNUM_SHIFT; 362 qe_put_snum((u8) snum); 363 if (!((i == 0) && skip_page_for_first_entry)) { 364 /* First entry of Rx does not have page */ 365 init_enet_offset = 366 (in_be32(p_start) & 367 ENET_INIT_PARAM_PTR_MASK); 368 ugeth_info("Init enet entry %d:", i); 369 ugeth_info("Base address: 0x%08x", 370 (u32) 371 qe_muram_addr(init_enet_offset)); 372 mem_disp(qe_muram_addr(init_enet_offset), 373 thread_size); 374 } 375 p_start++; 376 } 377 } 378 379 return 0; 380 } 381 #endif 382 383 static void put_enet_addr_container(struct enet_addr_container *enet_addr_cont) 384 { 385 kfree(enet_addr_cont); 386 } 387 388 static void set_mac_addr(__be16 __iomem *reg, u8 *mac) 389 { 390 out_be16(®[0], ((u16)mac[5] << 8) | mac[4]); 391 out_be16(®[1], ((u16)mac[3] << 8) | mac[2]); 392 out_be16(®[2], ((u16)mac[1] << 8) | mac[0]); 393 } 394 395 static int hw_clear_addr_in_paddr(struct ucc_geth_private *ugeth, u8 paddr_num) 396 { 397 struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt; 398 399 if (!(paddr_num < NUM_OF_PADDRS)) { 400 ugeth_warn("%s: Illagel paddr_num.", __func__); 401 return -EINVAL; 402 } 403 404 p_82xx_addr_filt = 405 (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth->p_rx_glbl_pram-> 406 addressfiltering; 407 408 /* Writing address ff.ff.ff.ff.ff.ff disables address 409 recognition for this register */ 410 out_be16(&p_82xx_addr_filt->paddr[paddr_num].h, 0xffff); 411 out_be16(&p_82xx_addr_filt->paddr[paddr_num].m, 0xffff); 412 out_be16(&p_82xx_addr_filt->paddr[paddr_num].l, 0xffff); 413 414 return 0; 415 } 416 417 static void hw_add_addr_in_hash(struct ucc_geth_private *ugeth, 418 u8 *p_enet_addr) 419 { 420 struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt; 421 u32 cecr_subblock; 422 423 p_82xx_addr_filt = 424 (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth->p_rx_glbl_pram-> 425 addressfiltering; 426 427 cecr_subblock = 428 ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num); 429 430 /* Ethernet frames are defined in Little Endian mode, 431 therefore to insert */ 432 /* the address to the hash (Big Endian mode), we reverse the bytes.*/ 433 434 set_mac_addr(&p_82xx_addr_filt->taddr.h, p_enet_addr); 435 436 qe_issue_cmd(QE_SET_GROUP_ADDRESS, cecr_subblock, 437 QE_CR_PROTOCOL_ETHERNET, 0); 438 } 439 440 static inline int compare_addr(u8 **addr1, u8 **addr2) 441 { 442 return memcmp(addr1, addr2, ETH_ALEN); 443 } 444 445 #ifdef DEBUG 446 static void get_statistics(struct ucc_geth_private *ugeth, 447 struct ucc_geth_tx_firmware_statistics * 448 tx_firmware_statistics, 449 struct ucc_geth_rx_firmware_statistics * 450 rx_firmware_statistics, 451 struct ucc_geth_hardware_statistics *hardware_statistics) 452 { 453 struct ucc_fast __iomem *uf_regs; 454 struct ucc_geth __iomem *ug_regs; 455 struct ucc_geth_tx_firmware_statistics_pram *p_tx_fw_statistics_pram; 456 struct ucc_geth_rx_firmware_statistics_pram *p_rx_fw_statistics_pram; 457 458 ug_regs = ugeth->ug_regs; 459 uf_regs = (struct ucc_fast __iomem *) ug_regs; 460 p_tx_fw_statistics_pram = ugeth->p_tx_fw_statistics_pram; 461 p_rx_fw_statistics_pram = ugeth->p_rx_fw_statistics_pram; 462 463 /* Tx firmware only if user handed pointer and driver actually 464 gathers Tx firmware statistics */ 465 if (tx_firmware_statistics && p_tx_fw_statistics_pram) { 466 tx_firmware_statistics->sicoltx = 467 in_be32(&p_tx_fw_statistics_pram->sicoltx); 468 tx_firmware_statistics->mulcoltx = 469 in_be32(&p_tx_fw_statistics_pram->mulcoltx); 470 tx_firmware_statistics->latecoltxfr = 471 in_be32(&p_tx_fw_statistics_pram->latecoltxfr); 472 tx_firmware_statistics->frabortduecol = 473 in_be32(&p_tx_fw_statistics_pram->frabortduecol); 474 tx_firmware_statistics->frlostinmactxer = 475 in_be32(&p_tx_fw_statistics_pram->frlostinmactxer); 476 tx_firmware_statistics->carriersenseertx = 477 in_be32(&p_tx_fw_statistics_pram->carriersenseertx); 478 tx_firmware_statistics->frtxok = 479 in_be32(&p_tx_fw_statistics_pram->frtxok); 480 tx_firmware_statistics->txfrexcessivedefer = 481 in_be32(&p_tx_fw_statistics_pram->txfrexcessivedefer); 482 tx_firmware_statistics->txpkts256 = 483 in_be32(&p_tx_fw_statistics_pram->txpkts256); 484 tx_firmware_statistics->txpkts512 = 485 in_be32(&p_tx_fw_statistics_pram->txpkts512); 486 tx_firmware_statistics->txpkts1024 = 487 in_be32(&p_tx_fw_statistics_pram->txpkts1024); 488 tx_firmware_statistics->txpktsjumbo = 489 in_be32(&p_tx_fw_statistics_pram->txpktsjumbo); 490 } 491 492 /* Rx firmware only if user handed pointer and driver actually 493 * gathers Rx firmware statistics */ 494 if (rx_firmware_statistics && p_rx_fw_statistics_pram) { 495 int i; 496 rx_firmware_statistics->frrxfcser = 497 in_be32(&p_rx_fw_statistics_pram->frrxfcser); 498 rx_firmware_statistics->fraligner = 499 in_be32(&p_rx_fw_statistics_pram->fraligner); 500 rx_firmware_statistics->inrangelenrxer = 501 in_be32(&p_rx_fw_statistics_pram->inrangelenrxer); 502 rx_firmware_statistics->outrangelenrxer = 503 in_be32(&p_rx_fw_statistics_pram->outrangelenrxer); 504 rx_firmware_statistics->frtoolong = 505 in_be32(&p_rx_fw_statistics_pram->frtoolong); 506 rx_firmware_statistics->runt = 507 in_be32(&p_rx_fw_statistics_pram->runt); 508 rx_firmware_statistics->verylongevent = 509 in_be32(&p_rx_fw_statistics_pram->verylongevent); 510 rx_firmware_statistics->symbolerror = 511 in_be32(&p_rx_fw_statistics_pram->symbolerror); 512 rx_firmware_statistics->dropbsy = 513 in_be32(&p_rx_fw_statistics_pram->dropbsy); 514 for (i = 0; i < 0x8; i++) 515 rx_firmware_statistics->res0[i] = 516 p_rx_fw_statistics_pram->res0[i]; 517 rx_firmware_statistics->mismatchdrop = 518 in_be32(&p_rx_fw_statistics_pram->mismatchdrop); 519 rx_firmware_statistics->underpkts = 520 in_be32(&p_rx_fw_statistics_pram->underpkts); 521 rx_firmware_statistics->pkts256 = 522 in_be32(&p_rx_fw_statistics_pram->pkts256); 523 rx_firmware_statistics->pkts512 = 524 in_be32(&p_rx_fw_statistics_pram->pkts512); 525 rx_firmware_statistics->pkts1024 = 526 in_be32(&p_rx_fw_statistics_pram->pkts1024); 527 rx_firmware_statistics->pktsjumbo = 528 in_be32(&p_rx_fw_statistics_pram->pktsjumbo); 529 rx_firmware_statistics->frlossinmacer = 530 in_be32(&p_rx_fw_statistics_pram->frlossinmacer); 531 rx_firmware_statistics->pausefr = 532 in_be32(&p_rx_fw_statistics_pram->pausefr); 533 for (i = 0; i < 0x4; i++) 534 rx_firmware_statistics->res1[i] = 535 p_rx_fw_statistics_pram->res1[i]; 536 rx_firmware_statistics->removevlan = 537 in_be32(&p_rx_fw_statistics_pram->removevlan); 538 rx_firmware_statistics->replacevlan = 539 in_be32(&p_rx_fw_statistics_pram->replacevlan); 540 rx_firmware_statistics->insertvlan = 541 in_be32(&p_rx_fw_statistics_pram->insertvlan); 542 } 543 544 /* Hardware only if user handed pointer and driver actually 545 gathers hardware statistics */ 546 if (hardware_statistics && 547 (in_be32(&uf_regs->upsmr) & UCC_GETH_UPSMR_HSE)) { 548 hardware_statistics->tx64 = in_be32(&ug_regs->tx64); 549 hardware_statistics->tx127 = in_be32(&ug_regs->tx127); 550 hardware_statistics->tx255 = in_be32(&ug_regs->tx255); 551 hardware_statistics->rx64 = in_be32(&ug_regs->rx64); 552 hardware_statistics->rx127 = in_be32(&ug_regs->rx127); 553 hardware_statistics->rx255 = in_be32(&ug_regs->rx255); 554 hardware_statistics->txok = in_be32(&ug_regs->txok); 555 hardware_statistics->txcf = in_be16(&ug_regs->txcf); 556 hardware_statistics->tmca = in_be32(&ug_regs->tmca); 557 hardware_statistics->tbca = in_be32(&ug_regs->tbca); 558 hardware_statistics->rxfok = in_be32(&ug_regs->rxfok); 559 hardware_statistics->rxbok = in_be32(&ug_regs->rxbok); 560 hardware_statistics->rbyt = in_be32(&ug_regs->rbyt); 561 hardware_statistics->rmca = in_be32(&ug_regs->rmca); 562 hardware_statistics->rbca = in_be32(&ug_regs->rbca); 563 } 564 } 565 566 static void dump_bds(struct ucc_geth_private *ugeth) 567 { 568 int i; 569 int length; 570 571 for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) { 572 if (ugeth->p_tx_bd_ring[i]) { 573 length = 574 (ugeth->ug_info->bdRingLenTx[i] * 575 sizeof(struct qe_bd)); 576 ugeth_info("TX BDs[%d]", i); 577 mem_disp(ugeth->p_tx_bd_ring[i], length); 578 } 579 } 580 for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) { 581 if (ugeth->p_rx_bd_ring[i]) { 582 length = 583 (ugeth->ug_info->bdRingLenRx[i] * 584 sizeof(struct qe_bd)); 585 ugeth_info("RX BDs[%d]", i); 586 mem_disp(ugeth->p_rx_bd_ring[i], length); 587 } 588 } 589 } 590 591 static void dump_regs(struct ucc_geth_private *ugeth) 592 { 593 int i; 594 595 ugeth_info("UCC%d Geth registers:", ugeth->ug_info->uf_info.ucc_num + 1); 596 ugeth_info("Base address: 0x%08x", (u32) ugeth->ug_regs); 597 598 ugeth_info("maccfg1 : addr - 0x%08x, val - 0x%08x", 599 (u32) & ugeth->ug_regs->maccfg1, 600 in_be32(&ugeth->ug_regs->maccfg1)); 601 ugeth_info("maccfg2 : addr - 0x%08x, val - 0x%08x", 602 (u32) & ugeth->ug_regs->maccfg2, 603 in_be32(&ugeth->ug_regs->maccfg2)); 604 ugeth_info("ipgifg : addr - 0x%08x, val - 0x%08x", 605 (u32) & ugeth->ug_regs->ipgifg, 606 in_be32(&ugeth->ug_regs->ipgifg)); 607 ugeth_info("hafdup : addr - 0x%08x, val - 0x%08x", 608 (u32) & ugeth->ug_regs->hafdup, 609 in_be32(&ugeth->ug_regs->hafdup)); 610 ugeth_info("ifctl : addr - 0x%08x, val - 0x%08x", 611 (u32) & ugeth->ug_regs->ifctl, 612 in_be32(&ugeth->ug_regs->ifctl)); 613 ugeth_info("ifstat : addr - 0x%08x, val - 0x%08x", 614 (u32) & ugeth->ug_regs->ifstat, 615 in_be32(&ugeth->ug_regs->ifstat)); 616 ugeth_info("macstnaddr1: addr - 0x%08x, val - 0x%08x", 617 (u32) & ugeth->ug_regs->macstnaddr1, 618 in_be32(&ugeth->ug_regs->macstnaddr1)); 619 ugeth_info("macstnaddr2: addr - 0x%08x, val - 0x%08x", 620 (u32) & ugeth->ug_regs->macstnaddr2, 621 in_be32(&ugeth->ug_regs->macstnaddr2)); 622 ugeth_info("uempr : addr - 0x%08x, val - 0x%08x", 623 (u32) & ugeth->ug_regs->uempr, 624 in_be32(&ugeth->ug_regs->uempr)); 625 ugeth_info("utbipar : addr - 0x%08x, val - 0x%08x", 626 (u32) & ugeth->ug_regs->utbipar, 627 in_be32(&ugeth->ug_regs->utbipar)); 628 ugeth_info("uescr : addr - 0x%08x, val - 0x%04x", 629 (u32) & ugeth->ug_regs->uescr, 630 in_be16(&ugeth->ug_regs->uescr)); 631 ugeth_info("tx64 : addr - 0x%08x, val - 0x%08x", 632 (u32) & ugeth->ug_regs->tx64, 633 in_be32(&ugeth->ug_regs->tx64)); 634 ugeth_info("tx127 : addr - 0x%08x, val - 0x%08x", 635 (u32) & ugeth->ug_regs->tx127, 636 in_be32(&ugeth->ug_regs->tx127)); 637 ugeth_info("tx255 : addr - 0x%08x, val - 0x%08x", 638 (u32) & ugeth->ug_regs->tx255, 639 in_be32(&ugeth->ug_regs->tx255)); 640 ugeth_info("rx64 : addr - 0x%08x, val - 0x%08x", 641 (u32) & ugeth->ug_regs->rx64, 642 in_be32(&ugeth->ug_regs->rx64)); 643 ugeth_info("rx127 : addr - 0x%08x, val - 0x%08x", 644 (u32) & ugeth->ug_regs->rx127, 645 in_be32(&ugeth->ug_regs->rx127)); 646 ugeth_info("rx255 : addr - 0x%08x, val - 0x%08x", 647 (u32) & ugeth->ug_regs->rx255, 648 in_be32(&ugeth->ug_regs->rx255)); 649 ugeth_info("txok : addr - 0x%08x, val - 0x%08x", 650 (u32) & ugeth->ug_regs->txok, 651 in_be32(&ugeth->ug_regs->txok)); 652 ugeth_info("txcf : addr - 0x%08x, val - 0x%04x", 653 (u32) & ugeth->ug_regs->txcf, 654 in_be16(&ugeth->ug_regs->txcf)); 655 ugeth_info("tmca : addr - 0x%08x, val - 0x%08x", 656 (u32) & ugeth->ug_regs->tmca, 657 in_be32(&ugeth->ug_regs->tmca)); 658 ugeth_info("tbca : addr - 0x%08x, val - 0x%08x", 659 (u32) & ugeth->ug_regs->tbca, 660 in_be32(&ugeth->ug_regs->tbca)); 661 ugeth_info("rxfok : addr - 0x%08x, val - 0x%08x", 662 (u32) & ugeth->ug_regs->rxfok, 663 in_be32(&ugeth->ug_regs->rxfok)); 664 ugeth_info("rxbok : addr - 0x%08x, val - 0x%08x", 665 (u32) & ugeth->ug_regs->rxbok, 666 in_be32(&ugeth->ug_regs->rxbok)); 667 ugeth_info("rbyt : addr - 0x%08x, val - 0x%08x", 668 (u32) & ugeth->ug_regs->rbyt, 669 in_be32(&ugeth->ug_regs->rbyt)); 670 ugeth_info("rmca : addr - 0x%08x, val - 0x%08x", 671 (u32) & ugeth->ug_regs->rmca, 672 in_be32(&ugeth->ug_regs->rmca)); 673 ugeth_info("rbca : addr - 0x%08x, val - 0x%08x", 674 (u32) & ugeth->ug_regs->rbca, 675 in_be32(&ugeth->ug_regs->rbca)); 676 ugeth_info("scar : addr - 0x%08x, val - 0x%08x", 677 (u32) & ugeth->ug_regs->scar, 678 in_be32(&ugeth->ug_regs->scar)); 679 ugeth_info("scam : addr - 0x%08x, val - 0x%08x", 680 (u32) & ugeth->ug_regs->scam, 681 in_be32(&ugeth->ug_regs->scam)); 682 683 if (ugeth->p_thread_data_tx) { 684 int numThreadsTxNumerical; 685 switch (ugeth->ug_info->numThreadsTx) { 686 case UCC_GETH_NUM_OF_THREADS_1: 687 numThreadsTxNumerical = 1; 688 break; 689 case UCC_GETH_NUM_OF_THREADS_2: 690 numThreadsTxNumerical = 2; 691 break; 692 case UCC_GETH_NUM_OF_THREADS_4: 693 numThreadsTxNumerical = 4; 694 break; 695 case UCC_GETH_NUM_OF_THREADS_6: 696 numThreadsTxNumerical = 6; 697 break; 698 case UCC_GETH_NUM_OF_THREADS_8: 699 numThreadsTxNumerical = 8; 700 break; 701 default: 702 numThreadsTxNumerical = 0; 703 break; 704 } 705 706 ugeth_info("Thread data TXs:"); 707 ugeth_info("Base address: 0x%08x", 708 (u32) ugeth->p_thread_data_tx); 709 for (i = 0; i < numThreadsTxNumerical; i++) { 710 ugeth_info("Thread data TX[%d]:", i); 711 ugeth_info("Base address: 0x%08x", 712 (u32) & ugeth->p_thread_data_tx[i]); 713 mem_disp((u8 *) & ugeth->p_thread_data_tx[i], 714 sizeof(struct ucc_geth_thread_data_tx)); 715 } 716 } 717 if (ugeth->p_thread_data_rx) { 718 int numThreadsRxNumerical; 719 switch (ugeth->ug_info->numThreadsRx) { 720 case UCC_GETH_NUM_OF_THREADS_1: 721 numThreadsRxNumerical = 1; 722 break; 723 case UCC_GETH_NUM_OF_THREADS_2: 724 numThreadsRxNumerical = 2; 725 break; 726 case UCC_GETH_NUM_OF_THREADS_4: 727 numThreadsRxNumerical = 4; 728 break; 729 case UCC_GETH_NUM_OF_THREADS_6: 730 numThreadsRxNumerical = 6; 731 break; 732 case UCC_GETH_NUM_OF_THREADS_8: 733 numThreadsRxNumerical = 8; 734 break; 735 default: 736 numThreadsRxNumerical = 0; 737 break; 738 } 739 740 ugeth_info("Thread data RX:"); 741 ugeth_info("Base address: 0x%08x", 742 (u32) ugeth->p_thread_data_rx); 743 for (i = 0; i < numThreadsRxNumerical; i++) { 744 ugeth_info("Thread data RX[%d]:", i); 745 ugeth_info("Base address: 0x%08x", 746 (u32) & ugeth->p_thread_data_rx[i]); 747 mem_disp((u8 *) & ugeth->p_thread_data_rx[i], 748 sizeof(struct ucc_geth_thread_data_rx)); 749 } 750 } 751 if (ugeth->p_exf_glbl_param) { 752 ugeth_info("EXF global param:"); 753 ugeth_info("Base address: 0x%08x", 754 (u32) ugeth->p_exf_glbl_param); 755 mem_disp((u8 *) ugeth->p_exf_glbl_param, 756 sizeof(*ugeth->p_exf_glbl_param)); 757 } 758 if (ugeth->p_tx_glbl_pram) { 759 ugeth_info("TX global param:"); 760 ugeth_info("Base address: 0x%08x", (u32) ugeth->p_tx_glbl_pram); 761 ugeth_info("temoder : addr - 0x%08x, val - 0x%04x", 762 (u32) & ugeth->p_tx_glbl_pram->temoder, 763 in_be16(&ugeth->p_tx_glbl_pram->temoder)); 764 ugeth_info("sqptr : addr - 0x%08x, val - 0x%08x", 765 (u32) & ugeth->p_tx_glbl_pram->sqptr, 766 in_be32(&ugeth->p_tx_glbl_pram->sqptr)); 767 ugeth_info("schedulerbasepointer: addr - 0x%08x, val - 0x%08x", 768 (u32) & ugeth->p_tx_glbl_pram->schedulerbasepointer, 769 in_be32(&ugeth->p_tx_glbl_pram-> 770 schedulerbasepointer)); 771 ugeth_info("txrmonbaseptr: addr - 0x%08x, val - 0x%08x", 772 (u32) & ugeth->p_tx_glbl_pram->txrmonbaseptr, 773 in_be32(&ugeth->p_tx_glbl_pram->txrmonbaseptr)); 774 ugeth_info("tstate : addr - 0x%08x, val - 0x%08x", 775 (u32) & ugeth->p_tx_glbl_pram->tstate, 776 in_be32(&ugeth->p_tx_glbl_pram->tstate)); 777 ugeth_info("iphoffset[0] : addr - 0x%08x, val - 0x%02x", 778 (u32) & ugeth->p_tx_glbl_pram->iphoffset[0], 779 ugeth->p_tx_glbl_pram->iphoffset[0]); 780 ugeth_info("iphoffset[1] : addr - 0x%08x, val - 0x%02x", 781 (u32) & ugeth->p_tx_glbl_pram->iphoffset[1], 782 ugeth->p_tx_glbl_pram->iphoffset[1]); 783 ugeth_info("iphoffset[2] : addr - 0x%08x, val - 0x%02x", 784 (u32) & ugeth->p_tx_glbl_pram->iphoffset[2], 785 ugeth->p_tx_glbl_pram->iphoffset[2]); 786 ugeth_info("iphoffset[3] : addr - 0x%08x, val - 0x%02x", 787 (u32) & ugeth->p_tx_glbl_pram->iphoffset[3], 788 ugeth->p_tx_glbl_pram->iphoffset[3]); 789 ugeth_info("iphoffset[4] : addr - 0x%08x, val - 0x%02x", 790 (u32) & ugeth->p_tx_glbl_pram->iphoffset[4], 791 ugeth->p_tx_glbl_pram->iphoffset[4]); 792 ugeth_info("iphoffset[5] : addr - 0x%08x, val - 0x%02x", 793 (u32) & ugeth->p_tx_glbl_pram->iphoffset[5], 794 ugeth->p_tx_glbl_pram->iphoffset[5]); 795 ugeth_info("iphoffset[6] : addr - 0x%08x, val - 0x%02x", 796 (u32) & ugeth->p_tx_glbl_pram->iphoffset[6], 797 ugeth->p_tx_glbl_pram->iphoffset[6]); 798 ugeth_info("iphoffset[7] : addr - 0x%08x, val - 0x%02x", 799 (u32) & ugeth->p_tx_glbl_pram->iphoffset[7], 800 ugeth->p_tx_glbl_pram->iphoffset[7]); 801 ugeth_info("vtagtable[0] : addr - 0x%08x, val - 0x%08x", 802 (u32) & ugeth->p_tx_glbl_pram->vtagtable[0], 803 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[0])); 804 ugeth_info("vtagtable[1] : addr - 0x%08x, val - 0x%08x", 805 (u32) & ugeth->p_tx_glbl_pram->vtagtable[1], 806 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[1])); 807 ugeth_info("vtagtable[2] : addr - 0x%08x, val - 0x%08x", 808 (u32) & ugeth->p_tx_glbl_pram->vtagtable[2], 809 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[2])); 810 ugeth_info("vtagtable[3] : addr - 0x%08x, val - 0x%08x", 811 (u32) & ugeth->p_tx_glbl_pram->vtagtable[3], 812 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[3])); 813 ugeth_info("vtagtable[4] : addr - 0x%08x, val - 0x%08x", 814 (u32) & ugeth->p_tx_glbl_pram->vtagtable[4], 815 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[4])); 816 ugeth_info("vtagtable[5] : addr - 0x%08x, val - 0x%08x", 817 (u32) & ugeth->p_tx_glbl_pram->vtagtable[5], 818 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[5])); 819 ugeth_info("vtagtable[6] : addr - 0x%08x, val - 0x%08x", 820 (u32) & ugeth->p_tx_glbl_pram->vtagtable[6], 821 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[6])); 822 ugeth_info("vtagtable[7] : addr - 0x%08x, val - 0x%08x", 823 (u32) & ugeth->p_tx_glbl_pram->vtagtable[7], 824 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[7])); 825 ugeth_info("tqptr : addr - 0x%08x, val - 0x%08x", 826 (u32) & ugeth->p_tx_glbl_pram->tqptr, 827 in_be32(&ugeth->p_tx_glbl_pram->tqptr)); 828 } 829 if (ugeth->p_rx_glbl_pram) { 830 ugeth_info("RX global param:"); 831 ugeth_info("Base address: 0x%08x", (u32) ugeth->p_rx_glbl_pram); 832 ugeth_info("remoder : addr - 0x%08x, val - 0x%08x", 833 (u32) & ugeth->p_rx_glbl_pram->remoder, 834 in_be32(&ugeth->p_rx_glbl_pram->remoder)); 835 ugeth_info("rqptr : addr - 0x%08x, val - 0x%08x", 836 (u32) & ugeth->p_rx_glbl_pram->rqptr, 837 in_be32(&ugeth->p_rx_glbl_pram->rqptr)); 838 ugeth_info("typeorlen : addr - 0x%08x, val - 0x%04x", 839 (u32) & ugeth->p_rx_glbl_pram->typeorlen, 840 in_be16(&ugeth->p_rx_glbl_pram->typeorlen)); 841 ugeth_info("rxgstpack : addr - 0x%08x, val - 0x%02x", 842 (u32) & ugeth->p_rx_glbl_pram->rxgstpack, 843 ugeth->p_rx_glbl_pram->rxgstpack); 844 ugeth_info("rxrmonbaseptr : addr - 0x%08x, val - 0x%08x", 845 (u32) & ugeth->p_rx_glbl_pram->rxrmonbaseptr, 846 in_be32(&ugeth->p_rx_glbl_pram->rxrmonbaseptr)); 847 ugeth_info("intcoalescingptr: addr - 0x%08x, val - 0x%08x", 848 (u32) & ugeth->p_rx_glbl_pram->intcoalescingptr, 849 in_be32(&ugeth->p_rx_glbl_pram->intcoalescingptr)); 850 ugeth_info("rstate : addr - 0x%08x, val - 0x%02x", 851 (u32) & ugeth->p_rx_glbl_pram->rstate, 852 ugeth->p_rx_glbl_pram->rstate); 853 ugeth_info("mrblr : addr - 0x%08x, val - 0x%04x", 854 (u32) & ugeth->p_rx_glbl_pram->mrblr, 855 in_be16(&ugeth->p_rx_glbl_pram->mrblr)); 856 ugeth_info("rbdqptr : addr - 0x%08x, val - 0x%08x", 857 (u32) & ugeth->p_rx_glbl_pram->rbdqptr, 858 in_be32(&ugeth->p_rx_glbl_pram->rbdqptr)); 859 ugeth_info("mflr : addr - 0x%08x, val - 0x%04x", 860 (u32) & ugeth->p_rx_glbl_pram->mflr, 861 in_be16(&ugeth->p_rx_glbl_pram->mflr)); 862 ugeth_info("minflr : addr - 0x%08x, val - 0x%04x", 863 (u32) & ugeth->p_rx_glbl_pram->minflr, 864 in_be16(&ugeth->p_rx_glbl_pram->minflr)); 865 ugeth_info("maxd1 : addr - 0x%08x, val - 0x%04x", 866 (u32) & ugeth->p_rx_glbl_pram->maxd1, 867 in_be16(&ugeth->p_rx_glbl_pram->maxd1)); 868 ugeth_info("maxd2 : addr - 0x%08x, val - 0x%04x", 869 (u32) & ugeth->p_rx_glbl_pram->maxd2, 870 in_be16(&ugeth->p_rx_glbl_pram->maxd2)); 871 ugeth_info("ecamptr : addr - 0x%08x, val - 0x%08x", 872 (u32) & ugeth->p_rx_glbl_pram->ecamptr, 873 in_be32(&ugeth->p_rx_glbl_pram->ecamptr)); 874 ugeth_info("l2qt : addr - 0x%08x, val - 0x%08x", 875 (u32) & ugeth->p_rx_glbl_pram->l2qt, 876 in_be32(&ugeth->p_rx_glbl_pram->l2qt)); 877 ugeth_info("l3qt[0] : addr - 0x%08x, val - 0x%08x", 878 (u32) & ugeth->p_rx_glbl_pram->l3qt[0], 879 in_be32(&ugeth->p_rx_glbl_pram->l3qt[0])); 880 ugeth_info("l3qt[1] : addr - 0x%08x, val - 0x%08x", 881 (u32) & ugeth->p_rx_glbl_pram->l3qt[1], 882 in_be32(&ugeth->p_rx_glbl_pram->l3qt[1])); 883 ugeth_info("l3qt[2] : addr - 0x%08x, val - 0x%08x", 884 (u32) & ugeth->p_rx_glbl_pram->l3qt[2], 885 in_be32(&ugeth->p_rx_glbl_pram->l3qt[2])); 886 ugeth_info("l3qt[3] : addr - 0x%08x, val - 0x%08x", 887 (u32) & ugeth->p_rx_glbl_pram->l3qt[3], 888 in_be32(&ugeth->p_rx_glbl_pram->l3qt[3])); 889 ugeth_info("l3qt[4] : addr - 0x%08x, val - 0x%08x", 890 (u32) & ugeth->p_rx_glbl_pram->l3qt[4], 891 in_be32(&ugeth->p_rx_glbl_pram->l3qt[4])); 892 ugeth_info("l3qt[5] : addr - 0x%08x, val - 0x%08x", 893 (u32) & ugeth->p_rx_glbl_pram->l3qt[5], 894 in_be32(&ugeth->p_rx_glbl_pram->l3qt[5])); 895 ugeth_info("l3qt[6] : addr - 0x%08x, val - 0x%08x", 896 (u32) & ugeth->p_rx_glbl_pram->l3qt[6], 897 in_be32(&ugeth->p_rx_glbl_pram->l3qt[6])); 898 ugeth_info("l3qt[7] : addr - 0x%08x, val - 0x%08x", 899 (u32) & ugeth->p_rx_glbl_pram->l3qt[7], 900 in_be32(&ugeth->p_rx_glbl_pram->l3qt[7])); 901 ugeth_info("vlantype : addr - 0x%08x, val - 0x%04x", 902 (u32) & ugeth->p_rx_glbl_pram->vlantype, 903 in_be16(&ugeth->p_rx_glbl_pram->vlantype)); 904 ugeth_info("vlantci : addr - 0x%08x, val - 0x%04x", 905 (u32) & ugeth->p_rx_glbl_pram->vlantci, 906 in_be16(&ugeth->p_rx_glbl_pram->vlantci)); 907 for (i = 0; i < 64; i++) 908 ugeth_info 909 ("addressfiltering[%d]: addr - 0x%08x, val - 0x%02x", 910 i, 911 (u32) & ugeth->p_rx_glbl_pram->addressfiltering[i], 912 ugeth->p_rx_glbl_pram->addressfiltering[i]); 913 ugeth_info("exfGlobalParam : addr - 0x%08x, val - 0x%08x", 914 (u32) & ugeth->p_rx_glbl_pram->exfGlobalParam, 915 in_be32(&ugeth->p_rx_glbl_pram->exfGlobalParam)); 916 } 917 if (ugeth->p_send_q_mem_reg) { 918 ugeth_info("Send Q memory registers:"); 919 ugeth_info("Base address: 0x%08x", 920 (u32) ugeth->p_send_q_mem_reg); 921 for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) { 922 ugeth_info("SQQD[%d]:", i); 923 ugeth_info("Base address: 0x%08x", 924 (u32) & ugeth->p_send_q_mem_reg->sqqd[i]); 925 mem_disp((u8 *) & ugeth->p_send_q_mem_reg->sqqd[i], 926 sizeof(struct ucc_geth_send_queue_qd)); 927 } 928 } 929 if (ugeth->p_scheduler) { 930 ugeth_info("Scheduler:"); 931 ugeth_info("Base address: 0x%08x", (u32) ugeth->p_scheduler); 932 mem_disp((u8 *) ugeth->p_scheduler, 933 sizeof(*ugeth->p_scheduler)); 934 } 935 if (ugeth->p_tx_fw_statistics_pram) { 936 ugeth_info("TX FW statistics pram:"); 937 ugeth_info("Base address: 0x%08x", 938 (u32) ugeth->p_tx_fw_statistics_pram); 939 mem_disp((u8 *) ugeth->p_tx_fw_statistics_pram, 940 sizeof(*ugeth->p_tx_fw_statistics_pram)); 941 } 942 if (ugeth->p_rx_fw_statistics_pram) { 943 ugeth_info("RX FW statistics pram:"); 944 ugeth_info("Base address: 0x%08x", 945 (u32) ugeth->p_rx_fw_statistics_pram); 946 mem_disp((u8 *) ugeth->p_rx_fw_statistics_pram, 947 sizeof(*ugeth->p_rx_fw_statistics_pram)); 948 } 949 if (ugeth->p_rx_irq_coalescing_tbl) { 950 ugeth_info("RX IRQ coalescing tables:"); 951 ugeth_info("Base address: 0x%08x", 952 (u32) ugeth->p_rx_irq_coalescing_tbl); 953 for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) { 954 ugeth_info("RX IRQ coalescing table entry[%d]:", i); 955 ugeth_info("Base address: 0x%08x", 956 (u32) & ugeth->p_rx_irq_coalescing_tbl-> 957 coalescingentry[i]); 958 ugeth_info 959 ("interruptcoalescingmaxvalue: addr - 0x%08x, val - 0x%08x", 960 (u32) & ugeth->p_rx_irq_coalescing_tbl-> 961 coalescingentry[i].interruptcoalescingmaxvalue, 962 in_be32(&ugeth->p_rx_irq_coalescing_tbl-> 963 coalescingentry[i]. 964 interruptcoalescingmaxvalue)); 965 ugeth_info 966 ("interruptcoalescingcounter : addr - 0x%08x, val - 0x%08x", 967 (u32) & ugeth->p_rx_irq_coalescing_tbl-> 968 coalescingentry[i].interruptcoalescingcounter, 969 in_be32(&ugeth->p_rx_irq_coalescing_tbl-> 970 coalescingentry[i]. 971 interruptcoalescingcounter)); 972 } 973 } 974 if (ugeth->p_rx_bd_qs_tbl) { 975 ugeth_info("RX BD QS tables:"); 976 ugeth_info("Base address: 0x%08x", (u32) ugeth->p_rx_bd_qs_tbl); 977 for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) { 978 ugeth_info("RX BD QS table[%d]:", i); 979 ugeth_info("Base address: 0x%08x", 980 (u32) & ugeth->p_rx_bd_qs_tbl[i]); 981 ugeth_info 982 ("bdbaseptr : addr - 0x%08x, val - 0x%08x", 983 (u32) & ugeth->p_rx_bd_qs_tbl[i].bdbaseptr, 984 in_be32(&ugeth->p_rx_bd_qs_tbl[i].bdbaseptr)); 985 ugeth_info 986 ("bdptr : addr - 0x%08x, val - 0x%08x", 987 (u32) & ugeth->p_rx_bd_qs_tbl[i].bdptr, 988 in_be32(&ugeth->p_rx_bd_qs_tbl[i].bdptr)); 989 ugeth_info 990 ("externalbdbaseptr: addr - 0x%08x, val - 0x%08x", 991 (u32) & ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr, 992 in_be32(&ugeth->p_rx_bd_qs_tbl[i]. 993 externalbdbaseptr)); 994 ugeth_info 995 ("externalbdptr : addr - 0x%08x, val - 0x%08x", 996 (u32) & ugeth->p_rx_bd_qs_tbl[i].externalbdptr, 997 in_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdptr)); 998 ugeth_info("ucode RX Prefetched BDs:"); 999 ugeth_info("Base address: 0x%08x", 1000 (u32) 1001 qe_muram_addr(in_be32 1002 (&ugeth->p_rx_bd_qs_tbl[i]. 1003 bdbaseptr))); 1004 mem_disp((u8 *) 1005 qe_muram_addr(in_be32 1006 (&ugeth->p_rx_bd_qs_tbl[i]. 1007 bdbaseptr)), 1008 sizeof(struct ucc_geth_rx_prefetched_bds)); 1009 } 1010 } 1011 if (ugeth->p_init_enet_param_shadow) { 1012 int size; 1013 ugeth_info("Init enet param shadow:"); 1014 ugeth_info("Base address: 0x%08x", 1015 (u32) ugeth->p_init_enet_param_shadow); 1016 mem_disp((u8 *) ugeth->p_init_enet_param_shadow, 1017 sizeof(*ugeth->p_init_enet_param_shadow)); 1018 1019 size = sizeof(struct ucc_geth_thread_rx_pram); 1020 if (ugeth->ug_info->rxExtendedFiltering) { 1021 size += 1022 THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING; 1023 if (ugeth->ug_info->largestexternallookupkeysize == 1024 QE_FLTR_TABLE_LOOKUP_KEY_SIZE_8_BYTES) 1025 size += 1026 THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8; 1027 if (ugeth->ug_info->largestexternallookupkeysize == 1028 QE_FLTR_TABLE_LOOKUP_KEY_SIZE_16_BYTES) 1029 size += 1030 THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_16; 1031 } 1032 1033 dump_init_enet_entries(ugeth, 1034 &(ugeth->p_init_enet_param_shadow-> 1035 txthread[0]), 1036 ENET_INIT_PARAM_MAX_ENTRIES_TX, 1037 sizeof(struct ucc_geth_thread_tx_pram), 1038 ugeth->ug_info->riscTx, 0); 1039 dump_init_enet_entries(ugeth, 1040 &(ugeth->p_init_enet_param_shadow-> 1041 rxthread[0]), 1042 ENET_INIT_PARAM_MAX_ENTRIES_RX, size, 1043 ugeth->ug_info->riscRx, 1); 1044 } 1045 } 1046 #endif /* DEBUG */ 1047 1048 static void init_default_reg_vals(u32 __iomem *upsmr_register, 1049 u32 __iomem *maccfg1_register, 1050 u32 __iomem *maccfg2_register) 1051 { 1052 out_be32(upsmr_register, UCC_GETH_UPSMR_INIT); 1053 out_be32(maccfg1_register, UCC_GETH_MACCFG1_INIT); 1054 out_be32(maccfg2_register, UCC_GETH_MACCFG2_INIT); 1055 } 1056 1057 static int init_half_duplex_params(int alt_beb, 1058 int back_pressure_no_backoff, 1059 int no_backoff, 1060 int excess_defer, 1061 u8 alt_beb_truncation, 1062 u8 max_retransmissions, 1063 u8 collision_window, 1064 u32 __iomem *hafdup_register) 1065 { 1066 u32 value = 0; 1067 1068 if ((alt_beb_truncation > HALFDUP_ALT_BEB_TRUNCATION_MAX) || 1069 (max_retransmissions > HALFDUP_MAX_RETRANSMISSION_MAX) || 1070 (collision_window > HALFDUP_COLLISION_WINDOW_MAX)) 1071 return -EINVAL; 1072 1073 value = (u32) (alt_beb_truncation << HALFDUP_ALT_BEB_TRUNCATION_SHIFT); 1074 1075 if (alt_beb) 1076 value |= HALFDUP_ALT_BEB; 1077 if (back_pressure_no_backoff) 1078 value |= HALFDUP_BACK_PRESSURE_NO_BACKOFF; 1079 if (no_backoff) 1080 value |= HALFDUP_NO_BACKOFF; 1081 if (excess_defer) 1082 value |= HALFDUP_EXCESSIVE_DEFER; 1083 1084 value |= (max_retransmissions << HALFDUP_MAX_RETRANSMISSION_SHIFT); 1085 1086 value |= collision_window; 1087 1088 out_be32(hafdup_register, value); 1089 return 0; 1090 } 1091 1092 static int init_inter_frame_gap_params(u8 non_btb_cs_ipg, 1093 u8 non_btb_ipg, 1094 u8 min_ifg, 1095 u8 btb_ipg, 1096 u32 __iomem *ipgifg_register) 1097 { 1098 u32 value = 0; 1099 1100 /* Non-Back-to-back IPG part 1 should be <= Non-Back-to-back 1101 IPG part 2 */ 1102 if (non_btb_cs_ipg > non_btb_ipg) 1103 return -EINVAL; 1104 1105 if ((non_btb_cs_ipg > IPGIFG_NON_BACK_TO_BACK_IFG_PART1_MAX) || 1106 (non_btb_ipg > IPGIFG_NON_BACK_TO_BACK_IFG_PART2_MAX) || 1107 /*(min_ifg > IPGIFG_MINIMUM_IFG_ENFORCEMENT_MAX) || */ 1108 (btb_ipg > IPGIFG_BACK_TO_BACK_IFG_MAX)) 1109 return -EINVAL; 1110 1111 value |= 1112 ((non_btb_cs_ipg << IPGIFG_NON_BACK_TO_BACK_IFG_PART1_SHIFT) & 1113 IPGIFG_NBTB_CS_IPG_MASK); 1114 value |= 1115 ((non_btb_ipg << IPGIFG_NON_BACK_TO_BACK_IFG_PART2_SHIFT) & 1116 IPGIFG_NBTB_IPG_MASK); 1117 value |= 1118 ((min_ifg << IPGIFG_MINIMUM_IFG_ENFORCEMENT_SHIFT) & 1119 IPGIFG_MIN_IFG_MASK); 1120 value |= (btb_ipg & IPGIFG_BTB_IPG_MASK); 1121 1122 out_be32(ipgifg_register, value); 1123 return 0; 1124 } 1125 1126 int init_flow_control_params(u32 automatic_flow_control_mode, 1127 int rx_flow_control_enable, 1128 int tx_flow_control_enable, 1129 u16 pause_period, 1130 u16 extension_field, 1131 u32 __iomem *upsmr_register, 1132 u32 __iomem *uempr_register, 1133 u32 __iomem *maccfg1_register) 1134 { 1135 u32 value = 0; 1136 1137 /* Set UEMPR register */ 1138 value = (u32) pause_period << UEMPR_PAUSE_TIME_VALUE_SHIFT; 1139 value |= (u32) extension_field << UEMPR_EXTENDED_PAUSE_TIME_VALUE_SHIFT; 1140 out_be32(uempr_register, value); 1141 1142 /* Set UPSMR register */ 1143 setbits32(upsmr_register, automatic_flow_control_mode); 1144 1145 value = in_be32(maccfg1_register); 1146 if (rx_flow_control_enable) 1147 value |= MACCFG1_FLOW_RX; 1148 if (tx_flow_control_enable) 1149 value |= MACCFG1_FLOW_TX; 1150 out_be32(maccfg1_register, value); 1151 1152 return 0; 1153 } 1154 1155 static int init_hw_statistics_gathering_mode(int enable_hardware_statistics, 1156 int auto_zero_hardware_statistics, 1157 u32 __iomem *upsmr_register, 1158 u16 __iomem *uescr_register) 1159 { 1160 u16 uescr_value = 0; 1161 1162 /* Enable hardware statistics gathering if requested */ 1163 if (enable_hardware_statistics) 1164 setbits32(upsmr_register, UCC_GETH_UPSMR_HSE); 1165 1166 /* Clear hardware statistics counters */ 1167 uescr_value = in_be16(uescr_register); 1168 uescr_value |= UESCR_CLRCNT; 1169 /* Automatically zero hardware statistics counters on read, 1170 if requested */ 1171 if (auto_zero_hardware_statistics) 1172 uescr_value |= UESCR_AUTOZ; 1173 out_be16(uescr_register, uescr_value); 1174 1175 return 0; 1176 } 1177 1178 static int init_firmware_statistics_gathering_mode(int 1179 enable_tx_firmware_statistics, 1180 int enable_rx_firmware_statistics, 1181 u32 __iomem *tx_rmon_base_ptr, 1182 u32 tx_firmware_statistics_structure_address, 1183 u32 __iomem *rx_rmon_base_ptr, 1184 u32 rx_firmware_statistics_structure_address, 1185 u16 __iomem *temoder_register, 1186 u32 __iomem *remoder_register) 1187 { 1188 /* Note: this function does not check if */ 1189 /* the parameters it receives are NULL */ 1190 1191 if (enable_tx_firmware_statistics) { 1192 out_be32(tx_rmon_base_ptr, 1193 tx_firmware_statistics_structure_address); 1194 setbits16(temoder_register, TEMODER_TX_RMON_STATISTICS_ENABLE); 1195 } 1196 1197 if (enable_rx_firmware_statistics) { 1198 out_be32(rx_rmon_base_ptr, 1199 rx_firmware_statistics_structure_address); 1200 setbits32(remoder_register, REMODER_RX_RMON_STATISTICS_ENABLE); 1201 } 1202 1203 return 0; 1204 } 1205 1206 static int init_mac_station_addr_regs(u8 address_byte_0, 1207 u8 address_byte_1, 1208 u8 address_byte_2, 1209 u8 address_byte_3, 1210 u8 address_byte_4, 1211 u8 address_byte_5, 1212 u32 __iomem *macstnaddr1_register, 1213 u32 __iomem *macstnaddr2_register) 1214 { 1215 u32 value = 0; 1216 1217 /* Example: for a station address of 0x12345678ABCD, */ 1218 /* 0x12 is byte 0, 0x34 is byte 1 and so on and 0xCD is byte 5 */ 1219 1220 /* MACSTNADDR1 Register: */ 1221 1222 /* 0 7 8 15 */ 1223 /* station address byte 5 station address byte 4 */ 1224 /* 16 23 24 31 */ 1225 /* station address byte 3 station address byte 2 */ 1226 value |= (u32) ((address_byte_2 << 0) & 0x000000FF); 1227 value |= (u32) ((address_byte_3 << 8) & 0x0000FF00); 1228 value |= (u32) ((address_byte_4 << 16) & 0x00FF0000); 1229 value |= (u32) ((address_byte_5 << 24) & 0xFF000000); 1230 1231 out_be32(macstnaddr1_register, value); 1232 1233 /* MACSTNADDR2 Register: */ 1234 1235 /* 0 7 8 15 */ 1236 /* station address byte 1 station address byte 0 */ 1237 /* 16 23 24 31 */ 1238 /* reserved reserved */ 1239 value = 0; 1240 value |= (u32) ((address_byte_0 << 16) & 0x00FF0000); 1241 value |= (u32) ((address_byte_1 << 24) & 0xFF000000); 1242 1243 out_be32(macstnaddr2_register, value); 1244 1245 return 0; 1246 } 1247 1248 static int init_check_frame_length_mode(int length_check, 1249 u32 __iomem *maccfg2_register) 1250 { 1251 u32 value = 0; 1252 1253 value = in_be32(maccfg2_register); 1254 1255 if (length_check) 1256 value |= MACCFG2_LC; 1257 else 1258 value &= ~MACCFG2_LC; 1259 1260 out_be32(maccfg2_register, value); 1261 return 0; 1262 } 1263 1264 static int init_preamble_length(u8 preamble_length, 1265 u32 __iomem *maccfg2_register) 1266 { 1267 if ((preamble_length < 3) || (preamble_length > 7)) 1268 return -EINVAL; 1269 1270 clrsetbits_be32(maccfg2_register, MACCFG2_PREL_MASK, 1271 preamble_length << MACCFG2_PREL_SHIFT); 1272 1273 return 0; 1274 } 1275 1276 static int init_rx_parameters(int reject_broadcast, 1277 int receive_short_frames, 1278 int promiscuous, u32 __iomem *upsmr_register) 1279 { 1280 u32 value = 0; 1281 1282 value = in_be32(upsmr_register); 1283 1284 if (reject_broadcast) 1285 value |= UCC_GETH_UPSMR_BRO; 1286 else 1287 value &= ~UCC_GETH_UPSMR_BRO; 1288 1289 if (receive_short_frames) 1290 value |= UCC_GETH_UPSMR_RSH; 1291 else 1292 value &= ~UCC_GETH_UPSMR_RSH; 1293 1294 if (promiscuous) 1295 value |= UCC_GETH_UPSMR_PRO; 1296 else 1297 value &= ~UCC_GETH_UPSMR_PRO; 1298 1299 out_be32(upsmr_register, value); 1300 1301 return 0; 1302 } 1303 1304 static int init_max_rx_buff_len(u16 max_rx_buf_len, 1305 u16 __iomem *mrblr_register) 1306 { 1307 /* max_rx_buf_len value must be a multiple of 128 */ 1308 if ((max_rx_buf_len == 0) || 1309 (max_rx_buf_len % UCC_GETH_MRBLR_ALIGNMENT)) 1310 return -EINVAL; 1311 1312 out_be16(mrblr_register, max_rx_buf_len); 1313 return 0; 1314 } 1315 1316 static int init_min_frame_len(u16 min_frame_length, 1317 u16 __iomem *minflr_register, 1318 u16 __iomem *mrblr_register) 1319 { 1320 u16 mrblr_value = 0; 1321 1322 mrblr_value = in_be16(mrblr_register); 1323 if (min_frame_length >= (mrblr_value - 4)) 1324 return -EINVAL; 1325 1326 out_be16(minflr_register, min_frame_length); 1327 return 0; 1328 } 1329 1330 static int adjust_enet_interface(struct ucc_geth_private *ugeth) 1331 { 1332 struct ucc_geth_info *ug_info; 1333 struct ucc_geth __iomem *ug_regs; 1334 struct ucc_fast __iomem *uf_regs; 1335 int ret_val; 1336 u32 upsmr, maccfg2; 1337 u16 value; 1338 1339 ugeth_vdbg("%s: IN", __func__); 1340 1341 ug_info = ugeth->ug_info; 1342 ug_regs = ugeth->ug_regs; 1343 uf_regs = ugeth->uccf->uf_regs; 1344 1345 /* Set MACCFG2 */ 1346 maccfg2 = in_be32(&ug_regs->maccfg2); 1347 maccfg2 &= ~MACCFG2_INTERFACE_MODE_MASK; 1348 if ((ugeth->max_speed == SPEED_10) || 1349 (ugeth->max_speed == SPEED_100)) 1350 maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE; 1351 else if (ugeth->max_speed == SPEED_1000) 1352 maccfg2 |= MACCFG2_INTERFACE_MODE_BYTE; 1353 maccfg2 |= ug_info->padAndCrc; 1354 out_be32(&ug_regs->maccfg2, maccfg2); 1355 1356 /* Set UPSMR */ 1357 upsmr = in_be32(&uf_regs->upsmr); 1358 upsmr &= ~(UCC_GETH_UPSMR_RPM | UCC_GETH_UPSMR_R10M | 1359 UCC_GETH_UPSMR_TBIM | UCC_GETH_UPSMR_RMM); 1360 if ((ugeth->phy_interface == PHY_INTERFACE_MODE_RMII) || 1361 (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII) || 1362 (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_ID) || 1363 (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) || 1364 (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) || 1365 (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) { 1366 if (ugeth->phy_interface != PHY_INTERFACE_MODE_RMII) 1367 upsmr |= UCC_GETH_UPSMR_RPM; 1368 switch (ugeth->max_speed) { 1369 case SPEED_10: 1370 upsmr |= UCC_GETH_UPSMR_R10M; 1371 /* FALLTHROUGH */ 1372 case SPEED_100: 1373 if (ugeth->phy_interface != PHY_INTERFACE_MODE_RTBI) 1374 upsmr |= UCC_GETH_UPSMR_RMM; 1375 } 1376 } 1377 if ((ugeth->phy_interface == PHY_INTERFACE_MODE_TBI) || 1378 (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) { 1379 upsmr |= UCC_GETH_UPSMR_TBIM; 1380 } 1381 if ((ugeth->phy_interface == PHY_INTERFACE_MODE_SGMII)) 1382 upsmr |= UCC_GETH_UPSMR_SGMM; 1383 1384 out_be32(&uf_regs->upsmr, upsmr); 1385 1386 /* Disable autonegotiation in tbi mode, because by default it 1387 comes up in autonegotiation mode. */ 1388 /* Note that this depends on proper setting in utbipar register. */ 1389 if ((ugeth->phy_interface == PHY_INTERFACE_MODE_TBI) || 1390 (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) { 1391 struct ucc_geth_info *ug_info = ugeth->ug_info; 1392 struct phy_device *tbiphy; 1393 1394 if (!ug_info->tbi_node) 1395 ugeth_warn("TBI mode requires that the device " 1396 "tree specify a tbi-handle\n"); 1397 1398 tbiphy = of_phy_find_device(ug_info->tbi_node); 1399 if (!tbiphy) 1400 ugeth_warn("Could not get TBI device\n"); 1401 1402 value = phy_read(tbiphy, ENET_TBI_MII_CR); 1403 value &= ~0x1000; /* Turn off autonegotiation */ 1404 phy_write(tbiphy, ENET_TBI_MII_CR, value); 1405 } 1406 1407 init_check_frame_length_mode(ug_info->lengthCheckRx, &ug_regs->maccfg2); 1408 1409 ret_val = init_preamble_length(ug_info->prel, &ug_regs->maccfg2); 1410 if (ret_val != 0) { 1411 if (netif_msg_probe(ugeth)) 1412 ugeth_err("%s: Preamble length must be between 3 and 7 inclusive.", 1413 __func__); 1414 return ret_val; 1415 } 1416 1417 return 0; 1418 } 1419 1420 static int ugeth_graceful_stop_tx(struct ucc_geth_private *ugeth) 1421 { 1422 struct ucc_fast_private *uccf; 1423 u32 cecr_subblock; 1424 u32 temp; 1425 int i = 10; 1426 1427 uccf = ugeth->uccf; 1428 1429 /* Mask GRACEFUL STOP TX interrupt bit and clear it */ 1430 clrbits32(uccf->p_uccm, UCC_GETH_UCCE_GRA); 1431 out_be32(uccf->p_ucce, UCC_GETH_UCCE_GRA); /* clear by writing 1 */ 1432 1433 /* Issue host command */ 1434 cecr_subblock = 1435 ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num); 1436 qe_issue_cmd(QE_GRACEFUL_STOP_TX, cecr_subblock, 1437 QE_CR_PROTOCOL_ETHERNET, 0); 1438 1439 /* Wait for command to complete */ 1440 do { 1441 msleep(10); 1442 temp = in_be32(uccf->p_ucce); 1443 } while (!(temp & UCC_GETH_UCCE_GRA) && --i); 1444 1445 uccf->stopped_tx = 1; 1446 1447 return 0; 1448 } 1449 1450 static int ugeth_graceful_stop_rx(struct ucc_geth_private *ugeth) 1451 { 1452 struct ucc_fast_private *uccf; 1453 u32 cecr_subblock; 1454 u8 temp; 1455 int i = 10; 1456 1457 uccf = ugeth->uccf; 1458 1459 /* Clear acknowledge bit */ 1460 temp = in_8(&ugeth->p_rx_glbl_pram->rxgstpack); 1461 temp &= ~GRACEFUL_STOP_ACKNOWLEDGE_RX; 1462 out_8(&ugeth->p_rx_glbl_pram->rxgstpack, temp); 1463 1464 /* Keep issuing command and checking acknowledge bit until 1465 it is asserted, according to spec */ 1466 do { 1467 /* Issue host command */ 1468 cecr_subblock = 1469 ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info. 1470 ucc_num); 1471 qe_issue_cmd(QE_GRACEFUL_STOP_RX, cecr_subblock, 1472 QE_CR_PROTOCOL_ETHERNET, 0); 1473 msleep(10); 1474 temp = in_8(&ugeth->p_rx_glbl_pram->rxgstpack); 1475 } while (!(temp & GRACEFUL_STOP_ACKNOWLEDGE_RX) && --i); 1476 1477 uccf->stopped_rx = 1; 1478 1479 return 0; 1480 } 1481 1482 static int ugeth_restart_tx(struct ucc_geth_private *ugeth) 1483 { 1484 struct ucc_fast_private *uccf; 1485 u32 cecr_subblock; 1486 1487 uccf = ugeth->uccf; 1488 1489 cecr_subblock = 1490 ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num); 1491 qe_issue_cmd(QE_RESTART_TX, cecr_subblock, QE_CR_PROTOCOL_ETHERNET, 0); 1492 uccf->stopped_tx = 0; 1493 1494 return 0; 1495 } 1496 1497 static int ugeth_restart_rx(struct ucc_geth_private *ugeth) 1498 { 1499 struct ucc_fast_private *uccf; 1500 u32 cecr_subblock; 1501 1502 uccf = ugeth->uccf; 1503 1504 cecr_subblock = 1505 ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num); 1506 qe_issue_cmd(QE_RESTART_RX, cecr_subblock, QE_CR_PROTOCOL_ETHERNET, 1507 0); 1508 uccf->stopped_rx = 0; 1509 1510 return 0; 1511 } 1512 1513 static int ugeth_enable(struct ucc_geth_private *ugeth, enum comm_dir mode) 1514 { 1515 struct ucc_fast_private *uccf; 1516 int enabled_tx, enabled_rx; 1517 1518 uccf = ugeth->uccf; 1519 1520 /* check if the UCC number is in range. */ 1521 if (ugeth->ug_info->uf_info.ucc_num >= UCC_MAX_NUM) { 1522 if (netif_msg_probe(ugeth)) 1523 ugeth_err("%s: ucc_num out of range.", __func__); 1524 return -EINVAL; 1525 } 1526 1527 enabled_tx = uccf->enabled_tx; 1528 enabled_rx = uccf->enabled_rx; 1529 1530 /* Get Tx and Rx going again, in case this channel was actively 1531 disabled. */ 1532 if ((mode & COMM_DIR_TX) && (!enabled_tx) && uccf->stopped_tx) 1533 ugeth_restart_tx(ugeth); 1534 if ((mode & COMM_DIR_RX) && (!enabled_rx) && uccf->stopped_rx) 1535 ugeth_restart_rx(ugeth); 1536 1537 ucc_fast_enable(uccf, mode); /* OK to do even if not disabled */ 1538 1539 return 0; 1540 1541 } 1542 1543 static int ugeth_disable(struct ucc_geth_private *ugeth, enum comm_dir mode) 1544 { 1545 struct ucc_fast_private *uccf; 1546 1547 uccf = ugeth->uccf; 1548 1549 /* check if the UCC number is in range. */ 1550 if (ugeth->ug_info->uf_info.ucc_num >= UCC_MAX_NUM) { 1551 if (netif_msg_probe(ugeth)) 1552 ugeth_err("%s: ucc_num out of range.", __func__); 1553 return -EINVAL; 1554 } 1555 1556 /* Stop any transmissions */ 1557 if ((mode & COMM_DIR_TX) && uccf->enabled_tx && !uccf->stopped_tx) 1558 ugeth_graceful_stop_tx(ugeth); 1559 1560 /* Stop any receptions */ 1561 if ((mode & COMM_DIR_RX) && uccf->enabled_rx && !uccf->stopped_rx) 1562 ugeth_graceful_stop_rx(ugeth); 1563 1564 ucc_fast_disable(ugeth->uccf, mode); /* OK to do even if not enabled */ 1565 1566 return 0; 1567 } 1568 1569 static void ugeth_quiesce(struct ucc_geth_private *ugeth) 1570 { 1571 /* Prevent any further xmits, plus detach the device. */ 1572 netif_device_detach(ugeth->ndev); 1573 1574 /* Wait for any current xmits to finish. */ 1575 netif_tx_disable(ugeth->ndev); 1576 1577 /* Disable the interrupt to avoid NAPI rescheduling. */ 1578 disable_irq(ugeth->ug_info->uf_info.irq); 1579 1580 /* Stop NAPI, and possibly wait for its completion. */ 1581 napi_disable(&ugeth->napi); 1582 } 1583 1584 static void ugeth_activate(struct ucc_geth_private *ugeth) 1585 { 1586 napi_enable(&ugeth->napi); 1587 enable_irq(ugeth->ug_info->uf_info.irq); 1588 netif_device_attach(ugeth->ndev); 1589 } 1590 1591 /* Called every time the controller might need to be made 1592 * aware of new link state. The PHY code conveys this 1593 * information through variables in the ugeth structure, and this 1594 * function converts those variables into the appropriate 1595 * register values, and can bring down the device if needed. 1596 */ 1597 1598 static void adjust_link(struct net_device *dev) 1599 { 1600 struct ucc_geth_private *ugeth = netdev_priv(dev); 1601 struct ucc_geth __iomem *ug_regs; 1602 struct ucc_fast __iomem *uf_regs; 1603 struct phy_device *phydev = ugeth->phydev; 1604 int new_state = 0; 1605 1606 ug_regs = ugeth->ug_regs; 1607 uf_regs = ugeth->uccf->uf_regs; 1608 1609 if (phydev->link) { 1610 u32 tempval = in_be32(&ug_regs->maccfg2); 1611 u32 upsmr = in_be32(&uf_regs->upsmr); 1612 /* Now we make sure that we can be in full duplex mode. 1613 * If not, we operate in half-duplex mode. */ 1614 if (phydev->duplex != ugeth->oldduplex) { 1615 new_state = 1; 1616 if (!(phydev->duplex)) 1617 tempval &= ~(MACCFG2_FDX); 1618 else 1619 tempval |= MACCFG2_FDX; 1620 ugeth->oldduplex = phydev->duplex; 1621 } 1622 1623 if (phydev->speed != ugeth->oldspeed) { 1624 new_state = 1; 1625 switch (phydev->speed) { 1626 case SPEED_1000: 1627 tempval = ((tempval & 1628 ~(MACCFG2_INTERFACE_MODE_MASK)) | 1629 MACCFG2_INTERFACE_MODE_BYTE); 1630 break; 1631 case SPEED_100: 1632 case SPEED_10: 1633 tempval = ((tempval & 1634 ~(MACCFG2_INTERFACE_MODE_MASK)) | 1635 MACCFG2_INTERFACE_MODE_NIBBLE); 1636 /* if reduced mode, re-set UPSMR.R10M */ 1637 if ((ugeth->phy_interface == PHY_INTERFACE_MODE_RMII) || 1638 (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII) || 1639 (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_ID) || 1640 (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) || 1641 (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) || 1642 (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) { 1643 if (phydev->speed == SPEED_10) 1644 upsmr |= UCC_GETH_UPSMR_R10M; 1645 else 1646 upsmr &= ~UCC_GETH_UPSMR_R10M; 1647 } 1648 break; 1649 default: 1650 if (netif_msg_link(ugeth)) 1651 ugeth_warn( 1652 "%s: Ack! Speed (%d) is not 10/100/1000!", 1653 dev->name, phydev->speed); 1654 break; 1655 } 1656 ugeth->oldspeed = phydev->speed; 1657 } 1658 1659 if (!ugeth->oldlink) { 1660 new_state = 1; 1661 ugeth->oldlink = 1; 1662 } 1663 1664 if (new_state) { 1665 /* 1666 * To change the MAC configuration we need to disable 1667 * the controller. To do so, we have to either grab 1668 * ugeth->lock, which is a bad idea since 'graceful 1669 * stop' commands might take quite a while, or we can 1670 * quiesce driver's activity. 1671 */ 1672 ugeth_quiesce(ugeth); 1673 ugeth_disable(ugeth, COMM_DIR_RX_AND_TX); 1674 1675 out_be32(&ug_regs->maccfg2, tempval); 1676 out_be32(&uf_regs->upsmr, upsmr); 1677 1678 ugeth_enable(ugeth, COMM_DIR_RX_AND_TX); 1679 ugeth_activate(ugeth); 1680 } 1681 } else if (ugeth->oldlink) { 1682 new_state = 1; 1683 ugeth->oldlink = 0; 1684 ugeth->oldspeed = 0; 1685 ugeth->oldduplex = -1; 1686 } 1687 1688 if (new_state && netif_msg_link(ugeth)) 1689 phy_print_status(phydev); 1690 } 1691 1692 /* Initialize TBI PHY interface for communicating with the 1693 * SERDES lynx PHY on the chip. We communicate with this PHY 1694 * through the MDIO bus on each controller, treating it as a 1695 * "normal" PHY at the address found in the UTBIPA register. We assume 1696 * that the UTBIPA register is valid. Either the MDIO bus code will set 1697 * it to a value that doesn't conflict with other PHYs on the bus, or the 1698 * value doesn't matter, as there are no other PHYs on the bus. 1699 */ 1700 static void uec_configure_serdes(struct net_device *dev) 1701 { 1702 struct ucc_geth_private *ugeth = netdev_priv(dev); 1703 struct ucc_geth_info *ug_info = ugeth->ug_info; 1704 struct phy_device *tbiphy; 1705 1706 if (!ug_info->tbi_node) { 1707 dev_warn(&dev->dev, "SGMII mode requires that the device " 1708 "tree specify a tbi-handle\n"); 1709 return; 1710 } 1711 1712 tbiphy = of_phy_find_device(ug_info->tbi_node); 1713 if (!tbiphy) { 1714 dev_err(&dev->dev, "error: Could not get TBI device\n"); 1715 return; 1716 } 1717 1718 /* 1719 * If the link is already up, we must already be ok, and don't need to 1720 * configure and reset the TBI<->SerDes link. Maybe U-Boot configured 1721 * everything for us? Resetting it takes the link down and requires 1722 * several seconds for it to come back. 1723 */ 1724 if (phy_read(tbiphy, ENET_TBI_MII_SR) & TBISR_LSTATUS) 1725 return; 1726 1727 /* Single clk mode, mii mode off(for serdes communication) */ 1728 phy_write(tbiphy, ENET_TBI_MII_ANA, TBIANA_SETTINGS); 1729 1730 phy_write(tbiphy, ENET_TBI_MII_TBICON, TBICON_CLK_SELECT); 1731 1732 phy_write(tbiphy, ENET_TBI_MII_CR, TBICR_SETTINGS); 1733 } 1734 1735 /* Configure the PHY for dev. 1736 * returns 0 if success. -1 if failure 1737 */ 1738 static int init_phy(struct net_device *dev) 1739 { 1740 struct ucc_geth_private *priv = netdev_priv(dev); 1741 struct ucc_geth_info *ug_info = priv->ug_info; 1742 struct phy_device *phydev; 1743 1744 priv->oldlink = 0; 1745 priv->oldspeed = 0; 1746 priv->oldduplex = -1; 1747 1748 phydev = of_phy_connect(dev, ug_info->phy_node, &adjust_link, 0, 1749 priv->phy_interface); 1750 if (!phydev) 1751 phydev = of_phy_connect_fixed_link(dev, &adjust_link, 1752 priv->phy_interface); 1753 if (!phydev) { 1754 dev_err(&dev->dev, "Could not attach to PHY\n"); 1755 return -ENODEV; 1756 } 1757 1758 if (priv->phy_interface == PHY_INTERFACE_MODE_SGMII) 1759 uec_configure_serdes(dev); 1760 1761 phydev->supported &= (SUPPORTED_MII | 1762 SUPPORTED_Autoneg | 1763 ADVERTISED_10baseT_Half | 1764 ADVERTISED_10baseT_Full | 1765 ADVERTISED_100baseT_Half | 1766 ADVERTISED_100baseT_Full); 1767 1768 if (priv->max_speed == SPEED_1000) 1769 phydev->supported |= ADVERTISED_1000baseT_Full; 1770 1771 phydev->advertising = phydev->supported; 1772 1773 priv->phydev = phydev; 1774 1775 return 0; 1776 } 1777 1778 static void ugeth_dump_regs(struct ucc_geth_private *ugeth) 1779 { 1780 #ifdef DEBUG 1781 ucc_fast_dump_regs(ugeth->uccf); 1782 dump_regs(ugeth); 1783 dump_bds(ugeth); 1784 #endif 1785 } 1786 1787 static int ugeth_82xx_filtering_clear_all_addr_in_hash(struct ucc_geth_private * 1788 ugeth, 1789 enum enet_addr_type 1790 enet_addr_type) 1791 { 1792 struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt; 1793 struct ucc_fast_private *uccf; 1794 enum comm_dir comm_dir; 1795 struct list_head *p_lh; 1796 u16 i, num; 1797 u32 __iomem *addr_h; 1798 u32 __iomem *addr_l; 1799 u8 *p_counter; 1800 1801 uccf = ugeth->uccf; 1802 1803 p_82xx_addr_filt = 1804 (struct ucc_geth_82xx_address_filtering_pram __iomem *) 1805 ugeth->p_rx_glbl_pram->addressfiltering; 1806 1807 if (enet_addr_type == ENET_ADDR_TYPE_GROUP) { 1808 addr_h = &(p_82xx_addr_filt->gaddr_h); 1809 addr_l = &(p_82xx_addr_filt->gaddr_l); 1810 p_lh = &ugeth->group_hash_q; 1811 p_counter = &(ugeth->numGroupAddrInHash); 1812 } else if (enet_addr_type == ENET_ADDR_TYPE_INDIVIDUAL) { 1813 addr_h = &(p_82xx_addr_filt->iaddr_h); 1814 addr_l = &(p_82xx_addr_filt->iaddr_l); 1815 p_lh = &ugeth->ind_hash_q; 1816 p_counter = &(ugeth->numIndAddrInHash); 1817 } else 1818 return -EINVAL; 1819 1820 comm_dir = 0; 1821 if (uccf->enabled_tx) 1822 comm_dir |= COMM_DIR_TX; 1823 if (uccf->enabled_rx) 1824 comm_dir |= COMM_DIR_RX; 1825 if (comm_dir) 1826 ugeth_disable(ugeth, comm_dir); 1827 1828 /* Clear the hash table. */ 1829 out_be32(addr_h, 0x00000000); 1830 out_be32(addr_l, 0x00000000); 1831 1832 if (!p_lh) 1833 return 0; 1834 1835 num = *p_counter; 1836 1837 /* Delete all remaining CQ elements */ 1838 for (i = 0; i < num; i++) 1839 put_enet_addr_container(ENET_ADDR_CONT_ENTRY(dequeue(p_lh))); 1840 1841 *p_counter = 0; 1842 1843 if (comm_dir) 1844 ugeth_enable(ugeth, comm_dir); 1845 1846 return 0; 1847 } 1848 1849 static int ugeth_82xx_filtering_clear_addr_in_paddr(struct ucc_geth_private *ugeth, 1850 u8 paddr_num) 1851 { 1852 ugeth->indAddrRegUsed[paddr_num] = 0; /* mark this paddr as not used */ 1853 return hw_clear_addr_in_paddr(ugeth, paddr_num);/* clear in hardware */ 1854 } 1855 1856 static void ucc_geth_free_rx(struct ucc_geth_private *ugeth) 1857 { 1858 struct ucc_geth_info *ug_info; 1859 struct ucc_fast_info *uf_info; 1860 u16 i, j; 1861 u8 __iomem *bd; 1862 1863 1864 ug_info = ugeth->ug_info; 1865 uf_info = &ug_info->uf_info; 1866 1867 for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) { 1868 if (ugeth->p_rx_bd_ring[i]) { 1869 /* Return existing data buffers in ring */ 1870 bd = ugeth->p_rx_bd_ring[i]; 1871 for (j = 0; j < ugeth->ug_info->bdRingLenRx[i]; j++) { 1872 if (ugeth->rx_skbuff[i][j]) { 1873 dma_unmap_single(ugeth->dev, 1874 in_be32(&((struct qe_bd __iomem *)bd)->buf), 1875 ugeth->ug_info-> 1876 uf_info.max_rx_buf_length + 1877 UCC_GETH_RX_DATA_BUF_ALIGNMENT, 1878 DMA_FROM_DEVICE); 1879 dev_kfree_skb_any( 1880 ugeth->rx_skbuff[i][j]); 1881 ugeth->rx_skbuff[i][j] = NULL; 1882 } 1883 bd += sizeof(struct qe_bd); 1884 } 1885 1886 kfree(ugeth->rx_skbuff[i]); 1887 1888 if (ugeth->ug_info->uf_info.bd_mem_part == 1889 MEM_PART_SYSTEM) 1890 kfree((void *)ugeth->rx_bd_ring_offset[i]); 1891 else if (ugeth->ug_info->uf_info.bd_mem_part == 1892 MEM_PART_MURAM) 1893 qe_muram_free(ugeth->rx_bd_ring_offset[i]); 1894 ugeth->p_rx_bd_ring[i] = NULL; 1895 } 1896 } 1897 1898 } 1899 1900 static void ucc_geth_free_tx(struct ucc_geth_private *ugeth) 1901 { 1902 struct ucc_geth_info *ug_info; 1903 struct ucc_fast_info *uf_info; 1904 u16 i, j; 1905 u8 __iomem *bd; 1906 1907 ug_info = ugeth->ug_info; 1908 uf_info = &ug_info->uf_info; 1909 1910 for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) { 1911 bd = ugeth->p_tx_bd_ring[i]; 1912 if (!bd) 1913 continue; 1914 for (j = 0; j < ugeth->ug_info->bdRingLenTx[i]; j++) { 1915 if (ugeth->tx_skbuff[i][j]) { 1916 dma_unmap_single(ugeth->dev, 1917 in_be32(&((struct qe_bd __iomem *)bd)->buf), 1918 (in_be32((u32 __iomem *)bd) & 1919 BD_LENGTH_MASK), 1920 DMA_TO_DEVICE); 1921 dev_kfree_skb_any(ugeth->tx_skbuff[i][j]); 1922 ugeth->tx_skbuff[i][j] = NULL; 1923 } 1924 } 1925 1926 kfree(ugeth->tx_skbuff[i]); 1927 1928 if (ugeth->p_tx_bd_ring[i]) { 1929 if (ugeth->ug_info->uf_info.bd_mem_part == 1930 MEM_PART_SYSTEM) 1931 kfree((void *)ugeth->tx_bd_ring_offset[i]); 1932 else if (ugeth->ug_info->uf_info.bd_mem_part == 1933 MEM_PART_MURAM) 1934 qe_muram_free(ugeth->tx_bd_ring_offset[i]); 1935 ugeth->p_tx_bd_ring[i] = NULL; 1936 } 1937 } 1938 1939 } 1940 1941 static void ucc_geth_memclean(struct ucc_geth_private *ugeth) 1942 { 1943 if (!ugeth) 1944 return; 1945 1946 if (ugeth->uccf) { 1947 ucc_fast_free(ugeth->uccf); 1948 ugeth->uccf = NULL; 1949 } 1950 1951 if (ugeth->p_thread_data_tx) { 1952 qe_muram_free(ugeth->thread_dat_tx_offset); 1953 ugeth->p_thread_data_tx = NULL; 1954 } 1955 if (ugeth->p_thread_data_rx) { 1956 qe_muram_free(ugeth->thread_dat_rx_offset); 1957 ugeth->p_thread_data_rx = NULL; 1958 } 1959 if (ugeth->p_exf_glbl_param) { 1960 qe_muram_free(ugeth->exf_glbl_param_offset); 1961 ugeth->p_exf_glbl_param = NULL; 1962 } 1963 if (ugeth->p_rx_glbl_pram) { 1964 qe_muram_free(ugeth->rx_glbl_pram_offset); 1965 ugeth->p_rx_glbl_pram = NULL; 1966 } 1967 if (ugeth->p_tx_glbl_pram) { 1968 qe_muram_free(ugeth->tx_glbl_pram_offset); 1969 ugeth->p_tx_glbl_pram = NULL; 1970 } 1971 if (ugeth->p_send_q_mem_reg) { 1972 qe_muram_free(ugeth->send_q_mem_reg_offset); 1973 ugeth->p_send_q_mem_reg = NULL; 1974 } 1975 if (ugeth->p_scheduler) { 1976 qe_muram_free(ugeth->scheduler_offset); 1977 ugeth->p_scheduler = NULL; 1978 } 1979 if (ugeth->p_tx_fw_statistics_pram) { 1980 qe_muram_free(ugeth->tx_fw_statistics_pram_offset); 1981 ugeth->p_tx_fw_statistics_pram = NULL; 1982 } 1983 if (ugeth->p_rx_fw_statistics_pram) { 1984 qe_muram_free(ugeth->rx_fw_statistics_pram_offset); 1985 ugeth->p_rx_fw_statistics_pram = NULL; 1986 } 1987 if (ugeth->p_rx_irq_coalescing_tbl) { 1988 qe_muram_free(ugeth->rx_irq_coalescing_tbl_offset); 1989 ugeth->p_rx_irq_coalescing_tbl = NULL; 1990 } 1991 if (ugeth->p_rx_bd_qs_tbl) { 1992 qe_muram_free(ugeth->rx_bd_qs_tbl_offset); 1993 ugeth->p_rx_bd_qs_tbl = NULL; 1994 } 1995 if (ugeth->p_init_enet_param_shadow) { 1996 return_init_enet_entries(ugeth, 1997 &(ugeth->p_init_enet_param_shadow-> 1998 rxthread[0]), 1999 ENET_INIT_PARAM_MAX_ENTRIES_RX, 2000 ugeth->ug_info->riscRx, 1); 2001 return_init_enet_entries(ugeth, 2002 &(ugeth->p_init_enet_param_shadow-> 2003 txthread[0]), 2004 ENET_INIT_PARAM_MAX_ENTRIES_TX, 2005 ugeth->ug_info->riscTx, 0); 2006 kfree(ugeth->p_init_enet_param_shadow); 2007 ugeth->p_init_enet_param_shadow = NULL; 2008 } 2009 ucc_geth_free_tx(ugeth); 2010 ucc_geth_free_rx(ugeth); 2011 while (!list_empty(&ugeth->group_hash_q)) 2012 put_enet_addr_container(ENET_ADDR_CONT_ENTRY 2013 (dequeue(&ugeth->group_hash_q))); 2014 while (!list_empty(&ugeth->ind_hash_q)) 2015 put_enet_addr_container(ENET_ADDR_CONT_ENTRY 2016 (dequeue(&ugeth->ind_hash_q))); 2017 if (ugeth->ug_regs) { 2018 iounmap(ugeth->ug_regs); 2019 ugeth->ug_regs = NULL; 2020 } 2021 } 2022 2023 static void ucc_geth_set_multi(struct net_device *dev) 2024 { 2025 struct ucc_geth_private *ugeth; 2026 struct netdev_hw_addr *ha; 2027 struct ucc_fast __iomem *uf_regs; 2028 struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt; 2029 2030 ugeth = netdev_priv(dev); 2031 2032 uf_regs = ugeth->uccf->uf_regs; 2033 2034 if (dev->flags & IFF_PROMISC) { 2035 setbits32(&uf_regs->upsmr, UCC_GETH_UPSMR_PRO); 2036 } else { 2037 clrbits32(&uf_regs->upsmr, UCC_GETH_UPSMR_PRO); 2038 2039 p_82xx_addr_filt = 2040 (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth-> 2041 p_rx_glbl_pram->addressfiltering; 2042 2043 if (dev->flags & IFF_ALLMULTI) { 2044 /* Catch all multicast addresses, so set the 2045 * filter to all 1's. 2046 */ 2047 out_be32(&p_82xx_addr_filt->gaddr_h, 0xffffffff); 2048 out_be32(&p_82xx_addr_filt->gaddr_l, 0xffffffff); 2049 } else { 2050 /* Clear filter and add the addresses in the list. 2051 */ 2052 out_be32(&p_82xx_addr_filt->gaddr_h, 0x0); 2053 out_be32(&p_82xx_addr_filt->gaddr_l, 0x0); 2054 2055 netdev_for_each_mc_addr(ha, dev) { 2056 /* Ask CPM to run CRC and set bit in 2057 * filter mask. 2058 */ 2059 hw_add_addr_in_hash(ugeth, ha->addr); 2060 } 2061 } 2062 } 2063 } 2064 2065 static void ucc_geth_stop(struct ucc_geth_private *ugeth) 2066 { 2067 struct ucc_geth __iomem *ug_regs = ugeth->ug_regs; 2068 struct phy_device *phydev = ugeth->phydev; 2069 2070 ugeth_vdbg("%s: IN", __func__); 2071 2072 /* 2073 * Tell the kernel the link is down. 2074 * Must be done before disabling the controller 2075 * or deadlock may happen. 2076 */ 2077 phy_stop(phydev); 2078 2079 /* Disable the controller */ 2080 ugeth_disable(ugeth, COMM_DIR_RX_AND_TX); 2081 2082 /* Mask all interrupts */ 2083 out_be32(ugeth->uccf->p_uccm, 0x00000000); 2084 2085 /* Clear all interrupts */ 2086 out_be32(ugeth->uccf->p_ucce, 0xffffffff); 2087 2088 /* Disable Rx and Tx */ 2089 clrbits32(&ug_regs->maccfg1, MACCFG1_ENABLE_RX | MACCFG1_ENABLE_TX); 2090 2091 ucc_geth_memclean(ugeth); 2092 } 2093 2094 static int ucc_struct_init(struct ucc_geth_private *ugeth) 2095 { 2096 struct ucc_geth_info *ug_info; 2097 struct ucc_fast_info *uf_info; 2098 int i; 2099 2100 ug_info = ugeth->ug_info; 2101 uf_info = &ug_info->uf_info; 2102 2103 if (!((uf_info->bd_mem_part == MEM_PART_SYSTEM) || 2104 (uf_info->bd_mem_part == MEM_PART_MURAM))) { 2105 if (netif_msg_probe(ugeth)) 2106 ugeth_err("%s: Bad memory partition value.", 2107 __func__); 2108 return -EINVAL; 2109 } 2110 2111 /* Rx BD lengths */ 2112 for (i = 0; i < ug_info->numQueuesRx; i++) { 2113 if ((ug_info->bdRingLenRx[i] < UCC_GETH_RX_BD_RING_SIZE_MIN) || 2114 (ug_info->bdRingLenRx[i] % 2115 UCC_GETH_RX_BD_RING_SIZE_ALIGNMENT)) { 2116 if (netif_msg_probe(ugeth)) 2117 ugeth_err 2118 ("%s: Rx BD ring length must be multiple of 4, no smaller than 8.", 2119 __func__); 2120 return -EINVAL; 2121 } 2122 } 2123 2124 /* Tx BD lengths */ 2125 for (i = 0; i < ug_info->numQueuesTx; i++) { 2126 if (ug_info->bdRingLenTx[i] < UCC_GETH_TX_BD_RING_SIZE_MIN) { 2127 if (netif_msg_probe(ugeth)) 2128 ugeth_err 2129 ("%s: Tx BD ring length must be no smaller than 2.", 2130 __func__); 2131 return -EINVAL; 2132 } 2133 } 2134 2135 /* mrblr */ 2136 if ((uf_info->max_rx_buf_length == 0) || 2137 (uf_info->max_rx_buf_length % UCC_GETH_MRBLR_ALIGNMENT)) { 2138 if (netif_msg_probe(ugeth)) 2139 ugeth_err 2140 ("%s: max_rx_buf_length must be non-zero multiple of 128.", 2141 __func__); 2142 return -EINVAL; 2143 } 2144 2145 /* num Tx queues */ 2146 if (ug_info->numQueuesTx > NUM_TX_QUEUES) { 2147 if (netif_msg_probe(ugeth)) 2148 ugeth_err("%s: number of tx queues too large.", __func__); 2149 return -EINVAL; 2150 } 2151 2152 /* num Rx queues */ 2153 if (ug_info->numQueuesRx > NUM_RX_QUEUES) { 2154 if (netif_msg_probe(ugeth)) 2155 ugeth_err("%s: number of rx queues too large.", __func__); 2156 return -EINVAL; 2157 } 2158 2159 /* l2qt */ 2160 for (i = 0; i < UCC_GETH_VLAN_PRIORITY_MAX; i++) { 2161 if (ug_info->l2qt[i] >= ug_info->numQueuesRx) { 2162 if (netif_msg_probe(ugeth)) 2163 ugeth_err 2164 ("%s: VLAN priority table entry must not be" 2165 " larger than number of Rx queues.", 2166 __func__); 2167 return -EINVAL; 2168 } 2169 } 2170 2171 /* l3qt */ 2172 for (i = 0; i < UCC_GETH_IP_PRIORITY_MAX; i++) { 2173 if (ug_info->l3qt[i] >= ug_info->numQueuesRx) { 2174 if (netif_msg_probe(ugeth)) 2175 ugeth_err 2176 ("%s: IP priority table entry must not be" 2177 " larger than number of Rx queues.", 2178 __func__); 2179 return -EINVAL; 2180 } 2181 } 2182 2183 if (ug_info->cam && !ug_info->ecamptr) { 2184 if (netif_msg_probe(ugeth)) 2185 ugeth_err("%s: If cam mode is chosen, must supply cam ptr.", 2186 __func__); 2187 return -EINVAL; 2188 } 2189 2190 if ((ug_info->numStationAddresses != 2191 UCC_GETH_NUM_OF_STATION_ADDRESSES_1) && 2192 ug_info->rxExtendedFiltering) { 2193 if (netif_msg_probe(ugeth)) 2194 ugeth_err("%s: Number of station addresses greater than 1 " 2195 "not allowed in extended parsing mode.", 2196 __func__); 2197 return -EINVAL; 2198 } 2199 2200 /* Generate uccm_mask for receive */ 2201 uf_info->uccm_mask = ug_info->eventRegMask & UCCE_OTHER;/* Errors */ 2202 for (i = 0; i < ug_info->numQueuesRx; i++) 2203 uf_info->uccm_mask |= (UCC_GETH_UCCE_RXF0 << i); 2204 2205 for (i = 0; i < ug_info->numQueuesTx; i++) 2206 uf_info->uccm_mask |= (UCC_GETH_UCCE_TXB0 << i); 2207 /* Initialize the general fast UCC block. */ 2208 if (ucc_fast_init(uf_info, &ugeth->uccf)) { 2209 if (netif_msg_probe(ugeth)) 2210 ugeth_err("%s: Failed to init uccf.", __func__); 2211 return -ENOMEM; 2212 } 2213 2214 /* read the number of risc engines, update the riscTx and riscRx 2215 * if there are 4 riscs in QE 2216 */ 2217 if (qe_get_num_of_risc() == 4) { 2218 ug_info->riscTx = QE_RISC_ALLOCATION_FOUR_RISCS; 2219 ug_info->riscRx = QE_RISC_ALLOCATION_FOUR_RISCS; 2220 } 2221 2222 ugeth->ug_regs = ioremap(uf_info->regs, sizeof(*ugeth->ug_regs)); 2223 if (!ugeth->ug_regs) { 2224 if (netif_msg_probe(ugeth)) 2225 ugeth_err("%s: Failed to ioremap regs.", __func__); 2226 return -ENOMEM; 2227 } 2228 2229 return 0; 2230 } 2231 2232 static int ucc_geth_alloc_tx(struct ucc_geth_private *ugeth) 2233 { 2234 struct ucc_geth_info *ug_info; 2235 struct ucc_fast_info *uf_info; 2236 int length; 2237 u16 i, j; 2238 u8 __iomem *bd; 2239 2240 ug_info = ugeth->ug_info; 2241 uf_info = &ug_info->uf_info; 2242 2243 /* Allocate Tx bds */ 2244 for (j = 0; j < ug_info->numQueuesTx; j++) { 2245 /* Allocate in multiple of 2246 UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT, 2247 according to spec */ 2248 length = ((ug_info->bdRingLenTx[j] * sizeof(struct qe_bd)) 2249 / UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT) 2250 * UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT; 2251 if ((ug_info->bdRingLenTx[j] * sizeof(struct qe_bd)) % 2252 UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT) 2253 length += UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT; 2254 if (uf_info->bd_mem_part == MEM_PART_SYSTEM) { 2255 u32 align = 4; 2256 if (UCC_GETH_TX_BD_RING_ALIGNMENT > 4) 2257 align = UCC_GETH_TX_BD_RING_ALIGNMENT; 2258 ugeth->tx_bd_ring_offset[j] = 2259 (u32) kmalloc((u32) (length + align), GFP_KERNEL); 2260 2261 if (ugeth->tx_bd_ring_offset[j] != 0) 2262 ugeth->p_tx_bd_ring[j] = 2263 (u8 __iomem *)((ugeth->tx_bd_ring_offset[j] + 2264 align) & ~(align - 1)); 2265 } else if (uf_info->bd_mem_part == MEM_PART_MURAM) { 2266 ugeth->tx_bd_ring_offset[j] = 2267 qe_muram_alloc(length, 2268 UCC_GETH_TX_BD_RING_ALIGNMENT); 2269 if (!IS_ERR_VALUE(ugeth->tx_bd_ring_offset[j])) 2270 ugeth->p_tx_bd_ring[j] = 2271 (u8 __iomem *) qe_muram_addr(ugeth-> 2272 tx_bd_ring_offset[j]); 2273 } 2274 if (!ugeth->p_tx_bd_ring[j]) { 2275 if (netif_msg_ifup(ugeth)) 2276 ugeth_err 2277 ("%s: Can not allocate memory for Tx bd rings.", 2278 __func__); 2279 return -ENOMEM; 2280 } 2281 /* Zero unused end of bd ring, according to spec */ 2282 memset_io((void __iomem *)(ugeth->p_tx_bd_ring[j] + 2283 ug_info->bdRingLenTx[j] * sizeof(struct qe_bd)), 0, 2284 length - ug_info->bdRingLenTx[j] * sizeof(struct qe_bd)); 2285 } 2286 2287 /* Init Tx bds */ 2288 for (j = 0; j < ug_info->numQueuesTx; j++) { 2289 /* Setup the skbuff rings */ 2290 ugeth->tx_skbuff[j] = kmalloc(sizeof(struct sk_buff *) * 2291 ugeth->ug_info->bdRingLenTx[j], 2292 GFP_KERNEL); 2293 2294 if (ugeth->tx_skbuff[j] == NULL) { 2295 if (netif_msg_ifup(ugeth)) 2296 ugeth_err("%s: Could not allocate tx_skbuff", 2297 __func__); 2298 return -ENOMEM; 2299 } 2300 2301 for (i = 0; i < ugeth->ug_info->bdRingLenTx[j]; i++) 2302 ugeth->tx_skbuff[j][i] = NULL; 2303 2304 ugeth->skb_curtx[j] = ugeth->skb_dirtytx[j] = 0; 2305 bd = ugeth->confBd[j] = ugeth->txBd[j] = ugeth->p_tx_bd_ring[j]; 2306 for (i = 0; i < ug_info->bdRingLenTx[j]; i++) { 2307 /* clear bd buffer */ 2308 out_be32(&((struct qe_bd __iomem *)bd)->buf, 0); 2309 /* set bd status and length */ 2310 out_be32((u32 __iomem *)bd, 0); 2311 bd += sizeof(struct qe_bd); 2312 } 2313 bd -= sizeof(struct qe_bd); 2314 /* set bd status and length */ 2315 out_be32((u32 __iomem *)bd, T_W); /* for last BD set Wrap bit */ 2316 } 2317 2318 return 0; 2319 } 2320 2321 static int ucc_geth_alloc_rx(struct ucc_geth_private *ugeth) 2322 { 2323 struct ucc_geth_info *ug_info; 2324 struct ucc_fast_info *uf_info; 2325 int length; 2326 u16 i, j; 2327 u8 __iomem *bd; 2328 2329 ug_info = ugeth->ug_info; 2330 uf_info = &ug_info->uf_info; 2331 2332 /* Allocate Rx bds */ 2333 for (j = 0; j < ug_info->numQueuesRx; j++) { 2334 length = ug_info->bdRingLenRx[j] * sizeof(struct qe_bd); 2335 if (uf_info->bd_mem_part == MEM_PART_SYSTEM) { 2336 u32 align = 4; 2337 if (UCC_GETH_RX_BD_RING_ALIGNMENT > 4) 2338 align = UCC_GETH_RX_BD_RING_ALIGNMENT; 2339 ugeth->rx_bd_ring_offset[j] = 2340 (u32) kmalloc((u32) (length + align), GFP_KERNEL); 2341 if (ugeth->rx_bd_ring_offset[j] != 0) 2342 ugeth->p_rx_bd_ring[j] = 2343 (u8 __iomem *)((ugeth->rx_bd_ring_offset[j] + 2344 align) & ~(align - 1)); 2345 } else if (uf_info->bd_mem_part == MEM_PART_MURAM) { 2346 ugeth->rx_bd_ring_offset[j] = 2347 qe_muram_alloc(length, 2348 UCC_GETH_RX_BD_RING_ALIGNMENT); 2349 if (!IS_ERR_VALUE(ugeth->rx_bd_ring_offset[j])) 2350 ugeth->p_rx_bd_ring[j] = 2351 (u8 __iomem *) qe_muram_addr(ugeth-> 2352 rx_bd_ring_offset[j]); 2353 } 2354 if (!ugeth->p_rx_bd_ring[j]) { 2355 if (netif_msg_ifup(ugeth)) 2356 ugeth_err 2357 ("%s: Can not allocate memory for Rx bd rings.", 2358 __func__); 2359 return -ENOMEM; 2360 } 2361 } 2362 2363 /* Init Rx bds */ 2364 for (j = 0; j < ug_info->numQueuesRx; j++) { 2365 /* Setup the skbuff rings */ 2366 ugeth->rx_skbuff[j] = kmalloc(sizeof(struct sk_buff *) * 2367 ugeth->ug_info->bdRingLenRx[j], 2368 GFP_KERNEL); 2369 2370 if (ugeth->rx_skbuff[j] == NULL) { 2371 if (netif_msg_ifup(ugeth)) 2372 ugeth_err("%s: Could not allocate rx_skbuff", 2373 __func__); 2374 return -ENOMEM; 2375 } 2376 2377 for (i = 0; i < ugeth->ug_info->bdRingLenRx[j]; i++) 2378 ugeth->rx_skbuff[j][i] = NULL; 2379 2380 ugeth->skb_currx[j] = 0; 2381 bd = ugeth->rxBd[j] = ugeth->p_rx_bd_ring[j]; 2382 for (i = 0; i < ug_info->bdRingLenRx[j]; i++) { 2383 /* set bd status and length */ 2384 out_be32((u32 __iomem *)bd, R_I); 2385 /* clear bd buffer */ 2386 out_be32(&((struct qe_bd __iomem *)bd)->buf, 0); 2387 bd += sizeof(struct qe_bd); 2388 } 2389 bd -= sizeof(struct qe_bd); 2390 /* set bd status and length */ 2391 out_be32((u32 __iomem *)bd, R_W); /* for last BD set Wrap bit */ 2392 } 2393 2394 return 0; 2395 } 2396 2397 static int ucc_geth_startup(struct ucc_geth_private *ugeth) 2398 { 2399 struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt; 2400 struct ucc_geth_init_pram __iomem *p_init_enet_pram; 2401 struct ucc_fast_private *uccf; 2402 struct ucc_geth_info *ug_info; 2403 struct ucc_fast_info *uf_info; 2404 struct ucc_fast __iomem *uf_regs; 2405 struct ucc_geth __iomem *ug_regs; 2406 int ret_val = -EINVAL; 2407 u32 remoder = UCC_GETH_REMODER_INIT; 2408 u32 init_enet_pram_offset, cecr_subblock, command; 2409 u32 ifstat, i, j, size, l2qt, l3qt; 2410 u16 temoder = UCC_GETH_TEMODER_INIT; 2411 u16 test; 2412 u8 function_code = 0; 2413 u8 __iomem *endOfRing; 2414 u8 numThreadsRxNumerical, numThreadsTxNumerical; 2415 2416 ugeth_vdbg("%s: IN", __func__); 2417 uccf = ugeth->uccf; 2418 ug_info = ugeth->ug_info; 2419 uf_info = &ug_info->uf_info; 2420 uf_regs = uccf->uf_regs; 2421 ug_regs = ugeth->ug_regs; 2422 2423 switch (ug_info->numThreadsRx) { 2424 case UCC_GETH_NUM_OF_THREADS_1: 2425 numThreadsRxNumerical = 1; 2426 break; 2427 case UCC_GETH_NUM_OF_THREADS_2: 2428 numThreadsRxNumerical = 2; 2429 break; 2430 case UCC_GETH_NUM_OF_THREADS_4: 2431 numThreadsRxNumerical = 4; 2432 break; 2433 case UCC_GETH_NUM_OF_THREADS_6: 2434 numThreadsRxNumerical = 6; 2435 break; 2436 case UCC_GETH_NUM_OF_THREADS_8: 2437 numThreadsRxNumerical = 8; 2438 break; 2439 default: 2440 if (netif_msg_ifup(ugeth)) 2441 ugeth_err("%s: Bad number of Rx threads value.", 2442 __func__); 2443 return -EINVAL; 2444 break; 2445 } 2446 2447 switch (ug_info->numThreadsTx) { 2448 case UCC_GETH_NUM_OF_THREADS_1: 2449 numThreadsTxNumerical = 1; 2450 break; 2451 case UCC_GETH_NUM_OF_THREADS_2: 2452 numThreadsTxNumerical = 2; 2453 break; 2454 case UCC_GETH_NUM_OF_THREADS_4: 2455 numThreadsTxNumerical = 4; 2456 break; 2457 case UCC_GETH_NUM_OF_THREADS_6: 2458 numThreadsTxNumerical = 6; 2459 break; 2460 case UCC_GETH_NUM_OF_THREADS_8: 2461 numThreadsTxNumerical = 8; 2462 break; 2463 default: 2464 if (netif_msg_ifup(ugeth)) 2465 ugeth_err("%s: Bad number of Tx threads value.", 2466 __func__); 2467 return -EINVAL; 2468 break; 2469 } 2470 2471 /* Calculate rx_extended_features */ 2472 ugeth->rx_non_dynamic_extended_features = ug_info->ipCheckSumCheck || 2473 ug_info->ipAddressAlignment || 2474 (ug_info->numStationAddresses != 2475 UCC_GETH_NUM_OF_STATION_ADDRESSES_1); 2476 2477 ugeth->rx_extended_features = ugeth->rx_non_dynamic_extended_features || 2478 (ug_info->vlanOperationTagged != UCC_GETH_VLAN_OPERATION_TAGGED_NOP) || 2479 (ug_info->vlanOperationNonTagged != 2480 UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP); 2481 2482 init_default_reg_vals(&uf_regs->upsmr, 2483 &ug_regs->maccfg1, &ug_regs->maccfg2); 2484 2485 /* Set UPSMR */ 2486 /* For more details see the hardware spec. */ 2487 init_rx_parameters(ug_info->bro, 2488 ug_info->rsh, ug_info->pro, &uf_regs->upsmr); 2489 2490 /* We're going to ignore other registers for now, */ 2491 /* except as needed to get up and running */ 2492 2493 /* Set MACCFG1 */ 2494 /* For more details see the hardware spec. */ 2495 init_flow_control_params(ug_info->aufc, 2496 ug_info->receiveFlowControl, 2497 ug_info->transmitFlowControl, 2498 ug_info->pausePeriod, 2499 ug_info->extensionField, 2500 &uf_regs->upsmr, 2501 &ug_regs->uempr, &ug_regs->maccfg1); 2502 2503 setbits32(&ug_regs->maccfg1, MACCFG1_ENABLE_RX | MACCFG1_ENABLE_TX); 2504 2505 /* Set IPGIFG */ 2506 /* For more details see the hardware spec. */ 2507 ret_val = init_inter_frame_gap_params(ug_info->nonBackToBackIfgPart1, 2508 ug_info->nonBackToBackIfgPart2, 2509 ug_info-> 2510 miminumInterFrameGapEnforcement, 2511 ug_info->backToBackInterFrameGap, 2512 &ug_regs->ipgifg); 2513 if (ret_val != 0) { 2514 if (netif_msg_ifup(ugeth)) 2515 ugeth_err("%s: IPGIFG initialization parameter too large.", 2516 __func__); 2517 return ret_val; 2518 } 2519 2520 /* Set HAFDUP */ 2521 /* For more details see the hardware spec. */ 2522 ret_val = init_half_duplex_params(ug_info->altBeb, 2523 ug_info->backPressureNoBackoff, 2524 ug_info->noBackoff, 2525 ug_info->excessDefer, 2526 ug_info->altBebTruncation, 2527 ug_info->maxRetransmission, 2528 ug_info->collisionWindow, 2529 &ug_regs->hafdup); 2530 if (ret_val != 0) { 2531 if (netif_msg_ifup(ugeth)) 2532 ugeth_err("%s: Half Duplex initialization parameter too large.", 2533 __func__); 2534 return ret_val; 2535 } 2536 2537 /* Set IFSTAT */ 2538 /* For more details see the hardware spec. */ 2539 /* Read only - resets upon read */ 2540 ifstat = in_be32(&ug_regs->ifstat); 2541 2542 /* Clear UEMPR */ 2543 /* For more details see the hardware spec. */ 2544 out_be32(&ug_regs->uempr, 0); 2545 2546 /* Set UESCR */ 2547 /* For more details see the hardware spec. */ 2548 init_hw_statistics_gathering_mode((ug_info->statisticsMode & 2549 UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE), 2550 0, &uf_regs->upsmr, &ug_regs->uescr); 2551 2552 ret_val = ucc_geth_alloc_tx(ugeth); 2553 if (ret_val != 0) 2554 return ret_val; 2555 2556 ret_val = ucc_geth_alloc_rx(ugeth); 2557 if (ret_val != 0) 2558 return ret_val; 2559 2560 /* 2561 * Global PRAM 2562 */ 2563 /* Tx global PRAM */ 2564 /* Allocate global tx parameter RAM page */ 2565 ugeth->tx_glbl_pram_offset = 2566 qe_muram_alloc(sizeof(struct ucc_geth_tx_global_pram), 2567 UCC_GETH_TX_GLOBAL_PRAM_ALIGNMENT); 2568 if (IS_ERR_VALUE(ugeth->tx_glbl_pram_offset)) { 2569 if (netif_msg_ifup(ugeth)) 2570 ugeth_err 2571 ("%s: Can not allocate DPRAM memory for p_tx_glbl_pram.", 2572 __func__); 2573 return -ENOMEM; 2574 } 2575 ugeth->p_tx_glbl_pram = 2576 (struct ucc_geth_tx_global_pram __iomem *) qe_muram_addr(ugeth-> 2577 tx_glbl_pram_offset); 2578 /* Zero out p_tx_glbl_pram */ 2579 memset_io((void __iomem *)ugeth->p_tx_glbl_pram, 0, sizeof(struct ucc_geth_tx_global_pram)); 2580 2581 /* Fill global PRAM */ 2582 2583 /* TQPTR */ 2584 /* Size varies with number of Tx threads */ 2585 ugeth->thread_dat_tx_offset = 2586 qe_muram_alloc(numThreadsTxNumerical * 2587 sizeof(struct ucc_geth_thread_data_tx) + 2588 32 * (numThreadsTxNumerical == 1), 2589 UCC_GETH_THREAD_DATA_ALIGNMENT); 2590 if (IS_ERR_VALUE(ugeth->thread_dat_tx_offset)) { 2591 if (netif_msg_ifup(ugeth)) 2592 ugeth_err 2593 ("%s: Can not allocate DPRAM memory for p_thread_data_tx.", 2594 __func__); 2595 return -ENOMEM; 2596 } 2597 2598 ugeth->p_thread_data_tx = 2599 (struct ucc_geth_thread_data_tx __iomem *) qe_muram_addr(ugeth-> 2600 thread_dat_tx_offset); 2601 out_be32(&ugeth->p_tx_glbl_pram->tqptr, ugeth->thread_dat_tx_offset); 2602 2603 /* vtagtable */ 2604 for (i = 0; i < UCC_GETH_TX_VTAG_TABLE_ENTRY_MAX; i++) 2605 out_be32(&ugeth->p_tx_glbl_pram->vtagtable[i], 2606 ug_info->vtagtable[i]); 2607 2608 /* iphoffset */ 2609 for (i = 0; i < TX_IP_OFFSET_ENTRY_MAX; i++) 2610 out_8(&ugeth->p_tx_glbl_pram->iphoffset[i], 2611 ug_info->iphoffset[i]); 2612 2613 /* SQPTR */ 2614 /* Size varies with number of Tx queues */ 2615 ugeth->send_q_mem_reg_offset = 2616 qe_muram_alloc(ug_info->numQueuesTx * 2617 sizeof(struct ucc_geth_send_queue_qd), 2618 UCC_GETH_SEND_QUEUE_QUEUE_DESCRIPTOR_ALIGNMENT); 2619 if (IS_ERR_VALUE(ugeth->send_q_mem_reg_offset)) { 2620 if (netif_msg_ifup(ugeth)) 2621 ugeth_err 2622 ("%s: Can not allocate DPRAM memory for p_send_q_mem_reg.", 2623 __func__); 2624 return -ENOMEM; 2625 } 2626 2627 ugeth->p_send_q_mem_reg = 2628 (struct ucc_geth_send_queue_mem_region __iomem *) qe_muram_addr(ugeth-> 2629 send_q_mem_reg_offset); 2630 out_be32(&ugeth->p_tx_glbl_pram->sqptr, ugeth->send_q_mem_reg_offset); 2631 2632 /* Setup the table */ 2633 /* Assume BD rings are already established */ 2634 for (i = 0; i < ug_info->numQueuesTx; i++) { 2635 endOfRing = 2636 ugeth->p_tx_bd_ring[i] + (ug_info->bdRingLenTx[i] - 2637 1) * sizeof(struct qe_bd); 2638 if (ugeth->ug_info->uf_info.bd_mem_part == MEM_PART_SYSTEM) { 2639 out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].bd_ring_base, 2640 (u32) virt_to_phys(ugeth->p_tx_bd_ring[i])); 2641 out_be32(&ugeth->p_send_q_mem_reg->sqqd[i]. 2642 last_bd_completed_address, 2643 (u32) virt_to_phys(endOfRing)); 2644 } else if (ugeth->ug_info->uf_info.bd_mem_part == 2645 MEM_PART_MURAM) { 2646 out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].bd_ring_base, 2647 (u32) immrbar_virt_to_phys(ugeth-> 2648 p_tx_bd_ring[i])); 2649 out_be32(&ugeth->p_send_q_mem_reg->sqqd[i]. 2650 last_bd_completed_address, 2651 (u32) immrbar_virt_to_phys(endOfRing)); 2652 } 2653 } 2654 2655 /* schedulerbasepointer */ 2656 2657 if (ug_info->numQueuesTx > 1) { 2658 /* scheduler exists only if more than 1 tx queue */ 2659 ugeth->scheduler_offset = 2660 qe_muram_alloc(sizeof(struct ucc_geth_scheduler), 2661 UCC_GETH_SCHEDULER_ALIGNMENT); 2662 if (IS_ERR_VALUE(ugeth->scheduler_offset)) { 2663 if (netif_msg_ifup(ugeth)) 2664 ugeth_err 2665 ("%s: Can not allocate DPRAM memory for p_scheduler.", 2666 __func__); 2667 return -ENOMEM; 2668 } 2669 2670 ugeth->p_scheduler = 2671 (struct ucc_geth_scheduler __iomem *) qe_muram_addr(ugeth-> 2672 scheduler_offset); 2673 out_be32(&ugeth->p_tx_glbl_pram->schedulerbasepointer, 2674 ugeth->scheduler_offset); 2675 /* Zero out p_scheduler */ 2676 memset_io((void __iomem *)ugeth->p_scheduler, 0, sizeof(struct ucc_geth_scheduler)); 2677 2678 /* Set values in scheduler */ 2679 out_be32(&ugeth->p_scheduler->mblinterval, 2680 ug_info->mblinterval); 2681 out_be16(&ugeth->p_scheduler->nortsrbytetime, 2682 ug_info->nortsrbytetime); 2683 out_8(&ugeth->p_scheduler->fracsiz, ug_info->fracsiz); 2684 out_8(&ugeth->p_scheduler->strictpriorityq, 2685 ug_info->strictpriorityq); 2686 out_8(&ugeth->p_scheduler->txasap, ug_info->txasap); 2687 out_8(&ugeth->p_scheduler->extrabw, ug_info->extrabw); 2688 for (i = 0; i < NUM_TX_QUEUES; i++) 2689 out_8(&ugeth->p_scheduler->weightfactor[i], 2690 ug_info->weightfactor[i]); 2691 2692 /* Set pointers to cpucount registers in scheduler */ 2693 ugeth->p_cpucount[0] = &(ugeth->p_scheduler->cpucount0); 2694 ugeth->p_cpucount[1] = &(ugeth->p_scheduler->cpucount1); 2695 ugeth->p_cpucount[2] = &(ugeth->p_scheduler->cpucount2); 2696 ugeth->p_cpucount[3] = &(ugeth->p_scheduler->cpucount3); 2697 ugeth->p_cpucount[4] = &(ugeth->p_scheduler->cpucount4); 2698 ugeth->p_cpucount[5] = &(ugeth->p_scheduler->cpucount5); 2699 ugeth->p_cpucount[6] = &(ugeth->p_scheduler->cpucount6); 2700 ugeth->p_cpucount[7] = &(ugeth->p_scheduler->cpucount7); 2701 } 2702 2703 /* schedulerbasepointer */ 2704 /* TxRMON_PTR (statistics) */ 2705 if (ug_info-> 2706 statisticsMode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX) { 2707 ugeth->tx_fw_statistics_pram_offset = 2708 qe_muram_alloc(sizeof 2709 (struct ucc_geth_tx_firmware_statistics_pram), 2710 UCC_GETH_TX_STATISTICS_ALIGNMENT); 2711 if (IS_ERR_VALUE(ugeth->tx_fw_statistics_pram_offset)) { 2712 if (netif_msg_ifup(ugeth)) 2713 ugeth_err 2714 ("%s: Can not allocate DPRAM memory for" 2715 " p_tx_fw_statistics_pram.", 2716 __func__); 2717 return -ENOMEM; 2718 } 2719 ugeth->p_tx_fw_statistics_pram = 2720 (struct ucc_geth_tx_firmware_statistics_pram __iomem *) 2721 qe_muram_addr(ugeth->tx_fw_statistics_pram_offset); 2722 /* Zero out p_tx_fw_statistics_pram */ 2723 memset_io((void __iomem *)ugeth->p_tx_fw_statistics_pram, 2724 0, sizeof(struct ucc_geth_tx_firmware_statistics_pram)); 2725 } 2726 2727 /* temoder */ 2728 /* Already has speed set */ 2729 2730 if (ug_info->numQueuesTx > 1) 2731 temoder |= TEMODER_SCHEDULER_ENABLE; 2732 if (ug_info->ipCheckSumGenerate) 2733 temoder |= TEMODER_IP_CHECKSUM_GENERATE; 2734 temoder |= ((ug_info->numQueuesTx - 1) << TEMODER_NUM_OF_QUEUES_SHIFT); 2735 out_be16(&ugeth->p_tx_glbl_pram->temoder, temoder); 2736 2737 test = in_be16(&ugeth->p_tx_glbl_pram->temoder); 2738 2739 /* Function code register value to be used later */ 2740 function_code = UCC_BMR_BO_BE | UCC_BMR_GBL; 2741 /* Required for QE */ 2742 2743 /* function code register */ 2744 out_be32(&ugeth->p_tx_glbl_pram->tstate, ((u32) function_code) << 24); 2745 2746 /* Rx global PRAM */ 2747 /* Allocate global rx parameter RAM page */ 2748 ugeth->rx_glbl_pram_offset = 2749 qe_muram_alloc(sizeof(struct ucc_geth_rx_global_pram), 2750 UCC_GETH_RX_GLOBAL_PRAM_ALIGNMENT); 2751 if (IS_ERR_VALUE(ugeth->rx_glbl_pram_offset)) { 2752 if (netif_msg_ifup(ugeth)) 2753 ugeth_err 2754 ("%s: Can not allocate DPRAM memory for p_rx_glbl_pram.", 2755 __func__); 2756 return -ENOMEM; 2757 } 2758 ugeth->p_rx_glbl_pram = 2759 (struct ucc_geth_rx_global_pram __iomem *) qe_muram_addr(ugeth-> 2760 rx_glbl_pram_offset); 2761 /* Zero out p_rx_glbl_pram */ 2762 memset_io((void __iomem *)ugeth->p_rx_glbl_pram, 0, sizeof(struct ucc_geth_rx_global_pram)); 2763 2764 /* Fill global PRAM */ 2765 2766 /* RQPTR */ 2767 /* Size varies with number of Rx threads */ 2768 ugeth->thread_dat_rx_offset = 2769 qe_muram_alloc(numThreadsRxNumerical * 2770 sizeof(struct ucc_geth_thread_data_rx), 2771 UCC_GETH_THREAD_DATA_ALIGNMENT); 2772 if (IS_ERR_VALUE(ugeth->thread_dat_rx_offset)) { 2773 if (netif_msg_ifup(ugeth)) 2774 ugeth_err 2775 ("%s: Can not allocate DPRAM memory for p_thread_data_rx.", 2776 __func__); 2777 return -ENOMEM; 2778 } 2779 2780 ugeth->p_thread_data_rx = 2781 (struct ucc_geth_thread_data_rx __iomem *) qe_muram_addr(ugeth-> 2782 thread_dat_rx_offset); 2783 out_be32(&ugeth->p_rx_glbl_pram->rqptr, ugeth->thread_dat_rx_offset); 2784 2785 /* typeorlen */ 2786 out_be16(&ugeth->p_rx_glbl_pram->typeorlen, ug_info->typeorlen); 2787 2788 /* rxrmonbaseptr (statistics) */ 2789 if (ug_info-> 2790 statisticsMode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX) { 2791 ugeth->rx_fw_statistics_pram_offset = 2792 qe_muram_alloc(sizeof 2793 (struct ucc_geth_rx_firmware_statistics_pram), 2794 UCC_GETH_RX_STATISTICS_ALIGNMENT); 2795 if (IS_ERR_VALUE(ugeth->rx_fw_statistics_pram_offset)) { 2796 if (netif_msg_ifup(ugeth)) 2797 ugeth_err 2798 ("%s: Can not allocate DPRAM memory for" 2799 " p_rx_fw_statistics_pram.", __func__); 2800 return -ENOMEM; 2801 } 2802 ugeth->p_rx_fw_statistics_pram = 2803 (struct ucc_geth_rx_firmware_statistics_pram __iomem *) 2804 qe_muram_addr(ugeth->rx_fw_statistics_pram_offset); 2805 /* Zero out p_rx_fw_statistics_pram */ 2806 memset_io((void __iomem *)ugeth->p_rx_fw_statistics_pram, 0, 2807 sizeof(struct ucc_geth_rx_firmware_statistics_pram)); 2808 } 2809 2810 /* intCoalescingPtr */ 2811 2812 /* Size varies with number of Rx queues */ 2813 ugeth->rx_irq_coalescing_tbl_offset = 2814 qe_muram_alloc(ug_info->numQueuesRx * 2815 sizeof(struct ucc_geth_rx_interrupt_coalescing_entry) 2816 + 4, UCC_GETH_RX_INTERRUPT_COALESCING_ALIGNMENT); 2817 if (IS_ERR_VALUE(ugeth->rx_irq_coalescing_tbl_offset)) { 2818 if (netif_msg_ifup(ugeth)) 2819 ugeth_err 2820 ("%s: Can not allocate DPRAM memory for" 2821 " p_rx_irq_coalescing_tbl.", __func__); 2822 return -ENOMEM; 2823 } 2824 2825 ugeth->p_rx_irq_coalescing_tbl = 2826 (struct ucc_geth_rx_interrupt_coalescing_table __iomem *) 2827 qe_muram_addr(ugeth->rx_irq_coalescing_tbl_offset); 2828 out_be32(&ugeth->p_rx_glbl_pram->intcoalescingptr, 2829 ugeth->rx_irq_coalescing_tbl_offset); 2830 2831 /* Fill interrupt coalescing table */ 2832 for (i = 0; i < ug_info->numQueuesRx; i++) { 2833 out_be32(&ugeth->p_rx_irq_coalescing_tbl->coalescingentry[i]. 2834 interruptcoalescingmaxvalue, 2835 ug_info->interruptcoalescingmaxvalue[i]); 2836 out_be32(&ugeth->p_rx_irq_coalescing_tbl->coalescingentry[i]. 2837 interruptcoalescingcounter, 2838 ug_info->interruptcoalescingmaxvalue[i]); 2839 } 2840 2841 /* MRBLR */ 2842 init_max_rx_buff_len(uf_info->max_rx_buf_length, 2843 &ugeth->p_rx_glbl_pram->mrblr); 2844 /* MFLR */ 2845 out_be16(&ugeth->p_rx_glbl_pram->mflr, ug_info->maxFrameLength); 2846 /* MINFLR */ 2847 init_min_frame_len(ug_info->minFrameLength, 2848 &ugeth->p_rx_glbl_pram->minflr, 2849 &ugeth->p_rx_glbl_pram->mrblr); 2850 /* MAXD1 */ 2851 out_be16(&ugeth->p_rx_glbl_pram->maxd1, ug_info->maxD1Length); 2852 /* MAXD2 */ 2853 out_be16(&ugeth->p_rx_glbl_pram->maxd2, ug_info->maxD2Length); 2854 2855 /* l2qt */ 2856 l2qt = 0; 2857 for (i = 0; i < UCC_GETH_VLAN_PRIORITY_MAX; i++) 2858 l2qt |= (ug_info->l2qt[i] << (28 - 4 * i)); 2859 out_be32(&ugeth->p_rx_glbl_pram->l2qt, l2qt); 2860 2861 /* l3qt */ 2862 for (j = 0; j < UCC_GETH_IP_PRIORITY_MAX; j += 8) { 2863 l3qt = 0; 2864 for (i = 0; i < 8; i++) 2865 l3qt |= (ug_info->l3qt[j + i] << (28 - 4 * i)); 2866 out_be32(&ugeth->p_rx_glbl_pram->l3qt[j/8], l3qt); 2867 } 2868 2869 /* vlantype */ 2870 out_be16(&ugeth->p_rx_glbl_pram->vlantype, ug_info->vlantype); 2871 2872 /* vlantci */ 2873 out_be16(&ugeth->p_rx_glbl_pram->vlantci, ug_info->vlantci); 2874 2875 /* ecamptr */ 2876 out_be32(&ugeth->p_rx_glbl_pram->ecamptr, ug_info->ecamptr); 2877 2878 /* RBDQPTR */ 2879 /* Size varies with number of Rx queues */ 2880 ugeth->rx_bd_qs_tbl_offset = 2881 qe_muram_alloc(ug_info->numQueuesRx * 2882 (sizeof(struct ucc_geth_rx_bd_queues_entry) + 2883 sizeof(struct ucc_geth_rx_prefetched_bds)), 2884 UCC_GETH_RX_BD_QUEUES_ALIGNMENT); 2885 if (IS_ERR_VALUE(ugeth->rx_bd_qs_tbl_offset)) { 2886 if (netif_msg_ifup(ugeth)) 2887 ugeth_err 2888 ("%s: Can not allocate DPRAM memory for p_rx_bd_qs_tbl.", 2889 __func__); 2890 return -ENOMEM; 2891 } 2892 2893 ugeth->p_rx_bd_qs_tbl = 2894 (struct ucc_geth_rx_bd_queues_entry __iomem *) qe_muram_addr(ugeth-> 2895 rx_bd_qs_tbl_offset); 2896 out_be32(&ugeth->p_rx_glbl_pram->rbdqptr, ugeth->rx_bd_qs_tbl_offset); 2897 /* Zero out p_rx_bd_qs_tbl */ 2898 memset_io((void __iomem *)ugeth->p_rx_bd_qs_tbl, 2899 0, 2900 ug_info->numQueuesRx * (sizeof(struct ucc_geth_rx_bd_queues_entry) + 2901 sizeof(struct ucc_geth_rx_prefetched_bds))); 2902 2903 /* Setup the table */ 2904 /* Assume BD rings are already established */ 2905 for (i = 0; i < ug_info->numQueuesRx; i++) { 2906 if (ugeth->ug_info->uf_info.bd_mem_part == MEM_PART_SYSTEM) { 2907 out_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr, 2908 (u32) virt_to_phys(ugeth->p_rx_bd_ring[i])); 2909 } else if (ugeth->ug_info->uf_info.bd_mem_part == 2910 MEM_PART_MURAM) { 2911 out_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr, 2912 (u32) immrbar_virt_to_phys(ugeth-> 2913 p_rx_bd_ring[i])); 2914 } 2915 /* rest of fields handled by QE */ 2916 } 2917 2918 /* remoder */ 2919 /* Already has speed set */ 2920 2921 if (ugeth->rx_extended_features) 2922 remoder |= REMODER_RX_EXTENDED_FEATURES; 2923 if (ug_info->rxExtendedFiltering) 2924 remoder |= REMODER_RX_EXTENDED_FILTERING; 2925 if (ug_info->dynamicMaxFrameLength) 2926 remoder |= REMODER_DYNAMIC_MAX_FRAME_LENGTH; 2927 if (ug_info->dynamicMinFrameLength) 2928 remoder |= REMODER_DYNAMIC_MIN_FRAME_LENGTH; 2929 remoder |= 2930 ug_info->vlanOperationTagged << REMODER_VLAN_OPERATION_TAGGED_SHIFT; 2931 remoder |= 2932 ug_info-> 2933 vlanOperationNonTagged << REMODER_VLAN_OPERATION_NON_TAGGED_SHIFT; 2934 remoder |= ug_info->rxQoSMode << REMODER_RX_QOS_MODE_SHIFT; 2935 remoder |= ((ug_info->numQueuesRx - 1) << REMODER_NUM_OF_QUEUES_SHIFT); 2936 if (ug_info->ipCheckSumCheck) 2937 remoder |= REMODER_IP_CHECKSUM_CHECK; 2938 if (ug_info->ipAddressAlignment) 2939 remoder |= REMODER_IP_ADDRESS_ALIGNMENT; 2940 out_be32(&ugeth->p_rx_glbl_pram->remoder, remoder); 2941 2942 /* Note that this function must be called */ 2943 /* ONLY AFTER p_tx_fw_statistics_pram */ 2944 /* andp_UccGethRxFirmwareStatisticsPram are allocated ! */ 2945 init_firmware_statistics_gathering_mode((ug_info-> 2946 statisticsMode & 2947 UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX), 2948 (ug_info->statisticsMode & 2949 UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX), 2950 &ugeth->p_tx_glbl_pram->txrmonbaseptr, 2951 ugeth->tx_fw_statistics_pram_offset, 2952 &ugeth->p_rx_glbl_pram->rxrmonbaseptr, 2953 ugeth->rx_fw_statistics_pram_offset, 2954 &ugeth->p_tx_glbl_pram->temoder, 2955 &ugeth->p_rx_glbl_pram->remoder); 2956 2957 /* function code register */ 2958 out_8(&ugeth->p_rx_glbl_pram->rstate, function_code); 2959 2960 /* initialize extended filtering */ 2961 if (ug_info->rxExtendedFiltering) { 2962 if (!ug_info->extendedFilteringChainPointer) { 2963 if (netif_msg_ifup(ugeth)) 2964 ugeth_err("%s: Null Extended Filtering Chain Pointer.", 2965 __func__); 2966 return -EINVAL; 2967 } 2968 2969 /* Allocate memory for extended filtering Mode Global 2970 Parameters */ 2971 ugeth->exf_glbl_param_offset = 2972 qe_muram_alloc(sizeof(struct ucc_geth_exf_global_pram), 2973 UCC_GETH_RX_EXTENDED_FILTERING_GLOBAL_PARAMETERS_ALIGNMENT); 2974 if (IS_ERR_VALUE(ugeth->exf_glbl_param_offset)) { 2975 if (netif_msg_ifup(ugeth)) 2976 ugeth_err 2977 ("%s: Can not allocate DPRAM memory for" 2978 " p_exf_glbl_param.", __func__); 2979 return -ENOMEM; 2980 } 2981 2982 ugeth->p_exf_glbl_param = 2983 (struct ucc_geth_exf_global_pram __iomem *) qe_muram_addr(ugeth-> 2984 exf_glbl_param_offset); 2985 out_be32(&ugeth->p_rx_glbl_pram->exfGlobalParam, 2986 ugeth->exf_glbl_param_offset); 2987 out_be32(&ugeth->p_exf_glbl_param->l2pcdptr, 2988 (u32) ug_info->extendedFilteringChainPointer); 2989 2990 } else { /* initialize 82xx style address filtering */ 2991 2992 /* Init individual address recognition registers to disabled */ 2993 2994 for (j = 0; j < NUM_OF_PADDRS; j++) 2995 ugeth_82xx_filtering_clear_addr_in_paddr(ugeth, (u8) j); 2996 2997 p_82xx_addr_filt = 2998 (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth-> 2999 p_rx_glbl_pram->addressfiltering; 3000 3001 ugeth_82xx_filtering_clear_all_addr_in_hash(ugeth, 3002 ENET_ADDR_TYPE_GROUP); 3003 ugeth_82xx_filtering_clear_all_addr_in_hash(ugeth, 3004 ENET_ADDR_TYPE_INDIVIDUAL); 3005 } 3006 3007 /* 3008 * Initialize UCC at QE level 3009 */ 3010 3011 command = QE_INIT_TX_RX; 3012 3013 /* Allocate shadow InitEnet command parameter structure. 3014 * This is needed because after the InitEnet command is executed, 3015 * the structure in DPRAM is released, because DPRAM is a premium 3016 * resource. 3017 * This shadow structure keeps a copy of what was done so that the 3018 * allocated resources can be released when the channel is freed. 3019 */ 3020 if (!(ugeth->p_init_enet_param_shadow = 3021 kmalloc(sizeof(struct ucc_geth_init_pram), GFP_KERNEL))) { 3022 if (netif_msg_ifup(ugeth)) 3023 ugeth_err 3024 ("%s: Can not allocate memory for" 3025 " p_UccInitEnetParamShadows.", __func__); 3026 return -ENOMEM; 3027 } 3028 /* Zero out *p_init_enet_param_shadow */ 3029 memset((char *)ugeth->p_init_enet_param_shadow, 3030 0, sizeof(struct ucc_geth_init_pram)); 3031 3032 /* Fill shadow InitEnet command parameter structure */ 3033 3034 ugeth->p_init_enet_param_shadow->resinit1 = 3035 ENET_INIT_PARAM_MAGIC_RES_INIT1; 3036 ugeth->p_init_enet_param_shadow->resinit2 = 3037 ENET_INIT_PARAM_MAGIC_RES_INIT2; 3038 ugeth->p_init_enet_param_shadow->resinit3 = 3039 ENET_INIT_PARAM_MAGIC_RES_INIT3; 3040 ugeth->p_init_enet_param_shadow->resinit4 = 3041 ENET_INIT_PARAM_MAGIC_RES_INIT4; 3042 ugeth->p_init_enet_param_shadow->resinit5 = 3043 ENET_INIT_PARAM_MAGIC_RES_INIT5; 3044 ugeth->p_init_enet_param_shadow->rgftgfrxglobal |= 3045 ((u32) ug_info->numThreadsRx) << ENET_INIT_PARAM_RGF_SHIFT; 3046 ugeth->p_init_enet_param_shadow->rgftgfrxglobal |= 3047 ((u32) ug_info->numThreadsTx) << ENET_INIT_PARAM_TGF_SHIFT; 3048 3049 ugeth->p_init_enet_param_shadow->rgftgfrxglobal |= 3050 ugeth->rx_glbl_pram_offset | ug_info->riscRx; 3051 if ((ug_info->largestexternallookupkeysize != 3052 QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_NONE) && 3053 (ug_info->largestexternallookupkeysize != 3054 QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_8_BYTES) && 3055 (ug_info->largestexternallookupkeysize != 3056 QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_16_BYTES)) { 3057 if (netif_msg_ifup(ugeth)) 3058 ugeth_err("%s: Invalid largest External Lookup Key Size.", 3059 __func__); 3060 return -EINVAL; 3061 } 3062 ugeth->p_init_enet_param_shadow->largestexternallookupkeysize = 3063 ug_info->largestexternallookupkeysize; 3064 size = sizeof(struct ucc_geth_thread_rx_pram); 3065 if (ug_info->rxExtendedFiltering) { 3066 size += THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING; 3067 if (ug_info->largestexternallookupkeysize == 3068 QE_FLTR_TABLE_LOOKUP_KEY_SIZE_8_BYTES) 3069 size += 3070 THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8; 3071 if (ug_info->largestexternallookupkeysize == 3072 QE_FLTR_TABLE_LOOKUP_KEY_SIZE_16_BYTES) 3073 size += 3074 THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_16; 3075 } 3076 3077 if ((ret_val = fill_init_enet_entries(ugeth, &(ugeth-> 3078 p_init_enet_param_shadow->rxthread[0]), 3079 (u8) (numThreadsRxNumerical + 1) 3080 /* Rx needs one extra for terminator */ 3081 , size, UCC_GETH_THREAD_RX_PRAM_ALIGNMENT, 3082 ug_info->riscRx, 1)) != 0) { 3083 if (netif_msg_ifup(ugeth)) 3084 ugeth_err("%s: Can not fill p_init_enet_param_shadow.", 3085 __func__); 3086 return ret_val; 3087 } 3088 3089 ugeth->p_init_enet_param_shadow->txglobal = 3090 ugeth->tx_glbl_pram_offset | ug_info->riscTx; 3091 if ((ret_val = 3092 fill_init_enet_entries(ugeth, 3093 &(ugeth->p_init_enet_param_shadow-> 3094 txthread[0]), numThreadsTxNumerical, 3095 sizeof(struct ucc_geth_thread_tx_pram), 3096 UCC_GETH_THREAD_TX_PRAM_ALIGNMENT, 3097 ug_info->riscTx, 0)) != 0) { 3098 if (netif_msg_ifup(ugeth)) 3099 ugeth_err("%s: Can not fill p_init_enet_param_shadow.", 3100 __func__); 3101 return ret_val; 3102 } 3103 3104 /* Load Rx bds with buffers */ 3105 for (i = 0; i < ug_info->numQueuesRx; i++) { 3106 if ((ret_val = rx_bd_buffer_set(ugeth, (u8) i)) != 0) { 3107 if (netif_msg_ifup(ugeth)) 3108 ugeth_err("%s: Can not fill Rx bds with buffers.", 3109 __func__); 3110 return ret_val; 3111 } 3112 } 3113 3114 /* Allocate InitEnet command parameter structure */ 3115 init_enet_pram_offset = qe_muram_alloc(sizeof(struct ucc_geth_init_pram), 4); 3116 if (IS_ERR_VALUE(init_enet_pram_offset)) { 3117 if (netif_msg_ifup(ugeth)) 3118 ugeth_err 3119 ("%s: Can not allocate DPRAM memory for p_init_enet_pram.", 3120 __func__); 3121 return -ENOMEM; 3122 } 3123 p_init_enet_pram = 3124 (struct ucc_geth_init_pram __iomem *) qe_muram_addr(init_enet_pram_offset); 3125 3126 /* Copy shadow InitEnet command parameter structure into PRAM */ 3127 out_8(&p_init_enet_pram->resinit1, 3128 ugeth->p_init_enet_param_shadow->resinit1); 3129 out_8(&p_init_enet_pram->resinit2, 3130 ugeth->p_init_enet_param_shadow->resinit2); 3131 out_8(&p_init_enet_pram->resinit3, 3132 ugeth->p_init_enet_param_shadow->resinit3); 3133 out_8(&p_init_enet_pram->resinit4, 3134 ugeth->p_init_enet_param_shadow->resinit4); 3135 out_be16(&p_init_enet_pram->resinit5, 3136 ugeth->p_init_enet_param_shadow->resinit5); 3137 out_8(&p_init_enet_pram->largestexternallookupkeysize, 3138 ugeth->p_init_enet_param_shadow->largestexternallookupkeysize); 3139 out_be32(&p_init_enet_pram->rgftgfrxglobal, 3140 ugeth->p_init_enet_param_shadow->rgftgfrxglobal); 3141 for (i = 0; i < ENET_INIT_PARAM_MAX_ENTRIES_RX; i++) 3142 out_be32(&p_init_enet_pram->rxthread[i], 3143 ugeth->p_init_enet_param_shadow->rxthread[i]); 3144 out_be32(&p_init_enet_pram->txglobal, 3145 ugeth->p_init_enet_param_shadow->txglobal); 3146 for (i = 0; i < ENET_INIT_PARAM_MAX_ENTRIES_TX; i++) 3147 out_be32(&p_init_enet_pram->txthread[i], 3148 ugeth->p_init_enet_param_shadow->txthread[i]); 3149 3150 /* Issue QE command */ 3151 cecr_subblock = 3152 ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num); 3153 qe_issue_cmd(command, cecr_subblock, QE_CR_PROTOCOL_ETHERNET, 3154 init_enet_pram_offset); 3155 3156 /* Free InitEnet command parameter */ 3157 qe_muram_free(init_enet_pram_offset); 3158 3159 return 0; 3160 } 3161 3162 /* This is called by the kernel when a frame is ready for transmission. */ 3163 /* It is pointed to by the dev->hard_start_xmit function pointer */ 3164 static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev) 3165 { 3166 struct ucc_geth_private *ugeth = netdev_priv(dev); 3167 #ifdef CONFIG_UGETH_TX_ON_DEMAND 3168 struct ucc_fast_private *uccf; 3169 #endif 3170 u8 __iomem *bd; /* BD pointer */ 3171 u32 bd_status; 3172 u8 txQ = 0; 3173 unsigned long flags; 3174 3175 ugeth_vdbg("%s: IN", __func__); 3176 3177 spin_lock_irqsave(&ugeth->lock, flags); 3178 3179 dev->stats.tx_bytes += skb->len; 3180 3181 /* Start from the next BD that should be filled */ 3182 bd = ugeth->txBd[txQ]; 3183 bd_status = in_be32((u32 __iomem *)bd); 3184 /* Save the skb pointer so we can free it later */ 3185 ugeth->tx_skbuff[txQ][ugeth->skb_curtx[txQ]] = skb; 3186 3187 /* Update the current skb pointer (wrapping if this was the last) */ 3188 ugeth->skb_curtx[txQ] = 3189 (ugeth->skb_curtx[txQ] + 3190 1) & TX_RING_MOD_MASK(ugeth->ug_info->bdRingLenTx[txQ]); 3191 3192 /* set up the buffer descriptor */ 3193 out_be32(&((struct qe_bd __iomem *)bd)->buf, 3194 dma_map_single(ugeth->dev, skb->data, 3195 skb->len, DMA_TO_DEVICE)); 3196 3197 /* printk(KERN_DEBUG"skb->data is 0x%x\n",skb->data); */ 3198 3199 bd_status = (bd_status & T_W) | T_R | T_I | T_L | skb->len; 3200 3201 /* set bd status and length */ 3202 out_be32((u32 __iomem *)bd, bd_status); 3203 3204 /* Move to next BD in the ring */ 3205 if (!(bd_status & T_W)) 3206 bd += sizeof(struct qe_bd); 3207 else 3208 bd = ugeth->p_tx_bd_ring[txQ]; 3209 3210 /* If the next BD still needs to be cleaned up, then the bds 3211 are full. We need to tell the kernel to stop sending us stuff. */ 3212 if (bd == ugeth->confBd[txQ]) { 3213 if (!netif_queue_stopped(dev)) 3214 netif_stop_queue(dev); 3215 } 3216 3217 ugeth->txBd[txQ] = bd; 3218 3219 skb_tx_timestamp(skb); 3220 3221 if (ugeth->p_scheduler) { 3222 ugeth->cpucount[txQ]++; 3223 /* Indicate to QE that there are more Tx bds ready for 3224 transmission */ 3225 /* This is done by writing a running counter of the bd 3226 count to the scheduler PRAM. */ 3227 out_be16(ugeth->p_cpucount[txQ], ugeth->cpucount[txQ]); 3228 } 3229 3230 #ifdef CONFIG_UGETH_TX_ON_DEMAND 3231 uccf = ugeth->uccf; 3232 out_be16(uccf->p_utodr, UCC_FAST_TOD); 3233 #endif 3234 spin_unlock_irqrestore(&ugeth->lock, flags); 3235 3236 return NETDEV_TX_OK; 3237 } 3238 3239 static int ucc_geth_rx(struct ucc_geth_private *ugeth, u8 rxQ, int rx_work_limit) 3240 { 3241 struct sk_buff *skb; 3242 u8 __iomem *bd; 3243 u16 length, howmany = 0; 3244 u32 bd_status; 3245 u8 *bdBuffer; 3246 struct net_device *dev; 3247 3248 ugeth_vdbg("%s: IN", __func__); 3249 3250 dev = ugeth->ndev; 3251 3252 /* collect received buffers */ 3253 bd = ugeth->rxBd[rxQ]; 3254 3255 bd_status = in_be32((u32 __iomem *)bd); 3256 3257 /* while there are received buffers and BD is full (~R_E) */ 3258 while (!((bd_status & (R_E)) || (--rx_work_limit < 0))) { 3259 bdBuffer = (u8 *) in_be32(&((struct qe_bd __iomem *)bd)->buf); 3260 length = (u16) ((bd_status & BD_LENGTH_MASK) - 4); 3261 skb = ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]]; 3262 3263 /* determine whether buffer is first, last, first and last 3264 (single buffer frame) or middle (not first and not last) */ 3265 if (!skb || 3266 (!(bd_status & (R_F | R_L))) || 3267 (bd_status & R_ERRORS_FATAL)) { 3268 if (netif_msg_rx_err(ugeth)) 3269 ugeth_err("%s, %d: ERROR!!! skb - 0x%08x", 3270 __func__, __LINE__, (u32) skb); 3271 dev_kfree_skb(skb); 3272 3273 ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]] = NULL; 3274 dev->stats.rx_dropped++; 3275 } else { 3276 dev->stats.rx_packets++; 3277 howmany++; 3278 3279 /* Prep the skb for the packet */ 3280 skb_put(skb, length); 3281 3282 /* Tell the skb what kind of packet this is */ 3283 skb->protocol = eth_type_trans(skb, ugeth->ndev); 3284 3285 dev->stats.rx_bytes += length; 3286 /* Send the packet up the stack */ 3287 netif_receive_skb(skb); 3288 } 3289 3290 skb = get_new_skb(ugeth, bd); 3291 if (!skb) { 3292 if (netif_msg_rx_err(ugeth)) 3293 ugeth_warn("%s: No Rx Data Buffer", __func__); 3294 dev->stats.rx_dropped++; 3295 break; 3296 } 3297 3298 ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]] = skb; 3299 3300 /* update to point at the next skb */ 3301 ugeth->skb_currx[rxQ] = 3302 (ugeth->skb_currx[rxQ] + 3303 1) & RX_RING_MOD_MASK(ugeth->ug_info->bdRingLenRx[rxQ]); 3304 3305 if (bd_status & R_W) 3306 bd = ugeth->p_rx_bd_ring[rxQ]; 3307 else 3308 bd += sizeof(struct qe_bd); 3309 3310 bd_status = in_be32((u32 __iomem *)bd); 3311 } 3312 3313 ugeth->rxBd[rxQ] = bd; 3314 return howmany; 3315 } 3316 3317 static int ucc_geth_tx(struct net_device *dev, u8 txQ) 3318 { 3319 /* Start from the next BD that should be filled */ 3320 struct ucc_geth_private *ugeth = netdev_priv(dev); 3321 u8 __iomem *bd; /* BD pointer */ 3322 u32 bd_status; 3323 3324 bd = ugeth->confBd[txQ]; 3325 bd_status = in_be32((u32 __iomem *)bd); 3326 3327 /* Normal processing. */ 3328 while ((bd_status & T_R) == 0) { 3329 struct sk_buff *skb; 3330 3331 /* BD contains already transmitted buffer. */ 3332 /* Handle the transmitted buffer and release */ 3333 /* the BD to be used with the current frame */ 3334 3335 skb = ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]]; 3336 if (!skb) 3337 break; 3338 3339 dev->stats.tx_packets++; 3340 3341 dev_kfree_skb(skb); 3342 3343 ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]] = NULL; 3344 ugeth->skb_dirtytx[txQ] = 3345 (ugeth->skb_dirtytx[txQ] + 3346 1) & TX_RING_MOD_MASK(ugeth->ug_info->bdRingLenTx[txQ]); 3347 3348 /* We freed a buffer, so now we can restart transmission */ 3349 if (netif_queue_stopped(dev)) 3350 netif_wake_queue(dev); 3351 3352 /* Advance the confirmation BD pointer */ 3353 if (!(bd_status & T_W)) 3354 bd += sizeof(struct qe_bd); 3355 else 3356 bd = ugeth->p_tx_bd_ring[txQ]; 3357 bd_status = in_be32((u32 __iomem *)bd); 3358 } 3359 ugeth->confBd[txQ] = bd; 3360 return 0; 3361 } 3362 3363 static int ucc_geth_poll(struct napi_struct *napi, int budget) 3364 { 3365 struct ucc_geth_private *ugeth = container_of(napi, struct ucc_geth_private, napi); 3366 struct ucc_geth_info *ug_info; 3367 int howmany, i; 3368 3369 ug_info = ugeth->ug_info; 3370 3371 /* Tx event processing */ 3372 spin_lock(&ugeth->lock); 3373 for (i = 0; i < ug_info->numQueuesTx; i++) 3374 ucc_geth_tx(ugeth->ndev, i); 3375 spin_unlock(&ugeth->lock); 3376 3377 howmany = 0; 3378 for (i = 0; i < ug_info->numQueuesRx; i++) 3379 howmany += ucc_geth_rx(ugeth, i, budget - howmany); 3380 3381 if (howmany < budget) { 3382 napi_complete(napi); 3383 setbits32(ugeth->uccf->p_uccm, UCCE_RX_EVENTS | UCCE_TX_EVENTS); 3384 } 3385 3386 return howmany; 3387 } 3388 3389 static irqreturn_t ucc_geth_irq_handler(int irq, void *info) 3390 { 3391 struct net_device *dev = info; 3392 struct ucc_geth_private *ugeth = netdev_priv(dev); 3393 struct ucc_fast_private *uccf; 3394 struct ucc_geth_info *ug_info; 3395 register u32 ucce; 3396 register u32 uccm; 3397 3398 ugeth_vdbg("%s: IN", __func__); 3399 3400 uccf = ugeth->uccf; 3401 ug_info = ugeth->ug_info; 3402 3403 /* read and clear events */ 3404 ucce = (u32) in_be32(uccf->p_ucce); 3405 uccm = (u32) in_be32(uccf->p_uccm); 3406 ucce &= uccm; 3407 out_be32(uccf->p_ucce, ucce); 3408 3409 /* check for receive events that require processing */ 3410 if (ucce & (UCCE_RX_EVENTS | UCCE_TX_EVENTS)) { 3411 if (napi_schedule_prep(&ugeth->napi)) { 3412 uccm &= ~(UCCE_RX_EVENTS | UCCE_TX_EVENTS); 3413 out_be32(uccf->p_uccm, uccm); 3414 __napi_schedule(&ugeth->napi); 3415 } 3416 } 3417 3418 /* Errors and other events */ 3419 if (ucce & UCCE_OTHER) { 3420 if (ucce & UCC_GETH_UCCE_BSY) 3421 dev->stats.rx_errors++; 3422 if (ucce & UCC_GETH_UCCE_TXE) 3423 dev->stats.tx_errors++; 3424 } 3425 3426 return IRQ_HANDLED; 3427 } 3428 3429 #ifdef CONFIG_NET_POLL_CONTROLLER 3430 /* 3431 * Polling 'interrupt' - used by things like netconsole to send skbs 3432 * without having to re-enable interrupts. It's not called while 3433 * the interrupt routine is executing. 3434 */ 3435 static void ucc_netpoll(struct net_device *dev) 3436 { 3437 struct ucc_geth_private *ugeth = netdev_priv(dev); 3438 int irq = ugeth->ug_info->uf_info.irq; 3439 3440 disable_irq(irq); 3441 ucc_geth_irq_handler(irq, dev); 3442 enable_irq(irq); 3443 } 3444 #endif /* CONFIG_NET_POLL_CONTROLLER */ 3445 3446 static int ucc_geth_set_mac_addr(struct net_device *dev, void *p) 3447 { 3448 struct ucc_geth_private *ugeth = netdev_priv(dev); 3449 struct sockaddr *addr = p; 3450 3451 if (!is_valid_ether_addr(addr->sa_data)) 3452 return -EADDRNOTAVAIL; 3453 3454 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); 3455 3456 /* 3457 * If device is not running, we will set mac addr register 3458 * when opening the device. 3459 */ 3460 if (!netif_running(dev)) 3461 return 0; 3462 3463 spin_lock_irq(&ugeth->lock); 3464 init_mac_station_addr_regs(dev->dev_addr[0], 3465 dev->dev_addr[1], 3466 dev->dev_addr[2], 3467 dev->dev_addr[3], 3468 dev->dev_addr[4], 3469 dev->dev_addr[5], 3470 &ugeth->ug_regs->macstnaddr1, 3471 &ugeth->ug_regs->macstnaddr2); 3472 spin_unlock_irq(&ugeth->lock); 3473 3474 return 0; 3475 } 3476 3477 static int ucc_geth_init_mac(struct ucc_geth_private *ugeth) 3478 { 3479 struct net_device *dev = ugeth->ndev; 3480 int err; 3481 3482 err = ucc_struct_init(ugeth); 3483 if (err) { 3484 if (netif_msg_ifup(ugeth)) 3485 ugeth_err("%s: Cannot configure internal struct, " 3486 "aborting.", dev->name); 3487 goto err; 3488 } 3489 3490 err = ucc_geth_startup(ugeth); 3491 if (err) { 3492 if (netif_msg_ifup(ugeth)) 3493 ugeth_err("%s: Cannot configure net device, aborting.", 3494 dev->name); 3495 goto err; 3496 } 3497 3498 err = adjust_enet_interface(ugeth); 3499 if (err) { 3500 if (netif_msg_ifup(ugeth)) 3501 ugeth_err("%s: Cannot configure net device, aborting.", 3502 dev->name); 3503 goto err; 3504 } 3505 3506 /* Set MACSTNADDR1, MACSTNADDR2 */ 3507 /* For more details see the hardware spec. */ 3508 init_mac_station_addr_regs(dev->dev_addr[0], 3509 dev->dev_addr[1], 3510 dev->dev_addr[2], 3511 dev->dev_addr[3], 3512 dev->dev_addr[4], 3513 dev->dev_addr[5], 3514 &ugeth->ug_regs->macstnaddr1, 3515 &ugeth->ug_regs->macstnaddr2); 3516 3517 err = ugeth_enable(ugeth, COMM_DIR_RX_AND_TX); 3518 if (err) { 3519 if (netif_msg_ifup(ugeth)) 3520 ugeth_err("%s: Cannot enable net device, aborting.", dev->name); 3521 goto err; 3522 } 3523 3524 return 0; 3525 err: 3526 ucc_geth_stop(ugeth); 3527 return err; 3528 } 3529 3530 /* Called when something needs to use the ethernet device */ 3531 /* Returns 0 for success. */ 3532 static int ucc_geth_open(struct net_device *dev) 3533 { 3534 struct ucc_geth_private *ugeth = netdev_priv(dev); 3535 int err; 3536 3537 ugeth_vdbg("%s: IN", __func__); 3538 3539 /* Test station address */ 3540 if (dev->dev_addr[0] & ENET_GROUP_ADDR) { 3541 if (netif_msg_ifup(ugeth)) 3542 ugeth_err("%s: Multicast address used for station " 3543 "address - is this what you wanted?", 3544 __func__); 3545 return -EINVAL; 3546 } 3547 3548 err = init_phy(dev); 3549 if (err) { 3550 if (netif_msg_ifup(ugeth)) 3551 ugeth_err("%s: Cannot initialize PHY, aborting.", 3552 dev->name); 3553 return err; 3554 } 3555 3556 err = ucc_geth_init_mac(ugeth); 3557 if (err) { 3558 if (netif_msg_ifup(ugeth)) 3559 ugeth_err("%s: Cannot initialize MAC, aborting.", 3560 dev->name); 3561 goto err; 3562 } 3563 3564 err = request_irq(ugeth->ug_info->uf_info.irq, ucc_geth_irq_handler, 3565 0, "UCC Geth", dev); 3566 if (err) { 3567 if (netif_msg_ifup(ugeth)) 3568 ugeth_err("%s: Cannot get IRQ for net device, aborting.", 3569 dev->name); 3570 goto err; 3571 } 3572 3573 phy_start(ugeth->phydev); 3574 napi_enable(&ugeth->napi); 3575 netif_start_queue(dev); 3576 3577 device_set_wakeup_capable(&dev->dev, 3578 qe_alive_during_sleep() || ugeth->phydev->irq); 3579 device_set_wakeup_enable(&dev->dev, ugeth->wol_en); 3580 3581 return err; 3582 3583 err: 3584 ucc_geth_stop(ugeth); 3585 return err; 3586 } 3587 3588 /* Stops the kernel queue, and halts the controller */ 3589 static int ucc_geth_close(struct net_device *dev) 3590 { 3591 struct ucc_geth_private *ugeth = netdev_priv(dev); 3592 3593 ugeth_vdbg("%s: IN", __func__); 3594 3595 napi_disable(&ugeth->napi); 3596 3597 cancel_work_sync(&ugeth->timeout_work); 3598 ucc_geth_stop(ugeth); 3599 phy_disconnect(ugeth->phydev); 3600 ugeth->phydev = NULL; 3601 3602 free_irq(ugeth->ug_info->uf_info.irq, ugeth->ndev); 3603 3604 netif_stop_queue(dev); 3605 3606 return 0; 3607 } 3608 3609 /* Reopen device. This will reset the MAC and PHY. */ 3610 static void ucc_geth_timeout_work(struct work_struct *work) 3611 { 3612 struct ucc_geth_private *ugeth; 3613 struct net_device *dev; 3614 3615 ugeth = container_of(work, struct ucc_geth_private, timeout_work); 3616 dev = ugeth->ndev; 3617 3618 ugeth_vdbg("%s: IN", __func__); 3619 3620 dev->stats.tx_errors++; 3621 3622 ugeth_dump_regs(ugeth); 3623 3624 if (dev->flags & IFF_UP) { 3625 /* 3626 * Must reset MAC *and* PHY. This is done by reopening 3627 * the device. 3628 */ 3629 netif_tx_stop_all_queues(dev); 3630 ucc_geth_stop(ugeth); 3631 ucc_geth_init_mac(ugeth); 3632 /* Must start PHY here */ 3633 phy_start(ugeth->phydev); 3634 netif_tx_start_all_queues(dev); 3635 } 3636 3637 netif_tx_schedule_all(dev); 3638 } 3639 3640 /* 3641 * ucc_geth_timeout gets called when a packet has not been 3642 * transmitted after a set amount of time. 3643 */ 3644 static void ucc_geth_timeout(struct net_device *dev) 3645 { 3646 struct ucc_geth_private *ugeth = netdev_priv(dev); 3647 3648 schedule_work(&ugeth->timeout_work); 3649 } 3650 3651 3652 #ifdef CONFIG_PM 3653 3654 static int ucc_geth_suspend(struct platform_device *ofdev, pm_message_t state) 3655 { 3656 struct net_device *ndev = dev_get_drvdata(&ofdev->dev); 3657 struct ucc_geth_private *ugeth = netdev_priv(ndev); 3658 3659 if (!netif_running(ndev)) 3660 return 0; 3661 3662 netif_device_detach(ndev); 3663 napi_disable(&ugeth->napi); 3664 3665 /* 3666 * Disable the controller, otherwise we'll wakeup on any network 3667 * activity. 3668 */ 3669 ugeth_disable(ugeth, COMM_DIR_RX_AND_TX); 3670 3671 if (ugeth->wol_en & WAKE_MAGIC) { 3672 setbits32(ugeth->uccf->p_uccm, UCC_GETH_UCCE_MPD); 3673 setbits32(&ugeth->ug_regs->maccfg2, MACCFG2_MPE); 3674 ucc_fast_enable(ugeth->uccf, COMM_DIR_RX_AND_TX); 3675 } else if (!(ugeth->wol_en & WAKE_PHY)) { 3676 phy_stop(ugeth->phydev); 3677 } 3678 3679 return 0; 3680 } 3681 3682 static int ucc_geth_resume(struct platform_device *ofdev) 3683 { 3684 struct net_device *ndev = dev_get_drvdata(&ofdev->dev); 3685 struct ucc_geth_private *ugeth = netdev_priv(ndev); 3686 int err; 3687 3688 if (!netif_running(ndev)) 3689 return 0; 3690 3691 if (qe_alive_during_sleep()) { 3692 if (ugeth->wol_en & WAKE_MAGIC) { 3693 ucc_fast_disable(ugeth->uccf, COMM_DIR_RX_AND_TX); 3694 clrbits32(&ugeth->ug_regs->maccfg2, MACCFG2_MPE); 3695 clrbits32(ugeth->uccf->p_uccm, UCC_GETH_UCCE_MPD); 3696 } 3697 ugeth_enable(ugeth, COMM_DIR_RX_AND_TX); 3698 } else { 3699 /* 3700 * Full reinitialization is required if QE shuts down 3701 * during sleep. 3702 */ 3703 ucc_geth_memclean(ugeth); 3704 3705 err = ucc_geth_init_mac(ugeth); 3706 if (err) { 3707 ugeth_err("%s: Cannot initialize MAC, aborting.", 3708 ndev->name); 3709 return err; 3710 } 3711 } 3712 3713 ugeth->oldlink = 0; 3714 ugeth->oldspeed = 0; 3715 ugeth->oldduplex = -1; 3716 3717 phy_stop(ugeth->phydev); 3718 phy_start(ugeth->phydev); 3719 3720 napi_enable(&ugeth->napi); 3721 netif_device_attach(ndev); 3722 3723 return 0; 3724 } 3725 3726 #else 3727 #define ucc_geth_suspend NULL 3728 #define ucc_geth_resume NULL 3729 #endif 3730 3731 static phy_interface_t to_phy_interface(const char *phy_connection_type) 3732 { 3733 if (strcasecmp(phy_connection_type, "mii") == 0) 3734 return PHY_INTERFACE_MODE_MII; 3735 if (strcasecmp(phy_connection_type, "gmii") == 0) 3736 return PHY_INTERFACE_MODE_GMII; 3737 if (strcasecmp(phy_connection_type, "tbi") == 0) 3738 return PHY_INTERFACE_MODE_TBI; 3739 if (strcasecmp(phy_connection_type, "rmii") == 0) 3740 return PHY_INTERFACE_MODE_RMII; 3741 if (strcasecmp(phy_connection_type, "rgmii") == 0) 3742 return PHY_INTERFACE_MODE_RGMII; 3743 if (strcasecmp(phy_connection_type, "rgmii-id") == 0) 3744 return PHY_INTERFACE_MODE_RGMII_ID; 3745 if (strcasecmp(phy_connection_type, "rgmii-txid") == 0) 3746 return PHY_INTERFACE_MODE_RGMII_TXID; 3747 if (strcasecmp(phy_connection_type, "rgmii-rxid") == 0) 3748 return PHY_INTERFACE_MODE_RGMII_RXID; 3749 if (strcasecmp(phy_connection_type, "rtbi") == 0) 3750 return PHY_INTERFACE_MODE_RTBI; 3751 if (strcasecmp(phy_connection_type, "sgmii") == 0) 3752 return PHY_INTERFACE_MODE_SGMII; 3753 3754 return PHY_INTERFACE_MODE_MII; 3755 } 3756 3757 static int ucc_geth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 3758 { 3759 struct ucc_geth_private *ugeth = netdev_priv(dev); 3760 3761 if (!netif_running(dev)) 3762 return -EINVAL; 3763 3764 if (!ugeth->phydev) 3765 return -ENODEV; 3766 3767 return phy_mii_ioctl(ugeth->phydev, rq, cmd); 3768 } 3769 3770 static const struct net_device_ops ucc_geth_netdev_ops = { 3771 .ndo_open = ucc_geth_open, 3772 .ndo_stop = ucc_geth_close, 3773 .ndo_start_xmit = ucc_geth_start_xmit, 3774 .ndo_validate_addr = eth_validate_addr, 3775 .ndo_set_mac_address = ucc_geth_set_mac_addr, 3776 .ndo_change_mtu = eth_change_mtu, 3777 .ndo_set_rx_mode = ucc_geth_set_multi, 3778 .ndo_tx_timeout = ucc_geth_timeout, 3779 .ndo_do_ioctl = ucc_geth_ioctl, 3780 #ifdef CONFIG_NET_POLL_CONTROLLER 3781 .ndo_poll_controller = ucc_netpoll, 3782 #endif 3783 }; 3784 3785 static int ucc_geth_probe(struct platform_device* ofdev) 3786 { 3787 struct device *device = &ofdev->dev; 3788 struct device_node *np = ofdev->dev.of_node; 3789 struct net_device *dev = NULL; 3790 struct ucc_geth_private *ugeth = NULL; 3791 struct ucc_geth_info *ug_info; 3792 struct resource res; 3793 int err, ucc_num, max_speed = 0; 3794 const unsigned int *prop; 3795 const char *sprop; 3796 const void *mac_addr; 3797 phy_interface_t phy_interface; 3798 static const int enet_to_speed[] = { 3799 SPEED_10, SPEED_10, SPEED_10, 3800 SPEED_100, SPEED_100, SPEED_100, 3801 SPEED_1000, SPEED_1000, SPEED_1000, SPEED_1000, 3802 }; 3803 static const phy_interface_t enet_to_phy_interface[] = { 3804 PHY_INTERFACE_MODE_MII, PHY_INTERFACE_MODE_RMII, 3805 PHY_INTERFACE_MODE_RGMII, PHY_INTERFACE_MODE_MII, 3806 PHY_INTERFACE_MODE_RMII, PHY_INTERFACE_MODE_RGMII, 3807 PHY_INTERFACE_MODE_GMII, PHY_INTERFACE_MODE_RGMII, 3808 PHY_INTERFACE_MODE_TBI, PHY_INTERFACE_MODE_RTBI, 3809 PHY_INTERFACE_MODE_SGMII, 3810 }; 3811 3812 ugeth_vdbg("%s: IN", __func__); 3813 3814 prop = of_get_property(np, "cell-index", NULL); 3815 if (!prop) { 3816 prop = of_get_property(np, "device-id", NULL); 3817 if (!prop) 3818 return -ENODEV; 3819 } 3820 3821 ucc_num = *prop - 1; 3822 if ((ucc_num < 0) || (ucc_num > 7)) 3823 return -ENODEV; 3824 3825 ug_info = &ugeth_info[ucc_num]; 3826 if (ug_info == NULL) { 3827 if (netif_msg_probe(&debug)) 3828 ugeth_err("%s: [%d] Missing additional data!", 3829 __func__, ucc_num); 3830 return -ENODEV; 3831 } 3832 3833 ug_info->uf_info.ucc_num = ucc_num; 3834 3835 sprop = of_get_property(np, "rx-clock-name", NULL); 3836 if (sprop) { 3837 ug_info->uf_info.rx_clock = qe_clock_source(sprop); 3838 if ((ug_info->uf_info.rx_clock < QE_CLK_NONE) || 3839 (ug_info->uf_info.rx_clock > QE_CLK24)) { 3840 printk(KERN_ERR 3841 "ucc_geth: invalid rx-clock-name property\n"); 3842 return -EINVAL; 3843 } 3844 } else { 3845 prop = of_get_property(np, "rx-clock", NULL); 3846 if (!prop) { 3847 /* If both rx-clock-name and rx-clock are missing, 3848 we want to tell people to use rx-clock-name. */ 3849 printk(KERN_ERR 3850 "ucc_geth: missing rx-clock-name property\n"); 3851 return -EINVAL; 3852 } 3853 if ((*prop < QE_CLK_NONE) || (*prop > QE_CLK24)) { 3854 printk(KERN_ERR 3855 "ucc_geth: invalid rx-clock propperty\n"); 3856 return -EINVAL; 3857 } 3858 ug_info->uf_info.rx_clock = *prop; 3859 } 3860 3861 sprop = of_get_property(np, "tx-clock-name", NULL); 3862 if (sprop) { 3863 ug_info->uf_info.tx_clock = qe_clock_source(sprop); 3864 if ((ug_info->uf_info.tx_clock < QE_CLK_NONE) || 3865 (ug_info->uf_info.tx_clock > QE_CLK24)) { 3866 printk(KERN_ERR 3867 "ucc_geth: invalid tx-clock-name property\n"); 3868 return -EINVAL; 3869 } 3870 } else { 3871 prop = of_get_property(np, "tx-clock", NULL); 3872 if (!prop) { 3873 printk(KERN_ERR 3874 "ucc_geth: missing tx-clock-name property\n"); 3875 return -EINVAL; 3876 } 3877 if ((*prop < QE_CLK_NONE) || (*prop > QE_CLK24)) { 3878 printk(KERN_ERR 3879 "ucc_geth: invalid tx-clock property\n"); 3880 return -EINVAL; 3881 } 3882 ug_info->uf_info.tx_clock = *prop; 3883 } 3884 3885 err = of_address_to_resource(np, 0, &res); 3886 if (err) 3887 return -EINVAL; 3888 3889 ug_info->uf_info.regs = res.start; 3890 ug_info->uf_info.irq = irq_of_parse_and_map(np, 0); 3891 3892 ug_info->phy_node = of_parse_phandle(np, "phy-handle", 0); 3893 3894 /* Find the TBI PHY node. If it's not there, we don't support SGMII */ 3895 ug_info->tbi_node = of_parse_phandle(np, "tbi-handle", 0); 3896 3897 /* get the phy interface type, or default to MII */ 3898 prop = of_get_property(np, "phy-connection-type", NULL); 3899 if (!prop) { 3900 /* handle interface property present in old trees */ 3901 prop = of_get_property(ug_info->phy_node, "interface", NULL); 3902 if (prop != NULL) { 3903 phy_interface = enet_to_phy_interface[*prop]; 3904 max_speed = enet_to_speed[*prop]; 3905 } else 3906 phy_interface = PHY_INTERFACE_MODE_MII; 3907 } else { 3908 phy_interface = to_phy_interface((const char *)prop); 3909 } 3910 3911 /* get speed, or derive from PHY interface */ 3912 if (max_speed == 0) 3913 switch (phy_interface) { 3914 case PHY_INTERFACE_MODE_GMII: 3915 case PHY_INTERFACE_MODE_RGMII: 3916 case PHY_INTERFACE_MODE_RGMII_ID: 3917 case PHY_INTERFACE_MODE_RGMII_RXID: 3918 case PHY_INTERFACE_MODE_RGMII_TXID: 3919 case PHY_INTERFACE_MODE_TBI: 3920 case PHY_INTERFACE_MODE_RTBI: 3921 case PHY_INTERFACE_MODE_SGMII: 3922 max_speed = SPEED_1000; 3923 break; 3924 default: 3925 max_speed = SPEED_100; 3926 break; 3927 } 3928 3929 if (max_speed == SPEED_1000) { 3930 unsigned int snums = qe_get_num_of_snums(); 3931 3932 /* configure muram FIFOs for gigabit operation */ 3933 ug_info->uf_info.urfs = UCC_GETH_URFS_GIGA_INIT; 3934 ug_info->uf_info.urfet = UCC_GETH_URFET_GIGA_INIT; 3935 ug_info->uf_info.urfset = UCC_GETH_URFSET_GIGA_INIT; 3936 ug_info->uf_info.utfs = UCC_GETH_UTFS_GIGA_INIT; 3937 ug_info->uf_info.utfet = UCC_GETH_UTFET_GIGA_INIT; 3938 ug_info->uf_info.utftt = UCC_GETH_UTFTT_GIGA_INIT; 3939 ug_info->numThreadsTx = UCC_GETH_NUM_OF_THREADS_4; 3940 3941 /* If QE's snum number is 46/76 which means we need to support 3942 * 4 UECs at 1000Base-T simultaneously, we need to allocate 3943 * more Threads to Rx. 3944 */ 3945 if ((snums == 76) || (snums == 46)) 3946 ug_info->numThreadsRx = UCC_GETH_NUM_OF_THREADS_6; 3947 else 3948 ug_info->numThreadsRx = UCC_GETH_NUM_OF_THREADS_4; 3949 } 3950 3951 if (netif_msg_probe(&debug)) 3952 printk(KERN_INFO "ucc_geth: UCC%1d at 0x%8x (irq = %d)\n", 3953 ug_info->uf_info.ucc_num + 1, ug_info->uf_info.regs, 3954 ug_info->uf_info.irq); 3955 3956 /* Create an ethernet device instance */ 3957 dev = alloc_etherdev(sizeof(*ugeth)); 3958 3959 if (dev == NULL) 3960 return -ENOMEM; 3961 3962 ugeth = netdev_priv(dev); 3963 spin_lock_init(&ugeth->lock); 3964 3965 /* Create CQs for hash tables */ 3966 INIT_LIST_HEAD(&ugeth->group_hash_q); 3967 INIT_LIST_HEAD(&ugeth->ind_hash_q); 3968 3969 dev_set_drvdata(device, dev); 3970 3971 /* Set the dev->base_addr to the gfar reg region */ 3972 dev->base_addr = (unsigned long)(ug_info->uf_info.regs); 3973 3974 SET_NETDEV_DEV(dev, device); 3975 3976 /* Fill in the dev structure */ 3977 uec_set_ethtool_ops(dev); 3978 dev->netdev_ops = &ucc_geth_netdev_ops; 3979 dev->watchdog_timeo = TX_TIMEOUT; 3980 INIT_WORK(&ugeth->timeout_work, ucc_geth_timeout_work); 3981 netif_napi_add(dev, &ugeth->napi, ucc_geth_poll, 64); 3982 dev->mtu = 1500; 3983 3984 ugeth->msg_enable = netif_msg_init(debug.msg_enable, UGETH_MSG_DEFAULT); 3985 ugeth->phy_interface = phy_interface; 3986 ugeth->max_speed = max_speed; 3987 3988 err = register_netdev(dev); 3989 if (err) { 3990 if (netif_msg_probe(ugeth)) 3991 ugeth_err("%s: Cannot register net device, aborting.", 3992 dev->name); 3993 free_netdev(dev); 3994 return err; 3995 } 3996 3997 mac_addr = of_get_mac_address(np); 3998 if (mac_addr) 3999 memcpy(dev->dev_addr, mac_addr, 6); 4000 4001 ugeth->ug_info = ug_info; 4002 ugeth->dev = device; 4003 ugeth->ndev = dev; 4004 ugeth->node = np; 4005 4006 return 0; 4007 } 4008 4009 static int ucc_geth_remove(struct platform_device* ofdev) 4010 { 4011 struct device *device = &ofdev->dev; 4012 struct net_device *dev = dev_get_drvdata(device); 4013 struct ucc_geth_private *ugeth = netdev_priv(dev); 4014 4015 unregister_netdev(dev); 4016 free_netdev(dev); 4017 ucc_geth_memclean(ugeth); 4018 dev_set_drvdata(device, NULL); 4019 4020 return 0; 4021 } 4022 4023 static struct of_device_id ucc_geth_match[] = { 4024 { 4025 .type = "network", 4026 .compatible = "ucc_geth", 4027 }, 4028 {}, 4029 }; 4030 4031 MODULE_DEVICE_TABLE(of, ucc_geth_match); 4032 4033 static struct platform_driver ucc_geth_driver = { 4034 .driver = { 4035 .name = DRV_NAME, 4036 .owner = THIS_MODULE, 4037 .of_match_table = ucc_geth_match, 4038 }, 4039 .probe = ucc_geth_probe, 4040 .remove = ucc_geth_remove, 4041 .suspend = ucc_geth_suspend, 4042 .resume = ucc_geth_resume, 4043 }; 4044 4045 static int __init ucc_geth_init(void) 4046 { 4047 int i, ret; 4048 4049 if (netif_msg_drv(&debug)) 4050 printk(KERN_INFO "ucc_geth: " DRV_DESC "\n"); 4051 for (i = 0; i < 8; i++) 4052 memcpy(&(ugeth_info[i]), &ugeth_primary_info, 4053 sizeof(ugeth_primary_info)); 4054 4055 ret = platform_driver_register(&ucc_geth_driver); 4056 4057 return ret; 4058 } 4059 4060 static void __exit ucc_geth_exit(void) 4061 { 4062 platform_driver_unregister(&ucc_geth_driver); 4063 } 4064 4065 module_init(ucc_geth_init); 4066 module_exit(ucc_geth_exit); 4067 4068 MODULE_AUTHOR("Freescale Semiconductor, Inc"); 4069 MODULE_DESCRIPTION(DRV_DESC); 4070 MODULE_VERSION(DRV_VERSION); 4071 MODULE_LICENSE("GPL"); 4072