/*
 * Driver for (BCM4706)? GBit MAC core on BCMA bus.
 *
 * Copyright (C) 2012 Rafał Miłecki <zajec5@gmail.com>
 *
 * Licensed under the GNU/GPL. See COPYING for details.
 */

#include "bgmac.h"

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <bcm47xx_nvram.h>

static const struct bcma_device_id bgmac_bcma_tbl[] = {
	BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_4706_MAC_GBIT, BCMA_ANY_REV, BCMA_ANY_CLASS),
	BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_MAC_GBIT, BCMA_ANY_REV, BCMA_ANY_CLASS),
	BCMA_CORETABLE_END
};
MODULE_DEVICE_TABLE(bcma, bgmac_bcma_tbl);

static bool bgmac_wait_value(struct bcma_device *core, u16 reg, u32 mask,
			     u32 value, int timeout)
{
	u32 val;
	int i;

	for (i = 0; i < timeout / 10; i++) {
		val = bcma_read32(core, reg);
		if ((val & mask) == value)
			return true;
		udelay(10);
	}
	pr_err("Timeout waiting for reg 0x%X\n", reg);
	return false;
}

/**************************************************
 * DMA
 **************************************************/

static void bgmac_dma_tx_reset(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
{
	u32 val;
	int i;

	if (!ring->mmio_base)
		return;

	/* Suspend DMA TX ring first.
	 * bgmac_wait_value doesn't support waiting for any of a few values,
	 * so implement the whole loop here.
	 */
	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL,
		    BGMAC_DMA_TX_SUSPEND);
	for (i = 0; i < 10000 / 10; i++) {
		val = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
		val &= BGMAC_DMA_TX_STAT;
		if (val == BGMAC_DMA_TX_STAT_DISABLED ||
		    val == BGMAC_DMA_TX_STAT_IDLEWAIT ||
		    val == BGMAC_DMA_TX_STAT_STOPPED) {
			i = 0;
			break;
		}
		udelay(10);
	}
	if (i)
		bgmac_err(bgmac, "Timeout suspending DMA TX ring 0x%X (BGMAC_DMA_TX_STAT: 0x%08X)\n",
			  ring->mmio_base, val);

	/* Remove SUSPEND bit */
	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL, 0);
	if (!bgmac_wait_value(bgmac->core,
			      ring->mmio_base + BGMAC_DMA_TX_STATUS,
			      BGMAC_DMA_TX_STAT, BGMAC_DMA_TX_STAT_DISABLED,
			      10000)) {
		bgmac_warn(bgmac, "DMA TX ring 0x%X wasn't disabled on time, waiting additional 300us\n",
			   ring->mmio_base);
		udelay(300);
		val = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
		if ((val & BGMAC_DMA_TX_STAT) != BGMAC_DMA_TX_STAT_DISABLED)
			bgmac_err(bgmac, "Reset of DMA TX ring 0x%X failed\n",
				  ring->mmio_base);
	}
}

static void bgmac_dma_tx_enable(struct bgmac *bgmac,
				struct bgmac_dma_ring *ring)
{
	u32 ctl;

	ctl = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL);
	ctl |= BGMAC_DMA_TX_ENABLE;
	ctl |= BGMAC_DMA_TX_PARITY_DISABLE;
	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL, ctl);
}

static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac,
				    struct bgmac_dma_ring *ring,
				    struct sk_buff *skb)
{
	struct device *dma_dev = bgmac->core->dma_dev;
	struct net_device *net_dev = bgmac->net_dev;
	struct bgmac_dma_desc *dma_desc;
	struct bgmac_slot_info *slot;
	u32 ctl0, ctl1;
	int free_slots;

	if (skb->len > BGMAC_DESC_CTL1_LEN) {
		bgmac_err(bgmac, "Too long skb (%d)\n", skb->len);
		goto err_stop_drop;
	}

	if (ring->start <= ring->end)
		free_slots = ring->start - ring->end + BGMAC_TX_RING_SLOTS;
	else
		free_slots = ring->start - ring->end;
	if (free_slots == 1) {
		bgmac_err(bgmac, "TX ring is full, queue should be stopped!\n");
		netif_stop_queue(net_dev);
		return NETDEV_TX_BUSY;
	}

	slot = &ring->slots[ring->end];
	slot->skb = skb;
	slot->dma_addr = dma_map_single(dma_dev, skb->data, skb->len,
					DMA_TO_DEVICE);
	if (dma_mapping_error(dma_dev, slot->dma_addr)) {
		bgmac_err(bgmac, "Mapping error of skb on ring 0x%X\n",
			  ring->mmio_base);
		goto err_stop_drop;
	}

	ctl0 = BGMAC_DESC_CTL0_IOC | BGMAC_DESC_CTL0_SOF | BGMAC_DESC_CTL0_EOF;
	if (ring->end == ring->num_slots - 1)
		ctl0 |= BGMAC_DESC_CTL0_EOT;
	ctl1 = skb->len & BGMAC_DESC_CTL1_LEN;

	dma_desc = ring->cpu_base;
	dma_desc += ring->end;
	dma_desc->addr_low = cpu_to_le32(lower_32_bits(slot->dma_addr));
	dma_desc->addr_high = cpu_to_le32(upper_32_bits(slot->dma_addr));
	dma_desc->ctl0 = cpu_to_le32(ctl0);
	dma_desc->ctl1 = cpu_to_le32(ctl1);

	netdev_sent_queue(net_dev, skb->len);

	wmb();

	/* Increase ring->end to point to an empty slot. We tell hardware the
	 * first slot it should *not* read.
	 */
	if (++ring->end >= BGMAC_TX_RING_SLOTS)
		ring->end = 0;
	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_INDEX,
		    ring->index_base +
		    ring->end * sizeof(struct bgmac_dma_desc));

	/* Always keep one slot free to allow detecting bugged calls. */
	if (--free_slots == 1)
		netif_stop_queue(net_dev);

	return NETDEV_TX_OK;

err_stop_drop:
	netif_stop_queue(net_dev);
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

/* Free transmitted packets */
static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
{
	struct device *dma_dev = bgmac->core->dma_dev;
	int empty_slot;
	bool freed = false;
	unsigned bytes_compl = 0, pkts_compl = 0;

	/* The last slot that hardware didn't consume yet */
	empty_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
	empty_slot &= BGMAC_DMA_TX_STATDPTR;
	empty_slot -= ring->index_base;
	empty_slot &= BGMAC_DMA_TX_STATDPTR;
	empty_slot /= sizeof(struct bgmac_dma_desc);

	while (ring->start != empty_slot) {
		struct bgmac_slot_info *slot = &ring->slots[ring->start];

		if (slot->skb) {
			/* Unmap no longer used buffer */
			dma_unmap_single(dma_dev, slot->dma_addr,
					 slot->skb->len, DMA_TO_DEVICE);
			slot->dma_addr = 0;

			bytes_compl += slot->skb->len;
			pkts_compl++;

			/* Free memory! :) */
			dev_kfree_skb(slot->skb);
			slot->skb = NULL;
		} else {
			bgmac_err(bgmac, "Hardware reported transmission for empty TX ring slot %d! End of ring: %d\n",
				  ring->start, ring->end);
		}

		if (++ring->start >= BGMAC_TX_RING_SLOTS)
			ring->start = 0;
		freed = true;
	}

	netdev_completed_queue(bgmac->net_dev, pkts_compl, bytes_compl);

	if (freed && netif_queue_stopped(bgmac->net_dev))
		netif_wake_queue(bgmac->net_dev);
}

static void bgmac_dma_rx_reset(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
{
	if (!ring->mmio_base)
		return;

	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL, 0);
	if (!bgmac_wait_value(bgmac->core,
			      ring->mmio_base + BGMAC_DMA_RX_STATUS,
			      BGMAC_DMA_RX_STAT, BGMAC_DMA_RX_STAT_DISABLED,
			      10000))
		bgmac_err(bgmac, "Reset of ring 0x%X RX failed\n",
			  ring->mmio_base);
}

static void bgmac_dma_rx_enable(struct bgmac *bgmac,
				struct bgmac_dma_ring *ring)
{
	u32 ctl;

	ctl = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL);
	ctl &= BGMAC_DMA_RX_ADDREXT_MASK;
	ctl |= BGMAC_DMA_RX_ENABLE;
	ctl |= BGMAC_DMA_RX_PARITY_DISABLE;
	ctl |= BGMAC_DMA_RX_OVERFLOW_CONT;
	ctl |= BGMAC_RX_FRAME_OFFSET << BGMAC_DMA_RX_FRAME_OFFSET_SHIFT;
	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL, ctl);
}

static int bgmac_dma_rx_skb_for_slot(struct bgmac *bgmac,
				     struct bgmac_slot_info *slot)
{
	struct device *dma_dev = bgmac->core->dma_dev;
	struct sk_buff *skb;
	dma_addr_t dma_addr;
	struct bgmac_rx_header *rx;

	/* Alloc skb */
	skb = netdev_alloc_skb(bgmac->net_dev, BGMAC_RX_BUF_SIZE);
	if (!skb)
		return -ENOMEM;

	/* Poison - if everything goes fine, hardware will overwrite it */
	rx = (struct bgmac_rx_header *)skb->data;
	rx->len = cpu_to_le16(0xdead);
	rx->flags = cpu_to_le16(0xbeef);

	/* Map skb for the DMA */
	dma_addr = dma_map_single(dma_dev, skb->data,
				  BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(dma_dev, dma_addr)) {
		bgmac_err(bgmac, "DMA mapping error\n");
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	/* Update the slot */
	slot->skb = skb;
	slot->dma_addr = dma_addr;

	if (slot->dma_addr & 0xC0000000)
		bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");

	return 0;
}

static void bgmac_dma_rx_setup_desc(struct bgmac *bgmac,
				    struct bgmac_dma_ring *ring, int desc_idx)
{
	struct bgmac_dma_desc *dma_desc = ring->cpu_base + desc_idx;
	u32 ctl0 = 0, ctl1 = 0;

	if (desc_idx == ring->num_slots - 1)
		ctl0 |= BGMAC_DESC_CTL0_EOT;
	ctl1 |= BGMAC_RX_BUF_SIZE & BGMAC_DESC_CTL1_LEN;
	/* Is there any BGMAC device that requires extension? */
	/* ctl1 |= (addrext << B43_DMA64_DCTL1_ADDREXT_SHIFT) &
	 * B43_DMA64_DCTL1_ADDREXT_MASK;
	 */

	dma_desc->addr_low = cpu_to_le32(lower_32_bits(ring->slots[desc_idx].dma_addr));
	dma_desc->addr_high = cpu_to_le32(upper_32_bits(ring->slots[desc_idx].dma_addr));
	dma_desc->ctl0 = cpu_to_le32(ctl0);
	dma_desc->ctl1 = cpu_to_le32(ctl1);
}

static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
			     int weight)
{
	u32 end_slot;
	int handled = 0;

	end_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_STATUS);
	end_slot &= BGMAC_DMA_RX_STATDPTR;
	end_slot -= ring->index_base;
	end_slot &= BGMAC_DMA_RX_STATDPTR;
	end_slot /= sizeof(struct bgmac_dma_desc);

	ring->end = end_slot;

	while (ring->start != ring->end) {
		struct device *dma_dev = bgmac->core->dma_dev;
		struct bgmac_slot_info *slot = &ring->slots[ring->start];
		struct sk_buff *skb = slot->skb;
		struct bgmac_rx_header *rx;
		u16 len, flags;

		/* Unmap buffer to make it accessible to the CPU */
		dma_sync_single_for_cpu(dma_dev, slot->dma_addr,
					BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);

		/* Get info from the header */
		rx = (struct bgmac_rx_header *)skb->data;
		len = le16_to_cpu(rx->len);
		flags = le16_to_cpu(rx->flags);

		do {
			dma_addr_t old_dma_addr = slot->dma_addr;
			int err;

			/* Check for poison and drop or pass the packet */
			if (len == 0xdead && flags == 0xbeef) {
				bgmac_err(bgmac, "Found poisoned packet at slot %d, DMA issue!\n",
					  ring->start);
				dma_sync_single_for_device(dma_dev,
							   slot->dma_addr,
							   BGMAC_RX_BUF_SIZE,
							   DMA_FROM_DEVICE);
				break;
			}

			/* Omit CRC. */
			len -= ETH_FCS_LEN;

			/* Prepare new skb as replacement */
			err = bgmac_dma_rx_skb_for_slot(bgmac, slot);
			if (err) {
				/* Poison the old skb */
				rx->len = cpu_to_le16(0xdead);
				rx->flags = cpu_to_le16(0xbeef);

				dma_sync_single_for_device(dma_dev,
							   slot->dma_addr,
							   BGMAC_RX_BUF_SIZE,
							   DMA_FROM_DEVICE);
				break;
			}
			bgmac_dma_rx_setup_desc(bgmac, ring, ring->start);

			/* Unmap the old skb, we'll pass it up to the netif */
			dma_unmap_single(dma_dev, old_dma_addr,
					 BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);

			skb_put(skb, BGMAC_RX_FRAME_OFFSET + len);
			skb_pull(skb, BGMAC_RX_FRAME_OFFSET);

			skb_checksum_none_assert(skb);
			skb->protocol = eth_type_trans(skb, bgmac->net_dev);
			netif_receive_skb(skb);
			handled++;
		} while (0);

		if (++ring->start >= BGMAC_RX_RING_SLOTS)
			ring->start = 0;

		if (handled >= weight) /* Should never be greater */
			break;
	}

	return handled;
}

/* Does ring support unaligned addressing? */
static bool bgmac_dma_unaligned(struct bgmac *bgmac,
				struct bgmac_dma_ring *ring,
				enum bgmac_dma_ring_type ring_type)
{
	switch (ring_type) {
	case BGMAC_DMA_RING_TX:
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO,
			    0xff0);
		if (bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO))
			return true;
		break;
	case BGMAC_DMA_RING_RX:
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO,
			    0xff0);
		if (bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO))
			return true;
		break;
	}
	return false;
}

static void bgmac_dma_ring_free(struct bgmac *bgmac,
				struct bgmac_dma_ring *ring)
{
	struct device *dma_dev = bgmac->core->dma_dev;
	struct bgmac_slot_info *slot;
	int size;
	int i;

	for (i = 0; i < ring->num_slots; i++) {
		slot = &ring->slots[i];
		if (slot->skb) {
			if (slot->dma_addr)
				dma_unmap_single(dma_dev, slot->dma_addr,
						 slot->skb->len, DMA_TO_DEVICE);
			dev_kfree_skb(slot->skb);
		}
	}

	if (ring->cpu_base) {
		/* Free ring of descriptors */
		size = ring->num_slots * sizeof(struct bgmac_dma_desc);
		dma_free_coherent(dma_dev, size, ring->cpu_base,
				  ring->dma_base);
	}
}

static void bgmac_dma_free(struct bgmac *bgmac)
{
	int i;

	for (i = 0; i < BGMAC_MAX_TX_RINGS; i++)
		bgmac_dma_ring_free(bgmac, &bgmac->tx_ring[i]);
	for (i = 0; i < BGMAC_MAX_RX_RINGS; i++)
		bgmac_dma_ring_free(bgmac, &bgmac->rx_ring[i]);
}

static int bgmac_dma_alloc(struct bgmac *bgmac)
{
	struct device *dma_dev = bgmac->core->dma_dev;
	struct bgmac_dma_ring *ring;
	static const u16 ring_base[] = { BGMAC_DMA_BASE0, BGMAC_DMA_BASE1,
					 BGMAC_DMA_BASE2, BGMAC_DMA_BASE3, };
	int size; /* ring size: different for Tx and Rx */
	int err;
	int i;

	BUILD_BUG_ON(BGMAC_MAX_TX_RINGS > ARRAY_SIZE(ring_base));
	BUILD_BUG_ON(BGMAC_MAX_RX_RINGS > ARRAY_SIZE(ring_base));

	if (!(bcma_aread32(bgmac->core, BCMA_IOST) & BCMA_IOST_DMA64)) {
		bgmac_err(bgmac, "Core does not report 64-bit DMA\n");
		return -ENOTSUPP;
	}

	for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) {
		ring = &bgmac->tx_ring[i];
		ring->num_slots = BGMAC_TX_RING_SLOTS;
		ring->mmio_base = ring_base[i];

		/* Alloc ring of descriptors */
		size = ring->num_slots * sizeof(struct bgmac_dma_desc);
		ring->cpu_base = dma_zalloc_coherent(dma_dev, size,
						     &ring->dma_base,
						     GFP_KERNEL);
		if (!ring->cpu_base) {
			bgmac_err(bgmac, "Allocation of TX ring 0x%X failed\n",
				  ring->mmio_base);
			goto err_dma_free;
		}
		if (ring->dma_base & 0xC0000000)
			bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");

		ring->unaligned = bgmac_dma_unaligned(bgmac, ring,
						      BGMAC_DMA_RING_TX);
		if (ring->unaligned)
			ring->index_base = lower_32_bits(ring->dma_base);
		else
			ring->index_base = 0;

		/* No need to alloc TX slots yet */
	}

	for (i = 0; i < BGMAC_MAX_RX_RINGS; i++) {
		int j;

		ring = &bgmac->rx_ring[i];
		ring->num_slots = BGMAC_RX_RING_SLOTS;
		ring->mmio_base = ring_base[i];

		/* Alloc ring of descriptors */
		size = ring->num_slots * sizeof(struct bgmac_dma_desc);
		ring->cpu_base = dma_zalloc_coherent(dma_dev, size,
						     &ring->dma_base,
						     GFP_KERNEL);
		if (!ring->cpu_base) {
			bgmac_err(bgmac, "Allocation of RX ring 0x%X failed\n",
				  ring->mmio_base);
			err = -ENOMEM;
			goto err_dma_free;
		}
		if (ring->dma_base & 0xC0000000)
			bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");

		ring->unaligned = bgmac_dma_unaligned(bgmac, ring,
						      BGMAC_DMA_RING_RX);
		if (ring->unaligned)
			ring->index_base = lower_32_bits(ring->dma_base);
		else
			ring->index_base = 0;

		/* Alloc RX slots */
		for (j = 0; j < ring->num_slots; j++) {
			err = bgmac_dma_rx_skb_for_slot(bgmac, &ring->slots[j]);
			if (err) {
				bgmac_err(bgmac, "Can't allocate skb for slot in RX ring\n");
				goto err_dma_free;
			}
		}
	}

	return 0;

err_dma_free:
	bgmac_dma_free(bgmac);
	return -ENOMEM;
}

static void bgmac_dma_init(struct bgmac *bgmac)
{
	struct bgmac_dma_ring *ring;
	int i;

	for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) {
		ring = &bgmac->tx_ring[i];

		if (!ring->unaligned)
			bgmac_dma_tx_enable(bgmac, ring);
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO,
			    lower_32_bits(ring->dma_base));
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGHI,
			    upper_32_bits(ring->dma_base));
		if (ring->unaligned)
			bgmac_dma_tx_enable(bgmac, ring);

		ring->start = 0;
		ring->end = 0;	/* Points to the slot that should *not* be read */
	}

	for (i = 0; i < BGMAC_MAX_RX_RINGS; i++) {
		int j;

		ring = &bgmac->rx_ring[i];

		if (!ring->unaligned)
			bgmac_dma_rx_enable(bgmac, ring);
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO,
			    lower_32_bits(ring->dma_base));
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGHI,
			    upper_32_bits(ring->dma_base));
		if (ring->unaligned)
			bgmac_dma_rx_enable(bgmac, ring);

		for (j = 0; j < ring->num_slots; j++)
			bgmac_dma_rx_setup_desc(bgmac, ring, j);

		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_INDEX,
			    ring->index_base +
			    ring->num_slots * sizeof(struct bgmac_dma_desc));

		ring->start = 0;
		ring->end = 0;
	}
}

/**************************************************
 * PHY ops
 **************************************************/

static u16 bgmac_phy_read(struct bgmac *bgmac, u8 phyaddr, u8 reg)
{
	struct bcma_device *core;
	u16 phy_access_addr;
	u16 phy_ctl_addr;
	u32 tmp;

	BUILD_BUG_ON(BGMAC_PA_DATA_MASK != BCMA_GMAC_CMN_PA_DATA_MASK);
	BUILD_BUG_ON(BGMAC_PA_ADDR_MASK != BCMA_GMAC_CMN_PA_ADDR_MASK);
	BUILD_BUG_ON(BGMAC_PA_ADDR_SHIFT != BCMA_GMAC_CMN_PA_ADDR_SHIFT);
	BUILD_BUG_ON(BGMAC_PA_REG_MASK != BCMA_GMAC_CMN_PA_REG_MASK);
	BUILD_BUG_ON(BGMAC_PA_REG_SHIFT != BCMA_GMAC_CMN_PA_REG_SHIFT);
	BUILD_BUG_ON(BGMAC_PA_WRITE != BCMA_GMAC_CMN_PA_WRITE);
	BUILD_BUG_ON(BGMAC_PA_START != BCMA_GMAC_CMN_PA_START);
	BUILD_BUG_ON(BGMAC_PC_EPA_MASK != BCMA_GMAC_CMN_PC_EPA_MASK);
	BUILD_BUG_ON(BGMAC_PC_MCT_MASK != BCMA_GMAC_CMN_PC_MCT_MASK);
	BUILD_BUG_ON(BGMAC_PC_MCT_SHIFT != BCMA_GMAC_CMN_PC_MCT_SHIFT);
	BUILD_BUG_ON(BGMAC_PC_MTE != BCMA_GMAC_CMN_PC_MTE);

	if (bgmac->core->id.id == BCMA_CORE_4706_MAC_GBIT) {
		core = bgmac->core->bus->drv_gmac_cmn.core;
		phy_access_addr = BCMA_GMAC_CMN_PHY_ACCESS;
		phy_ctl_addr = BCMA_GMAC_CMN_PHY_CTL;
	} else {
		core = bgmac->core;
		phy_access_addr = BGMAC_PHY_ACCESS;
		phy_ctl_addr = BGMAC_PHY_CNTL;
	}

	tmp = bcma_read32(core, phy_ctl_addr);
	tmp &= ~BGMAC_PC_EPA_MASK;
	tmp |= phyaddr;
	bcma_write32(core, phy_ctl_addr, tmp);

	tmp = BGMAC_PA_START;
	tmp |= phyaddr << BGMAC_PA_ADDR_SHIFT;
	tmp |= reg << BGMAC_PA_REG_SHIFT;
	bcma_write32(core, phy_access_addr, tmp);

	if (!bgmac_wait_value(core, phy_access_addr, BGMAC_PA_START, 0, 1000)) {
		bgmac_err(bgmac, "Reading PHY %d register 0x%X failed\n",
			  phyaddr, reg);
		return 0xffff;
	}

	return bcma_read32(core, phy_access_addr) & BGMAC_PA_DATA_MASK;
}

/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphywr */
static int bgmac_phy_write(struct bgmac *bgmac, u8 phyaddr, u8 reg, u16 value)
{
	struct bcma_device *core;
	u16 phy_access_addr;
	u16 phy_ctl_addr;
	u32 tmp;

	if (bgmac->core->id.id == BCMA_CORE_4706_MAC_GBIT) {
		core = bgmac->core->bus->drv_gmac_cmn.core;
		phy_access_addr = BCMA_GMAC_CMN_PHY_ACCESS;
		phy_ctl_addr = BCMA_GMAC_CMN_PHY_CTL;
	} else {
		core = bgmac->core;
		phy_access_addr = BGMAC_PHY_ACCESS;
		phy_ctl_addr = BGMAC_PHY_CNTL;
	}

	tmp = bcma_read32(core, phy_ctl_addr);
	tmp &= ~BGMAC_PC_EPA_MASK;
	tmp |= phyaddr;
	bcma_write32(core, phy_ctl_addr, tmp);

	bgmac_write(bgmac, BGMAC_INT_STATUS, BGMAC_IS_MDIO);
	if (bgmac_read(bgmac, BGMAC_INT_STATUS) & BGMAC_IS_MDIO)
		bgmac_warn(bgmac, "Error setting MDIO int\n");

	tmp = BGMAC_PA_START;
	tmp |= BGMAC_PA_WRITE;
	tmp |= phyaddr << BGMAC_PA_ADDR_SHIFT;
	tmp |= reg << BGMAC_PA_REG_SHIFT;
	tmp |= value;
	bcma_write32(core, phy_access_addr, tmp);

	if (!bgmac_wait_value(core, phy_access_addr, BGMAC_PA_START, 0, 1000)) {
		bgmac_err(bgmac, "Writing to PHY %d register 0x%X failed\n",
			  phyaddr, reg);
		return -ETIMEDOUT;
	}

	return 0;
}

/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyforce */
static void bgmac_phy_force(struct bgmac *bgmac)
{
	u16 ctl;
	u16 mask = ~(BGMAC_PHY_CTL_SPEED | BGMAC_PHY_CTL_SPEED_MSB |
		     BGMAC_PHY_CTL_ANENAB | BGMAC_PHY_CTL_DUPLEX);

	if (bgmac->phyaddr == BGMAC_PHY_NOREGS)
		return;

	if (bgmac->autoneg)
		return;

	ctl = bgmac_phy_read(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL);
	ctl &= mask;
	if (bgmac->full_duplex)
		ctl |= BGMAC_PHY_CTL_DUPLEX;
	if (bgmac->speed == BGMAC_SPEED_100)
		ctl |= BGMAC_PHY_CTL_SPEED_100;
	else if (bgmac->speed == BGMAC_SPEED_1000)
		ctl |= BGMAC_PHY_CTL_SPEED_1000;
	bgmac_phy_write(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL, ctl);
}

/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyadvertise */
static void bgmac_phy_advertise(struct bgmac *bgmac)
{
	u16 adv;

	if (bgmac->phyaddr == BGMAC_PHY_NOREGS)
		return;

	if (!bgmac->autoneg)
		return;

	/* Adv selected 10/100 speeds */
	adv = bgmac_phy_read(bgmac, bgmac->phyaddr, BGMAC_PHY_ADV);
	adv &= ~(BGMAC_PHY_ADV_10HALF | BGMAC_PHY_ADV_10FULL |
		 BGMAC_PHY_ADV_100HALF | BGMAC_PHY_ADV_100FULL);
	if (!bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_10)
		adv |= BGMAC_PHY_ADV_10HALF;
	if (!bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_100)
		adv |= BGMAC_PHY_ADV_100HALF;
	if (bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_10)
		adv |= BGMAC_PHY_ADV_10FULL;
	if (bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_100)
		adv |= BGMAC_PHY_ADV_100FULL;
	bgmac_phy_write(bgmac, bgmac->phyaddr, BGMAC_PHY_ADV, adv);

	/* Adv selected 1000 speeds */
	adv = bgmac_phy_read(bgmac, bgmac->phyaddr, BGMAC_PHY_ADV2);
	adv &= ~(BGMAC_PHY_ADV2_1000HALF | BGMAC_PHY_ADV2_1000FULL);
	if (!bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_1000)
		adv |= BGMAC_PHY_ADV2_1000HALF;
	if (bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_1000)
		adv |= BGMAC_PHY_ADV2_1000FULL;
	bgmac_phy_write(bgmac, bgmac->phyaddr, BGMAC_PHY_ADV2, adv);

	/* Restart */
	bgmac_phy_write(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL,
			bgmac_phy_read(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL) |
			BGMAC_PHY_CTL_RESTART);
}

/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyinit */
static void bgmac_phy_init(struct bgmac *bgmac)
{
	struct bcma_chipinfo *ci = &bgmac->core->bus->chipinfo;
	struct bcma_drv_cc *cc = &bgmac->core->bus->drv_cc;
	u8 i;

	if (ci->id == BCMA_CHIP_ID_BCM5356) {
		for (i = 0; i < 5; i++) {
			bgmac_phy_write(bgmac, i, 0x1f, 0x008b);
			bgmac_phy_write(bgmac, i, 0x15, 0x0100);
			bgmac_phy_write(bgmac, i, 0x1f, 0x000f);
			bgmac_phy_write(bgmac, i, 0x12, 0x2aaa);
			bgmac_phy_write(bgmac, i, 0x1f, 0x000b);
		}
	}
	if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg != 10) ||
	    (ci->id == BCMA_CHIP_ID_BCM4749 && ci->pkg != 10) ||
	    (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg != 9)) {
		bcma_chipco_chipctl_maskset(cc, 2, ~0xc0000000, 0);
		bcma_chipco_chipctl_maskset(cc, 4, ~0x80000000, 0);
		for (i = 0; i < 5; i++) {
			bgmac_phy_write(bgmac, i, 0x1f, 0x000f);
			bgmac_phy_write(bgmac, i, 0x16, 0x5284);
			bgmac_phy_write(bgmac, i, 0x1f, 0x000b);
			bgmac_phy_write(bgmac, i, 0x17, 0x0010);
			bgmac_phy_write(bgmac, i, 0x1f, 0x000f);
			bgmac_phy_write(bgmac, i, 0x16, 0x5296);
			bgmac_phy_write(bgmac, i, 0x17, 0x1073);
			bgmac_phy_write(bgmac, i, 0x17, 0x9073);
			bgmac_phy_write(bgmac, i, 0x16, 0x52b6);
			bgmac_phy_write(bgmac, i, 0x17, 0x9273);
			bgmac_phy_write(bgmac, i, 0x1f, 0x000b);
		}
	}
}

/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyreset */
static void bgmac_phy_reset(struct bgmac *bgmac)
{
	if (bgmac->phyaddr == BGMAC_PHY_NOREGS)
		return;

	bgmac_phy_write(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL,
			BGMAC_PHY_CTL_RESET);
	udelay(100);
	if (bgmac_phy_read(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL) &
	    BGMAC_PHY_CTL_RESET)
		bgmac_err(bgmac, "PHY reset failed\n");
	bgmac_phy_init(bgmac);
}

/**************************************************
 * Chip ops
 **************************************************/

/* TODO: can we just drop @force? Can we avoid resetting the MAC at all if
 * there is nothing to change? Try it after stabilizing the driver.
 */
static void bgmac_cmdcfg_maskset(struct bgmac *bgmac, u32 mask, u32 set,
				 bool force)
{
	u32 cmdcfg = bgmac_read(bgmac, BGMAC_CMDCFG);
	u32 new_val = (cmdcfg & mask) | set;

	bgmac_set(bgmac, BGMAC_CMDCFG, BGMAC_CMDCFG_SR);
	udelay(2);

	if (new_val != cmdcfg || force)
		bgmac_write(bgmac, BGMAC_CMDCFG, new_val);

	bgmac_mask(bgmac, BGMAC_CMDCFG, ~BGMAC_CMDCFG_SR);
	udelay(2);
}

static void bgmac_write_mac_address(struct bgmac *bgmac, u8 *addr)
{
	u32 tmp;

	tmp = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3];
	bgmac_write(bgmac, BGMAC_MACADDR_HIGH, tmp);
	tmp = (addr[4] << 8) | addr[5];
	bgmac_write(bgmac, BGMAC_MACADDR_LOW, tmp);
}

static void bgmac_set_rx_mode(struct net_device *net_dev)
{
	struct bgmac *bgmac = netdev_priv(net_dev);

	if (net_dev->flags & IFF_PROMISC)
		bgmac_cmdcfg_maskset(bgmac, ~0, BGMAC_CMDCFG_PROM, true);
	else
		bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_PROM, 0, true);
}

#if 0 /* We don't use these registers yet */
static void bgmac_chip_stats_update(struct bgmac *bgmac)
{
	int i;

	if (bgmac->core->id.id != BCMA_CORE_4706_MAC_GBIT) {
		for (i = 0; i < BGMAC_NUM_MIB_TX_REGS; i++)
			bgmac->mib_tx_regs[i] =
				bgmac_read(bgmac,
					   BGMAC_TX_GOOD_OCTETS + (i * 4));
		for (i = 0; i < BGMAC_NUM_MIB_RX_REGS; i++)
			bgmac->mib_rx_regs[i] =
				bgmac_read(bgmac,
					   BGMAC_RX_GOOD_OCTETS + (i * 4));
	}

	/* TODO: what else? how to handle BCM4706? Specs are needed */
}
#endif

static void bgmac_clear_mib(struct bgmac *bgmac)
{
	int i;

	if (bgmac->core->id.id == BCMA_CORE_4706_MAC_GBIT)
		return;

	bgmac_set(bgmac, BGMAC_DEV_CTL, BGMAC_DC_MROR);
	for (i = 0; i < BGMAC_NUM_MIB_TX_REGS; i++)
		bgmac_read(bgmac, BGMAC_TX_GOOD_OCTETS + (i * 4));
	for (i = 0; i < BGMAC_NUM_MIB_RX_REGS; i++)
		bgmac_read(bgmac, BGMAC_RX_GOOD_OCTETS + (i * 4));
}

/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_speed */
static void bgmac_speed(struct bgmac *bgmac, int speed)
{
	u32 mask = ~(BGMAC_CMDCFG_ES_MASK | BGMAC_CMDCFG_HD);
	u32 set = 0;

	if (speed & BGMAC_SPEED_10)
		set |= BGMAC_CMDCFG_ES_10;
	if (speed & BGMAC_SPEED_100)
		set |= BGMAC_CMDCFG_ES_100;
	if (speed & BGMAC_SPEED_1000)
		set |= BGMAC_CMDCFG_ES_1000;
	if (!bgmac->full_duplex)
		set |= BGMAC_CMDCFG_HD;
	bgmac_cmdcfg_maskset(bgmac, mask, set, true);
}

static void bgmac_miiconfig(struct bgmac *bgmac)
{
	u8 imode = (bgmac_read(bgmac, BGMAC_DEV_STATUS) & BGMAC_DS_MM_MASK) >>
		   BGMAC_DS_MM_SHIFT;
	if (imode == 0 || imode == 1) {
		if (bgmac->autoneg)
			bgmac_speed(bgmac, BGMAC_SPEED_100);
		else
			bgmac_speed(bgmac, bgmac->speed);
	}
}

/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipreset */
static void bgmac_chip_reset(struct bgmac *bgmac)
{
	struct bcma_device *core = bgmac->core;
	struct bcma_bus *bus = core->bus;
	struct bcma_chipinfo *ci = &bus->chipinfo;
	u32 flags = 0;
	u32 iost;
	int i;

	if (bcma_core_is_enabled(core)) {
		if (!bgmac->stats_grabbed) {
			/* bgmac_chip_stats_update(bgmac); */
			bgmac->stats_grabbed = true;
		}

		for (i = 0; i < BGMAC_MAX_TX_RINGS; i++)
			bgmac_dma_tx_reset(bgmac, &bgmac->tx_ring[i]);

		bgmac_cmdcfg_maskset(bgmac, ~0, BGMAC_CMDCFG_ML, false);
		udelay(1);

		for (i = 0; i < BGMAC_MAX_RX_RINGS; i++)
			bgmac_dma_rx_reset(bgmac, &bgmac->rx_ring[i]);

		/* TODO: Clear software multicast filter list */
	}

	iost = bcma_aread32(core, BCMA_IOST);
	if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == 10) ||
	    (ci->id == BCMA_CHIP_ID_BCM4749 && ci->pkg == 10) ||
	    (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg == 9))
		iost &= ~BGMAC_BCMA_IOST_ATTACHED;

	if (iost & BGMAC_BCMA_IOST_ATTACHED) {
		flags = BGMAC_BCMA_IOCTL_SW_CLKEN;
		if (!bgmac->has_robosw)
			flags |= BGMAC_BCMA_IOCTL_SW_RESET;
	}

	bcma_core_enable(core, flags);

	if (core->id.rev > 2) {
		bgmac_set(bgmac, BCMA_CLKCTLST, 1 << 8);
		bgmac_wait_value(bgmac->core, BCMA_CLKCTLST, 1 << 24, 1 << 24,
				 1000);
	}

	if (ci->id == BCMA_CHIP_ID_BCM5357 || ci->id == BCMA_CHIP_ID_BCM4749 ||
	    ci->id == BCMA_CHIP_ID_BCM53572) {
		struct bcma_drv_cc *cc = &bgmac->core->bus->drv_cc;
		u8 et_swtype = 0;
		u8 sw_type = BGMAC_CHIPCTL_1_SW_TYPE_EPHY |
			     BGMAC_CHIPCTL_1_IF_TYPE_MII;
		char buf[4];

		if (bcm47xx_nvram_getenv("et_swtype", buf, sizeof(buf)) > 0) {
			if (kstrtou8(buf, 0, &et_swtype))
				bgmac_err(bgmac, "Failed to parse et_swtype (%s)\n",
					  buf);
			et_swtype &= 0x0f;
			et_swtype <<= 4;
			sw_type = et_swtype;
		} else if (ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == 9) {
			sw_type = BGMAC_CHIPCTL_1_SW_TYPE_EPHYRMII;
		} else if ((ci->id != BCMA_CHIP_ID_BCM53572 && ci->pkg == 10) ||
			   (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg == 9)) {
			sw_type = BGMAC_CHIPCTL_1_IF_TYPE_RGMII |
				  BGMAC_CHIPCTL_1_SW_TYPE_RGMII;
		}
		bcma_chipco_chipctl_maskset(cc, 1,
					    ~(BGMAC_CHIPCTL_1_IF_TYPE_MASK |
					      BGMAC_CHIPCTL_1_SW_TYPE_MASK),
					    sw_type);
	}

	if (iost & BGMAC_BCMA_IOST_ATTACHED && !bgmac->has_robosw)
		bcma_awrite32(core, BCMA_IOCTL,
			      bcma_aread32(core, BCMA_IOCTL) &
			      ~BGMAC_BCMA_IOCTL_SW_RESET);

	/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_reset
	 * Specs don't mention using BGMAC_CMDCFG_SR, but in this routine
	 * BGMAC_CMDCFG is read _after_ putting the chip in reset, so the bit
	 * has to be kept set until taking the MAC out of reset.
	 */
	bgmac_cmdcfg_maskset(bgmac,
			     ~(BGMAC_CMDCFG_TE |
			       BGMAC_CMDCFG_RE |
			       BGMAC_CMDCFG_RPI |
			       BGMAC_CMDCFG_TAI |
			       BGMAC_CMDCFG_HD |
			       BGMAC_CMDCFG_ML |
			       BGMAC_CMDCFG_CFE |
			       BGMAC_CMDCFG_RL |
			       BGMAC_CMDCFG_RED |
			       BGMAC_CMDCFG_PE |
			       BGMAC_CMDCFG_TPI |
			       BGMAC_CMDCFG_PAD_EN |
			       BGMAC_CMDCFG_PF),
			     BGMAC_CMDCFG_PROM |
			     BGMAC_CMDCFG_NLC |
			     BGMAC_CMDCFG_CFE |
			     BGMAC_CMDCFG_SR,
			     false);

	bgmac_clear_mib(bgmac);
	if (core->id.id == BCMA_CORE_4706_MAC_GBIT)
		bcma_maskset32(bgmac->cmn, BCMA_GMAC_CMN_PHY_CTL, ~0,
			       BCMA_GMAC_CMN_PC_MTE);
	else
		bgmac_set(bgmac, BGMAC_PHY_CNTL, BGMAC_PC_MTE);
	bgmac_miiconfig(bgmac);
	bgmac_phy_init(bgmac);

	netdev_reset_queue(bgmac->net_dev);

	bgmac->int_status = 0;
}

static void bgmac_chip_intrs_on(struct bgmac *bgmac)
{
	bgmac_write(bgmac, BGMAC_INT_MASK, bgmac->int_mask);
}

static void bgmac_chip_intrs_off(struct bgmac *bgmac)
{
	bgmac_write(bgmac, BGMAC_INT_MASK, 0);
	bgmac_read(bgmac, BGMAC_INT_MASK);
}

/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_enable */
static void bgmac_enable(struct bgmac *bgmac)
{
	struct bcma_chipinfo *ci = &bgmac->core->bus->chipinfo;
	u32 cmdcfg;
	u32 mode;
	u32 rxq_ctl;
	u32 fl_ctl;
	u16 bp_clk;
	u8 mdp;

	cmdcfg = bgmac_read(bgmac, BGMAC_CMDCFG);
	bgmac_cmdcfg_maskset(bgmac, ~(BGMAC_CMDCFG_TE | BGMAC_CMDCFG_RE),
			     BGMAC_CMDCFG_SR, true);
	udelay(2);
	cmdcfg |= BGMAC_CMDCFG_TE | BGMAC_CMDCFG_RE;
	bgmac_write(bgmac, BGMAC_CMDCFG, cmdcfg);

	mode = (bgmac_read(bgmac, BGMAC_DEV_STATUS) & BGMAC_DS_MM_MASK) >>
	       BGMAC_DS_MM_SHIFT;
	if (ci->id != BCMA_CHIP_ID_BCM47162 || mode != 0)
		bgmac_set(bgmac, BCMA_CLKCTLST, BCMA_CLKCTLST_FORCEHT);
	if (ci->id == BCMA_CHIP_ID_BCM47162 && mode == 2)
		bcma_chipco_chipctl_maskset(&bgmac->core->bus->drv_cc, 1, ~0,
					    BGMAC_CHIPCTL_1_RXC_DLL_BYPASS);

	switch (ci->id) {
	case BCMA_CHIP_ID_BCM5357:
	case BCMA_CHIP_ID_BCM4749:
	case BCMA_CHIP_ID_BCM53572:
	case BCMA_CHIP_ID_BCM4716:
	case BCMA_CHIP_ID_BCM47162:
		fl_ctl = 0x03cb04cb;
		if (ci->id == BCMA_CHIP_ID_BCM5357 ||
		    ci->id == BCMA_CHIP_ID_BCM4749 ||
		    ci->id == BCMA_CHIP_ID_BCM53572)
			fl_ctl = 0x2300e1;
		bgmac_write(bgmac, BGMAC_FLOW_CTL_THRESH, fl_ctl);
		bgmac_write(bgmac, BGMAC_PAUSE_CTL, 0x27fff);
		break;
	}

	rxq_ctl = bgmac_read(bgmac, BGMAC_RXQ_CTL);
	rxq_ctl &= ~BGMAC_RXQ_CTL_MDP_MASK;
	bp_clk = bcma_pmu_get_bus_clock(&bgmac->core->bus->drv_cc) / 1000000;
	mdp = (bp_clk * 128 / 1000) - 3;
	rxq_ctl |= (mdp << BGMAC_RXQ_CTL_MDP_SHIFT);
	bgmac_write(bgmac, BGMAC_RXQ_CTL, rxq_ctl);
}

/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipinit */
static void bgmac_chip_init(struct bgmac *bgmac, bool full_init)
{
	struct bgmac_dma_ring *ring;
	int i;

	/* 1 interrupt per received frame */
	bgmac_write(bgmac, BGMAC_INT_RECV_LAZY, 1 << BGMAC_IRL_FC_SHIFT);

	/* Enable 802.3x tx flow control (honor received PAUSE frames) */
	bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_RPI, 0, true);

	bgmac_set_rx_mode(bgmac->net_dev);

	bgmac_write_mac_address(bgmac, bgmac->net_dev->dev_addr);

	if (bgmac->loopback)
		bgmac_cmdcfg_maskset(bgmac, ~0, BGMAC_CMDCFG_ML, false);
	else
		bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_ML, 0, false);

	bgmac_write(bgmac, BGMAC_RXMAX_LENGTH, 32 + ETHER_MAX_LEN);

	if (!bgmac->autoneg) {
		bgmac_speed(bgmac, bgmac->speed);
		bgmac_phy_force(bgmac);
	} else if (bgmac->speed) { /* if there is anything to adv */
		bgmac_phy_advertise(bgmac);
	}

	if (full_init) {
		bgmac_dma_init(bgmac);
		if (1) /* FIXME: is there any case we don't want IRQs? */
			bgmac_chip_intrs_on(bgmac);
	} else {
		for (i = 0; i < BGMAC_MAX_RX_RINGS; i++) {
			ring = &bgmac->rx_ring[i];
			bgmac_dma_rx_enable(bgmac, ring);
		}
	}

	bgmac_enable(bgmac);
}

static irqreturn_t bgmac_interrupt(int irq, void *dev_id)
{
	struct bgmac *bgmac = netdev_priv(dev_id);

	u32 int_status = bgmac_read(bgmac, BGMAC_INT_STATUS);
	int_status &= bgmac->int_mask;

	if (!int_status)
		return IRQ_NONE;

	/* Ack */
	bgmac_write(bgmac, BGMAC_INT_STATUS, int_status);

	/* Disable new interrupts until handling existing ones */
	bgmac_chip_intrs_off(bgmac);

	bgmac->int_status = int_status;

	napi_schedule(&bgmac->napi);

	return IRQ_HANDLED;
}

static int bgmac_poll(struct napi_struct *napi, int weight)
{
	struct bgmac *bgmac = container_of(napi, struct bgmac, napi);
	struct bgmac_dma_ring *ring;
	int handled = 0;

	if (bgmac->int_status & BGMAC_IS_TX0) {
		ring = &bgmac->tx_ring[0];
		bgmac_dma_tx_free(bgmac, ring);
		bgmac->int_status &= ~BGMAC_IS_TX0;
	}

	if (bgmac->int_status & BGMAC_IS_RX) {
		ring = &bgmac->rx_ring[0];
		handled += bgmac_dma_rx_read(bgmac, ring, weight);
		bgmac->int_status &= ~BGMAC_IS_RX;
	}

	if (bgmac->int_status) {
		bgmac_err(bgmac, "Unknown IRQs: 0x%08X\n", bgmac->int_status);
		bgmac->int_status = 0;
	}

	if (handled < weight)
		napi_complete(napi);

	bgmac_chip_intrs_on(bgmac);

	return handled;
}

/**************************************************
 * net_device_ops
 **************************************************/

static int bgmac_open(struct net_device *net_dev)
{
	struct bgmac *bgmac = netdev_priv(net_dev);
	int err = 0;

	bgmac_chip_reset(bgmac);
	/* Specs mention reclaiming rings here, but we do that in DMA init */
	bgmac_chip_init(bgmac, true);

	err = request_irq(bgmac->core->irq, bgmac_interrupt, IRQF_SHARED,
			  KBUILD_MODNAME, net_dev);
	if (err < 0) {
		bgmac_err(bgmac, "IRQ request error: %d!\n", err);
		goto err_out;
	}
	napi_enable(&bgmac->napi);

	netif_carrier_on(net_dev);

err_out:
	return err;
}

static int bgmac_stop(struct net_device *net_dev)
{
	struct bgmac *bgmac = netdev_priv(net_dev);

	netif_carrier_off(net_dev);

	napi_disable(&bgmac->napi);
	bgmac_chip_intrs_off(bgmac);
	free_irq(bgmac->core->irq, net_dev);

	bgmac_chip_reset(bgmac);

	return 0;
}

static netdev_tx_t bgmac_start_xmit(struct sk_buff *skb,
				    struct net_device *net_dev)
{
	struct bgmac *bgmac = netdev_priv(net_dev);
	struct bgmac_dma_ring *ring;

	/* No QOS support yet */
	ring = &bgmac->tx_ring[0];
	return bgmac_dma_tx_add(bgmac, ring, skb);
}

static int bgmac_set_mac_address(struct net_device *net_dev, void *addr)
{
	struct bgmac *bgmac = netdev_priv(net_dev);
	int ret;

	ret = eth_prepare_mac_addr_change(net_dev, addr);
	if (ret < 0)
		return ret;
	bgmac_write_mac_address(bgmac, (u8 *)addr);
	eth_commit_mac_addr_change(net_dev, addr);
	return 0;
}

static int bgmac_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
{
	struct bgmac *bgmac = netdev_priv(net_dev);
	struct mii_ioctl_data *data = if_mii(ifr);

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bgmac->phyaddr;
		/* fallthru */
	case SIOCGMIIREG:
		if (!netif_running(net_dev))
			return -EAGAIN;
		data->val_out = bgmac_phy_read(bgmac, data->phy_id,
					       data->reg_num & 0x1f);
		return 0;
	case SIOCSMIIREG:
		if (!netif_running(net_dev))
			return -EAGAIN;
		bgmac_phy_write(bgmac, data->phy_id, data->reg_num & 0x1f,
				data->val_in);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static const struct net_device_ops bgmac_netdev_ops = {
	.ndo_open		= bgmac_open,
	.ndo_stop		= bgmac_stop,
	.ndo_start_xmit		= bgmac_start_xmit,
	.ndo_set_rx_mode	= bgmac_set_rx_mode,
	.ndo_set_mac_address	= bgmac_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= bgmac_ioctl,
};

/**************************************************
 * ethtool_ops
 **************************************************/

static int bgmac_get_settings(struct net_device *net_dev,
			      struct ethtool_cmd *cmd)
{
	struct bgmac *bgmac = netdev_priv(net_dev);

	cmd->supported = SUPPORTED_10baseT_Half |
			 SUPPORTED_10baseT_Full |
			 SUPPORTED_100baseT_Half |
			 SUPPORTED_100baseT_Full |
			 SUPPORTED_1000baseT_Half |
			 SUPPORTED_1000baseT_Full |
			 SUPPORTED_Autoneg;

	if (bgmac->autoneg) {
		WARN_ON(cmd->advertising);
		if (bgmac->full_duplex) {
			if (bgmac->speed & BGMAC_SPEED_10)
				cmd->advertising |= ADVERTISED_10baseT_Full;
			if (bgmac->speed & BGMAC_SPEED_100)
				cmd->advertising |= ADVERTISED_100baseT_Full;
			if (bgmac->speed & BGMAC_SPEED_1000)
				cmd->advertising |= ADVERTISED_1000baseT_Full;
		} else {
			if (bgmac->speed & BGMAC_SPEED_10)
				cmd->advertising |= ADVERTISED_10baseT_Half;
			if (bgmac->speed & BGMAC_SPEED_100)
				cmd->advertising |= ADVERTISED_100baseT_Half;
			if (bgmac->speed & BGMAC_SPEED_1000)
				cmd->advertising |= ADVERTISED_1000baseT_Half;
		}
	} else {
		switch (bgmac->speed) {
		case BGMAC_SPEED_10:
			ethtool_cmd_speed_set(cmd, SPEED_10);
			break;
		case BGMAC_SPEED_100:
			ethtool_cmd_speed_set(cmd, SPEED_100);
			break;
		case BGMAC_SPEED_1000:
			ethtool_cmd_speed_set(cmd, SPEED_1000);
			break;
		}
	}

	cmd->duplex = bgmac->full_duplex ? DUPLEX_FULL : DUPLEX_HALF;

	cmd->autoneg = bgmac->autoneg;

	return 0;
}

#if 0
static int bgmac_set_settings(struct net_device *net_dev,
			      struct ethtool_cmd *cmd)
{
	struct bgmac *bgmac = netdev_priv(net_dev);

	return -1;
}
#endif

static void bgmac_get_drvinfo(struct net_device *net_dev,
			      struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->bus_info, "BCMA", sizeof(info->bus_info));
}

static const struct ethtool_ops bgmac_ethtool_ops = {
	.get_settings		= bgmac_get_settings,
	.get_drvinfo		= bgmac_get_drvinfo,
};

/**************************************************
 * MII
 **************************************************/

static int bgmac_mii_read(struct mii_bus *bus, int mii_id, int regnum)
{
	return bgmac_phy_read(bus->priv, mii_id, regnum);
}

static int bgmac_mii_write(struct mii_bus *bus, int mii_id, int regnum,
			   u16 value)
{
	return bgmac_phy_write(bus->priv, mii_id, regnum, value);
}

static int bgmac_mii_register(struct bgmac *bgmac)
{
	struct mii_bus *mii_bus;
	int i, err = 0;

	mii_bus = mdiobus_alloc();
	if (!mii_bus)
		return -ENOMEM;

	mii_bus->name = "bgmac mii bus";
	sprintf(mii_bus->id, "%s-%d-%d", "bgmac", bgmac->core->bus->num,
		bgmac->core->core_unit);
	mii_bus->priv = bgmac;
	mii_bus->read = bgmac_mii_read;
	mii_bus->write = bgmac_mii_write;
	mii_bus->parent = &bgmac->core->dev;
	mii_bus->phy_mask = ~(1 << bgmac->phyaddr);

	mii_bus->irq = kmalloc_array(PHY_MAX_ADDR, sizeof(int), GFP_KERNEL);
	if (!mii_bus->irq) {
		err = -ENOMEM;
		goto err_free_bus;
	}
	for (i = 0; i < PHY_MAX_ADDR; i++)
		mii_bus->irq[i] = PHY_POLL;

	err = mdiobus_register(mii_bus);
	if (err) {
		bgmac_err(bgmac, "Registration of mii bus failed\n");
		goto err_free_irq;
	}

	bgmac->mii_bus = mii_bus;

	return err;

err_free_irq:
	kfree(mii_bus->irq);
err_free_bus:
	mdiobus_free(mii_bus);
	return err;
}

static void bgmac_mii_unregister(struct bgmac *bgmac)
{
	struct mii_bus *mii_bus = bgmac->mii_bus;

	mdiobus_unregister(mii_bus);
	kfree(mii_bus->irq);
	mdiobus_free(mii_bus);
}

/**************************************************
 * BCMA bus ops
 **************************************************/

/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipattach */
static int bgmac_probe(struct bcma_device *core)
{
	struct net_device *net_dev;
	struct bgmac *bgmac;
	struct ssb_sprom *sprom = &core->bus->sprom;
	u8 *mac = core->core_unit ? sprom->et1mac : sprom->et0mac;
	int err;

	/* We don't support 2nd, 3rd, ... units, SPROM has to be adjusted */
	if (core->core_unit > 1) {
		pr_err("Unsupported core_unit %d\n", core->core_unit);
		return -ENOTSUPP;
	}

	if (!is_valid_ether_addr(mac)) {
		dev_err(&core->dev, "Invalid MAC addr: %pM\n", mac);
		eth_random_addr(mac);
		dev_warn(&core->dev, "Using random MAC: %pM\n", mac);
	}

	/* Allocation and references */
	net_dev = alloc_etherdev(sizeof(*bgmac));
	if (!net_dev)
		return -ENOMEM;
	net_dev->netdev_ops = &bgmac_netdev_ops;
	net_dev->irq = core->irq;
	SET_ETHTOOL_OPS(net_dev, &bgmac_ethtool_ops);
	bgmac = netdev_priv(net_dev);
	bgmac->net_dev = net_dev;
	bgmac->core = core;
	bcma_set_drvdata(core, bgmac);

	/* Defaults */
	bgmac->autoneg = true;
	bgmac->full_duplex = true;
	bgmac->speed = BGMAC_SPEED_10 | BGMAC_SPEED_100 | BGMAC_SPEED_1000;
	memcpy(bgmac->net_dev->dev_addr, mac, ETH_ALEN);

	/* On BCM4706 we need common core to access PHY */
	if (core->id.id == BCMA_CORE_4706_MAC_GBIT &&
	    !core->bus->drv_gmac_cmn.core) {
		bgmac_err(bgmac, "GMAC CMN core not found (required for BCM4706)\n");
		err = -ENODEV;
		goto err_netdev_free;
	}
	bgmac->cmn = core->bus->drv_gmac_cmn.core;

	bgmac->phyaddr = core->core_unit ? sprom->et1phyaddr :
			 sprom->et0phyaddr;
	bgmac->phyaddr &= BGMAC_PHY_MASK;
	if (bgmac->phyaddr == BGMAC_PHY_MASK) {
		bgmac_err(bgmac, "No PHY found\n");
		err = -ENODEV;
		goto err_netdev_free;
	}
	bgmac_info(bgmac, "Found PHY addr: %d%s\n", bgmac->phyaddr,
		   bgmac->phyaddr == BGMAC_PHY_NOREGS ? " (NOREGS)" : "");

	if (core->bus->hosttype == BCMA_HOSTTYPE_PCI) {
		bgmac_err(bgmac, "PCI setup not implemented\n");
		err = -ENOTSUPP;
		goto err_netdev_free;
	}

	bgmac_chip_reset(bgmac);

	err = bgmac_dma_alloc(bgmac);
	if (err) {
		bgmac_err(bgmac, "Unable to alloc memory for DMA\n");
		goto err_netdev_free;
	}

	bgmac->int_mask = BGMAC_IS_ERRMASK | BGMAC_IS_RX | BGMAC_IS_TX_MASK;
	if (bcm47xx_nvram_getenv("et0_no_txint", NULL, 0) == 0)
		bgmac->int_mask &= ~BGMAC_IS_TX_MASK;

	/* TODO: reset the external phy. Specs are needed */
	bgmac_phy_reset(bgmac);

	bgmac->has_robosw = !!(core->bus->sprom.boardflags_lo &
			       BGMAC_BFL_ENETROBO);
	if (bgmac->has_robosw)
		bgmac_warn(bgmac, "Support for Roboswitch not implemented\n");

	if (core->bus->sprom.boardflags_lo & BGMAC_BFL_ENETADM)
		bgmac_warn(bgmac, "Support for ADMtek ethernet switch not implemented\n");

	err = bgmac_mii_register(bgmac);
	if (err) {
		bgmac_err(bgmac, "Cannot register MDIO\n");
		err = -ENOTSUPP;
		goto err_dma_free;
	}

	err = register_netdev(bgmac->net_dev);
	if (err) {
		bgmac_err(bgmac, "Cannot register net device\n");
		err = -ENOTSUPP;
		goto err_mii_unregister;
	}

	netif_carrier_off(net_dev);

	netif_napi_add(net_dev, &bgmac->napi, bgmac_poll, BGMAC_WEIGHT);

	return 0;

err_mii_unregister:
	bgmac_mii_unregister(bgmac);
err_dma_free:
	bgmac_dma_free(bgmac);

err_netdev_free:
	bcma_set_drvdata(core, NULL);
	free_netdev(net_dev);

	return err;
}

static void bgmac_remove(struct bcma_device *core)
{
	struct bgmac *bgmac = bcma_get_drvdata(core);

	netif_napi_del(&bgmac->napi);
	unregister_netdev(bgmac->net_dev);
	bgmac_mii_unregister(bgmac);
	bgmac_dma_free(bgmac);
	bcma_set_drvdata(core, NULL);
	free_netdev(bgmac->net_dev);
}

static struct bcma_driver bgmac_bcma_driver = {
	.name		= KBUILD_MODNAME,
	.id_table	= bgmac_bcma_tbl,
	.probe		= bgmac_probe,
	.remove		= bgmac_remove,
};

static int __init bgmac_init(void)
{
	int err;

	err = bcma_driver_register(&bgmac_bcma_driver);
	if (err)
		return err;
	pr_info("Broadcom 47xx GBit MAC driver loaded\n");

	return 0;
}

static void __exit bgmac_exit(void)
{
	bcma_driver_unregister(&bgmac_bcma_driver);
}

module_init(bgmac_init)
module_exit(bgmac_exit)

MODULE_AUTHOR("Rafał Miłecki");
MODULE_LICENSE("GPL");