1 /* SPDX-License-Identifier: GPL-2.0+ */ 2 /* Copyright (C) 2018 Microchip Technology Inc. */ 3 4 #include <linux/module.h> 5 #include <linux/pci.h> 6 #include <linux/netdevice.h> 7 #include <linux/etherdevice.h> 8 #include <linux/crc32.h> 9 #include <linux/microchipphy.h> 10 #include <linux/net_tstamp.h> 11 #include <linux/of_mdio.h> 12 #include <linux/of_net.h> 13 #include <linux/phy.h> 14 #include <linux/phy_fixed.h> 15 #include <linux/rtnetlink.h> 16 #include <linux/iopoll.h> 17 #include <linux/crc16.h> 18 #include "lan743x_main.h" 19 #include "lan743x_ethtool.h" 20 21 #define MMD_ACCESS_ADDRESS 0 22 #define MMD_ACCESS_WRITE 1 23 #define MMD_ACCESS_READ 2 24 #define MMD_ACCESS_READ_INC 3 25 #define PCS_POWER_STATE_DOWN 0x6 26 #define PCS_POWER_STATE_UP 0x4 27 28 static void pci11x1x_strap_get_status(struct lan743x_adapter *adapter) 29 { 30 u32 chip_rev; 31 u32 cfg_load; 32 u32 hw_cfg; 33 u32 strap; 34 int ret; 35 36 /* Timeout = 100 (i.e. 1 sec (10 msce * 100)) */ 37 ret = lan743x_hs_syslock_acquire(adapter, 100); 38 if (ret < 0) { 39 netif_err(adapter, drv, adapter->netdev, 40 "Sys Lock acquire failed ret:%d\n", ret); 41 return; 42 } 43 44 cfg_load = lan743x_csr_read(adapter, ETH_SYS_CONFIG_LOAD_STARTED_REG); 45 lan743x_hs_syslock_release(adapter); 46 hw_cfg = lan743x_csr_read(adapter, HW_CFG); 47 48 if (cfg_load & GEN_SYS_LOAD_STARTED_REG_ETH_ || 49 hw_cfg & HW_CFG_RST_PROTECT_) { 50 strap = lan743x_csr_read(adapter, STRAP_READ); 51 if (strap & STRAP_READ_SGMII_EN_) 52 adapter->is_sgmii_en = true; 53 else 54 adapter->is_sgmii_en = false; 55 } else { 56 chip_rev = lan743x_csr_read(adapter, FPGA_REV); 57 if (chip_rev) { 58 if (chip_rev & FPGA_SGMII_OP) 59 adapter->is_sgmii_en = true; 60 else 61 adapter->is_sgmii_en = false; 62 } else { 63 adapter->is_sgmii_en = false; 64 } 65 } 66 netif_dbg(adapter, drv, adapter->netdev, 67 "SGMII I/F %sable\n", adapter->is_sgmii_en ? 
"En" : "Dis"); 68 } 69 70 static bool is_pci11x1x_chip(struct lan743x_adapter *adapter) 71 { 72 struct lan743x_csr *csr = &adapter->csr; 73 u32 id_rev = csr->id_rev; 74 75 if (((id_rev & 0xFFFF0000) == ID_REV_ID_A011_) || 76 ((id_rev & 0xFFFF0000) == ID_REV_ID_A041_)) { 77 return true; 78 } 79 return false; 80 } 81 82 static void lan743x_pci_cleanup(struct lan743x_adapter *adapter) 83 { 84 pci_release_selected_regions(adapter->pdev, 85 pci_select_bars(adapter->pdev, 86 IORESOURCE_MEM)); 87 pci_disable_device(adapter->pdev); 88 } 89 90 static int lan743x_pci_init(struct lan743x_adapter *adapter, 91 struct pci_dev *pdev) 92 { 93 unsigned long bars = 0; 94 int ret; 95 96 adapter->pdev = pdev; 97 ret = pci_enable_device_mem(pdev); 98 if (ret) 99 goto return_error; 100 101 netif_info(adapter, probe, adapter->netdev, 102 "PCI: Vendor ID = 0x%04X, Device ID = 0x%04X\n", 103 pdev->vendor, pdev->device); 104 bars = pci_select_bars(pdev, IORESOURCE_MEM); 105 if (!test_bit(0, &bars)) 106 goto disable_device; 107 108 ret = pci_request_selected_regions(pdev, bars, DRIVER_NAME); 109 if (ret) 110 goto disable_device; 111 112 pci_set_master(pdev); 113 return 0; 114 115 disable_device: 116 pci_disable_device(adapter->pdev); 117 118 return_error: 119 return ret; 120 } 121 122 u32 lan743x_csr_read(struct lan743x_adapter *adapter, int offset) 123 { 124 return ioread32(&adapter->csr.csr_address[offset]); 125 } 126 127 void lan743x_csr_write(struct lan743x_adapter *adapter, int offset, 128 u32 data) 129 { 130 iowrite32(data, &adapter->csr.csr_address[offset]); 131 } 132 133 #define LAN743X_CSR_READ_OP(offset) lan743x_csr_read(adapter, offset) 134 135 static int lan743x_csr_light_reset(struct lan743x_adapter *adapter) 136 { 137 u32 data; 138 139 data = lan743x_csr_read(adapter, HW_CFG); 140 data |= HW_CFG_LRST_; 141 lan743x_csr_write(adapter, HW_CFG, data); 142 143 return readx_poll_timeout(LAN743X_CSR_READ_OP, HW_CFG, data, 144 !(data & HW_CFG_LRST_), 100000, 10000000); 145 } 146 147 
/* Poll CSR @offset until (data & @bit_mask) matches @target_value (0/1).
 * Sleeps up to @usleep_max between reads; the overall timeout is
 * @usleep_min * @count microseconds.  Returns 0 or -ETIMEDOUT.
 */
static int lan743x_csr_wait_for_bit(struct lan743x_adapter *adapter,
				    int offset, u32 bit_mask,
				    int target_value, int usleep_min,
				    int usleep_max, int count)
{
	u32 data;

	return readx_poll_timeout(LAN743X_CSR_READ_OP, offset, data,
				  target_value == ((data & bit_mask) ? 1 : 0),
				  usleep_max, usleep_min * count);
}

/* Map BAR 0, validate the chip ID, derive CSR capability flags from the
 * silicon revision and perform a light reset.
 * Returns 0 or a negative errno.
 */
static int lan743x_csr_init(struct lan743x_adapter *adapter)
{
	struct lan743x_csr *csr = &adapter->csr;
	resource_size_t bar_start, bar_length;
	int result;

	bar_start = pci_resource_start(adapter->pdev, 0);
	bar_length = pci_resource_len(adapter->pdev, 0);
	csr->csr_address = devm_ioremap(&adapter->pdev->dev,
					bar_start, bar_length);
	if (!csr->csr_address) {
		result = -ENOMEM;
		goto clean_up;
	}

	csr->id_rev = lan743x_csr_read(adapter, ID_REV);
	csr->fpga_rev = lan743x_csr_read(adapter, FPGA_REV);
	netif_info(adapter, probe, adapter->netdev,
		   "ID_REV = 0x%08X, FPGA_REV = %d.%d\n",
		   csr->id_rev, FPGA_REV_GET_MAJOR_(csr->fpga_rev),
		   FPGA_REV_GET_MINOR_(csr->fpga_rev));
	if (!ID_REV_IS_VALID_CHIP_ID_(csr->id_rev)) {
		result = -ENODEV;
		goto clean_up;
	}

	/* A0 silicon does not support interrupt auto set/clear */
	csr->flags = LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR;
	switch (csr->id_rev & ID_REV_CHIP_REV_MASK_) {
	case ID_REV_CHIP_REV_A0_:
		csr->flags |= LAN743X_CSR_FLAG_IS_A0;
		csr->flags &= ~LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR;
		break;
	case ID_REV_CHIP_REV_B0_:
		csr->flags |= LAN743X_CSR_FLAG_IS_B0;
		break;
	}

	result = lan743x_csr_light_reset(adapter);
	if (result)
		goto clean_up;
	return 0;
clean_up:
	return result;
}

/* Handler for the software (test) interrupt: latch the flag and wake
 * the waiter in lan743x_intr_test_isr().
 */
static void lan743x_intr_software_isr(struct lan743x_adapter *adapter)
{
	struct lan743x_intr *intr = &adapter->intr;

	/* disable the interrupt to prevent repeated re-triggering */
	lan743x_csr_write(adapter, INT_EN_CLR, INT_BIT_SW_GP_);
	intr->software_isr_flag = true;
	wake_up(&intr->software_isr_wq);
}

/* TX channel interrupt handler: on an IOC event, schedule NAPI and
 * leave the channel interrupt disabled (NAPI poll re-enables it);
 * otherwise re-enable the interrupt before returning.
 */
static void lan743x_tx_isr(void *context, u32 int_sts, u32 flags)
{
	struct lan743x_tx *tx = context;
	struct lan743x_adapter *adapter = tx->adapter;
	bool enable_flag = true;

	lan743x_csr_read(adapter, INT_EN_SET);
	if (flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR) {
		lan743x_csr_write(adapter, INT_EN_CLR,
				  INT_BIT_DMA_TX_(tx->channel_number));
	}

	if (int_sts & INT_BIT_DMA_TX_(tx->channel_number)) {
		u32 ioc_bit = DMAC_INT_BIT_TX_IOC_(tx->channel_number);
		u32 dmac_int_sts;
		u32 dmac_int_en;

		/* without STATUS_READ/ENABLE_CHECK, assume the IOC event
		 * is pending/enabled instead of reading the DMAC regs
		 */
		if (flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ)
			dmac_int_sts = lan743x_csr_read(adapter, DMAC_INT_STS);
		else
			dmac_int_sts = ioc_bit;
		if (flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK)
			dmac_int_en = lan743x_csr_read(adapter,
						       DMAC_INT_EN_SET);
		else
			dmac_int_en = ioc_bit;

		dmac_int_en &= ioc_bit;
		dmac_int_sts &= dmac_int_en;
		if (dmac_int_sts & ioc_bit) {
			napi_schedule(&tx->napi);
			enable_flag = false;/* poll func will enable later */
		}
	}

	if (enable_flag)
		/* enable isr */
		lan743x_csr_write(adapter, INT_EN_SET,
				  INT_BIT_DMA_TX_(tx->channel_number));
}

/* RX channel interrupt handler: mirror of lan743x_tx_isr() keyed on the
 * RX frame-received DMAC event.
 */
static void lan743x_rx_isr(void *context, u32 int_sts, u32 flags)
{
	struct lan743x_rx *rx = context;
	struct lan743x_adapter *adapter = rx->adapter;
	bool enable_flag = true;

	if (flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR) {
		lan743x_csr_write(adapter, INT_EN_CLR,
				  INT_BIT_DMA_RX_(rx->channel_number));
	}

	if (int_sts & INT_BIT_DMA_RX_(rx->channel_number)) {
		u32 rx_frame_bit = DMAC_INT_BIT_RXFRM_(rx->channel_number);
		u32 dmac_int_sts;
		u32 dmac_int_en;

		if (flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ)
			dmac_int_sts = lan743x_csr_read(adapter, DMAC_INT_STS);
		else
			dmac_int_sts = rx_frame_bit;
		if (flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK)
			dmac_int_en = lan743x_csr_read(adapter,
						       DMAC_INT_EN_SET);
		else
			dmac_int_en = rx_frame_bit;

		dmac_int_en &= rx_frame_bit;
		dmac_int_sts &= dmac_int_en;
		if (dmac_int_sts & rx_frame_bit) {
			napi_schedule(&rx->napi);
			enable_flag = false;/* poll funct will enable later */
		}
	}

	if (enable_flag) {
		/* enable isr */
		lan743x_csr_write(adapter, INT_EN_SET,
				  INT_BIT_DMA_RX_(rx->channel_number));
	}
}

/* Dispatcher for the shared vector 0: fan out RX/TX channel bits and
 * the "other" sources (software test, 1588) to their handlers, then
 * mask any leftover unhandled bits.
 */
static void lan743x_intr_shared_isr(void *context, u32 int_sts, u32 flags)
{
	struct lan743x_adapter *adapter = context;
	unsigned int channel;

	if (int_sts & INT_BIT_ALL_RX_) {
		for (channel = 0; channel < LAN743X_USED_RX_CHANNELS;
			channel++) {
			u32 int_bit = INT_BIT_DMA_RX_(channel);

			if (int_sts & int_bit) {
				lan743x_rx_isr(&adapter->rx[channel],
					       int_bit, flags);
				int_sts &= ~int_bit;
			}
		}
	}
	if (int_sts & INT_BIT_ALL_TX_) {
		for (channel = 0; channel < adapter->used_tx_channels;
			channel++) {
			u32 int_bit = INT_BIT_DMA_TX_(channel);

			if (int_sts & int_bit) {
				lan743x_tx_isr(&adapter->tx[channel],
					       int_bit, flags);
				int_sts &= ~int_bit;
			}
		}
	}
	if (int_sts & INT_BIT_ALL_OTHER_) {
		if (int_sts & INT_BIT_SW_GP_) {
			lan743x_intr_software_isr(adapter);
			int_sts &= ~INT_BIT_SW_GP_;
		}
		if (int_sts & INT_BIT_1588_) {
			lan743x_ptp_isr(adapter);
			int_sts &= ~INT_BIT_1588_;
		}
	}
	/* anything still set has no handler - mask it */
	if (int_sts)
		lan743x_csr_write(adapter, INT_EN_CLR, int_sts);
}

/* Top-level IRQ entry point for one vector.  Gathers status per the
 * vector's flag policy (read, read-to-clear, or implied from the mask),
 * optionally gates the vector/master enables around the call, and
 * invokes the vector's registered handler.
 */
static irqreturn_t lan743x_intr_entry_isr(int irq, void *ptr)
{
	struct lan743x_vector *vector = ptr;
	struct lan743x_adapter *adapter = vector->adapter;
	irqreturn_t result = IRQ_NONE;
	u32 int_enables;
	u32 int_sts;

	if (vector->flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ) {
		int_sts = lan743x_csr_read(adapter, INT_STS);
	} else if (vector->flags &
		   (LAN743X_VECTOR_FLAG_SOURCE_STATUS_R2C |
		   LAN743X_VECTOR_FLAG_SOURCE_ENABLE_R2C)) {
		int_sts = lan743x_csr_read(adapter, INT_STS_R2C);
	} else {
		/* use mask as implied status */
		int_sts = vector->int_mask | INT_BIT_MAS_;
	}

	if (!(int_sts & INT_BIT_MAS_))
		goto irq_done;

	if (vector->flags & LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_CLEAR)
		/* disable vector interrupt */
		lan743x_csr_write(adapter,
				  INT_VEC_EN_CLR,
				  INT_VEC_EN_(vector->vector_index));

	if (vector->flags & LAN743X_VECTOR_FLAG_MASTER_ENABLE_CLEAR)
		/* disable master interrupt */
		lan743x_csr_write(adapter, INT_EN_CLR, INT_BIT_MAS_);

	if (vector->flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK) {
		int_enables = lan743x_csr_read(adapter, INT_EN_SET);
	} else {
		/* use vector mask as implied enable mask */
		int_enables = vector->int_mask;
	}

	int_sts &= int_enables;
	int_sts &= vector->int_mask;
	if (int_sts) {
		if (vector->handler) {
			vector->handler(vector->context,
					int_sts, vector->flags);
		} else {
			/* disable interrupts on this vector */
			lan743x_csr_write(adapter, INT_EN_CLR,
					  vector->int_mask);
		}
		result = IRQ_HANDLED;
	}

	if (vector->flags & LAN743X_VECTOR_FLAG_MASTER_ENABLE_SET)
		/* enable master interrupt */
		lan743x_csr_write(adapter, INT_EN_SET, INT_BIT_MAS_);

	if (vector->flags & LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_SET)
		/* enable vector interrupt */
		lan743x_csr_write(adapter,
				  INT_VEC_EN_SET,
				  INT_VEC_EN_(vector->vector_index));
irq_done:
	return result;
}

/* Fire the software general-purpose interrupt and wait (up to 200 ms)
 * for lan743x_intr_software_isr() to observe it.  Returns 0 when the
 * interrupt was delivered, -ENODEV otherwise.
 */
static int lan743x_intr_test_isr(struct lan743x_adapter *adapter)
{
	struct lan743x_intr *intr = &adapter->intr;
	int ret;

	intr->software_isr_flag = false;

	/* enable and activate test interrupt */
	lan743x_csr_write(adapter, INT_EN_SET, INT_BIT_SW_GP_);
	lan743x_csr_write(adapter, INT_SET, INT_BIT_SW_GP_);

	ret = wait_event_timeout(intr->software_isr_wq,
				 intr->software_isr_flag,
				 msecs_to_jiffies(200));

	/* disable test interrupt */
	lan743x_csr_write(adapter, INT_EN_CLR, INT_BIT_SW_GP_);

	return ret > 0 ? 0 : -ENODEV;
}

/* Populate vector_list[vector_index] and request its IRQ.  On failure
 * the vector bookkeeping is rolled back.  Returns request_irq() result.
 */
static int lan743x_intr_register_isr(struct lan743x_adapter *adapter,
				     int vector_index, u32 flags,
				     u32 int_mask,
				     lan743x_vector_handler handler,
				     void *context)
{
	struct lan743x_vector *vector = &adapter->intr.vector_list
					[vector_index];
	int ret;

	vector->adapter = adapter;
	vector->flags = flags;
	vector->vector_index = vector_index;
	vector->int_mask = int_mask;
	vector->handler = handler;
	vector->context = context;

	ret = request_irq(vector->irq,
			  lan743x_intr_entry_isr,
			  (flags & LAN743X_VECTOR_FLAG_IRQ_SHARED) ?
			  IRQF_SHARED : 0, DRIVER_NAME, vector);
	if (ret) {
		vector->handler = NULL;
		vector->context = NULL;
		vector->int_mask = 0;
		vector->flags = 0;
	}
	return ret;
}

/* Free the IRQ for vector_index and clear its bookkeeping. */
static void lan743x_intr_unregister_isr(struct lan743x_adapter *adapter,
					int vector_index)
{
	struct lan743x_vector *vector = &adapter->intr.vector_list
					[vector_index];

	free_irq(vector->irq, vector);
	vector->handler = NULL;
	vector->context = NULL;
	vector->int_mask = 0;
	vector->flags = 0;
}

/* Return the flags of the first vector whose mask overlaps @int_mask,
 * or 0 when no vector services those bits.
 */
static u32 lan743x_intr_get_vector_flags(struct lan743x_adapter *adapter,
					 u32 int_mask)
{
	int index;

	for (index = 0; index < adapter->max_vector_count; index++) {
		if (adapter->intr.vector_list[index].int_mask & int_mask)
			return adapter->intr.vector_list[index].flags;
	}
	return 0;
}

/* Tear down interrupt handling: mask everything, free every requested
 * IRQ, then disable MSI/MSI-X as appropriate.
 */
static void lan743x_intr_close(struct lan743x_adapter *adapter)
{
	struct lan743x_intr *intr = &adapter->intr;
	int index = 0;

	lan743x_csr_write(adapter, INT_EN_CLR, INT_BIT_MAS_);
	/* PCI11x1x exposes 16 vector enables, older parts 8 */
	if (adapter->is_pci11x1x)
		lan743x_csr_write(adapter, INT_VEC_EN_CLR, 0x0000FFFF);
	else
		lan743x_csr_write(adapter, INT_VEC_EN_CLR, 0x000000FF);

	for (index = 0; index < intr->number_of_vectors; index++) {
		if (intr->flags & INTR_FLAG_IRQ_REQUESTED(index)) {
			lan743x_intr_unregister_isr(adapter, index);
			intr->flags &= ~INTR_FLAG_IRQ_REQUESTED(index);
		}
	}

	if (intr->flags & INTR_FLAG_MSI_ENABLED) {
		pci_disable_msi(adapter->pdev);
		intr->flags &= ~INTR_FLAG_MSI_ENABLED;
	}

	if (intr->flags & INTR_FLAG_MSIX_ENABLED) {
		pci_disable_msix(adapter->pdev);
		intr->flags &= ~INTR_FLAG_MSIX_ENABLED;
	}
}

/* Bring up interrupt handling: try MSI-X, then MSI, then legacy INTx;
 * register the shared vector 0 handler, verify delivery with the
 * software test interrupt, and when enough vectors are available give
 * each TX/RX channel its own vector.  Returns 0 or a negative errno
 * (everything is rolled back via lan743x_intr_close() on failure).
 */
static int lan743x_intr_open(struct lan743x_adapter *adapter)
{
	struct msix_entry msix_entries[PCI11X1X_MAX_VECTOR_COUNT];
	struct lan743x_intr *intr = &adapter->intr;
	unsigned int used_tx_channels;
	u32 int_vec_en_auto_clr = 0;
	u8 max_vector_count;
	u32 int_vec_map0 = 0;
	u32 int_vec_map1 = 0;
	int ret = -ENODEV;
	int index = 0;
	u32 flags = 0;

	intr->number_of_vectors = 0;

	/* Try to set up MSIX interrupts */
	max_vector_count = adapter->max_vector_count;
	memset(&msix_entries[0], 0,
	       sizeof(struct msix_entry) * max_vector_count);
	for (index = 0; index < max_vector_count; index++)
		msix_entries[index].entry = index;
	used_tx_channels = adapter->used_tx_channels;
	ret = pci_enable_msix_range(adapter->pdev,
				    msix_entries, 1,
				    1 + used_tx_channels +
				    LAN743X_USED_RX_CHANNELS);

	if (ret > 0) {
		intr->flags |= INTR_FLAG_MSIX_ENABLED;
		intr->number_of_vectors = ret;
		intr->using_vectors = true;
		for (index = 0; index < intr->number_of_vectors; index++)
			intr->vector_list[index].irq = msix_entries
						       [index].vector;
		netif_info(adapter, ifup, adapter->netdev,
			   "using MSIX interrupts, number of vectors = %d\n",
			   intr->number_of_vectors);
	}

	/* If MSIX failed try to setup using MSI interrupts */
	if (!intr->number_of_vectors) {
		if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0)) {
			if (!pci_enable_msi(adapter->pdev)) {
				intr->flags |= INTR_FLAG_MSI_ENABLED;
				intr->number_of_vectors = 1;
				intr->using_vectors = true;
				intr->vector_list[0].irq =
					adapter->pdev->irq;
				netif_info(adapter, ifup, adapter->netdev,
					   "using MSI interrupts, number of vectors = %d\n",
					   intr->number_of_vectors);
			}
		}
	}

	/* If MSIX, and MSI failed, setup using legacy interrupt */
	if (!intr->number_of_vectors) {
		intr->number_of_vectors = 1;
		intr->using_vectors = false;
		intr->vector_list[0].irq = intr->irq;
		netif_info(adapter, ifup, adapter->netdev,
			   "using legacy interrupts\n");
	}

	/* At this point we must have at least one irq */
	lan743x_csr_write(adapter, INT_VEC_EN_CLR, 0xFFFFFFFF);

	/* map all interrupts to vector 0 */
	lan743x_csr_write(adapter, INT_VEC_MAP0, 0x00000000);
	lan743x_csr_write(adapter, INT_VEC_MAP1, 0x00000000);
	lan743x_csr_write(adapter, INT_VEC_MAP2, 0x00000000);
	flags = LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ |
		LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C |
		LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK |
		LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR;

	if (intr->using_vectors) {
		flags |= LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_CLEAR |
			 LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_SET;
	} else {
		flags |= LAN743X_VECTOR_FLAG_MASTER_ENABLE_CLEAR |
			 LAN743X_VECTOR_FLAG_MASTER_ENABLE_SET |
			 LAN743X_VECTOR_FLAG_IRQ_SHARED;
	}

	/* prefer hardware read-to-clear / auto set-clear when available */
	if (adapter->csr.flags & LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR) {
		flags &= ~LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ;
		flags &= ~LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C;
		flags &= ~LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR;
		flags &= ~LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK;
		flags |= LAN743X_VECTOR_FLAG_SOURCE_STATUS_R2C;
		flags |= LAN743X_VECTOR_FLAG_SOURCE_ENABLE_R2C;
	}

	init_waitqueue_head(&intr->software_isr_wq);

	ret = lan743x_intr_register_isr(adapter, 0, flags,
					INT_BIT_ALL_RX_ | INT_BIT_ALL_TX_ |
					INT_BIT_ALL_OTHER_,
					lan743x_intr_shared_isr, adapter);
	if (ret)
		goto clean_up;
	intr->flags |= INTR_FLAG_IRQ_REQUESTED(0);

	if (intr->using_vectors)
		lan743x_csr_write(adapter, INT_VEC_EN_SET,
				  INT_VEC_EN_(0));

	/* interrupt moderation is not available on A0 silicon */
	if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0)) {
		lan743x_csr_write(adapter, INT_MOD_CFG0, LAN743X_INT_MOD);
		lan743x_csr_write(adapter, INT_MOD_CFG1, LAN743X_INT_MOD);
		lan743x_csr_write(adapter, INT_MOD_CFG2, LAN743X_INT_MOD);
		lan743x_csr_write(adapter, INT_MOD_CFG3, LAN743X_INT_MOD);
		lan743x_csr_write(adapter, INT_MOD_CFG4, LAN743X_INT_MOD);
		lan743x_csr_write(adapter, INT_MOD_CFG5, LAN743X_INT_MOD);
		lan743x_csr_write(adapter, INT_MOD_CFG6, LAN743X_INT_MOD);
		lan743x_csr_write(adapter, INT_MOD_CFG7, LAN743X_INT_MOD);
		if (adapter->is_pci11x1x) {
			lan743x_csr_write(adapter, INT_MOD_CFG8, LAN743X_INT_MOD);
			lan743x_csr_write(adapter, INT_MOD_CFG9, LAN743X_INT_MOD);
			lan743x_csr_write(adapter, INT_MOD_MAP0, 0x00007654);
			lan743x_csr_write(adapter, INT_MOD_MAP1, 0x00003210);
		} else {
			lan743x_csr_write(adapter, INT_MOD_MAP0, 0x00005432);
			lan743x_csr_write(adapter, INT_MOD_MAP1, 0x00000001);
		}
		lan743x_csr_write(adapter, INT_MOD_MAP2, 0x00FFFFFF);
	}

	/* enable interrupts */
	lan743x_csr_write(adapter, INT_EN_SET, INT_BIT_MAS_);
	ret = lan743x_intr_test_isr(adapter);
	if (ret)
		goto clean_up;

	/* dedicate vectors 1..N to TX channels when available */
	if (intr->number_of_vectors > 1) {
		int number_of_tx_vectors = intr->number_of_vectors - 1;

		if (number_of_tx_vectors > used_tx_channels)
			number_of_tx_vectors = used_tx_channels;
		flags = LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ |
			LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C |
			LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK |
			LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR |
			LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_CLEAR |
			LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_SET;

		if (adapter->csr.flags &
		    LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR) {
			flags = LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET |
				LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET |
				LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_CLEAR |
				LAN743X_VECTOR_FLAG_SOURCE_STATUS_AUTO_CLEAR;
		}

		for (index = 0; index < number_of_tx_vectors; index++) {
			u32 int_bit = INT_BIT_DMA_TX_(index);
			int vector = index + 1;

			/* map TX interrupt to vector */
			int_vec_map1 |= INT_VEC_MAP1_TX_VEC_(index, vector);
			lan743x_csr_write(adapter, INT_VEC_MAP1, int_vec_map1);

			/* Remove TX interrupt from shared mask */
			intr->vector_list[0].int_mask &= ~int_bit;
			ret = lan743x_intr_register_isr(adapter, vector, flags,
							int_bit, lan743x_tx_isr,
							&adapter->tx[index]);
			if (ret)
				goto clean_up;
			intr->flags |= INTR_FLAG_IRQ_REQUESTED(vector);
			if (!(flags &
			    LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET))
				lan743x_csr_write(adapter, INT_VEC_EN_SET,
						  INT_VEC_EN_(vector));
		}
	}
	/* remaining vectors (after vector 0 and TX) go to RX channels */
	if ((intr->number_of_vectors - used_tx_channels) > 1) {
		int number_of_rx_vectors = intr->number_of_vectors -
					   used_tx_channels - 1;

		if (number_of_rx_vectors > LAN743X_USED_RX_CHANNELS)
			number_of_rx_vectors = LAN743X_USED_RX_CHANNELS;

		flags = LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ |
			LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C |
			LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK |
			LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR |
			LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_CLEAR |
			LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_SET;

		if (adapter->csr.flags &
		    LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR) {
			flags = LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_CLEAR |
				LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET |
				LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET |
				LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_CLEAR |
				LAN743X_VECTOR_FLAG_SOURCE_STATUS_AUTO_CLEAR;
		}
		for (index = 0; index < number_of_rx_vectors; index++) {
			int vector = index + 1 + used_tx_channels;
			u32 int_bit = INT_BIT_DMA_RX_(index);

			/* map RX interrupt to vector */
			int_vec_map0 |= INT_VEC_MAP0_RX_VEC_(index, vector);
			lan743x_csr_write(adapter, INT_VEC_MAP0, int_vec_map0);
			if (flags &
			    LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_CLEAR) {
				int_vec_en_auto_clr |= INT_VEC_EN_(vector);
				lan743x_csr_write(adapter, INT_VEC_EN_AUTO_CLR,
						  int_vec_en_auto_clr);
			}

			/* Remove RX interrupt from shared mask */
			intr->vector_list[0].int_mask &= ~int_bit;
			ret = lan743x_intr_register_isr(adapter, vector, flags,
							int_bit, lan743x_rx_isr,
							&adapter->rx[index]);
			if (ret)
				goto clean_up;
			intr->flags |= INTR_FLAG_IRQ_REQUESTED(vector);

			lan743x_csr_write(adapter, INT_VEC_EN_SET,
					  INT_VEC_EN_(vector));
		}
	}
	return 0;

clean_up:
	lan743x_intr_close(adapter);
	return ret;
}

/* Write @length words from @buf into the data-port RAM selected by
 * @select, starting at word address @addr.  Returns 0 or -EIO when the
 * data port does not become ready.
 */
static int lan743x_dp_write(struct lan743x_adapter *adapter,
			    u32 select, u32 addr, u32 length, u32 *buf)
{
	u32 dp_sel;
	int i;

	if (lan743x_csr_wait_for_bit(adapter, DP_SEL, DP_SEL_DPRDY_,
				     1, 40, 100, 100))
		return -EIO;
	dp_sel = lan743x_csr_read(adapter, DP_SEL);
	dp_sel &= ~DP_SEL_MASK_;
	dp_sel |= select;
	lan743x_csr_write(adapter, DP_SEL, dp_sel);

	for (i = 0; i < length; i++) {
		lan743x_csr_write(adapter, DP_ADDR, addr + i);
		lan743x_csr_write(adapter, DP_DATA_0, buf[i]);
		lan743x_csr_write(adapter, DP_CMD, DP_CMD_WRITE_);
		if (lan743x_csr_wait_for_bit(adapter, DP_SEL, DP_SEL_DPRDY_,
					     1, 40, 100, 100))
			return -EIO;
	}

	return 0;
}

/* Compose a MAC_MII_ACC command word for a clause-22 access to PHY @id,
 * register @index, in the given direction, with the BUSY bit set.
 */
static u32 lan743x_mac_mii_access(u16 id, u16 index, int read)
{
	u32 ret;

	ret = (id << MAC_MII_ACC_PHY_ADDR_SHIFT_) &
	      MAC_MII_ACC_PHY_ADDR_MASK_;
	ret |= (index << MAC_MII_ACC_MIIRINDA_SHIFT_) &
	       MAC_MII_ACC_MIIRINDA_MASK_;

	if (read)
		ret |= MAC_MII_ACC_MII_READ_;
	else
		ret |= MAC_MII_ACC_MII_WRITE_;
	ret |= MAC_MII_ACC_MII_BUSY_;

	return ret;
}

/* Poll MAC_MII_ACC until the BUSY bit clears (up to 1 s). */
static int lan743x_mac_mii_wait_till_not_busy(struct lan743x_adapter *adapter)
{
	u32 data;

	return readx_poll_timeout(LAN743X_CSR_READ_OP, MAC_MII_ACC, data,
				  !(data & MAC_MII_ACC_MII_BUSY_), 0, 1000000);
}

/* mii_bus clause-22 read: returns the 16-bit register value or a
 * negative errno.
 */
static int lan743x_mdiobus_read_c22(struct mii_bus *bus, int phy_id, int index)
{
	struct lan743x_adapter *adapter = bus->priv;
	u32 val, mii_access;
	int ret;

	/* confirm MII not busy */
	ret = lan743x_mac_mii_wait_till_not_busy(adapter);
	if (ret < 0)
		return ret;

	/* set the address, index & direction (read from PHY) */
	mii_access = lan743x_mac_mii_access(phy_id, index, MAC_MII_READ);
	lan743x_csr_write(adapter, MAC_MII_ACC, mii_access);
	ret = lan743x_mac_mii_wait_till_not_busy(adapter);
	if (ret < 0)
		return ret;

	val = lan743x_csr_read(adapter, MAC_MII_DATA);
	return (int)(val & 0xFFFF);
}

/* mii_bus clause-22 write: returns 0 or a negative errno. */
static int lan743x_mdiobus_write_c22(struct mii_bus *bus,
				     int phy_id, int index, u16 regval)
{
	struct lan743x_adapter *adapter = bus->priv;
	u32 val, mii_access;
	int ret;

	/* confirm MII not busy */
	ret = lan743x_mac_mii_wait_till_not_busy(adapter);
	if (ret < 0)
		return ret;
	val = (u32)regval;
	lan743x_csr_write(adapter, MAC_MII_DATA, val);

	/* set the address, index & direction (write to PHY) */
	mii_access = lan743x_mac_mii_access(phy_id, index, MAC_MII_WRITE);
	lan743x_csr_write(adapter, MAC_MII_ACC, mii_access);
	ret = lan743x_mac_mii_wait_till_not_busy(adapter);
	return ret;
}

/* Compose a MAC_MII_ACC command word for a clause-45 (MMD) operation
 * (@op is one of the MMD_ACCESS_* opcodes) with BUSY and CL45 set.
 */
static u32 lan743x_mac_mmd_access(int id, int dev_addr, int op)
{
	u32 ret;

	ret = (id << MAC_MII_ACC_PHY_ADDR_SHIFT_) &
	      MAC_MII_ACC_PHY_ADDR_MASK_;
	ret |= (dev_addr << MAC_MII_ACC_MIIMMD_SHIFT_) &
	       MAC_MII_ACC_MIIMMD_MASK_;
	if (op == MMD_ACCESS_WRITE)
		ret |= MAC_MII_ACC_MIICMD_WRITE_;
	else if (op == MMD_ACCESS_READ)
		ret |= MAC_MII_ACC_MIICMD_READ_;
	else if (op == MMD_ACCESS_READ_INC)
		ret |= MAC_MII_ACC_MIICMD_READ_INC_;
	else
		ret |= MAC_MII_ACC_MIICMD_ADDR_;
	ret |= (MAC_MII_ACC_MII_BUSY_ | MAC_MII_ACC_MIICL45_);

	return ret;
}

/* mii_bus clause-45 read: two-step MMD sequence (load address, then
 * read).  Returns the 16-bit value or a negative errno.
 */
static int lan743x_mdiobus_read_c45(struct mii_bus *bus, int phy_id,
				    int dev_addr, int index)
{
	struct lan743x_adapter *adapter = bus->priv;
	u32 mmd_access;
	int ret;

	/* confirm MII not busy */
	ret = lan743x_mac_mii_wait_till_not_busy(adapter);
	if (ret < 0)
		return ret;

	/* Load Register Address */
	lan743x_csr_write(adapter, MAC_MII_DATA, index);
	mmd_access = lan743x_mac_mmd_access(phy_id, dev_addr,
					    MMD_ACCESS_ADDRESS);
	lan743x_csr_write(adapter, MAC_MII_ACC, mmd_access);
	ret = lan743x_mac_mii_wait_till_not_busy(adapter);
	if (ret < 0)
		return ret;

	/* Read Data */
	mmd_access = lan743x_mac_mmd_access(phy_id, dev_addr,
					    MMD_ACCESS_READ);
	lan743x_csr_write(adapter, MAC_MII_ACC, mmd_access);
	ret = lan743x_mac_mii_wait_till_not_busy(adapter);
	if (ret < 0)
		return ret;

	ret = lan743x_csr_read(adapter, MAC_MII_DATA);
	return (int)(ret & 0xFFFF);
}

/* mii_bus clause-45 write: load address, then write data.  Returns 0 or
 * a negative errno.
 */
static int lan743x_mdiobus_write_c45(struct mii_bus *bus, int phy_id,
				     int dev_addr, int index, u16 regval)
{
	struct lan743x_adapter *adapter = bus->priv;
	u32 mmd_access;
	int ret;

	/* confirm MII not busy */
	ret = lan743x_mac_mii_wait_till_not_busy(adapter);
	if (ret < 0)
		return ret;

	/* Load Register Address */
	lan743x_csr_write(adapter, MAC_MII_DATA, (u32)index);
	mmd_access = lan743x_mac_mmd_access(phy_id, dev_addr,
					    MMD_ACCESS_ADDRESS);
	lan743x_csr_write(adapter, MAC_MII_ACC, mmd_access);
	ret = lan743x_mac_mii_wait_till_not_busy(adapter);
	if (ret < 0)
		return ret;

	/* Write Data */
	lan743x_csr_write(adapter, MAC_MII_DATA, (u32)regval);
	mmd_access = lan743x_mac_mmd_access(phy_id, dev_addr,
					    MMD_ACCESS_WRITE);
	lan743x_csr_write(adapter, MAC_MII_ACC, mmd_access);

	return lan743x_mac_mii_wait_till_not_busy(adapter);
}

/* Poll SGMII_ACC until the busy bit clears (up to 1 s), logging on
 * timeout.  Returns 0 or a negative errno.
 */
static int lan743x_sgmii_wait_till_not_busy(struct lan743x_adapter *adapter)
{
	u32 data;
	int ret;

	ret = readx_poll_timeout(LAN743X_CSR_READ_OP, SGMII_ACC, data,
				 !(data & SGMII_ACC_SGMII_BZY_), 100, 1000000);
	if (ret < 0)
		netif_err(adapter, drv, adapter->netdev,
			  "%s: error %d sgmii wait timeout\n", __func__, ret);

	return ret;
}

/* Read a 16-bit SGMII PCS register at (@mmd, @addr) under
 * sgmii_rw_lock.  Returns the value or a negative errno.
 */
int lan743x_sgmii_read(struct lan743x_adapter *adapter, u8 mmd, u16 addr)
{
	u32 mmd_access;
	int ret;
	u32 val;

	if (mmd > 31) {
		netif_err(adapter, probe, adapter->netdev,
			  "%s mmd should <= 31\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&adapter->sgmii_rw_lock);
	/* Load Register Address */
	mmd_access = mmd << SGMII_ACC_SGMII_MMD_SHIFT_;
	mmd_access |= (addr | SGMII_ACC_SGMII_BZY_);
	lan743x_csr_write(adapter, SGMII_ACC, mmd_access);
	ret = lan743x_sgmii_wait_till_not_busy(adapter);
	if (ret < 0)
		goto sgmii_unlock;

	val = lan743x_csr_read(adapter, SGMII_DATA);
	ret = (int)(val & SGMII_DATA_MASK_);

sgmii_unlock:
	mutex_unlock(&adapter->sgmii_rw_lock);

	return ret;
}

/* Write @val to the SGMII PCS register at (@mmd, @addr) under
 * sgmii_rw_lock.  Returns 0 or a negative errno.
 */
static int lan743x_sgmii_write(struct lan743x_adapter *adapter,
			       u8 mmd, u16 addr, u16 val)
{
	u32 mmd_access;
	int ret;

	if (mmd > 31) {
		netif_err(adapter, probe, adapter->netdev,
			  "%s mmd should <= 31\n", __func__);
		return -EINVAL;
	}
	mutex_lock(&adapter->sgmii_rw_lock);
	/* Load Register Data */
	lan743x_csr_write(adapter, SGMII_DATA, (u32)(val & SGMII_DATA_MASK_));
	/* Load Register Address */
	mmd_access = mmd << SGMII_ACC_SGMII_MMD_SHIFT_;
	mmd_access |= (addr | SGMII_ACC_SGMII_BZY_ | SGMII_ACC_SGMII_WR_);
	lan743x_csr_write(adapter, SGMII_ACC, mmd_access);
	ret = lan743x_sgmii_wait_till_not_busy(adapter);
	mutex_unlock(&adapter->sgmii_rw_lock);

	return ret;
}

/* Program the PCS MPLL multiplier and misc clock divider for the
 * requested baud rate (1.25 Gbps for SGMII, otherwise the 2.5G rate).
 * Returns 0 or a negative errno.
 */
static int lan743x_sgmii_mpll_set(struct lan743x_adapter *adapter,
				  u16 baud)
{
	int mpllctrl0;
	int mpllctrl1;
	int miscctrl1;
	int ret;

	mpllctrl0 = lan743x_sgmii_read(adapter, MDIO_MMD_VEND2,
				       VR_MII_GEN2_4_MPLL_CTRL0);
	if (mpllctrl0 < 0)
		return mpllctrl0;

	mpllctrl0 &= ~VR_MII_MPLL_CTRL0_USE_REFCLK_PAD_;
	if (baud == VR_MII_BAUD_RATE_1P25GBPS) {
		mpllctrl1 = VR_MII_MPLL_MULTIPLIER_100;
		/* mpll_baud_clk/4 */
		miscctrl1 = 0xA;
	} else {
		mpllctrl1 = VR_MII_MPLL_MULTIPLIER_125;
		/* mpll_baud_clk/2 */
		miscctrl1 = 0x5;
	}

	ret = lan743x_sgmii_write(adapter, MDIO_MMD_VEND2,
				  VR_MII_GEN2_4_MPLL_CTRL0, mpllctrl0);
	if (ret < 0)
		return ret;

	ret = lan743x_sgmii_write(adapter, MDIO_MMD_VEND2,
				  VR_MII_GEN2_4_MPLL_CTRL1, mpllctrl1);
	if (ret < 0)
		return ret;

	return lan743x_sgmii_write(adapter, MDIO_MMD_VEND2,
				   VR_MII_GEN2_4_MISC_CTRL1, miscctrl1);
}

/* Select between 2.5G (3.125 Gbaud) and 1G (1.25 Gbaud) PCS clocking. */
static int lan743x_sgmii_2_5G_mode_set(struct lan743x_adapter *adapter,
				       bool enable)
{
	if (enable)
		return lan743x_sgmii_mpll_set(adapter,
					      VR_MII_BAUD_RATE_3P125GBPS);
	else
		return lan743x_sgmii_mpll_set(adapter,
					      VR_MII_BAUD_RATE_1P25GBPS);
}

/* Report via *status whether the PCS MPLL is currently configured for
 * 2.5G operation.  Returns 0 or a negative errno.
 */
static int lan743x_is_sgmii_2_5G_mode(struct lan743x_adapter *adapter,
				      bool *status)
{
	int ret;

	ret = lan743x_sgmii_read(adapter, MDIO_MMD_VEND2,
				 VR_MII_GEN2_4_MPLL_CTRL1);
	if (ret < 0)
		return ret;

	if (ret == VR_MII_MPLL_MULTIPLIER_125 ||
	    ret == VR_MII_MPLL_MULTIPLIER_50)
		*status = true;
	else
		*status = false;

	return 0;
}

/* Reconfigure PCS clocking and auto-negotiation for the link speed/duplex
 * cached in adapter->sgmii_lsd: 2.5G disables CL37 AN and overrides the
 * link timer; other speeds enable AN with MAC auto speed switching.
 * Returns 0 or a negative errno.
 */
static int lan743x_sgmii_aneg_update(struct lan743x_adapter *adapter)
{
	enum lan743x_sgmii_lsd lsd = adapter->sgmii_lsd;
	int mii_ctrl;
	int dgt_ctrl;
	int an_ctrl;
	int ret;

	if (lsd == LINK_2500_MASTER || lsd == LINK_2500_SLAVE)
		/* Switch to 2.5 Gbps */
		ret = lan743x_sgmii_2_5G_mode_set(adapter, true);
	else
		/* Switch to 10/100/1000 Mbps clock */
		ret = lan743x_sgmii_2_5G_mode_set(adapter, false);
	if (ret < 0)
		return ret;

	/* Enable SGMII Auto NEG */
	mii_ctrl = lan743x_sgmii_read(adapter, MDIO_MMD_VEND2, MII_BMCR);
	if (mii_ctrl < 0)
		return mii_ctrl;

	an_ctrl = lan743x_sgmii_read(adapter, MDIO_MMD_VEND2, VR_MII_AN_CTRL);
	if (an_ctrl < 0)
		return an_ctrl;

	dgt_ctrl = lan743x_sgmii_read(adapter, MDIO_MMD_VEND2,
				      VR_MII_DIG_CTRL1);
	if (dgt_ctrl < 0)
		return dgt_ctrl;

	if (lsd == LINK_2500_MASTER || lsd == LINK_2500_SLAVE) {
		mii_ctrl &= ~(BMCR_ANENABLE | BMCR_ANRESTART | BMCR_SPEED100);
		mii_ctrl |= BMCR_SPEED1000;
		dgt_ctrl |= VR_MII_DIG_CTRL1_CL37_TMR_OVR_RIDE_;
		dgt_ctrl &= ~VR_MII_DIG_CTRL1_MAC_AUTO_SW_;
		/* In order for Auto-Negotiation to operate properly at
		 * 2.5 Gbps the 1.6ms link timer values must be adjusted
		 * The VR_MII_LINK_TIMER_CTRL Register must be set to
		 * 16'h7A1 and The CL37_TMR_OVR_RIDE bit of the
		 * VR_MII_DIG_CTRL1 Register set to 1
		 */
		ret = lan743x_sgmii_write(adapter, MDIO_MMD_VEND2,
					  VR_MII_LINK_TIMER_CTRL, 0x7A1);
		if (ret < 0)
			return ret;
	} else {
		mii_ctrl |= (BMCR_ANENABLE | BMCR_ANRESTART);
		an_ctrl &= ~VR_MII_AN_CTRL_SGMII_LINK_STS_;
		dgt_ctrl &= ~VR_MII_DIG_CTRL1_CL37_TMR_OVR_RIDE_;
		dgt_ctrl |= VR_MII_DIG_CTRL1_MAC_AUTO_SW_;
	}

	ret = lan743x_sgmii_write(adapter, MDIO_MMD_VEND2, MII_BMCR,
				  mii_ctrl);
	if (ret < 0)
		return ret;

	ret = lan743x_sgmii_write(adapter, MDIO_MMD_VEND2,
				  VR_MII_DIG_CTRL1, dgt_ctrl);
	if (ret < 0)
		return ret;

	return lan743x_sgmii_write(adapter, MDIO_MMD_VEND2,
				   VR_MII_AN_CTRL, an_ctrl);
}

/* Poll VR_MII_DIG_STS.PSEQ_STATE (up to ~10-20 ms in 1-2 ms steps)
 * until the PCS power sequencer reaches @state.
 * Returns 0 or -ETIMEDOUT.
 * NOTE(review): lan743x_sgmii_read() may return a negative errno which
 * is stored unchecked into the u32 dig_sts before masking - confirm
 * whether read failures should abort the poll instead.
 */
static int lan743x_pcs_seq_state(struct lan743x_adapter *adapter, u8 state)
{
	u8 wait_cnt = 0;
	u32 dig_sts;

	do {
		dig_sts = lan743x_sgmii_read(adapter, MDIO_MMD_VEND2,
					     VR_MII_DIG_STS);
		if (((dig_sts & VR_MII_DIG_STS_PSEQ_STATE_MASK_) >>
		     VR_MII_DIG_STS_PSEQ_STATE_POS_) == state)
			break;
		usleep_range(1000, 2000);
	} while (wait_cnt++ < 10);

	if (wait_cnt >= 10)
		return -ETIMEDOUT;

	return 0;
}

/* Configure the SGMII PCS for the PHY's negotiated speed/duplex, then
 * cycle the PCS through power down/up so the new settings take effect.
 * Returns 0 or a negative errno.
 */
static int lan743x_sgmii_config(struct lan743x_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct phy_device *phydev = netdev->phydev;
	enum lan743x_sgmii_lsd lsd = POWER_DOWN;
	int mii_ctl;
	bool status;
	int ret;

	/* derive the link speed descriptor from the PHY state */
	switch (phydev->speed) {
	case SPEED_2500:
		if (phydev->master_slave_state == MASTER_SLAVE_STATE_MASTER)
			lsd = LINK_2500_MASTER;
		else
			lsd = LINK_2500_SLAVE;
		break;
	case SPEED_1000:
		if (phydev->master_slave_state == MASTER_SLAVE_STATE_MASTER)
			lsd = LINK_1000_MASTER;
		else
			lsd = LINK_1000_SLAVE;
		break;
	case SPEED_100:
		if (phydev->duplex)
			lsd = LINK_100FD;
		else
			lsd = LINK_100HD;
		break;
	case SPEED_10:
		if (phydev->duplex)
			lsd = LINK_10FD;
		else
			lsd = LINK_10HD;
		break;
	default:
		netif_err(adapter, drv, adapter->netdev,
			  "Invalid speed %d\n", phydev->speed);
		return -EINVAL;
	}

	adapter->sgmii_lsd = lsd;
	ret = lan743x_sgmii_aneg_update(adapter);
	if (ret < 0) {
		netif_err(adapter, drv, adapter->netdev,
			  "error %d SGMII cfg failed\n", ret);
		return ret;
	}

	ret = lan743x_is_sgmii_2_5G_mode(adapter, &status);
	if (ret < 0) {
		netif_err(adapter, drv, adapter->netdev,
			  "erro %d SGMII get mode failed\n", ret);
		return ret;
	}

	if (status)
		netif_dbg(adapter, drv, adapter->netdev,
			  "SGMII 2.5G mode enable\n");
	else
		netif_dbg(adapter, drv, adapter->netdev,
			  "SGMII 1G mode enable\n");

	/* SGMII/1000/2500BASE-X PCS power down */
	mii_ctl = lan743x_sgmii_read(adapter, MDIO_MMD_VEND2, MII_BMCR);
	if (mii_ctl < 0)
		return mii_ctl;

	mii_ctl |= BMCR_PDOWN;
	ret = lan743x_sgmii_write(adapter, MDIO_MMD_VEND2, MII_BMCR, mii_ctl);
	if (ret < 0)
		return ret;

	ret = lan743x_pcs_seq_state(adapter, PCS_POWER_STATE_DOWN);
	if (ret < 0)
		return ret;

	/* SGMII/1000/2500BASE-X PCS power up */
	mii_ctl &= ~BMCR_PDOWN;
	ret = lan743x_sgmii_write(adapter, MDIO_MMD_VEND2, MII_BMCR, mii_ctl);
	if (ret < 0)
		return ret;

	ret = lan743x_pcs_seq_state(adapter, PCS_POWER_STATE_UP);
	if (ret < 0)
		return ret;

	return 0;
}

/* Program @addr into the MAC RX address registers (low 4 bytes in
 * MAC_RX_ADDRL, high 2 in MAC_RX_ADDRH) and cache it on the adapter.
 */
static void lan743x_mac_set_address(struct lan743x_adapter *adapter,
				    u8 *addr)
{
	u32 addr_lo, addr_hi;

	addr_lo = addr[0] |
		addr[1] << 8 |
		addr[2] << 16 |
		addr[3] << 24;
	addr_hi = addr[4] |
		addr[5] << 8;
	lan743x_csr_write(adapter, MAC_RX_ADDRL, addr_lo);
	lan743x_csr_write(adapter, MAC_RX_ADDRH, addr_hi);

	ether_addr_copy(adapter->mac_address, addr);
	netif_info(adapter, drv, adapter->netdev,
		   "MAC address set to %pM\n", addr);
}

static int lan743x_mac_init(struct lan743x_adapter *adapter)
{
	bool mac_address_valid = true;
	struct net_device *netdev;
	u32 mac_addr_hi = 0;
	u32 mac_addr_lo = 0;
	u32 data;

	netdev = adapter->netdev;

	/* disable auto duplex, and speed detection.
Phylib does that */ 1264 data = lan743x_csr_read(adapter, MAC_CR); 1265 data &= ~(MAC_CR_ADD_ | MAC_CR_ASD_); 1266 data |= MAC_CR_CNTR_RST_; 1267 lan743x_csr_write(adapter, MAC_CR, data); 1268 1269 if (!is_valid_ether_addr(adapter->mac_address)) { 1270 mac_addr_hi = lan743x_csr_read(adapter, MAC_RX_ADDRH); 1271 mac_addr_lo = lan743x_csr_read(adapter, MAC_RX_ADDRL); 1272 adapter->mac_address[0] = mac_addr_lo & 0xFF; 1273 adapter->mac_address[1] = (mac_addr_lo >> 8) & 0xFF; 1274 adapter->mac_address[2] = (mac_addr_lo >> 16) & 0xFF; 1275 adapter->mac_address[3] = (mac_addr_lo >> 24) & 0xFF; 1276 adapter->mac_address[4] = mac_addr_hi & 0xFF; 1277 adapter->mac_address[5] = (mac_addr_hi >> 8) & 0xFF; 1278 1279 if (((mac_addr_hi & 0x0000FFFF) == 0x0000FFFF) && 1280 mac_addr_lo == 0xFFFFFFFF) { 1281 mac_address_valid = false; 1282 } else if (!is_valid_ether_addr(adapter->mac_address)) { 1283 mac_address_valid = false; 1284 } 1285 1286 if (!mac_address_valid) 1287 eth_random_addr(adapter->mac_address); 1288 } 1289 lan743x_mac_set_address(adapter, adapter->mac_address); 1290 eth_hw_addr_set(netdev, adapter->mac_address); 1291 1292 return 0; 1293 } 1294 1295 static int lan743x_mac_open(struct lan743x_adapter *adapter) 1296 { 1297 u32 temp; 1298 1299 temp = lan743x_csr_read(adapter, MAC_RX); 1300 lan743x_csr_write(adapter, MAC_RX, temp | MAC_RX_RXEN_); 1301 temp = lan743x_csr_read(adapter, MAC_TX); 1302 lan743x_csr_write(adapter, MAC_TX, temp | MAC_TX_TXEN_); 1303 return 0; 1304 } 1305 1306 static void lan743x_mac_close(struct lan743x_adapter *adapter) 1307 { 1308 u32 temp; 1309 1310 temp = lan743x_csr_read(adapter, MAC_TX); 1311 temp &= ~MAC_TX_TXEN_; 1312 lan743x_csr_write(adapter, MAC_TX, temp); 1313 lan743x_csr_wait_for_bit(adapter, MAC_TX, MAC_TX_TXD_, 1314 1, 1000, 20000, 100); 1315 1316 temp = lan743x_csr_read(adapter, MAC_RX); 1317 temp &= ~MAC_RX_RXEN_; 1318 lan743x_csr_write(adapter, MAC_RX, temp); 1319 lan743x_csr_wait_for_bit(adapter, MAC_RX, MAC_RX_RXD_, 1320 1, 
1000, 20000, 100); 1321 } 1322 1323 void lan743x_mac_flow_ctrl_set_enables(struct lan743x_adapter *adapter, 1324 bool tx_enable, bool rx_enable) 1325 { 1326 u32 flow_setting = 0; 1327 1328 /* set maximum pause time because when fifo space frees 1329 * up a zero value pause frame will be sent to release the pause 1330 */ 1331 flow_setting = MAC_FLOW_CR_FCPT_MASK_; 1332 if (tx_enable) 1333 flow_setting |= MAC_FLOW_CR_TX_FCEN_; 1334 if (rx_enable) 1335 flow_setting |= MAC_FLOW_CR_RX_FCEN_; 1336 lan743x_csr_write(adapter, MAC_FLOW, flow_setting); 1337 } 1338 1339 static int lan743x_mac_set_mtu(struct lan743x_adapter *adapter, int new_mtu) 1340 { 1341 int enabled = 0; 1342 u32 mac_rx = 0; 1343 1344 mac_rx = lan743x_csr_read(adapter, MAC_RX); 1345 if (mac_rx & MAC_RX_RXEN_) { 1346 enabled = 1; 1347 if (mac_rx & MAC_RX_RXD_) { 1348 lan743x_csr_write(adapter, MAC_RX, mac_rx); 1349 mac_rx &= ~MAC_RX_RXD_; 1350 } 1351 mac_rx &= ~MAC_RX_RXEN_; 1352 lan743x_csr_write(adapter, MAC_RX, mac_rx); 1353 lan743x_csr_wait_for_bit(adapter, MAC_RX, MAC_RX_RXD_, 1354 1, 1000, 20000, 100); 1355 lan743x_csr_write(adapter, MAC_RX, mac_rx | MAC_RX_RXD_); 1356 } 1357 1358 mac_rx &= ~(MAC_RX_MAX_SIZE_MASK_); 1359 mac_rx |= (((new_mtu + ETH_HLEN + ETH_FCS_LEN) 1360 << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_); 1361 lan743x_csr_write(adapter, MAC_RX, mac_rx); 1362 1363 if (enabled) { 1364 mac_rx |= MAC_RX_RXEN_; 1365 lan743x_csr_write(adapter, MAC_RX, mac_rx); 1366 } 1367 return 0; 1368 } 1369 1370 /* PHY */ 1371 static int lan743x_phy_reset(struct lan743x_adapter *adapter) 1372 { 1373 u32 data; 1374 1375 /* Only called with in probe, and before mdiobus_register */ 1376 1377 data = lan743x_csr_read(adapter, PMT_CTL); 1378 data |= PMT_CTL_ETH_PHY_RST_; 1379 lan743x_csr_write(adapter, PMT_CTL, data); 1380 1381 return readx_poll_timeout(LAN743X_CSR_READ_OP, PMT_CTL, data, 1382 (!(data & PMT_CTL_ETH_PHY_RST_) && 1383 (data & PMT_CTL_READY_)), 1384 50000, 1000000); 1385 } 1386 1387 static void 
lan743x_phy_update_flowcontrol(struct lan743x_adapter *adapter, 1388 u16 local_adv, u16 remote_adv) 1389 { 1390 struct lan743x_phy *phy = &adapter->phy; 1391 u8 cap; 1392 1393 if (phy->fc_autoneg) 1394 cap = mii_resolve_flowctrl_fdx(local_adv, remote_adv); 1395 else 1396 cap = phy->fc_request_control; 1397 1398 lan743x_mac_flow_ctrl_set_enables(adapter, 1399 cap & FLOW_CTRL_TX, 1400 cap & FLOW_CTRL_RX); 1401 } 1402 1403 static int lan743x_phy_init(struct lan743x_adapter *adapter) 1404 { 1405 return lan743x_phy_reset(adapter); 1406 } 1407 1408 static void lan743x_phy_link_status_change(struct net_device *netdev) 1409 { 1410 struct lan743x_adapter *adapter = netdev_priv(netdev); 1411 struct phy_device *phydev = netdev->phydev; 1412 u32 data; 1413 1414 phy_print_status(phydev); 1415 if (phydev->state == PHY_RUNNING) { 1416 int remote_advertisement = 0; 1417 int local_advertisement = 0; 1418 1419 data = lan743x_csr_read(adapter, MAC_CR); 1420 1421 /* set duplex mode */ 1422 if (phydev->duplex) 1423 data |= MAC_CR_DPX_; 1424 else 1425 data &= ~MAC_CR_DPX_; 1426 1427 /* set bus speed */ 1428 switch (phydev->speed) { 1429 case SPEED_10: 1430 data &= ~MAC_CR_CFG_H_; 1431 data &= ~MAC_CR_CFG_L_; 1432 break; 1433 case SPEED_100: 1434 data &= ~MAC_CR_CFG_H_; 1435 data |= MAC_CR_CFG_L_; 1436 break; 1437 case SPEED_1000: 1438 data |= MAC_CR_CFG_H_; 1439 data &= ~MAC_CR_CFG_L_; 1440 break; 1441 case SPEED_2500: 1442 data |= MAC_CR_CFG_H_; 1443 data |= MAC_CR_CFG_L_; 1444 break; 1445 } 1446 lan743x_csr_write(adapter, MAC_CR, data); 1447 1448 local_advertisement = 1449 linkmode_adv_to_mii_adv_t(phydev->advertising); 1450 remote_advertisement = 1451 linkmode_adv_to_mii_adv_t(phydev->lp_advertising); 1452 1453 lan743x_phy_update_flowcontrol(adapter, local_advertisement, 1454 remote_advertisement); 1455 lan743x_ptp_update_latency(adapter, phydev->speed); 1456 if (phydev->interface == PHY_INTERFACE_MODE_SGMII || 1457 phydev->interface == PHY_INTERFACE_MODE_1000BASEX || 1458 
phydev->interface == PHY_INTERFACE_MODE_2500BASEX) 1459 lan743x_sgmii_config(adapter); 1460 } 1461 } 1462 1463 static void lan743x_phy_close(struct lan743x_adapter *adapter) 1464 { 1465 struct net_device *netdev = adapter->netdev; 1466 1467 phy_stop(netdev->phydev); 1468 phy_disconnect(netdev->phydev); 1469 netdev->phydev = NULL; 1470 } 1471 1472 static void lan743x_phy_interface_select(struct lan743x_adapter *adapter) 1473 { 1474 u32 id_rev; 1475 u32 data; 1476 1477 data = lan743x_csr_read(adapter, MAC_CR); 1478 id_rev = adapter->csr.id_rev & ID_REV_ID_MASK_; 1479 1480 if (adapter->is_pci11x1x && adapter->is_sgmii_en) 1481 adapter->phy_interface = PHY_INTERFACE_MODE_SGMII; 1482 else if (id_rev == ID_REV_ID_LAN7430_) 1483 adapter->phy_interface = PHY_INTERFACE_MODE_GMII; 1484 else if ((id_rev == ID_REV_ID_LAN7431_) && (data & MAC_CR_MII_EN_)) 1485 adapter->phy_interface = PHY_INTERFACE_MODE_MII; 1486 else 1487 adapter->phy_interface = PHY_INTERFACE_MODE_RGMII; 1488 } 1489 1490 static int lan743x_phy_open(struct lan743x_adapter *adapter) 1491 { 1492 struct net_device *netdev = adapter->netdev; 1493 struct lan743x_phy *phy = &adapter->phy; 1494 struct fixed_phy_status fphy_status = { 1495 .link = 1, 1496 .speed = SPEED_1000, 1497 .duplex = DUPLEX_FULL, 1498 }; 1499 struct phy_device *phydev; 1500 int ret = -EIO; 1501 1502 /* try devicetree phy, or fixed link */ 1503 phydev = of_phy_get_and_connect(netdev, adapter->pdev->dev.of_node, 1504 lan743x_phy_link_status_change); 1505 1506 if (!phydev) { 1507 /* try internal phy */ 1508 phydev = phy_find_first(adapter->mdiobus); 1509 if (!phydev) { 1510 if ((adapter->csr.id_rev & ID_REV_ID_MASK_) == 1511 ID_REV_ID_LAN7431_) { 1512 phydev = fixed_phy_register(PHY_POLL, 1513 &fphy_status, NULL); 1514 if (IS_ERR(phydev)) { 1515 netdev_err(netdev, "No PHY/fixed_PHY found\n"); 1516 return -EIO; 1517 } 1518 } else { 1519 goto return_error; 1520 } 1521 } 1522 1523 lan743x_phy_interface_select(adapter); 1524 1525 ret = 
phy_connect_direct(netdev, phydev, 1526 lan743x_phy_link_status_change, 1527 adapter->phy_interface); 1528 if (ret) 1529 goto return_error; 1530 } 1531 1532 /* MAC doesn't support 1000T Half */ 1533 phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT); 1534 1535 /* support both flow controls */ 1536 phy_support_asym_pause(phydev); 1537 phy->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX); 1538 phy->fc_autoneg = phydev->autoneg; 1539 1540 phy_start(phydev); 1541 phy_start_aneg(phydev); 1542 phy_attached_info(phydev); 1543 return 0; 1544 1545 return_error: 1546 return ret; 1547 } 1548 1549 static void lan743x_rfe_open(struct lan743x_adapter *adapter) 1550 { 1551 lan743x_csr_write(adapter, RFE_RSS_CFG, 1552 RFE_RSS_CFG_UDP_IPV6_EX_ | 1553 RFE_RSS_CFG_TCP_IPV6_EX_ | 1554 RFE_RSS_CFG_IPV6_EX_ | 1555 RFE_RSS_CFG_UDP_IPV6_ | 1556 RFE_RSS_CFG_TCP_IPV6_ | 1557 RFE_RSS_CFG_IPV6_ | 1558 RFE_RSS_CFG_UDP_IPV4_ | 1559 RFE_RSS_CFG_TCP_IPV4_ | 1560 RFE_RSS_CFG_IPV4_ | 1561 RFE_RSS_CFG_VALID_HASH_BITS_ | 1562 RFE_RSS_CFG_RSS_QUEUE_ENABLE_ | 1563 RFE_RSS_CFG_RSS_HASH_STORE_ | 1564 RFE_RSS_CFG_RSS_ENABLE_); 1565 } 1566 1567 static void lan743x_rfe_update_mac_address(struct lan743x_adapter *adapter) 1568 { 1569 u8 *mac_addr; 1570 u32 mac_addr_hi = 0; 1571 u32 mac_addr_lo = 0; 1572 1573 /* Add mac address to perfect Filter */ 1574 mac_addr = adapter->mac_address; 1575 mac_addr_lo = ((((u32)(mac_addr[0])) << 0) | 1576 (((u32)(mac_addr[1])) << 8) | 1577 (((u32)(mac_addr[2])) << 16) | 1578 (((u32)(mac_addr[3])) << 24)); 1579 mac_addr_hi = ((((u32)(mac_addr[4])) << 0) | 1580 (((u32)(mac_addr[5])) << 8)); 1581 1582 lan743x_csr_write(adapter, RFE_ADDR_FILT_LO(0), mac_addr_lo); 1583 lan743x_csr_write(adapter, RFE_ADDR_FILT_HI(0), 1584 mac_addr_hi | RFE_ADDR_FILT_HI_VALID_); 1585 } 1586 1587 static void lan743x_rfe_set_multicast(struct lan743x_adapter *adapter) 1588 { 1589 struct net_device *netdev = adapter->netdev; 1590 u32 hash_table[DP_SEL_VHF_HASH_LEN]; 1591 u32 rfctl; 
1592 u32 data; 1593 1594 rfctl = lan743x_csr_read(adapter, RFE_CTL); 1595 rfctl &= ~(RFE_CTL_AU_ | RFE_CTL_AM_ | 1596 RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_); 1597 rfctl |= RFE_CTL_AB_; 1598 if (netdev->flags & IFF_PROMISC) { 1599 rfctl |= RFE_CTL_AM_ | RFE_CTL_AU_; 1600 } else { 1601 if (netdev->flags & IFF_ALLMULTI) 1602 rfctl |= RFE_CTL_AM_; 1603 } 1604 1605 if (netdev->features & NETIF_F_RXCSUM) 1606 rfctl |= RFE_CTL_IP_COE_ | RFE_CTL_TCP_UDP_COE_; 1607 1608 memset(hash_table, 0, DP_SEL_VHF_HASH_LEN * sizeof(u32)); 1609 if (netdev_mc_count(netdev)) { 1610 struct netdev_hw_addr *ha; 1611 int i; 1612 1613 rfctl |= RFE_CTL_DA_PERFECT_; 1614 i = 1; 1615 netdev_for_each_mc_addr(ha, netdev) { 1616 /* set first 32 into Perfect Filter */ 1617 if (i < 33) { 1618 lan743x_csr_write(adapter, 1619 RFE_ADDR_FILT_HI(i), 0); 1620 data = ha->addr[3]; 1621 data = ha->addr[2] | (data << 8); 1622 data = ha->addr[1] | (data << 8); 1623 data = ha->addr[0] | (data << 8); 1624 lan743x_csr_write(adapter, 1625 RFE_ADDR_FILT_LO(i), data); 1626 data = ha->addr[5]; 1627 data = ha->addr[4] | (data << 8); 1628 data |= RFE_ADDR_FILT_HI_VALID_; 1629 lan743x_csr_write(adapter, 1630 RFE_ADDR_FILT_HI(i), data); 1631 } else { 1632 u32 bitnum = (ether_crc(ETH_ALEN, ha->addr) >> 1633 23) & 0x1FF; 1634 hash_table[bitnum / 32] |= (1 << (bitnum % 32)); 1635 rfctl |= RFE_CTL_MCAST_HASH_; 1636 } 1637 i++; 1638 } 1639 } 1640 1641 lan743x_dp_write(adapter, DP_SEL_RFE_RAM, 1642 DP_SEL_VHF_VLAN_LEN, 1643 DP_SEL_VHF_HASH_LEN, hash_table); 1644 lan743x_csr_write(adapter, RFE_CTL, rfctl); 1645 } 1646 1647 static int lan743x_dmac_init(struct lan743x_adapter *adapter) 1648 { 1649 u32 data = 0; 1650 1651 lan743x_csr_write(adapter, DMAC_CMD, DMAC_CMD_SWR_); 1652 lan743x_csr_wait_for_bit(adapter, DMAC_CMD, DMAC_CMD_SWR_, 1653 0, 1000, 20000, 100); 1654 switch (DEFAULT_DMA_DESCRIPTOR_SPACING) { 1655 case DMA_DESCRIPTOR_SPACING_16: 1656 data = DMAC_CFG_MAX_DSPACE_16_; 1657 break; 1658 case 
DMA_DESCRIPTOR_SPACING_32: 1659 data = DMAC_CFG_MAX_DSPACE_32_; 1660 break; 1661 case DMA_DESCRIPTOR_SPACING_64: 1662 data = DMAC_CFG_MAX_DSPACE_64_; 1663 break; 1664 case DMA_DESCRIPTOR_SPACING_128: 1665 data = DMAC_CFG_MAX_DSPACE_128_; 1666 break; 1667 default: 1668 return -EPERM; 1669 } 1670 if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0)) 1671 data |= DMAC_CFG_COAL_EN_; 1672 data |= DMAC_CFG_CH_ARB_SEL_RX_HIGH_; 1673 data |= DMAC_CFG_MAX_READ_REQ_SET_(6); 1674 lan743x_csr_write(adapter, DMAC_CFG, data); 1675 data = DMAC_COAL_CFG_TIMER_LIMIT_SET_(1); 1676 data |= DMAC_COAL_CFG_TIMER_TX_START_; 1677 data |= DMAC_COAL_CFG_FLUSH_INTS_; 1678 data |= DMAC_COAL_CFG_INT_EXIT_COAL_; 1679 data |= DMAC_COAL_CFG_CSR_EXIT_COAL_; 1680 data |= DMAC_COAL_CFG_TX_THRES_SET_(0x0A); 1681 data |= DMAC_COAL_CFG_RX_THRES_SET_(0x0C); 1682 lan743x_csr_write(adapter, DMAC_COAL_CFG, data); 1683 data = DMAC_OBFF_TX_THRES_SET_(0x08); 1684 data |= DMAC_OBFF_RX_THRES_SET_(0x0A); 1685 lan743x_csr_write(adapter, DMAC_OBFF_CFG, data); 1686 return 0; 1687 } 1688 1689 static int lan743x_dmac_tx_get_state(struct lan743x_adapter *adapter, 1690 int tx_channel) 1691 { 1692 u32 dmac_cmd = 0; 1693 1694 dmac_cmd = lan743x_csr_read(adapter, DMAC_CMD); 1695 return DMAC_CHANNEL_STATE_SET((dmac_cmd & 1696 DMAC_CMD_START_T_(tx_channel)), 1697 (dmac_cmd & 1698 DMAC_CMD_STOP_T_(tx_channel))); 1699 } 1700 1701 static int lan743x_dmac_tx_wait_till_stopped(struct lan743x_adapter *adapter, 1702 int tx_channel) 1703 { 1704 int timeout = 100; 1705 int result = 0; 1706 1707 while (timeout && 1708 ((result = lan743x_dmac_tx_get_state(adapter, tx_channel)) == 1709 DMAC_CHANNEL_STATE_STOP_PENDING)) { 1710 usleep_range(1000, 20000); 1711 timeout--; 1712 } 1713 if (result == DMAC_CHANNEL_STATE_STOP_PENDING) 1714 result = -ENODEV; 1715 return result; 1716 } 1717 1718 static int lan743x_dmac_rx_get_state(struct lan743x_adapter *adapter, 1719 int rx_channel) 1720 { 1721 u32 dmac_cmd = 0; 1722 1723 dmac_cmd = 
lan743x_csr_read(adapter, DMAC_CMD); 1724 return DMAC_CHANNEL_STATE_SET((dmac_cmd & 1725 DMAC_CMD_START_R_(rx_channel)), 1726 (dmac_cmd & 1727 DMAC_CMD_STOP_R_(rx_channel))); 1728 } 1729 1730 static int lan743x_dmac_rx_wait_till_stopped(struct lan743x_adapter *adapter, 1731 int rx_channel) 1732 { 1733 int timeout = 100; 1734 int result = 0; 1735 1736 while (timeout && 1737 ((result = lan743x_dmac_rx_get_state(adapter, rx_channel)) == 1738 DMAC_CHANNEL_STATE_STOP_PENDING)) { 1739 usleep_range(1000, 20000); 1740 timeout--; 1741 } 1742 if (result == DMAC_CHANNEL_STATE_STOP_PENDING) 1743 result = -ENODEV; 1744 return result; 1745 } 1746 1747 static void lan743x_tx_release_desc(struct lan743x_tx *tx, 1748 int descriptor_index, bool cleanup) 1749 { 1750 struct lan743x_tx_buffer_info *buffer_info = NULL; 1751 struct lan743x_tx_descriptor *descriptor = NULL; 1752 u32 descriptor_type = 0; 1753 bool ignore_sync; 1754 1755 descriptor = &tx->ring_cpu_ptr[descriptor_index]; 1756 buffer_info = &tx->buffer_info[descriptor_index]; 1757 if (!(buffer_info->flags & TX_BUFFER_INFO_FLAG_ACTIVE)) 1758 goto done; 1759 1760 descriptor_type = le32_to_cpu(descriptor->data0) & 1761 TX_DESC_DATA0_DTYPE_MASK_; 1762 if (descriptor_type == TX_DESC_DATA0_DTYPE_DATA_) 1763 goto clean_up_data_descriptor; 1764 else 1765 goto clear_active; 1766 1767 clean_up_data_descriptor: 1768 if (buffer_info->dma_ptr) { 1769 if (buffer_info->flags & 1770 TX_BUFFER_INFO_FLAG_SKB_FRAGMENT) { 1771 dma_unmap_page(&tx->adapter->pdev->dev, 1772 buffer_info->dma_ptr, 1773 buffer_info->buffer_length, 1774 DMA_TO_DEVICE); 1775 } else { 1776 dma_unmap_single(&tx->adapter->pdev->dev, 1777 buffer_info->dma_ptr, 1778 buffer_info->buffer_length, 1779 DMA_TO_DEVICE); 1780 } 1781 buffer_info->dma_ptr = 0; 1782 buffer_info->buffer_length = 0; 1783 } 1784 if (!buffer_info->skb) 1785 goto clear_active; 1786 1787 if (!(buffer_info->flags & TX_BUFFER_INFO_FLAG_TIMESTAMP_REQUESTED)) { 1788 dev_kfree_skb_any(buffer_info->skb); 1789 
goto clear_skb; 1790 } 1791 1792 if (cleanup) { 1793 lan743x_ptp_unrequest_tx_timestamp(tx->adapter); 1794 dev_kfree_skb_any(buffer_info->skb); 1795 } else { 1796 ignore_sync = (buffer_info->flags & 1797 TX_BUFFER_INFO_FLAG_IGNORE_SYNC) != 0; 1798 lan743x_ptp_tx_timestamp_skb(tx->adapter, 1799 buffer_info->skb, ignore_sync); 1800 } 1801 1802 clear_skb: 1803 buffer_info->skb = NULL; 1804 1805 clear_active: 1806 buffer_info->flags &= ~TX_BUFFER_INFO_FLAG_ACTIVE; 1807 1808 done: 1809 memset(buffer_info, 0, sizeof(*buffer_info)); 1810 memset(descriptor, 0, sizeof(*descriptor)); 1811 } 1812 1813 static int lan743x_tx_next_index(struct lan743x_tx *tx, int index) 1814 { 1815 return ((++index) % tx->ring_size); 1816 } 1817 1818 static void lan743x_tx_release_completed_descriptors(struct lan743x_tx *tx) 1819 { 1820 while (le32_to_cpu(*tx->head_cpu_ptr) != (tx->last_head)) { 1821 lan743x_tx_release_desc(tx, tx->last_head, false); 1822 tx->last_head = lan743x_tx_next_index(tx, tx->last_head); 1823 } 1824 } 1825 1826 static void lan743x_tx_release_all_descriptors(struct lan743x_tx *tx) 1827 { 1828 u32 original_head = 0; 1829 1830 original_head = tx->last_head; 1831 do { 1832 lan743x_tx_release_desc(tx, tx->last_head, true); 1833 tx->last_head = lan743x_tx_next_index(tx, tx->last_head); 1834 } while (tx->last_head != original_head); 1835 memset(tx->ring_cpu_ptr, 0, 1836 sizeof(*tx->ring_cpu_ptr) * (tx->ring_size)); 1837 memset(tx->buffer_info, 0, 1838 sizeof(*tx->buffer_info) * (tx->ring_size)); 1839 } 1840 1841 static int lan743x_tx_get_desc_cnt(struct lan743x_tx *tx, 1842 struct sk_buff *skb) 1843 { 1844 int result = 1; /* 1 for the main skb buffer */ 1845 int nr_frags = 0; 1846 1847 if (skb_is_gso(skb)) 1848 result++; /* requires an extension descriptor */ 1849 nr_frags = skb_shinfo(skb)->nr_frags; 1850 result += nr_frags; /* 1 for each fragment buffer */ 1851 return result; 1852 } 1853 1854 static int lan743x_tx_get_avail_desc(struct lan743x_tx *tx) 1855 { 1856 int 
last_head = tx->last_head; 1857 int last_tail = tx->last_tail; 1858 1859 if (last_tail >= last_head) 1860 return tx->ring_size - last_tail + last_head - 1; 1861 else 1862 return last_head - last_tail - 1; 1863 } 1864 1865 void lan743x_tx_set_timestamping_mode(struct lan743x_tx *tx, 1866 bool enable_timestamping, 1867 bool enable_onestep_sync) 1868 { 1869 if (enable_timestamping) 1870 tx->ts_flags |= TX_TS_FLAG_TIMESTAMPING_ENABLED; 1871 else 1872 tx->ts_flags &= ~TX_TS_FLAG_TIMESTAMPING_ENABLED; 1873 if (enable_onestep_sync) 1874 tx->ts_flags |= TX_TS_FLAG_ONE_STEP_SYNC; 1875 else 1876 tx->ts_flags &= ~TX_TS_FLAG_ONE_STEP_SYNC; 1877 } 1878 1879 static int lan743x_tx_frame_start(struct lan743x_tx *tx, 1880 unsigned char *first_buffer, 1881 unsigned int first_buffer_length, 1882 unsigned int frame_length, 1883 bool time_stamp, 1884 bool check_sum) 1885 { 1886 /* called only from within lan743x_tx_xmit_frame. 1887 * assuming tx->ring_lock has already been acquired. 1888 */ 1889 struct lan743x_tx_descriptor *tx_descriptor = NULL; 1890 struct lan743x_tx_buffer_info *buffer_info = NULL; 1891 struct lan743x_adapter *adapter = tx->adapter; 1892 struct device *dev = &adapter->pdev->dev; 1893 dma_addr_t dma_ptr; 1894 1895 tx->frame_flags |= TX_FRAME_FLAG_IN_PROGRESS; 1896 tx->frame_first = tx->last_tail; 1897 tx->frame_tail = tx->frame_first; 1898 1899 tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail]; 1900 buffer_info = &tx->buffer_info[tx->frame_tail]; 1901 dma_ptr = dma_map_single(dev, first_buffer, first_buffer_length, 1902 DMA_TO_DEVICE); 1903 if (dma_mapping_error(dev, dma_ptr)) 1904 return -ENOMEM; 1905 1906 tx_descriptor->data1 = cpu_to_le32(DMA_ADDR_LOW32(dma_ptr)); 1907 tx_descriptor->data2 = cpu_to_le32(DMA_ADDR_HIGH32(dma_ptr)); 1908 tx_descriptor->data3 = cpu_to_le32((frame_length << 16) & 1909 TX_DESC_DATA3_FRAME_LENGTH_MSS_MASK_); 1910 1911 buffer_info->skb = NULL; 1912 buffer_info->dma_ptr = dma_ptr; 1913 buffer_info->buffer_length = first_buffer_length; 1914 
buffer_info->flags |= TX_BUFFER_INFO_FLAG_ACTIVE; 1915 1916 tx->frame_data0 = (first_buffer_length & 1917 TX_DESC_DATA0_BUF_LENGTH_MASK_) | 1918 TX_DESC_DATA0_DTYPE_DATA_ | 1919 TX_DESC_DATA0_FS_ | 1920 TX_DESC_DATA0_FCS_; 1921 if (time_stamp) 1922 tx->frame_data0 |= TX_DESC_DATA0_TSE_; 1923 1924 if (check_sum) 1925 tx->frame_data0 |= TX_DESC_DATA0_ICE_ | 1926 TX_DESC_DATA0_IPE_ | 1927 TX_DESC_DATA0_TPE_; 1928 1929 /* data0 will be programmed in one of other frame assembler functions */ 1930 return 0; 1931 } 1932 1933 static void lan743x_tx_frame_add_lso(struct lan743x_tx *tx, 1934 unsigned int frame_length, 1935 int nr_frags) 1936 { 1937 /* called only from within lan743x_tx_xmit_frame. 1938 * assuming tx->ring_lock has already been acquired. 1939 */ 1940 struct lan743x_tx_descriptor *tx_descriptor = NULL; 1941 struct lan743x_tx_buffer_info *buffer_info = NULL; 1942 1943 /* wrap up previous descriptor */ 1944 tx->frame_data0 |= TX_DESC_DATA0_EXT_; 1945 if (nr_frags <= 0) { 1946 tx->frame_data0 |= TX_DESC_DATA0_LS_; 1947 tx->frame_data0 |= TX_DESC_DATA0_IOC_; 1948 } 1949 tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail]; 1950 tx_descriptor->data0 = cpu_to_le32(tx->frame_data0); 1951 1952 /* move to next descriptor */ 1953 tx->frame_tail = lan743x_tx_next_index(tx, tx->frame_tail); 1954 tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail]; 1955 buffer_info = &tx->buffer_info[tx->frame_tail]; 1956 1957 /* add extension descriptor */ 1958 tx_descriptor->data1 = 0; 1959 tx_descriptor->data2 = 0; 1960 tx_descriptor->data3 = 0; 1961 1962 buffer_info->skb = NULL; 1963 buffer_info->dma_ptr = 0; 1964 buffer_info->buffer_length = 0; 1965 buffer_info->flags |= TX_BUFFER_INFO_FLAG_ACTIVE; 1966 1967 tx->frame_data0 = (frame_length & TX_DESC_DATA0_EXT_PAY_LENGTH_MASK_) | 1968 TX_DESC_DATA0_DTYPE_EXT_ | 1969 TX_DESC_DATA0_EXT_LSO_; 1970 1971 /* data0 will be programmed in one of other frame assembler functions */ 1972 } 1973 1974 static int lan743x_tx_frame_add_fragment(struct 
lan743x_tx *tx, 1975 const skb_frag_t *fragment, 1976 unsigned int frame_length) 1977 { 1978 /* called only from within lan743x_tx_xmit_frame 1979 * assuming tx->ring_lock has already been acquired 1980 */ 1981 struct lan743x_tx_descriptor *tx_descriptor = NULL; 1982 struct lan743x_tx_buffer_info *buffer_info = NULL; 1983 struct lan743x_adapter *adapter = tx->adapter; 1984 struct device *dev = &adapter->pdev->dev; 1985 unsigned int fragment_length = 0; 1986 dma_addr_t dma_ptr; 1987 1988 fragment_length = skb_frag_size(fragment); 1989 if (!fragment_length) 1990 return 0; 1991 1992 /* wrap up previous descriptor */ 1993 tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail]; 1994 tx_descriptor->data0 = cpu_to_le32(tx->frame_data0); 1995 1996 /* move to next descriptor */ 1997 tx->frame_tail = lan743x_tx_next_index(tx, tx->frame_tail); 1998 tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail]; 1999 buffer_info = &tx->buffer_info[tx->frame_tail]; 2000 dma_ptr = skb_frag_dma_map(dev, fragment, 2001 0, fragment_length, 2002 DMA_TO_DEVICE); 2003 if (dma_mapping_error(dev, dma_ptr)) { 2004 int desc_index; 2005 2006 /* cleanup all previously setup descriptors */ 2007 desc_index = tx->frame_first; 2008 while (desc_index != tx->frame_tail) { 2009 lan743x_tx_release_desc(tx, desc_index, true); 2010 desc_index = lan743x_tx_next_index(tx, desc_index); 2011 } 2012 dma_wmb(); 2013 tx->frame_flags &= ~TX_FRAME_FLAG_IN_PROGRESS; 2014 tx->frame_first = 0; 2015 tx->frame_data0 = 0; 2016 tx->frame_tail = 0; 2017 return -ENOMEM; 2018 } 2019 2020 tx_descriptor->data1 = cpu_to_le32(DMA_ADDR_LOW32(dma_ptr)); 2021 tx_descriptor->data2 = cpu_to_le32(DMA_ADDR_HIGH32(dma_ptr)); 2022 tx_descriptor->data3 = cpu_to_le32((frame_length << 16) & 2023 TX_DESC_DATA3_FRAME_LENGTH_MSS_MASK_); 2024 2025 buffer_info->skb = NULL; 2026 buffer_info->dma_ptr = dma_ptr; 2027 buffer_info->buffer_length = fragment_length; 2028 buffer_info->flags |= TX_BUFFER_INFO_FLAG_ACTIVE; 2029 buffer_info->flags |= 
TX_BUFFER_INFO_FLAG_SKB_FRAGMENT; 2030 2031 tx->frame_data0 = (fragment_length & TX_DESC_DATA0_BUF_LENGTH_MASK_) | 2032 TX_DESC_DATA0_DTYPE_DATA_ | 2033 TX_DESC_DATA0_FCS_; 2034 2035 /* data0 will be programmed in one of other frame assembler functions */ 2036 return 0; 2037 } 2038 2039 static void lan743x_tx_frame_end(struct lan743x_tx *tx, 2040 struct sk_buff *skb, 2041 bool time_stamp, 2042 bool ignore_sync) 2043 { 2044 /* called only from within lan743x_tx_xmit_frame 2045 * assuming tx->ring_lock has already been acquired 2046 */ 2047 struct lan743x_tx_descriptor *tx_descriptor = NULL; 2048 struct lan743x_tx_buffer_info *buffer_info = NULL; 2049 struct lan743x_adapter *adapter = tx->adapter; 2050 u32 tx_tail_flags = 0; 2051 2052 /* wrap up previous descriptor */ 2053 if ((tx->frame_data0 & TX_DESC_DATA0_DTYPE_MASK_) == 2054 TX_DESC_DATA0_DTYPE_DATA_) { 2055 tx->frame_data0 |= TX_DESC_DATA0_LS_; 2056 tx->frame_data0 |= TX_DESC_DATA0_IOC_; 2057 } 2058 2059 tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail]; 2060 buffer_info = &tx->buffer_info[tx->frame_tail]; 2061 buffer_info->skb = skb; 2062 if (time_stamp) 2063 buffer_info->flags |= TX_BUFFER_INFO_FLAG_TIMESTAMP_REQUESTED; 2064 if (ignore_sync) 2065 buffer_info->flags |= TX_BUFFER_INFO_FLAG_IGNORE_SYNC; 2066 2067 tx_descriptor->data0 = cpu_to_le32(tx->frame_data0); 2068 tx->frame_tail = lan743x_tx_next_index(tx, tx->frame_tail); 2069 tx->last_tail = tx->frame_tail; 2070 2071 dma_wmb(); 2072 2073 if (tx->vector_flags & LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET) 2074 tx_tail_flags |= TX_TAIL_SET_TOP_INT_VEC_EN_; 2075 if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET) 2076 tx_tail_flags |= TX_TAIL_SET_DMAC_INT_EN_ | 2077 TX_TAIL_SET_TOP_INT_EN_; 2078 2079 lan743x_csr_write(adapter, TX_TAIL(tx->channel_number), 2080 tx_tail_flags | tx->frame_tail); 2081 tx->frame_flags &= ~TX_FRAME_FLAG_IN_PROGRESS; 2082 } 2083 2084 static netdev_tx_t lan743x_tx_xmit_frame(struct lan743x_tx *tx, 2085 struct sk_buff 
*skb) 2086 { 2087 int required_number_of_descriptors = 0; 2088 unsigned int start_frame_length = 0; 2089 netdev_tx_t retval = NETDEV_TX_OK; 2090 unsigned int frame_length = 0; 2091 unsigned int head_length = 0; 2092 unsigned long irq_flags = 0; 2093 bool do_timestamp = false; 2094 bool ignore_sync = false; 2095 struct netdev_queue *txq; 2096 int nr_frags = 0; 2097 bool gso = false; 2098 int j; 2099 2100 required_number_of_descriptors = lan743x_tx_get_desc_cnt(tx, skb); 2101 2102 spin_lock_irqsave(&tx->ring_lock, irq_flags); 2103 if (required_number_of_descriptors > 2104 lan743x_tx_get_avail_desc(tx)) { 2105 if (required_number_of_descriptors > (tx->ring_size - 1)) { 2106 dev_kfree_skb_irq(skb); 2107 } else { 2108 /* save how many descriptors we needed to restart the queue */ 2109 tx->rqd_descriptors = required_number_of_descriptors; 2110 retval = NETDEV_TX_BUSY; 2111 txq = netdev_get_tx_queue(tx->adapter->netdev, 2112 tx->channel_number); 2113 netif_tx_stop_queue(txq); 2114 } 2115 goto unlock; 2116 } 2117 2118 /* space available, transmit skb */ 2119 if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && 2120 (tx->ts_flags & TX_TS_FLAG_TIMESTAMPING_ENABLED) && 2121 (lan743x_ptp_request_tx_timestamp(tx->adapter))) { 2122 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 2123 do_timestamp = true; 2124 if (tx->ts_flags & TX_TS_FLAG_ONE_STEP_SYNC) 2125 ignore_sync = true; 2126 } 2127 head_length = skb_headlen(skb); 2128 frame_length = skb_pagelen(skb); 2129 nr_frags = skb_shinfo(skb)->nr_frags; 2130 start_frame_length = frame_length; 2131 gso = skb_is_gso(skb); 2132 if (gso) { 2133 start_frame_length = max(skb_shinfo(skb)->gso_size, 2134 (unsigned short)8); 2135 } 2136 2137 if (lan743x_tx_frame_start(tx, 2138 skb->data, head_length, 2139 start_frame_length, 2140 do_timestamp, 2141 skb->ip_summed == CHECKSUM_PARTIAL)) { 2142 dev_kfree_skb_irq(skb); 2143 goto unlock; 2144 } 2145 tx->frame_count++; 2146 2147 if (gso) 2148 lan743x_tx_frame_add_lso(tx, frame_length, nr_frags); 
2149 2150 if (nr_frags <= 0) 2151 goto finish; 2152 2153 for (j = 0; j < nr_frags; j++) { 2154 const skb_frag_t *frag = &(skb_shinfo(skb)->frags[j]); 2155 2156 if (lan743x_tx_frame_add_fragment(tx, frag, frame_length)) { 2157 /* upon error no need to call 2158 * lan743x_tx_frame_end 2159 * frame assembler clean up was performed inside 2160 * lan743x_tx_frame_add_fragment 2161 */ 2162 dev_kfree_skb_irq(skb); 2163 goto unlock; 2164 } 2165 } 2166 2167 finish: 2168 lan743x_tx_frame_end(tx, skb, do_timestamp, ignore_sync); 2169 2170 unlock: 2171 spin_unlock_irqrestore(&tx->ring_lock, irq_flags); 2172 return retval; 2173 } 2174 2175 static int lan743x_tx_napi_poll(struct napi_struct *napi, int weight) 2176 { 2177 struct lan743x_tx *tx = container_of(napi, struct lan743x_tx, napi); 2178 struct lan743x_adapter *adapter = tx->adapter; 2179 unsigned long irq_flags = 0; 2180 struct netdev_queue *txq; 2181 u32 ioc_bit = 0; 2182 2183 ioc_bit = DMAC_INT_BIT_TX_IOC_(tx->channel_number); 2184 lan743x_csr_read(adapter, DMAC_INT_STS); 2185 if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C) 2186 lan743x_csr_write(adapter, DMAC_INT_STS, ioc_bit); 2187 spin_lock_irqsave(&tx->ring_lock, irq_flags); 2188 2189 /* clean up tx ring */ 2190 lan743x_tx_release_completed_descriptors(tx); 2191 txq = netdev_get_tx_queue(adapter->netdev, tx->channel_number); 2192 if (netif_tx_queue_stopped(txq)) { 2193 if (tx->rqd_descriptors) { 2194 if (tx->rqd_descriptors <= 2195 lan743x_tx_get_avail_desc(tx)) { 2196 tx->rqd_descriptors = 0; 2197 netif_tx_wake_queue(txq); 2198 } 2199 } else { 2200 netif_tx_wake_queue(txq); 2201 } 2202 } 2203 spin_unlock_irqrestore(&tx->ring_lock, irq_flags); 2204 2205 if (!napi_complete(napi)) 2206 goto done; 2207 2208 /* enable isr */ 2209 lan743x_csr_write(adapter, INT_EN_SET, 2210 INT_BIT_DMA_TX_(tx->channel_number)); 2211 lan743x_csr_read(adapter, INT_STS); 2212 2213 done: 2214 return 0; 2215 } 2216 2217 static void lan743x_tx_ring_cleanup(struct lan743x_tx *tx) 
{
	if (tx->head_cpu_ptr) {
		dma_free_coherent(&tx->adapter->pdev->dev,
				  sizeof(*tx->head_cpu_ptr), tx->head_cpu_ptr,
				  tx->head_dma_ptr);
		tx->head_cpu_ptr = NULL;
		tx->head_dma_ptr = 0;
	}
	/* kfree(NULL) is a no-op, so no guard needed */
	kfree(tx->buffer_info);
	tx->buffer_info = NULL;

	if (tx->ring_cpu_ptr) {
		dma_free_coherent(&tx->adapter->pdev->dev,
				  tx->ring_allocation_size, tx->ring_cpu_ptr,
				  tx->ring_dma_ptr);
		tx->ring_allocation_size = 0;
		tx->ring_cpu_ptr = NULL;
		tx->ring_dma_ptr = 0;
	}
	tx->ring_size = 0;
}

/* Allocate the TX descriptor ring, its buffer_info shadow array and the
 * head-writeback slot. Returns 0 on success or a negative errno; on any
 * failure everything already allocated is released via
 * lan743x_tx_ring_cleanup() (goto-cleanup pattern).
 */
static int lan743x_tx_ring_init(struct lan743x_tx *tx)
{
	size_t ring_allocation_size = 0;
	void *cpu_ptr = NULL;
	dma_addr_t dma_ptr;
	int ret = -ENOMEM;

	tx->ring_size = LAN743X_TX_RING_SIZE;
	/* ring length must fit in the TX_CFG_B length field */
	if (tx->ring_size & ~TX_CFG_B_TX_RING_LEN_MASK_) {
		ret = -EINVAL;
		goto cleanup;
	}
	if (dma_set_mask_and_coherent(&tx->adapter->pdev->dev,
				      DMA_BIT_MASK(64))) {
		dev_warn(&tx->adapter->pdev->dev,
			 "lan743x_: No suitable DMA available\n");
		ret = -ENOMEM;
		goto cleanup;
	}
	ring_allocation_size = ALIGN(tx->ring_size *
				     sizeof(struct lan743x_tx_descriptor),
				     PAGE_SIZE);
	dma_ptr = 0;
	cpu_ptr = dma_alloc_coherent(&tx->adapter->pdev->dev,
				     ring_allocation_size, &dma_ptr, GFP_KERNEL);
	if (!cpu_ptr) {
		ret = -ENOMEM;
		goto cleanup;
	}

	tx->ring_allocation_size = ring_allocation_size;
	tx->ring_cpu_ptr = (struct lan743x_tx_descriptor *)cpu_ptr;
	tx->ring_dma_ptr = dma_ptr;

	cpu_ptr = kcalloc(tx->ring_size, sizeof(*tx->buffer_info), GFP_KERNEL);
	if (!cpu_ptr) {
		ret = -ENOMEM;
		goto cleanup;
	}
	tx->buffer_info = (struct lan743x_tx_buffer_info *)cpu_ptr;
	dma_ptr = 0;
	/* single-element coherent buffer the chip writes the head index to */
	cpu_ptr = dma_alloc_coherent(&tx->adapter->pdev->dev,
				     sizeof(*tx->head_cpu_ptr), &dma_ptr,
				     GFP_KERNEL);
	if (!cpu_ptr) {
		ret = -ENOMEM;
		goto cleanup;
	}

	tx->head_cpu_ptr = cpu_ptr;
	tx->head_dma_ptr = dma_ptr;
	/* head writeback address must be 4-byte aligned for the hardware */
	if (tx->head_dma_ptr & 0x3) {
		ret = -ENOMEM;
		goto cleanup;
	}

	return 0;

cleanup:
	lan743x_tx_ring_cleanup(tx);
	return ret;
}

/* Stop and tear down a TX channel: halt the DMAC channel, mask its
 * interrupts, disable NAPI, drain/disable the TX FIFO, then release all
 * descriptors and ring memory. Order mirrors the reverse of lan743x_tx_open().
 */
static void lan743x_tx_close(struct lan743x_tx *tx)
{
	struct lan743x_adapter *adapter = tx->adapter;

	lan743x_csr_write(adapter,
			  DMAC_CMD,
			  DMAC_CMD_STOP_T_(tx->channel_number));
	lan743x_dmac_tx_wait_till_stopped(adapter, tx->channel_number);

	lan743x_csr_write(adapter,
			  DMAC_INT_EN_CLR,
			  DMAC_INT_BIT_TX_IOC_(tx->channel_number));
	lan743x_csr_write(adapter, INT_EN_CLR,
			  INT_BIT_DMA_TX_(tx->channel_number));
	napi_disable(&tx->napi);
	netif_napi_del(&tx->napi);

	lan743x_csr_write(adapter, FCT_TX_CTL,
			  FCT_TX_CTL_DIS_(tx->channel_number));
	lan743x_csr_wait_for_bit(adapter, FCT_TX_CTL,
				 FCT_TX_CTL_EN_(tx->channel_number),
				 0, 1000, 20000, 100);

	lan743x_tx_release_all_descriptors(tx);

	tx->rqd_descriptors = 0;

	lan743x_tx_ring_cleanup(tx);
}

/* Bring up a TX channel: allocate the ring, reset/enable the TX FIFO,
 * soft-reset the DMAC channel, then program ring/writeback addresses and
 * the TX_CFG_* registers, register NAPI and start the channel.
 * Returns 0 or a negative errno from ring init.
 */
static int lan743x_tx_open(struct lan743x_tx *tx)
{
	struct lan743x_adapter *adapter = NULL;
	u32 data = 0;
	int ret;

	adapter = tx->adapter;
	ret = lan743x_tx_ring_init(tx);
	if (ret)
		return ret;

	/* initialize fifo */
	lan743x_csr_write(adapter, FCT_TX_CTL,
			  FCT_TX_CTL_RESET_(tx->channel_number));
	lan743x_csr_wait_for_bit(adapter, FCT_TX_CTL,
				 FCT_TX_CTL_RESET_(tx->channel_number),
				 0, 1000, 20000, 100);

	/* enable fifo */
	lan743x_csr_write(adapter, FCT_TX_CTL,
			  FCT_TX_CTL_EN_(tx->channel_number));

	/* reset tx channel */
	lan743x_csr_write(adapter, DMAC_CMD,
			  DMAC_CMD_TX_SWR_(tx->channel_number));
	lan743x_csr_wait_for_bit(adapter, DMAC_CMD,
				 DMAC_CMD_TX_SWR_(tx->channel_number),
				 0, 1000, 20000, 100);

	/* Write
TX_BASE_ADDR */
	lan743x_csr_write(adapter,
			  TX_BASE_ADDRH(tx->channel_number),
			  DMA_ADDR_HIGH32(tx->ring_dma_ptr));
	lan743x_csr_write(adapter,
			  TX_BASE_ADDRL(tx->channel_number),
			  DMA_ADDR_LOW32(tx->ring_dma_ptr));

	/* Write TX_CFG_B */
	data = lan743x_csr_read(adapter, TX_CFG_B(tx->channel_number));
	data &= ~TX_CFG_B_TX_RING_LEN_MASK_;
	data |= ((tx->ring_size) & TX_CFG_B_TX_RING_LEN_MASK_);
	if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0))
		data |= TX_CFG_B_TDMABL_512_;
	lan743x_csr_write(adapter, TX_CFG_B(tx->channel_number), data);

	/* Write TX_CFG_A */
	data = TX_CFG_A_TX_TMR_HPWB_SEL_IOC_ | TX_CFG_A_TX_HP_WB_EN_;
	if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0)) {
		/* post-A0 silicon: extra prefetch/writeback thresholds */
		data |= TX_CFG_A_TX_HP_WB_ON_INT_TMR_;
		data |= TX_CFG_A_TX_PF_THRES_SET_(0x10);
		data |= TX_CFG_A_TX_PF_PRI_THRES_SET_(0x04);
		data |= TX_CFG_A_TX_HP_WB_THRES_SET_(0x07);
	}
	lan743x_csr_write(adapter, TX_CFG_A(tx->channel_number), data);

	/* Write TX_HEAD_WRITEBACK_ADDR */
	lan743x_csr_write(adapter,
			  TX_HEAD_WRITEBACK_ADDRH(tx->channel_number),
			  DMA_ADDR_HIGH32(tx->head_dma_ptr));
	lan743x_csr_write(adapter,
			  TX_HEAD_WRITEBACK_ADDRL(tx->channel_number),
			  DMA_ADDR_LOW32(tx->head_dma_ptr));

	/* set last head */
	tx->last_head = lan743x_csr_read(adapter, TX_HEAD(tx->channel_number));

	/* write TX_TAIL */
	tx->last_tail = 0;
	lan743x_csr_write(adapter, TX_TAIL(tx->channel_number),
			  (u32)(tx->last_tail));
	tx->vector_flags = lan743x_intr_get_vector_flags(adapter,
							 INT_BIT_DMA_TX_
							 (tx->channel_number));
	netif_napi_add_tx_weight(adapter->netdev,
				 &tx->napi, lan743x_tx_napi_poll,
				 NAPI_POLL_WEIGHT);
	napi_enable(&tx->napi);

	/* configure interrupt auto-clear/R2C behavior per vector flags */
	data = 0;
	if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_CLEAR)
		data |= TX_CFG_C_TX_TOP_INT_EN_AUTO_CLR_;
	if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_AUTO_CLEAR)
		data |= TX_CFG_C_TX_DMA_INT_STS_AUTO_CLR_;
	if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_R2C)
		data |= TX_CFG_C_TX_INT_STS_R2C_MODE_MASK_;
	if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_R2C)
		data |= TX_CFG_C_TX_INT_EN_R2C_;
	lan743x_csr_write(adapter, TX_CFG_C(tx->channel_number), data);

	if (!(tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET))
		lan743x_csr_write(adapter, INT_EN_SET,
				  INT_BIT_DMA_TX_(tx->channel_number));
	lan743x_csr_write(adapter, DMAC_INT_EN_SET,
			  DMAC_INT_BIT_TX_IOC_(tx->channel_number));

	/* start dmac channel */
	lan743x_csr_write(adapter, DMAC_CMD,
			  DMAC_CMD_START_T_(tx->channel_number));
	return 0;
}

/* Next RX ring index with wrap-around. */
static int lan743x_rx_next_index(struct lan743x_rx *rx, int index)
{
	return ((++index) % rx->ring_size);
}

/* Advance the hardware RX tail pointer, batched to one write per
 * 8 descriptors to limit MMIO traffic.
 */
static void lan743x_rx_update_tail(struct lan743x_rx *rx, int index)
{
	/* update the tail once per 8 descriptors */
	if ((index & 7) == 7)
		lan743x_csr_write(rx->adapter, RX_TAIL(rx->channel_number),
				  index);
}

/* Attach a freshly allocated, DMA-mapped skb to ring slot @index and hand
 * the descriptor back to hardware (sets OWN). If the slot previously held
 * a mapped buffer, its used area is synced to the CPU and the old mapping
 * released before being overwritten. Returns 0 or -ENOMEM.
 */
static int lan743x_rx_init_ring_element(struct lan743x_rx *rx, int index,
					gfp_t gfp)
{
	struct net_device *netdev = rx->adapter->netdev;
	struct device *dev = &rx->adapter->pdev->dev;
	struct lan743x_rx_buffer_info *buffer_info;
	unsigned int buffer_length, used_length;
	struct lan743x_rx_descriptor *descriptor;
	struct sk_buff *skb;
	dma_addr_t dma_ptr;

	buffer_length = netdev->mtu + ETH_HLEN + ETH_FCS_LEN + RX_HEAD_PADDING;

	descriptor = &rx->ring_cpu_ptr[index];
	buffer_info = &rx->buffer_info[index];
	skb = __netdev_alloc_skb(netdev, buffer_length, gfp);
	if (!skb)
		return -ENOMEM;
	dma_ptr = dma_map_single(dev, skb->data, buffer_length, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, dma_ptr)) {
		dev_kfree_skb_any(skb);
		return -ENOMEM;
	}
	if (buffer_info->dma_ptr) {
		/* sync used area of buffer only */
		if (le32_to_cpu(descriptor->data0) & RX_DESC_DATA0_LS_)
			/* frame length is valid only if LS bit is set.
			 * it's a safe upper bound for the used area in this
			 * buffer.
			 */
			used_length = min(RX_DESC_DATA0_FRAME_LENGTH_GET_
					  (le32_to_cpu(descriptor->data0)),
					  buffer_info->buffer_length);
		else
			used_length = buffer_info->buffer_length;
		dma_sync_single_for_cpu(dev, buffer_info->dma_ptr,
					used_length,
					DMA_FROM_DEVICE);
		/* CPU sync already done above for just the used area, so
		 * skip the (full-length) sync the unmap would perform
		 */
		dma_unmap_single_attrs(dev, buffer_info->dma_ptr,
				       buffer_info->buffer_length,
				       DMA_FROM_DEVICE,
				       DMA_ATTR_SKIP_CPU_SYNC);
	}

	buffer_info->skb = skb;
	buffer_info->dma_ptr = dma_ptr;
	buffer_info->buffer_length = buffer_length;
	descriptor->data1 = cpu_to_le32(DMA_ADDR_LOW32(buffer_info->dma_ptr));
	descriptor->data2 = cpu_to_le32(DMA_ADDR_HIGH32(buffer_info->dma_ptr));
	descriptor->data3 = 0;
	/* setting OWN last hands the descriptor to the hardware */
	descriptor->data0 = cpu_to_le32((RX_DESC_DATA0_OWN_ |
			    (buffer_length & RX_DESC_DATA0_BUF_LENGTH_MASK_)));
	lan743x_rx_update_tail(rx, index);

	return 0;
}

/* Re-arm ring slot @index with the buffer it already holds (used when a
 * replacement skb could not be allocated): rewrite the DMA address fields
 * and give ownership back to hardware.
 */
static void lan743x_rx_reuse_ring_element(struct lan743x_rx *rx, int index)
{
	struct lan743x_rx_buffer_info *buffer_info;
	struct lan743x_rx_descriptor *descriptor;

	descriptor = &rx->ring_cpu_ptr[index];
	buffer_info = &rx->buffer_info[index];

	descriptor->data1 = cpu_to_le32(DMA_ADDR_LOW32(buffer_info->dma_ptr));
	descriptor->data2 = cpu_to_le32(DMA_ADDR_HIGH32(buffer_info->dma_ptr));
	descriptor->data3 = 0;
	descriptor->data0 = cpu_to_le32((RX_DESC_DATA0_OWN_ |
			    ((buffer_info->buffer_length) &
			    RX_DESC_DATA0_BUF_LENGTH_MASK_)));
	lan743x_rx_update_tail(rx, index);
}

/* Release ring slot @index completely: clear the descriptor, unmap the DMA
 * buffer and free the skb.
 */
static void lan743x_rx_release_ring_element(struct lan743x_rx *rx, int index)
{
	struct lan743x_rx_buffer_info
*buffer_info; 2524 struct lan743x_rx_descriptor *descriptor; 2525 2526 descriptor = &rx->ring_cpu_ptr[index]; 2527 buffer_info = &rx->buffer_info[index]; 2528 2529 memset(descriptor, 0, sizeof(*descriptor)); 2530 2531 if (buffer_info->dma_ptr) { 2532 dma_unmap_single(&rx->adapter->pdev->dev, 2533 buffer_info->dma_ptr, 2534 buffer_info->buffer_length, 2535 DMA_FROM_DEVICE); 2536 buffer_info->dma_ptr = 0; 2537 } 2538 2539 if (buffer_info->skb) { 2540 dev_kfree_skb(buffer_info->skb); 2541 buffer_info->skb = NULL; 2542 } 2543 2544 memset(buffer_info, 0, sizeof(*buffer_info)); 2545 } 2546 2547 static struct sk_buff * 2548 lan743x_rx_trim_skb(struct sk_buff *skb, int frame_length) 2549 { 2550 if (skb_linearize(skb)) { 2551 dev_kfree_skb_irq(skb); 2552 return NULL; 2553 } 2554 frame_length = max_t(int, 0, frame_length - ETH_FCS_LEN); 2555 if (skb->len > frame_length) { 2556 skb->tail -= skb->len - frame_length; 2557 skb->len = frame_length; 2558 } 2559 return skb; 2560 } 2561 2562 static int lan743x_rx_process_buffer(struct lan743x_rx *rx) 2563 { 2564 int current_head_index = le32_to_cpu(*rx->head_cpu_ptr); 2565 struct lan743x_rx_descriptor *descriptor, *desc_ext; 2566 struct net_device *netdev = rx->adapter->netdev; 2567 int result = RX_PROCESS_RESULT_NOTHING_TO_DO; 2568 struct lan743x_rx_buffer_info *buffer_info; 2569 int frame_length, buffer_length; 2570 bool is_ice, is_tce, is_icsm; 2571 int extension_index = -1; 2572 bool is_last, is_first; 2573 struct sk_buff *skb; 2574 2575 if (current_head_index < 0 || current_head_index >= rx->ring_size) 2576 goto done; 2577 2578 if (rx->last_head < 0 || rx->last_head >= rx->ring_size) 2579 goto done; 2580 2581 if (rx->last_head == current_head_index) 2582 goto done; 2583 2584 descriptor = &rx->ring_cpu_ptr[rx->last_head]; 2585 if (le32_to_cpu(descriptor->data0) & RX_DESC_DATA0_OWN_) 2586 goto done; 2587 buffer_info = &rx->buffer_info[rx->last_head]; 2588 2589 is_last = le32_to_cpu(descriptor->data0) & RX_DESC_DATA0_LS_; 2590 
is_first = le32_to_cpu(descriptor->data0) & RX_DESC_DATA0_FS_; 2591 2592 if (is_last && le32_to_cpu(descriptor->data0) & RX_DESC_DATA0_EXT_) { 2593 /* extension is expected to follow */ 2594 int index = lan743x_rx_next_index(rx, rx->last_head); 2595 2596 if (index == current_head_index) 2597 /* extension not yet available */ 2598 goto done; 2599 desc_ext = &rx->ring_cpu_ptr[index]; 2600 if (le32_to_cpu(desc_ext->data0) & RX_DESC_DATA0_OWN_) 2601 /* extension not yet available */ 2602 goto done; 2603 if (!(le32_to_cpu(desc_ext->data0) & RX_DESC_DATA0_EXT_)) 2604 goto move_forward; 2605 extension_index = index; 2606 } 2607 2608 /* Only the last buffer in a multi-buffer frame contains the total frame 2609 * length. The chip occasionally sends more buffers than strictly 2610 * required to reach the total frame length. 2611 * Handle this by adding all buffers to the skb in their entirety. 2612 * Once the real frame length is known, trim the skb. 2613 */ 2614 frame_length = 2615 RX_DESC_DATA0_FRAME_LENGTH_GET_(le32_to_cpu(descriptor->data0)); 2616 buffer_length = buffer_info->buffer_length; 2617 is_ice = le32_to_cpu(descriptor->data1) & RX_DESC_DATA1_STATUS_ICE_; 2618 is_tce = le32_to_cpu(descriptor->data1) & RX_DESC_DATA1_STATUS_TCE_; 2619 is_icsm = le32_to_cpu(descriptor->data1) & RX_DESC_DATA1_STATUS_ICSM_; 2620 2621 netdev_dbg(netdev, "%s%schunk: %d/%d", 2622 is_first ? "first " : " ", 2623 is_last ? "last " : " ", 2624 frame_length, buffer_length); 2625 2626 /* save existing skb, allocate new skb and map to dma */ 2627 skb = buffer_info->skb; 2628 if (lan743x_rx_init_ring_element(rx, rx->last_head, 2629 GFP_ATOMIC | GFP_DMA)) { 2630 /* failed to allocate next skb. 2631 * Memory is very low. 2632 * Drop this packet and reuse buffer. 
2633 */ 2634 lan743x_rx_reuse_ring_element(rx, rx->last_head); 2635 /* drop packet that was being assembled */ 2636 dev_kfree_skb_irq(rx->skb_head); 2637 rx->skb_head = NULL; 2638 goto process_extension; 2639 } 2640 2641 /* add buffers to skb via skb->frag_list */ 2642 if (is_first) { 2643 skb_reserve(skb, RX_HEAD_PADDING); 2644 skb_put(skb, buffer_length - RX_HEAD_PADDING); 2645 if (rx->skb_head) 2646 dev_kfree_skb_irq(rx->skb_head); 2647 rx->skb_head = skb; 2648 } else if (rx->skb_head) { 2649 skb_put(skb, buffer_length); 2650 if (skb_shinfo(rx->skb_head)->frag_list) 2651 rx->skb_tail->next = skb; 2652 else 2653 skb_shinfo(rx->skb_head)->frag_list = skb; 2654 rx->skb_tail = skb; 2655 rx->skb_head->len += skb->len; 2656 rx->skb_head->data_len += skb->len; 2657 rx->skb_head->truesize += skb->truesize; 2658 } else { 2659 /* packet to assemble has already been dropped because one or 2660 * more of its buffers could not be allocated 2661 */ 2662 netdev_dbg(netdev, "drop buffer intended for dropped packet"); 2663 dev_kfree_skb_irq(skb); 2664 } 2665 2666 process_extension: 2667 if (extension_index >= 0) { 2668 u32 ts_sec; 2669 u32 ts_nsec; 2670 2671 ts_sec = le32_to_cpu(desc_ext->data1); 2672 ts_nsec = (le32_to_cpu(desc_ext->data2) & 2673 RX_DESC_DATA2_TS_NS_MASK_); 2674 if (rx->skb_head) 2675 skb_hwtstamps(rx->skb_head)->hwtstamp = 2676 ktime_set(ts_sec, ts_nsec); 2677 lan743x_rx_reuse_ring_element(rx, extension_index); 2678 rx->last_head = extension_index; 2679 netdev_dbg(netdev, "process extension"); 2680 } 2681 2682 if (is_last && rx->skb_head) 2683 rx->skb_head = lan743x_rx_trim_skb(rx->skb_head, frame_length); 2684 2685 if (is_last && rx->skb_head) { 2686 rx->skb_head->protocol = eth_type_trans(rx->skb_head, 2687 rx->adapter->netdev); 2688 if (rx->adapter->netdev->features & NETIF_F_RXCSUM) { 2689 if (!is_ice && !is_tce && !is_icsm) 2690 skb->ip_summed = CHECKSUM_UNNECESSARY; 2691 } 2692 netdev_dbg(netdev, "sending %d byte frame to OS", 2693 rx->skb_head->len); 
2694 napi_gro_receive(&rx->napi, rx->skb_head); 2695 rx->skb_head = NULL; 2696 } 2697 2698 move_forward: 2699 /* push tail and head forward */ 2700 rx->last_tail = rx->last_head; 2701 rx->last_head = lan743x_rx_next_index(rx, rx->last_head); 2702 result = RX_PROCESS_RESULT_BUFFER_RECEIVED; 2703 done: 2704 return result; 2705 } 2706 2707 static int lan743x_rx_napi_poll(struct napi_struct *napi, int weight) 2708 { 2709 struct lan743x_rx *rx = container_of(napi, struct lan743x_rx, napi); 2710 struct lan743x_adapter *adapter = rx->adapter; 2711 int result = RX_PROCESS_RESULT_NOTHING_TO_DO; 2712 u32 rx_tail_flags = 0; 2713 int count; 2714 2715 if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C) { 2716 /* clear int status bit before reading packet */ 2717 lan743x_csr_write(adapter, DMAC_INT_STS, 2718 DMAC_INT_BIT_RXFRM_(rx->channel_number)); 2719 } 2720 for (count = 0; count < weight; count++) { 2721 result = lan743x_rx_process_buffer(rx); 2722 if (result == RX_PROCESS_RESULT_NOTHING_TO_DO) 2723 break; 2724 } 2725 rx->frame_count += count; 2726 if (count == weight || result == RX_PROCESS_RESULT_BUFFER_RECEIVED) 2727 return weight; 2728 2729 if (!napi_complete_done(napi, count)) 2730 return count; 2731 2732 /* re-arm interrupts, must write to rx tail on some chip variants */ 2733 if (rx->vector_flags & LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET) 2734 rx_tail_flags |= RX_TAIL_SET_TOP_INT_VEC_EN_; 2735 if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET) { 2736 rx_tail_flags |= RX_TAIL_SET_TOP_INT_EN_; 2737 } else { 2738 lan743x_csr_write(adapter, INT_EN_SET, 2739 INT_BIT_DMA_RX_(rx->channel_number)); 2740 } 2741 2742 if (rx_tail_flags) 2743 lan743x_csr_write(adapter, RX_TAIL(rx->channel_number), 2744 rx_tail_flags | rx->last_tail); 2745 2746 return count; 2747 } 2748 2749 static void lan743x_rx_ring_cleanup(struct lan743x_rx *rx) 2750 { 2751 if (rx->buffer_info && rx->ring_cpu_ptr) { 2752 int index; 2753 2754 for (index = 0; index < rx->ring_size; 
index++)
			lan743x_rx_release_ring_element(rx, index);
	}

	if (rx->head_cpu_ptr) {
		dma_free_coherent(&rx->adapter->pdev->dev,
				  sizeof(*rx->head_cpu_ptr), rx->head_cpu_ptr,
				  rx->head_dma_ptr);
		rx->head_cpu_ptr = NULL;
		rx->head_dma_ptr = 0;
	}

	kfree(rx->buffer_info);
	rx->buffer_info = NULL;

	if (rx->ring_cpu_ptr) {
		dma_free_coherent(&rx->adapter->pdev->dev,
				  rx->ring_allocation_size, rx->ring_cpu_ptr,
				  rx->ring_dma_ptr);
		rx->ring_allocation_size = 0;
		rx->ring_cpu_ptr = NULL;
		rx->ring_dma_ptr = 0;
	}

	rx->ring_size = 0;
	rx->last_head = 0;
}

/* Allocate the RX descriptor ring, buffer_info array and head-writeback
 * slot, then populate every slot with a mapped skb. Returns 0 or a negative
 * errno; any partial allocation is released via lan743x_rx_ring_cleanup().
 */
static int lan743x_rx_ring_init(struct lan743x_rx *rx)
{
	size_t ring_allocation_size = 0;
	dma_addr_t dma_ptr = 0;
	void *cpu_ptr = NULL;
	int ret = -ENOMEM;
	int index = 0;

	rx->ring_size = LAN743X_RX_RING_SIZE;
	/* need at least 2 slots so head can trail tail */
	if (rx->ring_size <= 1) {
		ret = -EINVAL;
		goto cleanup;
	}
	/* ring length must fit in the RX_CFG_B length field */
	if (rx->ring_size & ~RX_CFG_B_RX_RING_LEN_MASK_) {
		ret = -EINVAL;
		goto cleanup;
	}
	if (dma_set_mask_and_coherent(&rx->adapter->pdev->dev,
				      DMA_BIT_MASK(64))) {
		dev_warn(&rx->adapter->pdev->dev,
			 "lan743x_: No suitable DMA available\n");
		ret = -ENOMEM;
		goto cleanup;
	}
	ring_allocation_size = ALIGN(rx->ring_size *
				     sizeof(struct lan743x_rx_descriptor),
				     PAGE_SIZE);
	dma_ptr = 0;
	cpu_ptr = dma_alloc_coherent(&rx->adapter->pdev->dev,
				     ring_allocation_size, &dma_ptr, GFP_KERNEL);
	if (!cpu_ptr) {
		ret = -ENOMEM;
		goto cleanup;
	}
	rx->ring_allocation_size = ring_allocation_size;
	rx->ring_cpu_ptr = (struct lan743x_rx_descriptor *)cpu_ptr;
	rx->ring_dma_ptr = dma_ptr;

	cpu_ptr = kcalloc(rx->ring_size, sizeof(*rx->buffer_info),
			  GFP_KERNEL);
	if (!cpu_ptr) {
		ret = -ENOMEM;
		goto cleanup;
	}
	rx->buffer_info = (struct lan743x_rx_buffer_info *)cpu_ptr;
	dma_ptr = 0;
	/* single-element coherent buffer the chip writes the head index to */
	cpu_ptr = dma_alloc_coherent(&rx->adapter->pdev->dev,
				     sizeof(*rx->head_cpu_ptr), &dma_ptr,
				     GFP_KERNEL);
	if (!cpu_ptr) {
		ret = -ENOMEM;
		goto cleanup;
	}

	rx->head_cpu_ptr = cpu_ptr;
	rx->head_dma_ptr = dma_ptr;
	/* head writeback address must be 4-byte aligned for the hardware */
	if (rx->head_dma_ptr & 0x3) {
		ret = -ENOMEM;
		goto cleanup;
	}

	rx->last_head = 0;
	for (index = 0; index < rx->ring_size; index++) {
		ret = lan743x_rx_init_ring_element(rx, index, GFP_KERNEL);
		if (ret)
			goto cleanup;
	}
	return 0;

cleanup:
	netif_warn(rx->adapter, ifup, rx->adapter->netdev,
		   "Error allocating memory for LAN743x\n");

	lan743x_rx_ring_cleanup(rx);
	return ret;
}

/* Stop and tear down an RX channel: disable the RX FIFO, halt the DMAC
 * channel, mask its interrupts, disable NAPI, then free the ring.
 * Order mirrors the reverse of lan743x_rx_open().
 */
static void lan743x_rx_close(struct lan743x_rx *rx)
{
	struct lan743x_adapter *adapter = rx->adapter;

	lan743x_csr_write(adapter, FCT_RX_CTL,
			  FCT_RX_CTL_DIS_(rx->channel_number));
	lan743x_csr_wait_for_bit(adapter, FCT_RX_CTL,
				 FCT_RX_CTL_EN_(rx->channel_number),
				 0, 1000, 20000, 100);

	lan743x_csr_write(adapter, DMAC_CMD,
			  DMAC_CMD_STOP_R_(rx->channel_number));
	lan743x_dmac_rx_wait_till_stopped(adapter, rx->channel_number);

	lan743x_csr_write(adapter, DMAC_INT_EN_CLR,
			  DMAC_INT_BIT_RXFRM_(rx->channel_number));
	lan743x_csr_write(adapter, INT_EN_CLR,
			  INT_BIT_DMA_RX_(rx->channel_number));
	napi_disable(&rx->napi);

	netif_napi_del(&rx->napi);

	lan743x_rx_ring_cleanup(rx);
}

/* Bring up an RX channel: allocate and fill the ring, soft-reset the DMAC
 * channel, program ring/writeback addresses and RX_CFG_* registers, enable
 * NAPI and interrupts, start the channel and finally the RX FIFO with flow
 * control thresholds. Returns 0 or a negative errno.
 */
static int lan743x_rx_open(struct lan743x_rx *rx)
{
	struct lan743x_adapter *adapter = rx->adapter;
	u32 data = 0;
	int ret;

	rx->frame_count = 0;
	ret = lan743x_rx_ring_init(rx);
	if (ret)
		goto return_error;

	netif_napi_add(adapter->netdev, &rx->napi, lan743x_rx_napi_poll);

	lan743x_csr_write(adapter, DMAC_CMD,
			  DMAC_CMD_RX_SWR_(rx->channel_number));
	lan743x_csr_wait_for_bit(adapter,
DMAC_CMD,
				 DMAC_CMD_RX_SWR_(rx->channel_number),
				 0, 1000, 20000, 100);

	/* set ring base address */
	lan743x_csr_write(adapter,
			  RX_BASE_ADDRH(rx->channel_number),
			  DMA_ADDR_HIGH32(rx->ring_dma_ptr));
	lan743x_csr_write(adapter,
			  RX_BASE_ADDRL(rx->channel_number),
			  DMA_ADDR_LOW32(rx->ring_dma_ptr));

	/* set rx write back address */
	lan743x_csr_write(adapter,
			  RX_HEAD_WRITEBACK_ADDRH(rx->channel_number),
			  DMA_ADDR_HIGH32(rx->head_dma_ptr));
	lan743x_csr_write(adapter,
			  RX_HEAD_WRITEBACK_ADDRL(rx->channel_number),
			  DMA_ADDR_LOW32(rx->head_dma_ptr));
	data = RX_CFG_A_RX_HP_WB_EN_;
	if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0)) {
		/* post-A0 silicon: extra prefetch/writeback thresholds */
		data |= (RX_CFG_A_RX_WB_ON_INT_TMR_ |
			 RX_CFG_A_RX_WB_THRES_SET_(0x7) |
			 RX_CFG_A_RX_PF_THRES_SET_(16) |
			 RX_CFG_A_RX_PF_PRI_THRES_SET_(4));
	}

	/* set RX_CFG_A */
	lan743x_csr_write(adapter,
			  RX_CFG_A(rx->channel_number), data);

	/* set RX_CFG_B */
	data = lan743x_csr_read(adapter, RX_CFG_B(rx->channel_number));
	data &= ~RX_CFG_B_RX_PAD_MASK_;
	if (!RX_HEAD_PADDING)
		data |= RX_CFG_B_RX_PAD_0_;
	else
		data |= RX_CFG_B_RX_PAD_2_;
	data &= ~RX_CFG_B_RX_RING_LEN_MASK_;
	data |= ((rx->ring_size) & RX_CFG_B_RX_RING_LEN_MASK_);
	/* timestamp all received frames */
	data |= RX_CFG_B_TS_ALL_RX_;
	if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0))
		data |= RX_CFG_B_RDMABL_512_;

	lan743x_csr_write(adapter, RX_CFG_B(rx->channel_number), data);
	rx->vector_flags = lan743x_intr_get_vector_flags(adapter,
							 INT_BIT_DMA_RX_
							 (rx->channel_number));

	/* set RX_CFG_C — interrupt auto-clear/R2C behavior per vector flags */
	data = 0;
	if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_CLEAR)
		data |= RX_CFG_C_RX_TOP_INT_EN_AUTO_CLR_;
	if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_AUTO_CLEAR)
		data |= RX_CFG_C_RX_DMA_INT_STS_AUTO_CLR_;
	if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_R2C)
		data |= RX_CFG_C_RX_INT_STS_R2C_MODE_MASK_;
	if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_R2C)
		data |= RX_CFG_C_RX_INT_EN_R2C_;
	lan743x_csr_write(adapter, RX_CFG_C(rx->channel_number), data);

	/* tail starts one slot behind head: the whole ring is hardware-owned */
	rx->last_tail = ((u32)(rx->ring_size - 1));
	lan743x_csr_write(adapter, RX_TAIL(rx->channel_number),
			  rx->last_tail);
	rx->last_head = lan743x_csr_read(adapter, RX_HEAD(rx->channel_number));
	/* hardware head must be 0 after the channel soft reset */
	if (rx->last_head) {
		ret = -EIO;
		goto napi_delete;
	}

	napi_enable(&rx->napi);

	lan743x_csr_write(adapter, INT_EN_SET,
			  INT_BIT_DMA_RX_(rx->channel_number));
	lan743x_csr_write(adapter, DMAC_INT_STS,
			  DMAC_INT_BIT_RXFRM_(rx->channel_number));
	lan743x_csr_write(adapter, DMAC_INT_EN_SET,
			  DMAC_INT_BIT_RXFRM_(rx->channel_number));
	lan743x_csr_write(adapter, DMAC_CMD,
			  DMAC_CMD_START_R_(rx->channel_number));

	/* initialize fifo */
	lan743x_csr_write(adapter, FCT_RX_CTL,
			  FCT_RX_CTL_RESET_(rx->channel_number));
	lan743x_csr_wait_for_bit(adapter, FCT_RX_CTL,
				 FCT_RX_CTL_RESET_(rx->channel_number),
				 0, 1000, 20000, 100);
	lan743x_csr_write(adapter, FCT_FLOW(rx->channel_number),
			  FCT_FLOW_CTL_REQ_EN_ |
			  FCT_FLOW_CTL_ON_THRESHOLD_SET_(0x2A) |
			  FCT_FLOW_CTL_OFF_THRESHOLD_SET_(0xA));

	/* enable fifo */
	lan743x_csr_write(adapter, FCT_RX_CTL,
			  FCT_RX_CTL_EN_(rx->channel_number));
	return 0;

napi_delete:
	netif_napi_del(&rx->napi);
	lan743x_rx_ring_cleanup(rx);

return_error:
	return ret;
}

/* ndo_stop: shut down all TX and RX channels, then PTP, PHY, MAC and
 * interrupts — the reverse order of lan743x_netdev_open().
 */
static int lan743x_netdev_close(struct net_device *netdev)
{
	struct lan743x_adapter *adapter = netdev_priv(netdev);
	int index;

	for (index = 0; index < adapter->used_tx_channels; index++)
		lan743x_tx_close(&adapter->tx[index]);

	for (index = 0; index < LAN743X_USED_RX_CHANNELS; index++)
		lan743x_rx_close(&adapter->rx[index]);
	lan743x_ptp_close(adapter);

	lan743x_phy_close(adapter);

	lan743x_mac_close(adapter);

	lan743x_intr_close(adapter);

	return 0;
}

/* ndo_open: bring up interrupts, MAC, PHY, PTP, the receive filter engine,
 * then every RX and TX channel. On failure, unwinds in reverse order;
 * the ring_cpu_ptr checks skip channels that were never opened.
 */
static int lan743x_netdev_open(struct net_device *netdev)
{
	struct lan743x_adapter *adapter = netdev_priv(netdev);
	int index;
	int ret;

	ret = lan743x_intr_open(adapter);
	if (ret)
		goto return_error;

	ret = lan743x_mac_open(adapter);
	if (ret)
		goto close_intr;

	ret = lan743x_phy_open(adapter);
	if (ret)
		goto close_mac;

	ret = lan743x_ptp_open(adapter);
	if (ret)
		goto close_phy;

	lan743x_rfe_open(adapter);

	for (index = 0; index < LAN743X_USED_RX_CHANNELS; index++) {
		ret = lan743x_rx_open(&adapter->rx[index]);
		if (ret)
			goto close_rx;
	}

	for (index = 0; index < adapter->used_tx_channels; index++) {
		ret = lan743x_tx_open(&adapter->tx[index]);
		if (ret)
			goto close_tx;
	}
	return 0;

close_tx:
	for (index = 0; index < adapter->used_tx_channels; index++) {
		if (adapter->tx[index].ring_cpu_ptr)
			lan743x_tx_close(&adapter->tx[index]);
	}

close_rx:
	for (index = 0; index < LAN743X_USED_RX_CHANNELS; index++) {
		if (adapter->rx[index].ring_cpu_ptr)
			lan743x_rx_close(&adapter->rx[index]);
	}
	lan743x_ptp_close(adapter);

close_phy:
	lan743x_phy_close(adapter);

close_mac:
	lan743x_mac_close(adapter);

close_intr:
	lan743x_intr_close(adapter);

return_error:
	netif_warn(adapter, ifup, adapter->netdev,
		   "Error opening LAN743x\n");
	return ret;
}

/* ndo_start_xmit: pick the TX channel from the skb's queue mapping
 * (PCI11x1x parts have multiple TX channels; LAN743x uses channel 0)
 * and hand the frame to the channel's frame assembler.
 */
static netdev_tx_t lan743x_netdev_xmit_frame(struct sk_buff *skb,
					     struct net_device *netdev)
{
	struct lan743x_adapter *adapter = netdev_priv(netdev);
	u8 ch = 0;

	if (adapter->is_pci11x1x)
		ch = skb->queue_mapping % PCI11X1X_USED_TX_CHANNELS;

	return lan743x_tx_xmit_frame(&adapter->tx[ch], skb);
}

/* ndo_eth_ioctl: route hardware timestamp config to the PTP code, everything
 * else to the attached PHY. Requires the interface to be running.
 */
static int lan743x_netdev_ioctl(struct net_device *netdev,
				struct ifreq *ifr, int cmd)
{
	if (!netif_running(netdev))
		return -EINVAL;
	if (cmd == SIOCSHWTSTAMP)
		return lan743x_ptp_ioctl(netdev, ifr, cmd);
	return phy_mii_ioctl(netdev->phydev, ifr, cmd);
}

/* ndo_set_rx_mode: reprogram the receive filter engine for the current
 * multicast/promiscuous configuration.
 */
static void lan743x_netdev_set_multicast(struct net_device *netdev)
{
	struct lan743x_adapter *adapter = netdev_priv(netdev);

	lan743x_rfe_set_multicast(adapter);
}

/* ndo_change_mtu: program the MAC first, update netdev->mtu only if the
 * hardware accepted the new size.
 */
static int lan743x_netdev_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct lan743x_adapter *adapter = netdev_priv(netdev);
	int ret = 0;

	ret = lan743x_mac_set_mtu(adapter, new_mtu);
	if (!ret)
		netdev->mtu = new_mtu;
	return ret;
}

/* ndo_get_stats64: fill @stats from the chip's hardware statistics
 * counters. NOTE(review): the STAT_* registers appear to be read directly;
 * whether they are clear-on-read (making these cumulative vs. incremental)
 * is a hardware property not visible here — confirm against the datasheet.
 */
static void lan743x_netdev_get_stats64(struct net_device *netdev,
				       struct rtnl_link_stats64 *stats)
{
	struct lan743x_adapter *adapter = netdev_priv(netdev);

	stats->rx_packets = lan743x_csr_read(adapter, STAT_RX_TOTAL_FRAMES);
	stats->tx_packets = lan743x_csr_read(adapter, STAT_TX_TOTAL_FRAMES);
	stats->rx_bytes = lan743x_csr_read(adapter,
					   STAT_RX_UNICAST_BYTE_COUNT) +
			  lan743x_csr_read(adapter,
					   STAT_RX_BROADCAST_BYTE_COUNT) +
			  lan743x_csr_read(adapter,
					   STAT_RX_MULTICAST_BYTE_COUNT);
	stats->tx_bytes = lan743x_csr_read(adapter,
					   STAT_TX_UNICAST_BYTE_COUNT) +
			  lan743x_csr_read(adapter,
					   STAT_TX_BROADCAST_BYTE_COUNT) +
			  lan743x_csr_read(adapter,
					   STAT_TX_MULTICAST_BYTE_COUNT);
	stats->rx_errors = lan743x_csr_read(adapter, STAT_RX_FCS_ERRORS) +
			   lan743x_csr_read(adapter,
					    STAT_RX_ALIGNMENT_ERRORS) +
			   lan743x_csr_read(adapter, STAT_RX_JABBER_ERRORS) +
			   lan743x_csr_read(adapter,
					    STAT_RX_UNDERSIZE_FRAME_ERRORS) +
			   lan743x_csr_read(adapter,
					    STAT_RX_OVERSIZE_FRAME_ERRORS);
	stats->tx_errors =
lan743x_csr_read(adapter, STAT_TX_FCS_ERRORS) +
			   lan743x_csr_read(adapter,
					    STAT_TX_EXCESS_DEFERRAL_ERRORS) +
			   lan743x_csr_read(adapter, STAT_TX_CARRIER_ERRORS);
	stats->rx_dropped = lan743x_csr_read(adapter,
					     STAT_RX_DROPPED_FRAMES);
	stats->tx_dropped = lan743x_csr_read(adapter,
					     STAT_TX_EXCESSIVE_COLLISION);
	stats->multicast = lan743x_csr_read(adapter,
					    STAT_RX_MULTICAST_FRAMES) +
			   lan743x_csr_read(adapter,
					    STAT_TX_MULTICAST_FRAMES);
	stats->collisions = lan743x_csr_read(adapter,
					     STAT_TX_SINGLE_COLLISIONS) +
			    lan743x_csr_read(adapter,
					     STAT_TX_MULTIPLE_COLLISIONS) +
			    lan743x_csr_read(adapter,
					     STAT_TX_LATE_COLLISIONS);
}

/* ndo_set_mac_address: validate the address, then program the MAC and
 * update the receive filter engine's perfect-match entry.
 */
static int lan743x_netdev_set_mac_address(struct net_device *netdev,
					  void *addr)
{
	struct lan743x_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *sock_addr = addr;
	int ret;

	ret = eth_prepare_mac_addr_change(netdev, sock_addr);
	if (ret)
		return ret;
	eth_hw_addr_set(netdev, sock_addr->sa_data);
	lan743x_mac_set_address(adapter, sock_addr->sa_data);
	lan743x_rfe_update_mac_address(adapter);
	return 0;
}

static const struct net_device_ops lan743x_netdev_ops = {
	.ndo_open		= lan743x_netdev_open,
	.ndo_stop		= lan743x_netdev_close,
	.ndo_start_xmit		= lan743x_netdev_xmit_frame,
	.ndo_eth_ioctl		= lan743x_netdev_ioctl,
	.ndo_set_rx_mode	= lan743x_netdev_set_multicast,
	.ndo_change_mtu		= lan743x_netdev_change_mtu,
	.ndo_get_stats64	= lan743x_netdev_get_stats64,
	.ndo_set_mac_address	= lan743x_netdev_set_mac_address,
};

/* Quiesce the hardware by masking every interrupt source. */
static void lan743x_hardware_cleanup(struct lan743x_adapter *adapter)
{
	lan743x_csr_write(adapter, INT_EN_CLR, 0xFFFFFFFF);
}

static void lan743x_mdiobus_cleanup(struct lan743x_adapter *adapter)
{
	mdiobus_unregister(adapter->mdiobus);
}

/* Full teardown for remove/probe-failure: unregister the netdev, then the
 * MDIO bus, quiesce hardware, and release PCI resources.
 */
static void lan743x_full_cleanup(struct lan743x_adapter *adapter)
{
	unregister_netdev(adapter->netdev);

	lan743x_mdiobus_cleanup(adapter);
	lan743x_hardware_cleanup(adapter);
	lan743x_pci_cleanup(adapter);
}

/* One-time hardware bring-up during probe: detect chip family (PCI11x1x vs
 * LAN743x) and size channel/vector counts accordingly, mask interrupts,
 * then initialize GPIO, MAC, PHY, PTP and DMAC blocks, and seed the per-
 * channel rx/tx bookkeeping structures. Returns 0 or the first failing
 * sub-init's errno (no unwind here; caller cleans up).
 */
static int lan743x_hardware_init(struct lan743x_adapter *adapter,
				 struct pci_dev *pdev)
{
	struct lan743x_tx *tx;
	int index;
	int ret;

	adapter->is_pci11x1x = is_pci11x1x_chip(adapter);
	if (adapter->is_pci11x1x) {
		adapter->max_tx_channels = PCI11X1X_MAX_TX_CHANNELS;
		adapter->used_tx_channels = PCI11X1X_USED_TX_CHANNELS;
		adapter->max_vector_count = PCI11X1X_MAX_VECTOR_COUNT;
		pci11x1x_strap_get_status(adapter);
		spin_lock_init(&adapter->eth_syslock_spinlock);
		mutex_init(&adapter->sgmii_rw_lock);
	} else {
		adapter->max_tx_channels = LAN743X_MAX_TX_CHANNELS;
		adapter->used_tx_channels = LAN743X_USED_TX_CHANNELS;
		adapter->max_vector_count = LAN743X_MAX_VECTOR_COUNT;
	}

	adapter->intr.irq = adapter->pdev->irq;
	/* mask all interrupts until the channels are opened */
	lan743x_csr_write(adapter, INT_EN_CLR, 0xFFFFFFFF);

	ret = lan743x_gpio_init(adapter);
	if (ret)
		return ret;

	ret = lan743x_mac_init(adapter);
	if (ret)
		return ret;

	ret = lan743x_phy_init(adapter);
	if (ret)
		return ret;

	ret = lan743x_ptp_init(adapter);
	if (ret)
		return ret;

	lan743x_rfe_update_mac_address(adapter);

	ret = lan743x_dmac_init(adapter);
	if (ret)
		return ret;

	for (index = 0; index < LAN743X_USED_RX_CHANNELS; index++) {
		adapter->rx[index].adapter = adapter;
		adapter->rx[index].channel_number = index;
	}

	for (index = 0; index < adapter->used_tx_channels; index++) {
		tx = &adapter->tx[index];
		tx->adapter = adapter;
		tx->channel_number = index;
		spin_lock_init(&tx->ring_lock);
	}

	return 0;
}

/* Allocate and register the MDIO bus; for PCI11x1x parts also configure the
 * SGMII block and select C45-capable accessors when SGMII is strapped on.
 */
static int lan743x_mdiobus_init(struct lan743x_adapter *adapter)
{
	u32
sgmii_ctl;
	int ret;

	adapter->mdiobus = devm_mdiobus_alloc(&adapter->pdev->dev);
	if (!(adapter->mdiobus)) {
		ret = -ENOMEM;
		goto return_error;
	}

	adapter->mdiobus->priv = (void *)adapter;
	if (adapter->is_pci11x1x) {
		if (adapter->is_sgmii_en) {
			/* SGMII mode: enable and power up the SGMII block;
			 * both C22 and C45 accessors are provided.
			 */
			sgmii_ctl = lan743x_csr_read(adapter, SGMII_CTL);
			sgmii_ctl |= SGMII_CTL_SGMII_ENABLE_;
			sgmii_ctl &= ~SGMII_CTL_SGMII_POWER_DN_;
			lan743x_csr_write(adapter, SGMII_CTL, sgmii_ctl);
			netif_dbg(adapter, drv, adapter->netdev,
				  "SGMII operation\n");
			adapter->mdiobus->read = lan743x_mdiobus_read_c22;
			adapter->mdiobus->write = lan743x_mdiobus_write_c22;
			adapter->mdiobus->read_c45 = lan743x_mdiobus_read_c45;
			adapter->mdiobus->write_c45 = lan743x_mdiobus_write_c45;
			adapter->mdiobus->name = "lan743x-mdiobus-c45";
			netif_dbg(adapter, drv, adapter->netdev,
				  "lan743x-mdiobus-c45\n");
		} else {
			/* RGMII mode: disable and power down the unused
			 * SGMII block.
			 */
			sgmii_ctl = lan743x_csr_read(adapter, SGMII_CTL);
			sgmii_ctl &= ~SGMII_CTL_SGMII_ENABLE_;
			sgmii_ctl |= SGMII_CTL_SGMII_POWER_DN_;
			lan743x_csr_write(adapter, SGMII_CTL, sgmii_ctl);
			netif_dbg(adapter, drv, adapter->netdev,
				  "RGMII operation\n");
			/* Only C22 support when RGMII I/F */
			adapter->mdiobus->read = lan743x_mdiobus_read_c22;
			adapter->mdiobus->write = lan743x_mdiobus_write_c22;
			adapter->mdiobus->name = "lan743x-mdiobus";
			netif_dbg(adapter, drv, adapter->netdev,
				  "lan743x-mdiobus\n");
		}
	} else {
		adapter->mdiobus->read = lan743x_mdiobus_read_c22;
		adapter->mdiobus->write = lan743x_mdiobus_write_c22;
		adapter->mdiobus->name = "lan743x-mdiobus";
		netif_dbg(adapter, drv, adapter->netdev, "lan743x-mdiobus\n");
	}

	snprintf(adapter->mdiobus->id, MII_BUS_ID_SIZE,
		 "pci-%s", pci_name(adapter->pdev));

	if ((adapter->csr.id_rev & ID_REV_ID_MASK_) == ID_REV_ID_LAN7430_)
		/* LAN7430 uses internal phy at address 1 */
adapter->mdiobus->phy_mask = ~(u32)BIT(1);

	/* register mdiobus */
	ret = mdiobus_register(adapter->mdiobus);
	if (ret < 0)
		goto return_error;
	return 0;

return_error:
	return ret;
}

/**
 * lan743x_pcidev_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @id: entry in lan743x_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int lan743x_pcidev_probe(struct pci_dev *pdev,
				const struct pci_device_id *id)
{
	struct lan743x_adapter *adapter = NULL;
	struct net_device *netdev = NULL;
	int ret = -ENODEV;

	/* PCI11x1x parts use a different TX channel count */
	if (id->device == PCI_DEVICE_ID_SMSC_A011 ||
	    id->device == PCI_DEVICE_ID_SMSC_A041) {
		netdev = devm_alloc_etherdev_mqs(&pdev->dev,
						 sizeof(struct lan743x_adapter),
						 PCI11X1X_USED_TX_CHANNELS,
						 LAN743X_USED_RX_CHANNELS);
	} else {
		netdev = devm_alloc_etherdev_mqs(&pdev->dev,
						 sizeof(struct lan743x_adapter),
						 LAN743X_USED_TX_CHANNELS,
						 LAN743X_USED_RX_CHANNELS);
	}

	if (!netdev)
		goto return_error;

	SET_NETDEV_DEV(netdev, &pdev->dev);
	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE |
			      NETIF_MSG_LINK | NETIF_MSG_IFUP |
			      NETIF_MSG_IFDOWN | NETIF_MSG_TX_QUEUED;
	netdev->max_mtu = LAN743X_MAX_FRAME_SIZE;

	/* best effort; the MAC init path has its own fallback if the
	 * device tree provides no address
	 */
	of_get_mac_address(pdev->dev.of_node, adapter->mac_address);

	ret = lan743x_pci_init(adapter, pdev);
	if (ret)
		goto return_error;

	ret = lan743x_csr_init(adapter);
	if (ret)
		goto cleanup_pci;

	ret = lan743x_hardware_init(adapter, pdev);
	if (ret)
		goto cleanup_pci;

	ret =
lan743x_mdiobus_init(adapter);
	if (ret)
		goto cleanup_hardware;

	adapter->netdev->netdev_ops = &lan743x_netdev_ops;
	adapter->netdev->ethtool_ops = &lan743x_ethtool_ops;
	adapter->netdev->features = NETIF_F_SG | NETIF_F_TSO |
				    NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
	adapter->netdev->hw_features = adapter->netdev->features;

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

	ret = register_netdev(adapter->netdev);
	if (ret < 0)
		goto cleanup_mdiobus;
	return 0;

	/* unwind in reverse order of acquisition */
cleanup_mdiobus:
	lan743x_mdiobus_cleanup(adapter);

cleanup_hardware:
	lan743x_hardware_cleanup(adapter);

cleanup_pci:
	lan743x_pci_cleanup(adapter);

return_error:
	pr_warn("Initialization failed\n");
	return ret;
}

/**
 * lan743x_pcidev_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * this is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void lan743x_pcidev_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct lan743x_adapter *adapter = netdev_priv(netdev);

	lan743x_full_cleanup(adapter);
}

/* Quiesce the device for shutdown/kexec; also reused as the first step
 * of lan743x_pm_suspend().
 */
static void lan743x_pcidev_shutdown(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct lan743x_adapter *adapter = netdev_priv(netdev);

	rtnl_lock();
	netif_device_detach(netdev);

	/* close netdev when netdev is at running state.
	 * For instance, it is true when system goes to sleep by pm-suspend
	 * However, it is false when system goes to sleep by suspend GUI menu
	 */
	if (netif_running(netdev))
		lan743x_netdev_close(netdev);
	rtnl_unlock();

#ifdef CONFIG_PM
	pci_save_state(pdev);
#endif

	/* clean up lan743x portion */
	lan743x_hardware_cleanup(adapter);
}

#ifdef CONFIG_PM_SLEEP
/* CRC16 over a wakeup-frame prefix, bit-reversed to match the order of
 * the MAC_WUF_CFG CRC16 field the hardware compares against.
 */
static u16 lan743x_pm_wakeframe_crc16(const u8 *buf, int len)
{
	return bitrev16(crc16(0xFFFF, buf, len));
}

/* Program wake-on-LAN: translate the WAKE_* flags in adapter->wolopts
 * into wakeup-frame filters (MAC_WUF_*), MAC_WUCSR wake enables, and
 * the PMT_CTL clock/power overrides needed to keep RX alive in D3.
 */
static void lan743x_pm_set_wol(struct lan743x_adapter *adapter)
{
	const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
	const u8 ipv6_multicast[3] = { 0x33, 0x33 };
	const u8 arp_type[2] = { 0x08, 0x06 };
	int mask_index;
	u32 sopass;
	u32 pmtctl;
	u32 wucsr;
	u32 macrx;
	u16 crc;

	/* disable every wakeup-frame filter before reprogramming */
	for (mask_index = 0; mask_index < MAC_NUM_OF_WUF_CFG; mask_index++)
		lan743x_csr_write(adapter, MAC_WUF_CFG(mask_index), 0);

	/* clear wake settings */
	pmtctl = lan743x_csr_read(adapter, PMT_CTL);
	pmtctl |= PMT_CTL_WUPS_MASK_;
	pmtctl &= ~(PMT_CTL_GPIO_WAKEUP_EN_ | PMT_CTL_EEE_WAKEUP_EN_ |
		PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_ |
		PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_ | PMT_CTL_ETH_PHY_WAKE_EN_);

	macrx = lan743x_csr_read(adapter, MAC_RX);

	wucsr = 0;
	mask_index = 0;

	pmtctl |= PMT_CTL_ETH_PHY_D3_COLD_OVR_ | PMT_CTL_ETH_PHY_D3_OVR_;

	if (adapter->wolopts & WAKE_PHY) {
		pmtctl |= PMT_CTL_ETH_PHY_EDPD_PLL_CTL_;
		pmtctl |= PMT_CTL_ETH_PHY_WAKE_EN_;
	}
	if (adapter->wolopts & WAKE_MAGIC) {
		wucsr |= MAC_WUCSR_MPEN_;
		/* RX must stay enabled so frames are seen in D3 */
		macrx |= MAC_RX_RXEN_;
		pmtctl |= PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_;
	}
	if (adapter->wolopts & WAKE_UCAST) {
		wucsr |= MAC_WUCSR_RFE_WAKE_EN_ | MAC_WUCSR_PFDA_EN_;
		macrx |= MAC_RX_RXEN_;
		pmtctl |= PMT_CTL_WOL_EN_ |
PMT_CTL_MAC_D3_RX_CLK_OVR_;
		pmtctl |= PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_;
	}
	if (adapter->wolopts & WAKE_BCAST) {
		wucsr |= MAC_WUCSR_RFE_WAKE_EN_ | MAC_WUCSR_BCST_EN_;
		macrx |= MAC_RX_RXEN_;
		pmtctl |= PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_;
		pmtctl |= PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_;
	}
	if (adapter->wolopts & WAKE_MCAST) {
		/* IPv4 multicast: CRC over the 01:00:5E destination-MAC
		 * prefix; mask 0x7 selects the 3 CRC'd bytes.
		 */
		crc = lan743x_pm_wakeframe_crc16(ipv4_multicast, 3);
		lan743x_csr_write(adapter, MAC_WUF_CFG(mask_index),
				  MAC_WUF_CFG_EN_ | MAC_WUF_CFG_TYPE_MCAST_ |
				  (0 << MAC_WUF_CFG_OFFSET_SHIFT_) |
				  (crc & MAC_WUF_CFG_CRC16_MASK_));
		lan743x_csr_write(adapter, MAC_WUF_MASK0(mask_index), 7);
		lan743x_csr_write(adapter, MAC_WUF_MASK1(mask_index), 0);
		lan743x_csr_write(adapter, MAC_WUF_MASK2(mask_index), 0);
		lan743x_csr_write(adapter, MAC_WUF_MASK3(mask_index), 0);
		mask_index++;

		/* IPv6 multicast: CRC over the 33:33 destination-MAC
		 * prefix; mask 0x3 selects the 2 CRC'd bytes.
		 */
		crc = lan743x_pm_wakeframe_crc16(ipv6_multicast, 2);
		lan743x_csr_write(adapter, MAC_WUF_CFG(mask_index),
				  MAC_WUF_CFG_EN_ | MAC_WUF_CFG_TYPE_MCAST_ |
				  (0 << MAC_WUF_CFG_OFFSET_SHIFT_) |
				  (crc & MAC_WUF_CFG_CRC16_MASK_));
		lan743x_csr_write(adapter, MAC_WUF_MASK0(mask_index), 3);
		lan743x_csr_write(adapter, MAC_WUF_MASK1(mask_index), 0);
		lan743x_csr_write(adapter, MAC_WUF_MASK2(mask_index), 0);
		lan743x_csr_write(adapter, MAC_WUF_MASK3(mask_index), 0);
		mask_index++;

		wucsr |= MAC_WUCSR_RFE_WAKE_EN_ | MAC_WUCSR_WAKE_EN_;
		macrx |= MAC_RX_RXEN_;
		pmtctl |= PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_;
		pmtctl |= PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_;
	}
	if (adapter->wolopts & WAKE_ARP) {
		/* set MAC_WUF_CFG & WUF_MASK
		 * for packettype (offset 12,13) = ARP (0x0806)
		 */
		crc = lan743x_pm_wakeframe_crc16(arp_type, 2);
		lan743x_csr_write(adapter, MAC_WUF_CFG(mask_index),
				  MAC_WUF_CFG_EN_ | MAC_WUF_CFG_TYPE_ALL_ |
				  (0 << MAC_WUF_CFG_OFFSET_SHIFT_) |
(crc & MAC_WUF_CFG_CRC16_MASK_));
		/* mask 0x3000 selects bytes 12-13 (the EtherType field) */
		lan743x_csr_write(adapter, MAC_WUF_MASK0(mask_index), 0x3000);
		lan743x_csr_write(adapter, MAC_WUF_MASK1(mask_index), 0);
		lan743x_csr_write(adapter, MAC_WUF_MASK2(mask_index), 0);
		lan743x_csr_write(adapter, MAC_WUF_MASK3(mask_index), 0);
		mask_index++;

		wucsr |= MAC_WUCSR_RFE_WAKE_EN_ | MAC_WUCSR_WAKE_EN_;
		macrx |= MAC_RX_RXEN_;
		pmtctl |= PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_;
		pmtctl |= PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_;
	}

	if (adapter->wolopts & WAKE_MAGICSECURE) {
		/* load the 6-byte SecureOn password: low 4 bytes then
		 * high 2 bytes
		 */
		sopass = *(u32 *)adapter->sopass;
		lan743x_csr_write(adapter, MAC_MP_SO_LO, sopass);
		sopass = *(u16 *)&adapter->sopass[4];
		lan743x_csr_write(adapter, MAC_MP_SO_HI, sopass);
		wucsr |= MAC_MP_SO_EN_;
	}

	/* commit the accumulated settings in one pass */
	lan743x_csr_write(adapter, MAC_WUCSR, wucsr);
	lan743x_csr_write(adapter, PMT_CTL, pmtctl);
	lan743x_csr_write(adapter, MAC_RX, macrx);
}

/* System-sleep suspend: quiesce the device, arm the configured wake
 * sources, and transition toward D3hot.
 */
static int lan743x_pm_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct lan743x_adapter *adapter = netdev_priv(netdev);
	u32 data;

	lan743x_pcidev_shutdown(pdev);

	/* clear all wakes */
	lan743x_csr_write(adapter, MAC_WUCSR, 0);
	lan743x_csr_write(adapter, MAC_WUCSR2, 0);
	lan743x_csr_write(adapter, MAC_WK_SRC, 0xFFFFFFFF);

	if (adapter->wolopts)
		lan743x_pm_set_wol(adapter);

	if (adapter->is_pci11x1x) {
		/* Save HW_CFG to config again in PM resume */
		data = lan743x_csr_read(adapter, HW_CFG);
		adapter->hw_cfg = data;
		/* protect the chip from resets while asleep */
		data |= (HW_CFG_RST_PROTECT_PCIE_ |
			 HW_CFG_D3_RESET_DIS_ |
			 HW_CFG_D3_VAUX_OVR_ |
			 HW_CFG_HOT_RESET_DIS_ |
			 HW_CFG_RST_PROTECT_);
		lan743x_csr_write(adapter, HW_CFG, data);
	}

	/* Host sets PME_En, put D3hot */
	return pci_prepare_to_sleep(pdev);
}

static int
lan743x_pm_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct lan743x_adapter *adapter = netdev_priv(netdev);
	int ret;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	/* Restore HW_CFG that was saved during pm suspend */
	if (adapter->is_pci11x1x)
		lan743x_csr_write(adapter, HW_CFG, adapter->hw_cfg);

	/* full re-init: register state was lost across the sleep */
	ret = lan743x_hardware_init(adapter, pdev);
	if (ret) {
		netif_err(adapter, probe, adapter->netdev,
			  "lan743x_hardware_init returned %d\n", ret);
		lan743x_pci_cleanup(adapter);
		return ret;
	}

	/* open netdev when netdev is at running state while resume.
	 * For instance, it is true when system wakesup after pm-suspend
	 * However, it is false when system wakes up after suspend GUI menu
	 */
	if (netif_running(netdev))
		lan743x_netdev_open(netdev);

	netif_device_attach(netdev);
	/* report which event woke us; MAC_WK_SRC was cleared in suspend */
	ret = lan743x_csr_read(adapter, MAC_WK_SRC);
	netif_info(adapter, drv, adapter->netdev,
		   "Wakeup source : 0x%08X\n", ret);

	return 0;
}

static const struct dev_pm_ops lan743x_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(lan743x_pm_suspend, lan743x_pm_resume)
};
#endif /* CONFIG_PM_SLEEP */

static const struct pci_device_id lan743x_pcidev_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_SMSC, PCI_DEVICE_ID_SMSC_LAN7430) },
	{ PCI_DEVICE(PCI_VENDOR_ID_SMSC, PCI_DEVICE_ID_SMSC_LAN7431) },
	{ PCI_DEVICE(PCI_VENDOR_ID_SMSC, PCI_DEVICE_ID_SMSC_A011) },
	{ PCI_DEVICE(PCI_VENDOR_ID_SMSC, PCI_DEVICE_ID_SMSC_A041) },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, lan743x_pcidev_tbl);

static struct pci_driver lan743x_pcidev_driver = {
	.name = DRIVER_NAME,
	.id_table = lan743x_pcidev_tbl,
	.probe = lan743x_pcidev_probe,
	.remove = lan743x_pcidev_remove,
#ifdef CONFIG_PM_SLEEP
	.driver.pm = &lan743x_pm_ops,
#endif
	.shutdown = lan743x_pcidev_shutdown,
};

module_pci_driver(lan743x_pcidev_driver);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");