1 /* SPDX-License-Identifier: GPL-2.0+ */ 2 /* Copyright (C) 2018 Microchip Technology Inc. */ 3 4 #include <linux/module.h> 5 #include <linux/pci.h> 6 #include <linux/netdevice.h> 7 #include <linux/etherdevice.h> 8 #include <linux/crc32.h> 9 #include <linux/microchipphy.h> 10 #include <linux/net_tstamp.h> 11 #include <linux/of_mdio.h> 12 #include <linux/of_net.h> 13 #include <linux/phy.h> 14 #include <linux/phy_fixed.h> 15 #include <linux/rtnetlink.h> 16 #include <linux/iopoll.h> 17 #include <linux/crc16.h> 18 #include "lan743x_main.h" 19 #include "lan743x_ethtool.h" 20 21 #define MMD_ACCESS_ADDRESS 0 22 #define MMD_ACCESS_WRITE 1 23 #define MMD_ACCESS_READ 2 24 #define MMD_ACCESS_READ_INC 3 25 26 static void pci11x1x_strap_get_status(struct lan743x_adapter *adapter) 27 { 28 u32 chip_rev; 29 u32 strap; 30 31 strap = lan743x_csr_read(adapter, STRAP_READ); 32 if (strap & STRAP_READ_USE_SGMII_EN_) { 33 if (strap & STRAP_READ_SGMII_EN_) 34 adapter->is_sgmii_en = true; 35 else 36 adapter->is_sgmii_en = false; 37 netif_dbg(adapter, drv, adapter->netdev, 38 "STRAP_READ: 0x%08X\n", strap); 39 } else { 40 chip_rev = lan743x_csr_read(adapter, FPGA_REV); 41 if (chip_rev) { 42 if (chip_rev & FPGA_SGMII_OP) 43 adapter->is_sgmii_en = true; 44 else 45 adapter->is_sgmii_en = false; 46 netif_dbg(adapter, drv, adapter->netdev, 47 "FPGA_REV: 0x%08X\n", chip_rev); 48 } else { 49 adapter->is_sgmii_en = false; 50 } 51 } 52 } 53 54 static bool is_pci11x1x_chip(struct lan743x_adapter *adapter) 55 { 56 struct lan743x_csr *csr = &adapter->csr; 57 u32 id_rev = csr->id_rev; 58 59 if (((id_rev & 0xFFFF0000) == ID_REV_ID_A011_) || 60 ((id_rev & 0xFFFF0000) == ID_REV_ID_A041_)) { 61 return true; 62 } 63 return false; 64 } 65 66 static void lan743x_pci_cleanup(struct lan743x_adapter *adapter) 67 { 68 pci_release_selected_regions(adapter->pdev, 69 pci_select_bars(adapter->pdev, 70 IORESOURCE_MEM)); 71 pci_disable_device(adapter->pdev); 72 } 73 74 static int lan743x_pci_init(struct lan743x_adapter 
*adapter, 75 struct pci_dev *pdev) 76 { 77 unsigned long bars = 0; 78 int ret; 79 80 adapter->pdev = pdev; 81 ret = pci_enable_device_mem(pdev); 82 if (ret) 83 goto return_error; 84 85 netif_info(adapter, probe, adapter->netdev, 86 "PCI: Vendor ID = 0x%04X, Device ID = 0x%04X\n", 87 pdev->vendor, pdev->device); 88 bars = pci_select_bars(pdev, IORESOURCE_MEM); 89 if (!test_bit(0, &bars)) 90 goto disable_device; 91 92 ret = pci_request_selected_regions(pdev, bars, DRIVER_NAME); 93 if (ret) 94 goto disable_device; 95 96 pci_set_master(pdev); 97 return 0; 98 99 disable_device: 100 pci_disable_device(adapter->pdev); 101 102 return_error: 103 return ret; 104 } 105 106 u32 lan743x_csr_read(struct lan743x_adapter *adapter, int offset) 107 { 108 return ioread32(&adapter->csr.csr_address[offset]); 109 } 110 111 void lan743x_csr_write(struct lan743x_adapter *adapter, int offset, 112 u32 data) 113 { 114 iowrite32(data, &adapter->csr.csr_address[offset]); 115 } 116 117 #define LAN743X_CSR_READ_OP(offset) lan743x_csr_read(adapter, offset) 118 119 static int lan743x_csr_light_reset(struct lan743x_adapter *adapter) 120 { 121 u32 data; 122 123 data = lan743x_csr_read(adapter, HW_CFG); 124 data |= HW_CFG_LRST_; 125 lan743x_csr_write(adapter, HW_CFG, data); 126 127 return readx_poll_timeout(LAN743X_CSR_READ_OP, HW_CFG, data, 128 !(data & HW_CFG_LRST_), 100000, 10000000); 129 } 130 131 static int lan743x_csr_wait_for_bit(struct lan743x_adapter *adapter, 132 int offset, u32 bit_mask, 133 int target_value, int usleep_min, 134 int usleep_max, int count) 135 { 136 u32 data; 137 138 return readx_poll_timeout(LAN743X_CSR_READ_OP, offset, data, 139 target_value == ((data & bit_mask) ? 
/* Map BAR0, validate the chip ID, derive per-revision CSR capability
 * flags and perform a light reset.  Returns 0 on success or a negative
 * errno (-ENOMEM on ioremap failure, -ENODEV on unknown chip).
 */
static int lan743x_csr_init(struct lan743x_adapter *adapter)
{
	struct lan743x_csr *csr = &adapter->csr;
	resource_size_t bar_start, bar_length;
	int result;

	/* BAR0 holds the entire CSR space; devm_ioremap ties the mapping
	 * lifetime to the PCI device.
	 */
	bar_start = pci_resource_start(adapter->pdev, 0);
	bar_length = pci_resource_len(adapter->pdev, 0);
	csr->csr_address = devm_ioremap(&adapter->pdev->dev,
					bar_start, bar_length);
	if (!csr->csr_address) {
		result = -ENOMEM;
		goto clean_up;
	}

	csr->id_rev = lan743x_csr_read(adapter, ID_REV);
	csr->fpga_rev = lan743x_csr_read(adapter, FPGA_REV);
	netif_info(adapter, probe, adapter->netdev,
		   "ID_REV = 0x%08X, FPGA_REV = %d.%d\n",
		   csr->id_rev, FPGA_REV_GET_MAJOR_(csr->fpga_rev),
		   FPGA_REV_GET_MINOR_(csr->fpga_rev));
	if (!ID_REV_IS_VALID_CHIP_ID_(csr->id_rev)) {
		result = -ENODEV;
		goto clean_up;
	}

	/* Assume auto set/clear support by default; A0 silicon lacks it. */
	csr->flags = LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR;
	switch (csr->id_rev & ID_REV_CHIP_REV_MASK_) {
	case ID_REV_CHIP_REV_A0_:
		csr->flags |= LAN743X_CSR_FLAG_IS_A0;
		csr->flags &= ~LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR;
		break;
	case ID_REV_CHIP_REV_B0_:
		csr->flags |= LAN743X_CSR_FLAG_IS_B0;
		break;
	}

	result = lan743x_csr_light_reset(adapter);
	if (result)
		goto clean_up;
	return 0;
clean_up:
	return result;
}

/* Handler for the software general-purpose test interrupt: disable it
 * and wake the waiter in lan743x_intr_test_isr().
 */
static void lan743x_intr_software_isr(struct lan743x_adapter *adapter)
{
	struct lan743x_intr *intr = &adapter->intr;

	/* disable the interrupt to prevent repeated re-triggering */
	lan743x_csr_write(adapter, INT_EN_CLR, INT_BIT_SW_GP_);
	intr->software_isr_flag = true;
	wake_up(&intr->software_isr_wq);
}

/* TX interrupt handler.  Depending on the vector flags it reads or
 * infers the DMAC status/enable state; when the IOC (interrupt on
 * completion) bit for this channel fired, NAPI is scheduled and the
 * source interrupt is left disabled for the poll function to re-enable.
 */
static void lan743x_tx_isr(void *context, u32 int_sts, u32 flags)
{
	struct lan743x_tx *tx = context;
	struct lan743x_adapter *adapter = tx->adapter;
	bool enable_flag = true;

	/* NOTE(review): return value discarded; presumably a dummy read
	 * (e.g. to flush posted writes) — confirm, since lan743x_rx_isr()
	 * has no equivalent read.
	 */
	lan743x_csr_read(adapter, INT_EN_SET);
	if (flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR) {
		lan743x_csr_write(adapter, INT_EN_CLR,
				  INT_BIT_DMA_TX_(tx->channel_number));
	}

	if (int_sts & INT_BIT_DMA_TX_(tx->channel_number)) {
		u32 ioc_bit = DMAC_INT_BIT_TX_IOC_(tx->channel_number);
		u32 dmac_int_sts;
		u32 dmac_int_en;

		/* Without STATUS_READ/ENABLE_CHECK the flags imply the
		 * state, so assume the IOC bit is pending/enabled.
		 */
		if (flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ)
			dmac_int_sts = lan743x_csr_read(adapter, DMAC_INT_STS);
		else
			dmac_int_sts = ioc_bit;
		if (flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK)
			dmac_int_en = lan743x_csr_read(adapter,
						       DMAC_INT_EN_SET);
		else
			dmac_int_en = ioc_bit;

		dmac_int_en &= ioc_bit;
		dmac_int_sts &= dmac_int_en;
		if (dmac_int_sts & ioc_bit) {
			napi_schedule(&tx->napi);
			enable_flag = false;/* poll func will enable later */
		}
	}

	if (enable_flag)
		/* enable isr */
		lan743x_csr_write(adapter, INT_EN_SET,
				  INT_BIT_DMA_TX_(tx->channel_number));
}

/* RX interrupt handler.  Mirrors lan743x_tx_isr(): on a received-frame
 * interrupt for this channel, schedule NAPI and defer re-enabling the
 * source to the poll function.
 */
static void lan743x_rx_isr(void *context, u32 int_sts, u32 flags)
{
	struct lan743x_rx *rx = context;
	struct lan743x_adapter *adapter = rx->adapter;
	bool enable_flag = true;

	if (flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR) {
		lan743x_csr_write(adapter, INT_EN_CLR,
				  INT_BIT_DMA_RX_(rx->channel_number));
	}

	if (int_sts & INT_BIT_DMA_RX_(rx->channel_number)) {
		u32 rx_frame_bit = DMAC_INT_BIT_RXFRM_(rx->channel_number);
		u32 dmac_int_sts;
		u32 dmac_int_en;

		/* Same implied-state convention as the TX path */
		if (flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ)
			dmac_int_sts = lan743x_csr_read(adapter, DMAC_INT_STS);
		else
			dmac_int_sts = rx_frame_bit;
		if (flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK)
			dmac_int_en = lan743x_csr_read(adapter,
						       DMAC_INT_EN_SET);
		else
			dmac_int_en = rx_frame_bit;

		dmac_int_en &= rx_frame_bit;
		dmac_int_sts &= dmac_int_en;
		if (dmac_int_sts & rx_frame_bit) {
			napi_schedule(&rx->napi);
			enable_flag = false;/* poll funct will enable later */
		}
	}

	if (enable_flag) {
		/* enable isr */
		lan743x_csr_write(adapter, INT_EN_SET,
				  INT_BIT_DMA_RX_(rx->channel_number));
	}
}
/* Dispatcher used when several interrupt sources share one vector:
 * fan int_sts out to the per-channel RX/TX handlers and the "other"
 * (software test, PTP) handlers, then mask any status bits nobody
 * claimed so they cannot re-fire endlessly.
 */
static void lan743x_intr_shared_isr(void *context, u32 int_sts, u32 flags)
{
	struct lan743x_adapter *adapter = context;
	unsigned int channel;

	if (int_sts & INT_BIT_ALL_RX_) {
		for (channel = 0; channel < LAN743X_USED_RX_CHANNELS;
			channel++) {
			u32 int_bit = INT_BIT_DMA_RX_(channel);

			if (int_sts & int_bit) {
				lan743x_rx_isr(&adapter->rx[channel],
					       int_bit, flags);
				int_sts &= ~int_bit;
			}
		}
	}
	if (int_sts & INT_BIT_ALL_TX_) {
		for (channel = 0; channel < adapter->used_tx_channels;
			channel++) {
			u32 int_bit = INT_BIT_DMA_TX_(channel);

			if (int_sts & int_bit) {
				lan743x_tx_isr(&adapter->tx[channel],
					       int_bit, flags);
				int_sts &= ~int_bit;
			}
		}
	}
	if (int_sts & INT_BIT_ALL_OTHER_) {
		if (int_sts & INT_BIT_SW_GP_) {
			lan743x_intr_software_isr(adapter);
			int_sts &= ~INT_BIT_SW_GP_;
		}
		if (int_sts & INT_BIT_1588_) {
			lan743x_ptp_isr(adapter);
			int_sts &= ~INT_BIT_1588_;
		}
	}
	/* Disable any remaining unhandled sources */
	if (int_sts)
		lan743x_csr_write(adapter, INT_EN_CLR, int_sts);
}

/* Top-level IRQ entry point for a vector.  The vector's flags select
 * how status is obtained (read, read-to-clear, or implied from the
 * mask), whether the vector/master enables are toggled around the
 * handler, and whether enables gate the status.  Returns IRQ_HANDLED
 * only when a masked, enabled source was dispatched.
 */
static irqreturn_t lan743x_intr_entry_isr(int irq, void *ptr)
{
	struct lan743x_vector *vector = ptr;
	struct lan743x_adapter *adapter = vector->adapter;
	irqreturn_t result = IRQ_NONE;
	u32 int_enables;
	u32 int_sts;

	if (vector->flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ) {
		int_sts = lan743x_csr_read(adapter, INT_STS);
	} else if (vector->flags &
		   (LAN743X_VECTOR_FLAG_SOURCE_STATUS_R2C |
		   LAN743X_VECTOR_FLAG_SOURCE_ENABLE_R2C)) {
		/* read-to-clear register: the read acks the sources */
		int_sts = lan743x_csr_read(adapter, INT_STS_R2C);
	} else {
		/* use mask as implied status */
		int_sts = vector->int_mask | INT_BIT_MAS_;
	}

	/* Master bit clear means this interrupt is not ours (shared IRQ) */
	if (!(int_sts & INT_BIT_MAS_))
		goto irq_done;

	if (vector->flags & LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_CLEAR)
		/* disable vector interrupt */
		lan743x_csr_write(adapter,
				  INT_VEC_EN_CLR,
				  INT_VEC_EN_(vector->vector_index));

	if (vector->flags & LAN743X_VECTOR_FLAG_MASTER_ENABLE_CLEAR)
		/* disable master interrupt */
		lan743x_csr_write(adapter, INT_EN_CLR, INT_BIT_MAS_);

	if (vector->flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK) {
		int_enables = lan743x_csr_read(adapter, INT_EN_SET);
	} else {
		/* use vector mask as implied enable mask */
		int_enables = vector->int_mask;
	}

	/* Only dispatch sources that are both enabled and owned by this
	 * vector.
	 */
	int_sts &= int_enables;
	int_sts &= vector->int_mask;
	if (int_sts) {
		if (vector->handler) {
			vector->handler(vector->context,
					int_sts, vector->flags);
		} else {
			/* disable interrupts on this vector */
			lan743x_csr_write(adapter, INT_EN_CLR,
					  vector->int_mask);
		}
		result = IRQ_HANDLED;
	}

	if (vector->flags & LAN743X_VECTOR_FLAG_MASTER_ENABLE_SET)
		/* enable master interrupt */
		lan743x_csr_write(adapter, INT_EN_SET, INT_BIT_MAS_);

	if (vector->flags & LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_SET)
		/* enable vector interrupt */
		lan743x_csr_write(adapter,
				  INT_VEC_EN_SET,
				  INT_VEC_EN_(vector->vector_index));
irq_done:
	return result;
}

/* Fire the software general-purpose interrupt and wait (up to 200 ms)
 * for lan743x_intr_software_isr() to observe it, verifying the
 * interrupt path end-to-end.  Returns 0 on success, -ENODEV on timeout.
 */
static int lan743x_intr_test_isr(struct lan743x_adapter *adapter)
{
	struct lan743x_intr *intr = &adapter->intr;
	int ret;

	intr->software_isr_flag = false;

	/* enable and activate test interrupt */
	lan743x_csr_write(adapter, INT_EN_SET, INT_BIT_SW_GP_);
	lan743x_csr_write(adapter, INT_SET, INT_BIT_SW_GP_);

	ret = wait_event_timeout(intr->software_isr_wq,
				 intr->software_isr_flag,
				 msecs_to_jiffies(200));

	/* disable test interrupt */
	lan743x_csr_write(adapter, INT_EN_CLR, INT_BIT_SW_GP_);

	return ret > 0 ? 0 : -ENODEV;
}
/* Fill in a vector descriptor and request its IRQ.  On request_irq()
 * failure the descriptor is scrubbed so a later close cannot act on
 * stale state.  Returns 0 or the request_irq() error.
 */
static int lan743x_intr_register_isr(struct lan743x_adapter *adapter,
				     int vector_index, u32 flags,
				     u32 int_mask,
				     lan743x_vector_handler handler,
				     void *context)
{
	struct lan743x_vector *vector = &adapter->intr.vector_list
					[vector_index];
	int ret;

	vector->adapter = adapter;
	vector->flags = flags;
	vector->vector_index = vector_index;
	vector->int_mask = int_mask;
	vector->handler = handler;
	vector->context = context;

	ret = request_irq(vector->irq,
			  lan743x_intr_entry_isr,
			  (flags & LAN743X_VECTOR_FLAG_IRQ_SHARED) ?
			  IRQF_SHARED : 0, DRIVER_NAME, vector);
	if (ret) {
		vector->handler = NULL;
		vector->context = NULL;
		vector->int_mask = 0;
		vector->flags = 0;
	}
	return ret;
}

/* Free a vector's IRQ and scrub its descriptor.  Counterpart of
 * lan743x_intr_register_isr().
 */
static void lan743x_intr_unregister_isr(struct lan743x_adapter *adapter,
					int vector_index)
{
	struct lan743x_vector *vector = &adapter->intr.vector_list
					[vector_index];

	free_irq(vector->irq, vector);
	vector->handler = NULL;
	vector->context = NULL;
	vector->int_mask = 0;
	vector->flags = 0;
}

/* Return the flags of the first vector whose mask overlaps @int_mask,
 * or 0 when no vector services any of those sources.
 */
static u32 lan743x_intr_get_vector_flags(struct lan743x_adapter *adapter,
					 u32 int_mask)
{
	int index;

	for (index = 0; index < adapter->max_vector_count; index++) {
		if (adapter->intr.vector_list[index].int_mask & int_mask)
			return adapter->intr.vector_list[index].flags;
	}
	return 0;
}

/* Tear down interrupt handling: mask everything, free every requested
 * IRQ, then disable MSI/MSI-X.  Safe to call on a partially opened
 * state (used as the error path of lan743x_intr_open()).
 */
static void lan743x_intr_close(struct lan743x_adapter *adapter)
{
	struct lan743x_intr *intr = &adapter->intr;
	int index = 0;

	lan743x_csr_write(adapter, INT_EN_CLR, INT_BIT_MAS_);
	/* PCI11x1x parts expose 16 vectors, others 8 */
	if (adapter->is_pci11x1x)
		lan743x_csr_write(adapter, INT_VEC_EN_CLR, 0x0000FFFF);
	else
		lan743x_csr_write(adapter, INT_VEC_EN_CLR, 0x000000FF);

	for (index = 0; index < intr->number_of_vectors; index++) {
		if (intr->flags & INTR_FLAG_IRQ_REQUESTED(index)) {
			lan743x_intr_unregister_isr(adapter, index);
			intr->flags &= ~INTR_FLAG_IRQ_REQUESTED(index);
		}
	}

	if (intr->flags & INTR_FLAG_MSI_ENABLED) {
		pci_disable_msi(adapter->pdev);
		intr->flags &= ~INTR_FLAG_MSI_ENABLED;
	}

	if (intr->flags & INTR_FLAG_MSIX_ENABLED) {
		pci_disable_msix(adapter->pdev);
		intr->flags &= ~INTR_FLAG_MSIX_ENABLED;
	}
}

/* Bring up interrupt handling.  Tries MSI-X first, then MSI (except on
 * A0 silicon), then legacy INTx.  Vector 0 always carries the shared
 * handler for all sources; when enough vectors were granted, each TX
 * channel and then each RX channel gets a dedicated vector and its bit
 * is removed from vector 0's mask.  Also programs interrupt moderation
 * and runs the self-test interrupt.  On any failure everything is torn
 * down via lan743x_intr_close().
 */
static int lan743x_intr_open(struct lan743x_adapter *adapter)
{
	struct msix_entry msix_entries[PCI11X1X_MAX_VECTOR_COUNT];
	struct lan743x_intr *intr = &adapter->intr;
	unsigned int used_tx_channels;
	u32 int_vec_en_auto_clr = 0;
	u8 max_vector_count;
	u32 int_vec_map0 = 0;
	u32 int_vec_map1 = 0;
	int ret = -ENODEV;
	int index = 0;
	u32 flags = 0;

	intr->number_of_vectors = 0;

	/* Try to set up MSIX interrupts */
	max_vector_count = adapter->max_vector_count;
	memset(&msix_entries[0], 0,
	       sizeof(struct msix_entry) * max_vector_count);
	for (index = 0; index < max_vector_count; index++)
		msix_entries[index].entry = index;
	used_tx_channels = adapter->used_tx_channels;
	ret = pci_enable_msix_range(adapter->pdev,
				    msix_entries, 1,
				    1 + used_tx_channels +
				    LAN743X_USED_RX_CHANNELS);

	if (ret > 0) {
		intr->flags |= INTR_FLAG_MSIX_ENABLED;
		intr->number_of_vectors = ret;
		intr->using_vectors = true;
		for (index = 0; index < intr->number_of_vectors; index++)
			intr->vector_list[index].irq = msix_entries
						       [index].vector;
		netif_info(adapter, ifup, adapter->netdev,
			   "using MSIX interrupts, number of vectors = %d\n",
			   intr->number_of_vectors);
	}

	/* If MSIX failed try to setup using MSI interrupts */
	if (!intr->number_of_vectors) {
		if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0)) {
			if (!pci_enable_msi(adapter->pdev)) {
				intr->flags |= INTR_FLAG_MSI_ENABLED;
				intr->number_of_vectors = 1;
				intr->using_vectors = true;
				intr->vector_list[0].irq =
					adapter->pdev->irq;
				netif_info(adapter, ifup, adapter->netdev,
					   "using MSI interrupts, number of vectors = %d\n",
					   intr->number_of_vectors);
			}
		}
	}

	/* If MSIX, and MSI failed, setup using legacy interrupt */
	if (!intr->number_of_vectors) {
		intr->number_of_vectors = 1;
		intr->using_vectors = false;
		intr->vector_list[0].irq = intr->irq;
		netif_info(adapter, ifup, adapter->netdev,
			   "using legacy interrupts\n");
	}

	/* At this point we must have at least one irq */
	lan743x_csr_write(adapter, INT_VEC_EN_CLR, 0xFFFFFFFF);

	/* map all interrupts to vector 0 */
	lan743x_csr_write(adapter, INT_VEC_MAP0, 0x00000000);
	lan743x_csr_write(adapter, INT_VEC_MAP1, 0x00000000);
	lan743x_csr_write(adapter, INT_VEC_MAP2, 0x00000000);
	flags = LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ |
		LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C |
		LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK |
		LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR;

	if (intr->using_vectors) {
		flags |= LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_CLEAR |
			 LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_SET;
	} else {
		/* legacy INTx: toggle the master enable instead, and the
		 * line may be shared with other devices
		 */
		flags |= LAN743X_VECTOR_FLAG_MASTER_ENABLE_CLEAR |
			 LAN743X_VECTOR_FLAG_MASTER_ENABLE_SET |
			 LAN743X_VECTOR_FLAG_IRQ_SHARED;
	}

	if (adapter->csr.flags & LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR) {
		/* hardware can ack via read-to-clear; drop the manual
		 * read/clear steps
		 */
		flags &= ~LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ;
		flags &= ~LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C;
		flags &= ~LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR;
		flags &= ~LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK;
		flags |= LAN743X_VECTOR_FLAG_SOURCE_STATUS_R2C;
		flags |= LAN743X_VECTOR_FLAG_SOURCE_ENABLE_R2C;
	}

	init_waitqueue_head(&intr->software_isr_wq);

	ret = lan743x_intr_register_isr(adapter, 0, flags,
					INT_BIT_ALL_RX_ | INT_BIT_ALL_TX_ |
					INT_BIT_ALL_OTHER_,
					lan743x_intr_shared_isr, adapter);
	if (ret)
		goto clean_up;
	intr->flags |= INTR_FLAG_IRQ_REQUESTED(0);

	if (intr->using_vectors)
		lan743x_csr_write(adapter, INT_VEC_EN_SET,
				  INT_VEC_EN_(0));

	/* Interrupt moderation is not available on A0 silicon */
	if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0)) {
		lan743x_csr_write(adapter, INT_MOD_CFG0, LAN743X_INT_MOD);
		lan743x_csr_write(adapter, INT_MOD_CFG1, LAN743X_INT_MOD);
		lan743x_csr_write(adapter, INT_MOD_CFG2, LAN743X_INT_MOD);
		lan743x_csr_write(adapter, INT_MOD_CFG3, LAN743X_INT_MOD);
		lan743x_csr_write(adapter, INT_MOD_CFG4, LAN743X_INT_MOD);
		lan743x_csr_write(adapter, INT_MOD_CFG5, LAN743X_INT_MOD);
		lan743x_csr_write(adapter, INT_MOD_CFG6, LAN743X_INT_MOD);
		lan743x_csr_write(adapter, INT_MOD_CFG7, LAN743X_INT_MOD);
		if (adapter->is_pci11x1x) {
			lan743x_csr_write(adapter, INT_MOD_CFG8, LAN743X_INT_MOD);
			lan743x_csr_write(adapter, INT_MOD_CFG9, LAN743X_INT_MOD);
			lan743x_csr_write(adapter, INT_MOD_MAP0, 0x00007654);
			lan743x_csr_write(adapter, INT_MOD_MAP1, 0x00003210);
		} else {
			lan743x_csr_write(adapter, INT_MOD_MAP0, 0x00005432);
			lan743x_csr_write(adapter, INT_MOD_MAP1, 0x00000001);
		}
		lan743x_csr_write(adapter, INT_MOD_MAP2, 0x00FFFFFF);
	}

	/* enable interrupts */
	lan743x_csr_write(adapter, INT_EN_SET, INT_BIT_MAS_);
	ret = lan743x_intr_test_isr(adapter);
	if (ret)
		goto clean_up;

	/* Spread TX channels over the extra vectors (vector 0 is shared) */
	if (intr->number_of_vectors > 1) {
		int number_of_tx_vectors = intr->number_of_vectors - 1;

		if (number_of_tx_vectors > used_tx_channels)
			number_of_tx_vectors = used_tx_channels;
		flags = LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ |
			LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C |
			LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK |
			LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR |
			LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_CLEAR |
			LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_SET;

		if (adapter->csr.flags &
		    LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR) {
			flags = LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET |
				LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET |
				LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_CLEAR |
				LAN743X_VECTOR_FLAG_SOURCE_STATUS_AUTO_CLEAR;
		}

		for (index = 0; index < number_of_tx_vectors; index++) {
			u32 int_bit = INT_BIT_DMA_TX_(index);
			int vector = index + 1;

			/* map TX interrupt to vector */
			int_vec_map1 |= INT_VEC_MAP1_TX_VEC_(index, vector);
			lan743x_csr_write(adapter, INT_VEC_MAP1, int_vec_map1);

			/* Remove TX interrupt from shared mask */
			intr->vector_list[0].int_mask &= ~int_bit;
			ret = lan743x_intr_register_isr(adapter, vector, flags,
							int_bit, lan743x_tx_isr,
							&adapter->tx[index]);
			if (ret)
				goto clean_up;
			intr->flags |= INTR_FLAG_IRQ_REQUESTED(vector);
			if (!(flags &
			    LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET))
				lan743x_csr_write(adapter, INT_VEC_EN_SET,
						  INT_VEC_EN_(vector));
		}
	}
	/* Then dedicate any remaining vectors to the RX channels */
	if ((intr->number_of_vectors - used_tx_channels) > 1) {
		int number_of_rx_vectors = intr->number_of_vectors -
					   used_tx_channels - 1;

		if (number_of_rx_vectors > LAN743X_USED_RX_CHANNELS)
			number_of_rx_vectors = LAN743X_USED_RX_CHANNELS;

		flags = LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ |
			LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C |
			LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK |
			LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR |
			LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_CLEAR |
			LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_SET;

		if (adapter->csr.flags &
		    LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR) {
			flags = LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_CLEAR |
				LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET |
				LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET |
				LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_CLEAR |
				LAN743X_VECTOR_FLAG_SOURCE_STATUS_AUTO_CLEAR;
		}
		for (index = 0; index < number_of_rx_vectors; index++) {
			int vector = index + 1 + used_tx_channels;
			u32 int_bit = INT_BIT_DMA_RX_(index);

			/* map RX interrupt to vector */
			int_vec_map0 |= INT_VEC_MAP0_RX_VEC_(index, vector);
			lan743x_csr_write(adapter, INT_VEC_MAP0, int_vec_map0);
			if (flags &
			    LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_CLEAR) {
				int_vec_en_auto_clr |= INT_VEC_EN_(vector);
				lan743x_csr_write(adapter, INT_VEC_EN_AUTO_CLR,
						  int_vec_en_auto_clr);
			}

			/* Remove RX interrupt from shared mask */
			intr->vector_list[0].int_mask &= ~int_bit;
			ret = lan743x_intr_register_isr(adapter, vector, flags,
							int_bit, lan743x_rx_isr,
							&adapter->rx[index]);
			if (ret)
				goto clean_up;
			intr->flags |= INTR_FLAG_IRQ_REQUESTED(vector);

			lan743x_csr_write(adapter, INT_VEC_EN_SET,
					  INT_VEC_EN_(vector));
		}
	}
	return 0;

clean_up:
	lan743x_intr_close(adapter);
	return ret;
}

/* Write @length words from @buf into the data-port RAM selected by
 * @select, starting at word address @addr.  Each word write is polled
 * for DPRDY; returns 0 on success or -EIO on any ready timeout.
 */
static int lan743x_dp_write(struct lan743x_adapter *adapter,
			    u32 select, u32 addr, u32 length, u32 *buf)
{
	u32 dp_sel;
	int i;

	if (lan743x_csr_wait_for_bit(adapter, DP_SEL, DP_SEL_DPRDY_,
				     1, 40, 100, 100))
		return -EIO;
	dp_sel = lan743x_csr_read(adapter, DP_SEL);
	dp_sel &= ~DP_SEL_MASK_;
	dp_sel |= select;
	lan743x_csr_write(adapter, DP_SEL, dp_sel);

	for (i = 0; i < length; i++) {
		lan743x_csr_write(adapter, DP_ADDR, addr + i);
		lan743x_csr_write(adapter, DP_DATA_0, buf[i]);
		lan743x_csr_write(adapter, DP_CMD, DP_CMD_WRITE_);
		if (lan743x_csr_wait_for_bit(adapter, DP_SEL, DP_SEL_DPRDY_,
					     1, 40, 100, 100))
			return -EIO;
	}

	return 0;
}

/* Compose a MAC_MII_ACC command word for a clause-22 access to PHY @id,
 * register @index; @read selects read vs write.  The BUSY bit is set so
 * writing the word starts the transaction.
 */
static u32 lan743x_mac_mii_access(u16 id, u16 index, int read)
{
	u32 ret;

	ret = (id << MAC_MII_ACC_PHY_ADDR_SHIFT_) &
		MAC_MII_ACC_PHY_ADDR_MASK_;
	ret |= (index << MAC_MII_ACC_MIIRINDA_SHIFT_) &
		MAC_MII_ACC_MIIRINDA_MASK_;

	if (read)
		ret |= MAC_MII_ACC_MII_READ_;
	else
		ret |= MAC_MII_ACC_MII_WRITE_;
	ret |= MAC_MII_ACC_MII_BUSY_;

	return ret;
}

/* Poll until the MII management interface clears its BUSY bit.
 * Returns 0 when idle or -ETIMEDOUT after ~1 s.
 */
static int lan743x_mac_mii_wait_till_not_busy(struct lan743x_adapter *adapter)
{
	u32 data;

	return readx_poll_timeout(LAN743X_CSR_READ_OP, MAC_MII_ACC, data,
				  !(data & MAC_MII_ACC_MII_BUSY_), 0, 1000000);
}
*adapter) 772 { 773 u32 data; 774 775 return readx_poll_timeout(LAN743X_CSR_READ_OP, MAC_MII_ACC, data, 776 !(data & MAC_MII_ACC_MII_BUSY_), 0, 1000000); 777 } 778 779 static int lan743x_mdiobus_read(struct mii_bus *bus, int phy_id, int index) 780 { 781 struct lan743x_adapter *adapter = bus->priv; 782 u32 val, mii_access; 783 int ret; 784 785 /* comfirm MII not busy */ 786 ret = lan743x_mac_mii_wait_till_not_busy(adapter); 787 if (ret < 0) 788 return ret; 789 790 /* set the address, index & direction (read from PHY) */ 791 mii_access = lan743x_mac_mii_access(phy_id, index, MAC_MII_READ); 792 lan743x_csr_write(adapter, MAC_MII_ACC, mii_access); 793 ret = lan743x_mac_mii_wait_till_not_busy(adapter); 794 if (ret < 0) 795 return ret; 796 797 val = lan743x_csr_read(adapter, MAC_MII_DATA); 798 return (int)(val & 0xFFFF); 799 } 800 801 static int lan743x_mdiobus_write(struct mii_bus *bus, 802 int phy_id, int index, u16 regval) 803 { 804 struct lan743x_adapter *adapter = bus->priv; 805 u32 val, mii_access; 806 int ret; 807 808 /* confirm MII not busy */ 809 ret = lan743x_mac_mii_wait_till_not_busy(adapter); 810 if (ret < 0) 811 return ret; 812 val = (u32)regval; 813 lan743x_csr_write(adapter, MAC_MII_DATA, val); 814 815 /* set the address, index & direction (write to PHY) */ 816 mii_access = lan743x_mac_mii_access(phy_id, index, MAC_MII_WRITE); 817 lan743x_csr_write(adapter, MAC_MII_ACC, mii_access); 818 ret = lan743x_mac_mii_wait_till_not_busy(adapter); 819 return ret; 820 } 821 822 static u32 lan743x_mac_mmd_access(int id, int index, int op) 823 { 824 u16 dev_addr; 825 u32 ret; 826 827 dev_addr = (index >> 16) & 0x1f; 828 ret = (id << MAC_MII_ACC_PHY_ADDR_SHIFT_) & 829 MAC_MII_ACC_PHY_ADDR_MASK_; 830 ret |= (dev_addr << MAC_MII_ACC_MIIMMD_SHIFT_) & 831 MAC_MII_ACC_MIIMMD_MASK_; 832 if (op == MMD_ACCESS_WRITE) 833 ret |= MAC_MII_ACC_MIICMD_WRITE_; 834 else if (op == MMD_ACCESS_READ) 835 ret |= MAC_MII_ACC_MIICMD_READ_; 836 else if (op == MMD_ACCESS_READ_INC) 837 ret |= 
MAC_MII_ACC_MIICMD_READ_INC_; 838 else 839 ret |= MAC_MII_ACC_MIICMD_ADDR_; 840 ret |= (MAC_MII_ACC_MII_BUSY_ | MAC_MII_ACC_MIICL45_); 841 842 return ret; 843 } 844 845 static int lan743x_mdiobus_c45_read(struct mii_bus *bus, int phy_id, int index) 846 { 847 struct lan743x_adapter *adapter = bus->priv; 848 u32 mmd_access; 849 int ret; 850 851 /* comfirm MII not busy */ 852 ret = lan743x_mac_mii_wait_till_not_busy(adapter); 853 if (ret < 0) 854 return ret; 855 if (index & MII_ADDR_C45) { 856 /* Load Register Address */ 857 lan743x_csr_write(adapter, MAC_MII_DATA, (u32)(index & 0xffff)); 858 mmd_access = lan743x_mac_mmd_access(phy_id, index, 859 MMD_ACCESS_ADDRESS); 860 lan743x_csr_write(adapter, MAC_MII_ACC, mmd_access); 861 ret = lan743x_mac_mii_wait_till_not_busy(adapter); 862 if (ret < 0) 863 return ret; 864 /* Read Data */ 865 mmd_access = lan743x_mac_mmd_access(phy_id, index, 866 MMD_ACCESS_READ); 867 lan743x_csr_write(adapter, MAC_MII_ACC, mmd_access); 868 ret = lan743x_mac_mii_wait_till_not_busy(adapter); 869 if (ret < 0) 870 return ret; 871 ret = lan743x_csr_read(adapter, MAC_MII_DATA); 872 return (int)(ret & 0xFFFF); 873 } 874 875 ret = lan743x_mdiobus_read(bus, phy_id, index); 876 return ret; 877 } 878 879 static int lan743x_mdiobus_c45_write(struct mii_bus *bus, 880 int phy_id, int index, u16 regval) 881 { 882 struct lan743x_adapter *adapter = bus->priv; 883 u32 mmd_access; 884 int ret; 885 886 /* confirm MII not busy */ 887 ret = lan743x_mac_mii_wait_till_not_busy(adapter); 888 if (ret < 0) 889 return ret; 890 if (index & MII_ADDR_C45) { 891 /* Load Register Address */ 892 lan743x_csr_write(adapter, MAC_MII_DATA, (u32)(index & 0xffff)); 893 mmd_access = lan743x_mac_mmd_access(phy_id, index, 894 MMD_ACCESS_ADDRESS); 895 lan743x_csr_write(adapter, MAC_MII_ACC, mmd_access); 896 ret = lan743x_mac_mii_wait_till_not_busy(adapter); 897 if (ret < 0) 898 return ret; 899 /* Write Data */ 900 lan743x_csr_write(adapter, MAC_MII_DATA, (u32)regval); 901 mmd_access = 
lan743x_mac_mmd_access(phy_id, index, 902 MMD_ACCESS_WRITE); 903 lan743x_csr_write(adapter, MAC_MII_ACC, mmd_access); 904 ret = lan743x_mac_mii_wait_till_not_busy(adapter); 905 } else { 906 ret = lan743x_mdiobus_write(bus, phy_id, index, regval); 907 } 908 909 return ret; 910 } 911 912 static void lan743x_mac_set_address(struct lan743x_adapter *adapter, 913 u8 *addr) 914 { 915 u32 addr_lo, addr_hi; 916 917 addr_lo = addr[0] | 918 addr[1] << 8 | 919 addr[2] << 16 | 920 addr[3] << 24; 921 addr_hi = addr[4] | 922 addr[5] << 8; 923 lan743x_csr_write(adapter, MAC_RX_ADDRL, addr_lo); 924 lan743x_csr_write(adapter, MAC_RX_ADDRH, addr_hi); 925 926 ether_addr_copy(adapter->mac_address, addr); 927 netif_info(adapter, drv, adapter->netdev, 928 "MAC address set to %pM\n", addr); 929 } 930 931 static int lan743x_mac_init(struct lan743x_adapter *adapter) 932 { 933 bool mac_address_valid = true; 934 struct net_device *netdev; 935 u32 mac_addr_hi = 0; 936 u32 mac_addr_lo = 0; 937 u32 data; 938 939 netdev = adapter->netdev; 940 941 /* disable auto duplex, and speed detection. 
Phylib does that */ 942 data = lan743x_csr_read(adapter, MAC_CR); 943 data &= ~(MAC_CR_ADD_ | MAC_CR_ASD_); 944 data |= MAC_CR_CNTR_RST_; 945 lan743x_csr_write(adapter, MAC_CR, data); 946 947 if (!is_valid_ether_addr(adapter->mac_address)) { 948 mac_addr_hi = lan743x_csr_read(adapter, MAC_RX_ADDRH); 949 mac_addr_lo = lan743x_csr_read(adapter, MAC_RX_ADDRL); 950 adapter->mac_address[0] = mac_addr_lo & 0xFF; 951 adapter->mac_address[1] = (mac_addr_lo >> 8) & 0xFF; 952 adapter->mac_address[2] = (mac_addr_lo >> 16) & 0xFF; 953 adapter->mac_address[3] = (mac_addr_lo >> 24) & 0xFF; 954 adapter->mac_address[4] = mac_addr_hi & 0xFF; 955 adapter->mac_address[5] = (mac_addr_hi >> 8) & 0xFF; 956 957 if (((mac_addr_hi & 0x0000FFFF) == 0x0000FFFF) && 958 mac_addr_lo == 0xFFFFFFFF) { 959 mac_address_valid = false; 960 } else if (!is_valid_ether_addr(adapter->mac_address)) { 961 mac_address_valid = false; 962 } 963 964 if (!mac_address_valid) 965 eth_random_addr(adapter->mac_address); 966 } 967 lan743x_mac_set_address(adapter, adapter->mac_address); 968 eth_hw_addr_set(netdev, adapter->mac_address); 969 970 return 0; 971 } 972 973 static int lan743x_mac_open(struct lan743x_adapter *adapter) 974 { 975 u32 temp; 976 977 temp = lan743x_csr_read(adapter, MAC_RX); 978 lan743x_csr_write(adapter, MAC_RX, temp | MAC_RX_RXEN_); 979 temp = lan743x_csr_read(adapter, MAC_TX); 980 lan743x_csr_write(adapter, MAC_TX, temp | MAC_TX_TXEN_); 981 return 0; 982 } 983 984 static void lan743x_mac_close(struct lan743x_adapter *adapter) 985 { 986 u32 temp; 987 988 temp = lan743x_csr_read(adapter, MAC_TX); 989 temp &= ~MAC_TX_TXEN_; 990 lan743x_csr_write(adapter, MAC_TX, temp); 991 lan743x_csr_wait_for_bit(adapter, MAC_TX, MAC_TX_TXD_, 992 1, 1000, 20000, 100); 993 994 temp = lan743x_csr_read(adapter, MAC_RX); 995 temp &= ~MAC_RX_RXEN_; 996 lan743x_csr_write(adapter, MAC_RX, temp); 997 lan743x_csr_wait_for_bit(adapter, MAC_RX, MAC_RX_RXD_, 998 1, 1000, 20000, 100); 999 } 1000 1001 static void 
lan743x_mac_flow_ctrl_set_enables(struct lan743x_adapter *adapter, 1002 bool tx_enable, bool rx_enable) 1003 { 1004 u32 flow_setting = 0; 1005 1006 /* set maximum pause time because when fifo space frees 1007 * up a zero value pause frame will be sent to release the pause 1008 */ 1009 flow_setting = MAC_FLOW_CR_FCPT_MASK_; 1010 if (tx_enable) 1011 flow_setting |= MAC_FLOW_CR_TX_FCEN_; 1012 if (rx_enable) 1013 flow_setting |= MAC_FLOW_CR_RX_FCEN_; 1014 lan743x_csr_write(adapter, MAC_FLOW, flow_setting); 1015 } 1016 1017 static int lan743x_mac_set_mtu(struct lan743x_adapter *adapter, int new_mtu) 1018 { 1019 int enabled = 0; 1020 u32 mac_rx = 0; 1021 1022 mac_rx = lan743x_csr_read(adapter, MAC_RX); 1023 if (mac_rx & MAC_RX_RXEN_) { 1024 enabled = 1; 1025 if (mac_rx & MAC_RX_RXD_) { 1026 lan743x_csr_write(adapter, MAC_RX, mac_rx); 1027 mac_rx &= ~MAC_RX_RXD_; 1028 } 1029 mac_rx &= ~MAC_RX_RXEN_; 1030 lan743x_csr_write(adapter, MAC_RX, mac_rx); 1031 lan743x_csr_wait_for_bit(adapter, MAC_RX, MAC_RX_RXD_, 1032 1, 1000, 20000, 100); 1033 lan743x_csr_write(adapter, MAC_RX, mac_rx | MAC_RX_RXD_); 1034 } 1035 1036 mac_rx &= ~(MAC_RX_MAX_SIZE_MASK_); 1037 mac_rx |= (((new_mtu + ETH_HLEN + ETH_FCS_LEN) 1038 << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_); 1039 lan743x_csr_write(adapter, MAC_RX, mac_rx); 1040 1041 if (enabled) { 1042 mac_rx |= MAC_RX_RXEN_; 1043 lan743x_csr_write(adapter, MAC_RX, mac_rx); 1044 } 1045 return 0; 1046 } 1047 1048 /* PHY */ 1049 static int lan743x_phy_reset(struct lan743x_adapter *adapter) 1050 { 1051 u32 data; 1052 1053 /* Only called with in probe, and before mdiobus_register */ 1054 1055 data = lan743x_csr_read(adapter, PMT_CTL); 1056 data |= PMT_CTL_ETH_PHY_RST_; 1057 lan743x_csr_write(adapter, PMT_CTL, data); 1058 1059 return readx_poll_timeout(LAN743X_CSR_READ_OP, PMT_CTL, data, 1060 (!(data & PMT_CTL_ETH_PHY_RST_) && 1061 (data & PMT_CTL_READY_)), 1062 50000, 1000000); 1063 } 1064 1065 static void lan743x_phy_update_flowcontrol(struct 
lan743x_adapter *adapter,
					   u16 local_adv, u16 remote_adv)
{
	struct lan743x_phy *phy = &adapter->phy;
	u8 cap;

	/* resolve pause capability from autoneg results, or fall back to
	 * the administratively requested settings
	 */
	if (phy->fc_autoneg)
		cap = mii_resolve_flowctrl_fdx(local_adv, remote_adv);
	else
		cap = phy->fc_request_control;

	lan743x_mac_flow_ctrl_set_enables(adapter,
					  cap & FLOW_CTRL_TX,
					  cap & FLOW_CTRL_RX);
}

/* One-time PHY setup at probe time; currently just a hardware reset */
static int lan743x_phy_init(struct lan743x_adapter *adapter)
{
	return lan743x_phy_reset(adapter);
}

/* phylib link-change callback.  When the link is running, mirror the
 * PHY's interface mode, duplex and speed into MAC_CR, then update flow
 * control enables and the PTP latency compensation for the new speed.
 */
static void lan743x_phy_link_status_change(struct net_device *netdev)
{
	struct lan743x_adapter *adapter = netdev_priv(netdev);
	struct phy_device *phydev = netdev->phydev;
	u32 data;

	phy_print_status(phydev);
	if (phydev->state == PHY_RUNNING) {
		int remote_advertisement = 0;
		int local_advertisement = 0;

		data = lan743x_csr_read(adapter, MAC_CR);

		/* set interface mode */
		if (phy_interface_is_rgmii(phydev))
			/* RGMII */
			data &= ~MAC_CR_MII_EN_;
		else
			/* GMII */
			data |= MAC_CR_MII_EN_;

		/* set duplex mode */
		if (phydev->duplex)
			data |= MAC_CR_DPX_;
		else
			data &= ~MAC_CR_DPX_;

		/* set bus speed */
		switch (phydev->speed) {
		case SPEED_10:
			data &= ~MAC_CR_CFG_H_;
			data &= ~MAC_CR_CFG_L_;
			break;
		case SPEED_100:
			data &= ~MAC_CR_CFG_H_;
			data |= MAC_CR_CFG_L_;
			break;
		case SPEED_1000:
			data |= MAC_CR_CFG_H_;
			data &= ~MAC_CR_CFG_L_;
			break;
		}
		lan743x_csr_write(adapter, MAC_CR, data);

		local_advertisement =
			linkmode_adv_to_mii_adv_t(phydev->advertising);
		remote_advertisement =
			linkmode_adv_to_mii_adv_t(phydev->lp_advertising);

		lan743x_phy_update_flowcontrol(adapter, local_advertisement,
					       remote_advertisement);
		lan743x_ptp_update_latency(adapter, phydev->speed);
	}
}

/* Stop and detach the PHY from the net device */
static void lan743x_phy_close(struct lan743x_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	phy_stop(netdev->phydev);
	phy_disconnect(netdev->phydev);
	netdev->phydev = NULL;
}

/* Attach a PHY: first try a devicetree-described PHY (or fixed link),
 * otherwise the first PHY on the internal MDIO bus.  Then trim
 * unsupported modes, enable pause support, and start the PHY state
 * machine.  Returns 0 on success or a negative errno.
 */
static int lan743x_phy_open(struct lan743x_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct lan743x_phy *phy = &adapter->phy;
	struct phy_device *phydev;
	int ret = -EIO;

	/* try devicetree phy, or fixed link */
	phydev = of_phy_get_and_connect(netdev, adapter->pdev->dev.of_node,
					lan743x_phy_link_status_change);

	if (!phydev) {
		/* try internal phy */
		phydev = phy_find_first(adapter->mdiobus);
		if (!phydev)
			goto return_error;

		/* PCI11x1x parts connect over RGMII, others over GMII */
		if (adapter->is_pci11x1x)
			ret = phy_connect_direct(netdev, phydev,
						 lan743x_phy_link_status_change,
						 PHY_INTERFACE_MODE_RGMII);
		else
			ret = phy_connect_direct(netdev, phydev,
						 lan743x_phy_link_status_change,
						 PHY_INTERFACE_MODE_GMII);
		if (ret)
			goto return_error;
	}

	/* MAC doesn't support 1000T Half */
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);

	/* support both flow controls */
	phy_support_asym_pause(phydev);
	phy->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX);
	phy->fc_autoneg = phydev->autoneg;

	phy_start(phydev);
	phy_start_aneg(phydev);
	phy_attached_info(phydev);
	return 0;

return_error:
	return ret;
}

/* Enable RSS hashing over IPv4/IPv6 TCP and UDP flows */
static void lan743x_rfe_open(struct lan743x_adapter *adapter)
{
	lan743x_csr_write(adapter, RFE_RSS_CFG,
		RFE_RSS_CFG_UDP_IPV6_EX_ |
		RFE_RSS_CFG_TCP_IPV6_EX_ |
		RFE_RSS_CFG_IPV6_EX_ |
		RFE_RSS_CFG_UDP_IPV6_ |
		RFE_RSS_CFG_TCP_IPV6_ |
		RFE_RSS_CFG_IPV6_ |
		RFE_RSS_CFG_UDP_IPV4_ |
		RFE_RSS_CFG_TCP_IPV4_ |
		RFE_RSS_CFG_IPV4_ |
		RFE_RSS_CFG_VALID_HASH_BITS_ |
		RFE_RSS_CFG_RSS_QUEUE_ENABLE_ |
		RFE_RSS_CFG_RSS_HASH_STORE_ |
RFE_RSS_CFG_RSS_ENABLE_); 1212 } 1213 1214 static void lan743x_rfe_update_mac_address(struct lan743x_adapter *adapter) 1215 { 1216 u8 *mac_addr; 1217 u32 mac_addr_hi = 0; 1218 u32 mac_addr_lo = 0; 1219 1220 /* Add mac address to perfect Filter */ 1221 mac_addr = adapter->mac_address; 1222 mac_addr_lo = ((((u32)(mac_addr[0])) << 0) | 1223 (((u32)(mac_addr[1])) << 8) | 1224 (((u32)(mac_addr[2])) << 16) | 1225 (((u32)(mac_addr[3])) << 24)); 1226 mac_addr_hi = ((((u32)(mac_addr[4])) << 0) | 1227 (((u32)(mac_addr[5])) << 8)); 1228 1229 lan743x_csr_write(adapter, RFE_ADDR_FILT_LO(0), mac_addr_lo); 1230 lan743x_csr_write(adapter, RFE_ADDR_FILT_HI(0), 1231 mac_addr_hi | RFE_ADDR_FILT_HI_VALID_); 1232 } 1233 1234 static void lan743x_rfe_set_multicast(struct lan743x_adapter *adapter) 1235 { 1236 struct net_device *netdev = adapter->netdev; 1237 u32 hash_table[DP_SEL_VHF_HASH_LEN]; 1238 u32 rfctl; 1239 u32 data; 1240 1241 rfctl = lan743x_csr_read(adapter, RFE_CTL); 1242 rfctl &= ~(RFE_CTL_AU_ | RFE_CTL_AM_ | 1243 RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_); 1244 rfctl |= RFE_CTL_AB_; 1245 if (netdev->flags & IFF_PROMISC) { 1246 rfctl |= RFE_CTL_AM_ | RFE_CTL_AU_; 1247 } else { 1248 if (netdev->flags & IFF_ALLMULTI) 1249 rfctl |= RFE_CTL_AM_; 1250 } 1251 1252 memset(hash_table, 0, DP_SEL_VHF_HASH_LEN * sizeof(u32)); 1253 if (netdev_mc_count(netdev)) { 1254 struct netdev_hw_addr *ha; 1255 int i; 1256 1257 rfctl |= RFE_CTL_DA_PERFECT_; 1258 i = 1; 1259 netdev_for_each_mc_addr(ha, netdev) { 1260 /* set first 32 into Perfect Filter */ 1261 if (i < 33) { 1262 lan743x_csr_write(adapter, 1263 RFE_ADDR_FILT_HI(i), 0); 1264 data = ha->addr[3]; 1265 data = ha->addr[2] | (data << 8); 1266 data = ha->addr[1] | (data << 8); 1267 data = ha->addr[0] | (data << 8); 1268 lan743x_csr_write(adapter, 1269 RFE_ADDR_FILT_LO(i), data); 1270 data = ha->addr[5]; 1271 data = ha->addr[4] | (data << 8); 1272 data |= RFE_ADDR_FILT_HI_VALID_; 1273 lan743x_csr_write(adapter, 1274 RFE_ADDR_FILT_HI(i), data); 
1275 } else { 1276 u32 bitnum = (ether_crc(ETH_ALEN, ha->addr) >> 1277 23) & 0x1FF; 1278 hash_table[bitnum / 32] |= (1 << (bitnum % 32)); 1279 rfctl |= RFE_CTL_MCAST_HASH_; 1280 } 1281 i++; 1282 } 1283 } 1284 1285 lan743x_dp_write(adapter, DP_SEL_RFE_RAM, 1286 DP_SEL_VHF_VLAN_LEN, 1287 DP_SEL_VHF_HASH_LEN, hash_table); 1288 lan743x_csr_write(adapter, RFE_CTL, rfctl); 1289 } 1290 1291 static int lan743x_dmac_init(struct lan743x_adapter *adapter) 1292 { 1293 u32 data = 0; 1294 1295 lan743x_csr_write(adapter, DMAC_CMD, DMAC_CMD_SWR_); 1296 lan743x_csr_wait_for_bit(adapter, DMAC_CMD, DMAC_CMD_SWR_, 1297 0, 1000, 20000, 100); 1298 switch (DEFAULT_DMA_DESCRIPTOR_SPACING) { 1299 case DMA_DESCRIPTOR_SPACING_16: 1300 data = DMAC_CFG_MAX_DSPACE_16_; 1301 break; 1302 case DMA_DESCRIPTOR_SPACING_32: 1303 data = DMAC_CFG_MAX_DSPACE_32_; 1304 break; 1305 case DMA_DESCRIPTOR_SPACING_64: 1306 data = DMAC_CFG_MAX_DSPACE_64_; 1307 break; 1308 case DMA_DESCRIPTOR_SPACING_128: 1309 data = DMAC_CFG_MAX_DSPACE_128_; 1310 break; 1311 default: 1312 return -EPERM; 1313 } 1314 if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0)) 1315 data |= DMAC_CFG_COAL_EN_; 1316 data |= DMAC_CFG_CH_ARB_SEL_RX_HIGH_; 1317 data |= DMAC_CFG_MAX_READ_REQ_SET_(6); 1318 lan743x_csr_write(adapter, DMAC_CFG, data); 1319 data = DMAC_COAL_CFG_TIMER_LIMIT_SET_(1); 1320 data |= DMAC_COAL_CFG_TIMER_TX_START_; 1321 data |= DMAC_COAL_CFG_FLUSH_INTS_; 1322 data |= DMAC_COAL_CFG_INT_EXIT_COAL_; 1323 data |= DMAC_COAL_CFG_CSR_EXIT_COAL_; 1324 data |= DMAC_COAL_CFG_TX_THRES_SET_(0x0A); 1325 data |= DMAC_COAL_CFG_RX_THRES_SET_(0x0C); 1326 lan743x_csr_write(adapter, DMAC_COAL_CFG, data); 1327 data = DMAC_OBFF_TX_THRES_SET_(0x08); 1328 data |= DMAC_OBFF_RX_THRES_SET_(0x0A); 1329 lan743x_csr_write(adapter, DMAC_OBFF_CFG, data); 1330 return 0; 1331 } 1332 1333 static int lan743x_dmac_tx_get_state(struct lan743x_adapter *adapter, 1334 int tx_channel) 1335 { 1336 u32 dmac_cmd = 0; 1337 1338 dmac_cmd = lan743x_csr_read(adapter, 
DMAC_CMD);
	return DMAC_CHANNEL_STATE_SET((dmac_cmd &
				       DMAC_CMD_START_T_(tx_channel)),
				      (dmac_cmd &
				       DMAC_CMD_STOP_T_(tx_channel)));
}

/* Poll until the tx dma channel leaves the stop-pending state.
 * Returns the final channel state, or -ENODEV if it is still stop
 * pending after ~100 sleep iterations.
 */
static int lan743x_dmac_tx_wait_till_stopped(struct lan743x_adapter *adapter,
					     int tx_channel)
{
	int timeout = 100;
	int result = 0;

	while (timeout &&
	       ((result = lan743x_dmac_tx_get_state(adapter, tx_channel)) ==
	       DMAC_CHANNEL_STATE_STOP_PENDING)) {
		usleep_range(1000, 20000);
		timeout--;
	}
	if (result == DMAC_CHANNEL_STATE_STOP_PENDING)
		result = -ENODEV;
	return result;
}

/* Derive the rx dma channel state from the DMAC command register */
static int lan743x_dmac_rx_get_state(struct lan743x_adapter *adapter,
				     int rx_channel)
{
	u32 dmac_cmd = 0;

	dmac_cmd = lan743x_csr_read(adapter, DMAC_CMD);
	return DMAC_CHANNEL_STATE_SET((dmac_cmd &
				       DMAC_CMD_START_R_(rx_channel)),
				      (dmac_cmd &
				       DMAC_CMD_STOP_R_(rx_channel)));
}

/* rx counterpart of lan743x_dmac_tx_wait_till_stopped */
static int lan743x_dmac_rx_wait_till_stopped(struct lan743x_adapter *adapter,
					     int rx_channel)
{
	int timeout = 100;
	int result = 0;

	while (timeout &&
	       ((result = lan743x_dmac_rx_get_state(adapter, rx_channel)) ==
	       DMAC_CHANNEL_STATE_STOP_PENDING)) {
		usleep_range(1000, 20000);
		timeout--;
	}
	if (result == DMAC_CHANNEL_STATE_STOP_PENDING)
		result = -ENODEV;
	return result;
}

/* Release one tx descriptor's resources: unmap its dma buffer, free or
 * timestamp its skb, then clear the descriptor and buffer_info.
 * @cleanup: true on teardown paths; a pending timestamp request is then
 *	     cancelled and the skb freed instead of being timestamped.
 */
static void lan743x_tx_release_desc(struct lan743x_tx *tx,
				    int descriptor_index, bool cleanup)
{
	struct lan743x_tx_buffer_info *buffer_info = NULL;
	struct lan743x_tx_descriptor *descriptor = NULL;
	u32 descriptor_type = 0;
	bool ignore_sync;

	descriptor = &tx->ring_cpu_ptr[descriptor_index];
	buffer_info = &tx->buffer_info[descriptor_index];
	if (!(buffer_info->flags & TX_BUFFER_INFO_FLAG_ACTIVE))
		goto done;

	descriptor_type = le32_to_cpu(descriptor->data0) &
			  TX_DESC_DATA0_DTYPE_MASK_;
	if (descriptor_type == TX_DESC_DATA0_DTYPE_DATA_)
		goto clean_up_data_descriptor;
	else
		goto clear_active;

clean_up_data_descriptor:
	if (buffer_info->dma_ptr) {
		/* fragments were mapped with skb_frag_dma_map (page), the
		 * head with dma_map_single; unmap with the matching call
		 */
		if (buffer_info->flags &
		    TX_BUFFER_INFO_FLAG_SKB_FRAGMENT) {
			dma_unmap_page(&tx->adapter->pdev->dev,
				       buffer_info->dma_ptr,
				       buffer_info->buffer_length,
				       DMA_TO_DEVICE);
		} else {
			dma_unmap_single(&tx->adapter->pdev->dev,
					 buffer_info->dma_ptr,
					 buffer_info->buffer_length,
					 DMA_TO_DEVICE);
		}
		buffer_info->dma_ptr = 0;
		buffer_info->buffer_length = 0;
	}
	if (!buffer_info->skb)
		goto clear_active;

	if (!(buffer_info->flags & TX_BUFFER_INFO_FLAG_TIMESTAMP_REQUESTED)) {
		dev_kfree_skb_any(buffer_info->skb);
		goto clear_skb;
	}

	if (cleanup) {
		lan743x_ptp_unrequest_tx_timestamp(tx->adapter);
		dev_kfree_skb_any(buffer_info->skb);
	} else {
		/* hand the skb to the ptp code for timestamp delivery */
		ignore_sync = (buffer_info->flags &
			       TX_BUFFER_INFO_FLAG_IGNORE_SYNC) != 0;
		lan743x_ptp_tx_timestamp_skb(tx->adapter,
					     buffer_info->skb, ignore_sync);
	}

clear_skb:
	buffer_info->skb = NULL;

clear_active:
	buffer_info->flags &= ~TX_BUFFER_INFO_FLAG_ACTIVE;

done:
	memset(buffer_info, 0, sizeof(*buffer_info));
	memset(descriptor, 0, sizeof(*descriptor));
}

/* Advance a tx ring index with wrap-around */
static int lan743x_tx_next_index(struct lan743x_tx *tx, int index)
{
	return ((++index) % tx->ring_size);
}

/* Release descriptors the hardware has consumed, up to the head index
 * the device wrote back to *head_cpu_ptr.
 */
static void lan743x_tx_release_completed_descriptors(struct lan743x_tx *tx)
{
	while (le32_to_cpu(*tx->head_cpu_ptr) != (tx->last_head)) {
		lan743x_tx_release_desc(tx, tx->last_head, false);
		tx->last_head = lan743x_tx_next_index(tx, tx->last_head);
	}
}

/* Release every descriptor in the ring (teardown path) */
static void lan743x_tx_release_all_descriptors(struct lan743x_tx *tx)
{
	u32 original_head = 0;

	original_head = tx->last_head;
	do {
		lan743x_tx_release_desc(tx, tx->last_head, true);
		tx->last_head = lan743x_tx_next_index(tx, tx->last_head);
	} while (tx->last_head != original_head);
	memset(tx->ring_cpu_ptr, 0,
	       sizeof(*tx->ring_cpu_ptr) * (tx->ring_size));
	memset(tx->buffer_info, 0,
	       sizeof(*tx->buffer_info) * (tx->ring_size));
}

/* Number of ring descriptors needed to transmit @skb: one for the
 * head, one per page fragment, plus an extension descriptor for GSO.
 */
static int lan743x_tx_get_desc_cnt(struct lan743x_tx *tx,
				   struct sk_buff *skb)
{
	int result = 1; /* 1 for the main skb buffer */
	int nr_frags = 0;

	if (skb_is_gso(skb))
		result++; /* requires an extension descriptor */
	nr_frags = skb_shinfo(skb)->nr_frags;
	result += nr_frags; /* 1 for each fragment buffer */
	return result;
}

/* Free descriptor slots currently available in the tx ring (one slot
 * is always kept unused to distinguish a full ring from an empty one).
 */
static int lan743x_tx_get_avail_desc(struct lan743x_tx *tx)
{
	int last_head = tx->last_head;
	int last_tail = tx->last_tail;

	if (last_tail >= last_head)
		return tx->ring_size - last_tail + last_head - 1;
	else
		return last_head - last_tail - 1;
}

/* Record the requested hardware timestamping mode in tx->ts_flags */
void lan743x_tx_set_timestamping_mode(struct lan743x_tx *tx,
				      bool enable_timestamping,
				      bool enable_onestep_sync)
{
	if (enable_timestamping)
		tx->ts_flags |= TX_TS_FLAG_TIMESTAMPING_ENABLED;
	else
		tx->ts_flags &= ~TX_TS_FLAG_TIMESTAMPING_ENABLED;
	if (enable_onestep_sync)
		tx->ts_flags |= TX_TS_FLAG_ONE_STEP_SYNC;
	else
		tx->ts_flags &= ~TX_TS_FLAG_ONE_STEP_SYNC;
}

/* Begin frame assembly: map the skb head buffer and fill in the first
 * data descriptor.  Returns 0 on success, -ENOMEM if the dma mapping
 * fails (nothing to clean up in that case).
 */
static int lan743x_tx_frame_start(struct lan743x_tx *tx,
				  unsigned char *first_buffer,
				  unsigned int first_buffer_length,
				  unsigned int frame_length,
				  bool time_stamp,
				  bool check_sum)
{
	/* called only from within lan743x_tx_xmit_frame.
	 * assuming tx->ring_lock has already been acquired.
	 */
	struct lan743x_tx_descriptor *tx_descriptor = NULL;
	struct lan743x_tx_buffer_info *buffer_info = NULL;
	struct lan743x_adapter *adapter = tx->adapter;
	struct device *dev = &adapter->pdev->dev;
	dma_addr_t dma_ptr;

	tx->frame_flags |= TX_FRAME_FLAG_IN_PROGRESS;
	tx->frame_first = tx->last_tail;
	tx->frame_tail = tx->frame_first;

	tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
	buffer_info = &tx->buffer_info[tx->frame_tail];
	dma_ptr = dma_map_single(dev, first_buffer, first_buffer_length,
				 DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_ptr))
		return -ENOMEM;

	tx_descriptor->data1 = cpu_to_le32(DMA_ADDR_LOW32(dma_ptr));
	tx_descriptor->data2 = cpu_to_le32(DMA_ADDR_HIGH32(dma_ptr));
	tx_descriptor->data3 = cpu_to_le32((frame_length << 16) &
			       TX_DESC_DATA3_FRAME_LENGTH_MSS_MASK_);

	buffer_info->skb = NULL;
	buffer_info->dma_ptr = dma_ptr;
	buffer_info->buffer_length = first_buffer_length;
	buffer_info->flags |= TX_BUFFER_INFO_FLAG_ACTIVE;

	/* first-segment data descriptor; FCS is inserted by hardware */
	tx->frame_data0 = (first_buffer_length &
			  TX_DESC_DATA0_BUF_LENGTH_MASK_) |
			  TX_DESC_DATA0_DTYPE_DATA_ |
			  TX_DESC_DATA0_FS_ |
			  TX_DESC_DATA0_FCS_;
	if (time_stamp)
		tx->frame_data0 |= TX_DESC_DATA0_TSE_;

	if (check_sum)
		tx->frame_data0 |= TX_DESC_DATA0_ICE_ |
				   TX_DESC_DATA0_IPE_ |
				   TX_DESC_DATA0_TPE_;

	/* data0 will be programmed in one of other frame assembler functions */
	return 0;
}

/* Append the LSO extension descriptor to the frame being assembled */
static void lan743x_tx_frame_add_lso(struct lan743x_tx *tx,
				     unsigned int frame_length,
				     int nr_frags)
{
	/* called only from within lan743x_tx_xmit_frame.
	 * assuming tx->ring_lock has already been acquired.
	 */
	struct lan743x_tx_descriptor *tx_descriptor = NULL;
	struct lan743x_tx_buffer_info *buffer_info = NULL;

	/* wrap up previous descriptor */
	tx->frame_data0 |= TX_DESC_DATA0_EXT_;
	if (nr_frags <= 0) {
		tx->frame_data0 |= TX_DESC_DATA0_LS_;
		tx->frame_data0 |= TX_DESC_DATA0_IOC_;
	}
	tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
	tx_descriptor->data0 = cpu_to_le32(tx->frame_data0);

	/* move to next descriptor */
	tx->frame_tail = lan743x_tx_next_index(tx, tx->frame_tail);
	tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
	buffer_info = &tx->buffer_info[tx->frame_tail];

	/* add extension descriptor */
	tx_descriptor->data1 = 0;
	tx_descriptor->data2 = 0;
	tx_descriptor->data3 = 0;

	buffer_info->skb = NULL;
	buffer_info->dma_ptr = 0;
	buffer_info->buffer_length = 0;
	buffer_info->flags |= TX_BUFFER_INFO_FLAG_ACTIVE;

	tx->frame_data0 = (frame_length & TX_DESC_DATA0_EXT_PAY_LENGTH_MASK_) |
			  TX_DESC_DATA0_DTYPE_EXT_ |
			  TX_DESC_DATA0_EXT_LSO_;

	/* data0 will be programmed in one of other frame assembler functions */
}

/* Map one skb page fragment and append a data descriptor for it.
 * On dma mapping failure, all descriptors assembled so far for this
 * frame are released and -ENOMEM is returned; the caller must not
 * call lan743x_tx_frame_end afterwards.
 */
static int lan743x_tx_frame_add_fragment(struct lan743x_tx *tx,
					 const skb_frag_t *fragment,
					 unsigned int frame_length)
{
	/* called only from within lan743x_tx_xmit_frame
	 * assuming tx->ring_lock has already been acquired
	 */
	struct lan743x_tx_descriptor *tx_descriptor = NULL;
	struct lan743x_tx_buffer_info *buffer_info = NULL;
	struct lan743x_adapter *adapter = tx->adapter;
	struct device *dev = &adapter->pdev->dev;
	unsigned int fragment_length = 0;
	dma_addr_t dma_ptr;

	fragment_length = skb_frag_size(fragment);
	if (!fragment_length)
		return 0;

	/* wrap up previous descriptor */
	tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
	tx_descriptor->data0 = cpu_to_le32(tx->frame_data0);

	/* move to next descriptor */
	tx->frame_tail = lan743x_tx_next_index(tx, tx->frame_tail);
	tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
	buffer_info = &tx->buffer_info[tx->frame_tail];
	dma_ptr = skb_frag_dma_map(dev, fragment,
				   0, fragment_length,
				   DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_ptr)) {
		int desc_index;

		/* cleanup all previously setup descriptors */
		desc_index = tx->frame_first;
		while (desc_index != tx->frame_tail) {
			lan743x_tx_release_desc(tx, desc_index, true);
			desc_index = lan743x_tx_next_index(tx, desc_index);
		}
		dma_wmb();
		tx->frame_flags &= ~TX_FRAME_FLAG_IN_PROGRESS;
		tx->frame_first = 0;
		tx->frame_data0 = 0;
		tx->frame_tail = 0;
		return -ENOMEM;
	}

	tx_descriptor->data1 = cpu_to_le32(DMA_ADDR_LOW32(dma_ptr));
	tx_descriptor->data2 = cpu_to_le32(DMA_ADDR_HIGH32(dma_ptr));
	tx_descriptor->data3 = cpu_to_le32((frame_length << 16) &
			       TX_DESC_DATA3_FRAME_LENGTH_MSS_MASK_);

	buffer_info->skb = NULL;
	buffer_info->dma_ptr = dma_ptr;
	buffer_info->buffer_length = fragment_length;
	buffer_info->flags |= TX_BUFFER_INFO_FLAG_ACTIVE;
	buffer_info->flags |= TX_BUFFER_INFO_FLAG_SKB_FRAGMENT;

	tx->frame_data0 = (fragment_length & TX_DESC_DATA0_BUF_LENGTH_MASK_) |
			  TX_DESC_DATA0_DTYPE_DATA_ |
			  TX_DESC_DATA0_FCS_;

	/* data0 will be programmed in one of other frame assembler functions */
	return 0;
}

/* Finish frame assembly: attach the skb to the final descriptor,
 * commit its data0 word, and ring the channel tail doorbell.
 */
static void lan743x_tx_frame_end(struct lan743x_tx *tx,
				 struct sk_buff *skb,
				 bool time_stamp,
				 bool ignore_sync)
{
	/* called only from within lan743x_tx_xmit_frame
	 * assuming tx->ring_lock has already been acquired
	 */
	struct lan743x_tx_descriptor *tx_descriptor = NULL;
	struct lan743x_tx_buffer_info *buffer_info = NULL;
	struct lan743x_adapter *adapter = tx->adapter;
	u32 tx_tail_flags = 0;

	/* wrap up previous descriptor
	 */
	if ((tx->frame_data0 & TX_DESC_DATA0_DTYPE_MASK_) ==
	    TX_DESC_DATA0_DTYPE_DATA_) {
		tx->frame_data0 |= TX_DESC_DATA0_LS_;
		tx->frame_data0 |= TX_DESC_DATA0_IOC_;
	}

	tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
	buffer_info = &tx->buffer_info[tx->frame_tail];
	buffer_info->skb = skb;
	if (time_stamp)
		buffer_info->flags |= TX_BUFFER_INFO_FLAG_TIMESTAMP_REQUESTED;
	if (ignore_sync)
		buffer_info->flags |= TX_BUFFER_INFO_FLAG_IGNORE_SYNC;

	tx_descriptor->data0 = cpu_to_le32(tx->frame_data0);
	tx->frame_tail = lan743x_tx_next_index(tx, tx->frame_tail);
	tx->last_tail = tx->frame_tail;

	/* descriptor writes must reach memory before the tail doorbell */
	dma_wmb();

	if (tx->vector_flags & LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET)
		tx_tail_flags |= TX_TAIL_SET_TOP_INT_VEC_EN_;
	if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET)
		tx_tail_flags |= TX_TAIL_SET_DMAC_INT_EN_ |
				 TX_TAIL_SET_TOP_INT_EN_;

	lan743x_csr_write(adapter, TX_TAIL(tx->channel_number),
			  tx_tail_flags | tx->frame_tail);
	tx->frame_flags &= ~TX_FRAME_FLAG_IN_PROGRESS;
}

/* Transmit one skb on this channel (takes tx->ring_lock).  If the ring
 * lacks space the skb is parked in tx->overflow_skb and the queue is
 * stopped; otherwise the frame is assembled descriptor by descriptor
 * and handed to the dma engine.  Always returns NETDEV_TX_OK; on
 * internal errors the skb is freed.
 */
static netdev_tx_t lan743x_tx_xmit_frame(struct lan743x_tx *tx,
					 struct sk_buff *skb)
{
	int required_number_of_descriptors = 0;
	unsigned int start_frame_length = 0;
	unsigned int frame_length = 0;
	unsigned int head_length = 0;
	unsigned long irq_flags = 0;
	bool do_timestamp = false;
	bool ignore_sync = false;
	int nr_frags = 0;
	bool gso = false;
	int j;

	required_number_of_descriptors = lan743x_tx_get_desc_cnt(tx, skb);

	spin_lock_irqsave(&tx->ring_lock, irq_flags);
	if (required_number_of_descriptors >
	    lan743x_tx_get_avail_desc(tx)) {
		if (required_number_of_descriptors > (tx->ring_size - 1)) {
			/* could never fit even in an empty ring: drop */
			dev_kfree_skb_irq(skb);
		} else {
			/* save to overflow buffer */
			tx->overflow_skb = skb;
			netif_stop_queue(tx->adapter->netdev);
		}
		goto unlock;
	}

	/* space available, transmit skb  */
	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
	    (tx->ts_flags & TX_TS_FLAG_TIMESTAMPING_ENABLED) &&
	    (lan743x_ptp_request_tx_timestamp(tx->adapter))) {
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		do_timestamp = true;
		if (tx->ts_flags & TX_TS_FLAG_ONE_STEP_SYNC)
			ignore_sync = true;
	}
	head_length = skb_headlen(skb);
	frame_length = skb_pagelen(skb);
	nr_frags = skb_shinfo(skb)->nr_frags;
	start_frame_length = frame_length;
	gso = skb_is_gso(skb);
	if (gso) {
		/* for GSO the first descriptor carries the MSS instead of
		 * the full frame length
		 */
		start_frame_length = max(skb_shinfo(skb)->gso_size,
					 (unsigned short)8);
	}

	if (lan743x_tx_frame_start(tx,
				   skb->data, head_length,
				   start_frame_length,
				   do_timestamp,
				   skb->ip_summed == CHECKSUM_PARTIAL)) {
		dev_kfree_skb_irq(skb);
		goto unlock;
	}
	tx->frame_count++;

	if (gso)
		lan743x_tx_frame_add_lso(tx, frame_length, nr_frags);

	if (nr_frags <= 0)
		goto finish;

	for (j = 0; j < nr_frags; j++) {
		const skb_frag_t *frag = &(skb_shinfo(skb)->frags[j]);

		if (lan743x_tx_frame_add_fragment(tx, frag, frame_length)) {
			/* upon error no need to call
			 * lan743x_tx_frame_end
			 * frame assembler clean up was performed inside
			 * lan743x_tx_frame_add_fragment
			 */
			dev_kfree_skb_irq(skb);
			goto unlock;
		}
	}

finish:
	lan743x_tx_frame_end(tx, skb, do_timestamp, ignore_sync);

unlock:
	spin_unlock_irqrestore(&tx->ring_lock, irq_flags);
	return NETDEV_TX_OK;
}

/* NAPI poll for a tx channel: acknowledge the IOC interrupt, reclaim
 * completed descriptors, restart the queue (or retransmit the parked
 * overflow skb) when space frees up, then re-enable the interrupt.
 */
static int lan743x_tx_napi_poll(struct napi_struct *napi, int weight)
{
	struct lan743x_tx *tx = container_of(napi, struct lan743x_tx, napi);
	struct lan743x_adapter *adapter = tx->adapter;
	bool start_transmitter = false;
	unsigned long irq_flags = 0;
	u32 ioc_bit = 0;

	ioc_bit = DMAC_INT_BIT_TX_IOC_(tx->channel_number);
	lan743x_csr_read(adapter, DMAC_INT_STS);
	if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C)
		lan743x_csr_write(adapter, DMAC_INT_STS, ioc_bit);
	spin_lock_irqsave(&tx->ring_lock, irq_flags);

	/* clean up tx ring */
	lan743x_tx_release_completed_descriptors(tx);
	if (netif_queue_stopped(adapter->netdev)) {
		if (tx->overflow_skb) {
			if (lan743x_tx_get_desc_cnt(tx, tx->overflow_skb) <=
			    lan743x_tx_get_avail_desc(tx))
				start_transmitter = true;
		} else {
			netif_wake_queue(adapter->netdev);
		}
	}
	spin_unlock_irqrestore(&tx->ring_lock, irq_flags);

	if (start_transmitter) {
		/* space is now available, transmit overflow skb */
		lan743x_tx_xmit_frame(tx, tx->overflow_skb);
		tx->overflow_skb = NULL;
		netif_wake_queue(adapter->netdev);
	}

	if (!napi_complete(napi))
		goto done;

	/* enable isr */
	lan743x_csr_write(adapter, INT_EN_SET,
			  INT_BIT_DMA_TX_(tx->channel_number));
	lan743x_csr_read(adapter, INT_STS);

done:
	return 0;
}

/* Free the head-writeback word, buffer_info array, and descriptor
 * ring; safe to call on a partially initialized ring.
 */
static void lan743x_tx_ring_cleanup(struct lan743x_tx *tx)
{
	if (tx->head_cpu_ptr) {
		dma_free_coherent(&tx->adapter->pdev->dev,
				  sizeof(*tx->head_cpu_ptr), tx->head_cpu_ptr,
				  tx->head_dma_ptr);
		tx->head_cpu_ptr = NULL;
		tx->head_dma_ptr = 0;
	}
	kfree(tx->buffer_info);
	tx->buffer_info = NULL;

	if (tx->ring_cpu_ptr) {
		dma_free_coherent(&tx->adapter->pdev->dev,
				  tx->ring_allocation_size, tx->ring_cpu_ptr,
				  tx->ring_dma_ptr);
		tx->ring_allocation_size = 0;
		tx->ring_cpu_ptr = NULL;
		tx->ring_dma_ptr = 0;
	}
	tx->ring_size = 0;
}

/* Allocate the tx descriptor ring (coherent, page aligned), the
 * buffer_info array, and the dma head-writeback word.  On any failure
 * everything allocated so far is released and a negative errno is
 * returned.
 */
static int lan743x_tx_ring_init(struct lan743x_tx *tx)
{
	size_t ring_allocation_size = 0;
	void *cpu_ptr = NULL;
	dma_addr_t dma_ptr;
	int ret = -ENOMEM;

	tx->ring_size = LAN743X_TX_RING_SIZE;
	if (tx->ring_size & ~TX_CFG_B_TX_RING_LEN_MASK_) {
		ret = -EINVAL;
		goto cleanup;
	}
	if (dma_set_mask_and_coherent(&tx->adapter->pdev->dev,
				      DMA_BIT_MASK(64))) {
		dev_warn(&tx->adapter->pdev->dev,
			 "lan743x_: No suitable DMA available\n");
		ret = -ENOMEM;
		goto cleanup;
	}
	ring_allocation_size = ALIGN(tx->ring_size *
				     sizeof(struct lan743x_tx_descriptor),
				     PAGE_SIZE);
	dma_ptr = 0;
	cpu_ptr = dma_alloc_coherent(&tx->adapter->pdev->dev,
				     ring_allocation_size, &dma_ptr, GFP_KERNEL);
	if (!cpu_ptr) {
		ret = -ENOMEM;
		goto cleanup;
	}

	tx->ring_allocation_size = ring_allocation_size;
	tx->ring_cpu_ptr = (struct lan743x_tx_descriptor *)cpu_ptr;
	tx->ring_dma_ptr = dma_ptr;

	cpu_ptr = kcalloc(tx->ring_size, sizeof(*tx->buffer_info), GFP_KERNEL);
	if (!cpu_ptr) {
		ret = -ENOMEM;
		goto cleanup;
	}
	tx->buffer_info = (struct lan743x_tx_buffer_info *)cpu_ptr;
	dma_ptr = 0;
	cpu_ptr = dma_alloc_coherent(&tx->adapter->pdev->dev,
				     sizeof(*tx->head_cpu_ptr), &dma_ptr,
				     GFP_KERNEL);
	if (!cpu_ptr) {
		ret = -ENOMEM;
		goto cleanup;
	}

	tx->head_cpu_ptr = cpu_ptr;
	tx->head_dma_ptr = dma_ptr;
	/* head writeback address must be 4-byte aligned for the hardware */
	if (tx->head_dma_ptr & 0x3) {
		ret = -ENOMEM;
		goto cleanup;
	}

	return 0;

cleanup:
	lan743x_tx_ring_cleanup(tx);
	return ret;
}

/* Stop and tear down a tx channel: stop the dmac channel, mask its
 * interrupts, tear down napi, disable the fifo, then release all
 * descriptors, any parked overflow skb, and the ring memory.
 */
static void lan743x_tx_close(struct lan743x_tx *tx)
{
	struct lan743x_adapter *adapter = tx->adapter;

	lan743x_csr_write(adapter,
			  DMAC_CMD,
			  DMAC_CMD_STOP_T_(tx->channel_number));
	lan743x_dmac_tx_wait_till_stopped(adapter, tx->channel_number);

	lan743x_csr_write(adapter,
			  DMAC_INT_EN_CLR,
			  DMAC_INT_BIT_TX_IOC_(tx->channel_number));
	lan743x_csr_write(adapter, INT_EN_CLR,
			  INT_BIT_DMA_TX_(tx->channel_number));
	napi_disable(&tx->napi);
	netif_napi_del(&tx->napi);

	lan743x_csr_write(adapter, FCT_TX_CTL,
			  FCT_TX_CTL_DIS_(tx->channel_number));
	lan743x_csr_wait_for_bit(adapter, FCT_TX_CTL,
				 FCT_TX_CTL_EN_(tx->channel_number),
				 0, 1000, 20000, 100);

	lan743x_tx_release_all_descriptors(tx);

	if (tx->overflow_skb) {
		dev_kfree_skb(tx->overflow_skb);
		tx->overflow_skb = NULL;
	}

	lan743x_tx_ring_cleanup(tx);
}

/* Bring up one tx channel: allocate the ring, reset and enable the tx
 * fifo and dmac channel, program base/head-writeback addresses and
 * configuration, register napi, enable interrupts, and start the dmac
 * channel.  Returns 0 or a negative errno from ring init.
 */
static int lan743x_tx_open(struct lan743x_tx *tx)
{
	struct lan743x_adapter *adapter = NULL;
	u32 data = 0;
	int ret;

	adapter = tx->adapter;
	ret = lan743x_tx_ring_init(tx);
	if (ret)
		return ret;

	/* initialize fifo */
	lan743x_csr_write(adapter, FCT_TX_CTL,
			  FCT_TX_CTL_RESET_(tx->channel_number));
	lan743x_csr_wait_for_bit(adapter, FCT_TX_CTL,
				 FCT_TX_CTL_RESET_(tx->channel_number),
				 0, 1000, 20000, 100);

	/* enable fifo */
	lan743x_csr_write(adapter, FCT_TX_CTL,
			  FCT_TX_CTL_EN_(tx->channel_number));

	/* reset tx channel */
	lan743x_csr_write(adapter, DMAC_CMD,
			  DMAC_CMD_TX_SWR_(tx->channel_number));
	lan743x_csr_wait_for_bit(adapter, DMAC_CMD,
				 DMAC_CMD_TX_SWR_(tx->channel_number),
				 0, 1000, 20000, 100);

	/* Write TX_BASE_ADDR */
	lan743x_csr_write(adapter,
			  TX_BASE_ADDRH(tx->channel_number),
			  DMA_ADDR_HIGH32(tx->ring_dma_ptr));
	lan743x_csr_write(adapter,
			  TX_BASE_ADDRL(tx->channel_number),
			  DMA_ADDR_LOW32(tx->ring_dma_ptr));

	/* Write TX_CFG_B */
	data = lan743x_csr_read(adapter, TX_CFG_B(tx->channel_number));
	data &= ~TX_CFG_B_TX_RING_LEN_MASK_;
	data |= ((tx->ring_size) & TX_CFG_B_TX_RING_LEN_MASK_);
	if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0))
		data |= TX_CFG_B_TDMABL_512_;
	lan743x_csr_write(adapter, TX_CFG_B(tx->channel_number), data);

	/* Write TX_CFG_A */
	data = TX_CFG_A_TX_TMR_HPWB_SEL_IOC_ | TX_CFG_A_TX_HP_WB_EN_;
	if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0)) {
		data |= TX_CFG_A_TX_HP_WB_ON_INT_TMR_;
		data |= TX_CFG_A_TX_PF_THRES_SET_(0x10);
		data |= TX_CFG_A_TX_PF_PRI_THRES_SET_(0x04);
		data |= TX_CFG_A_TX_HP_WB_THRES_SET_(0x07);
	}
	lan743x_csr_write(adapter, TX_CFG_A(tx->channel_number), data);

	/* Write TX_HEAD_WRITEBACK_ADDR */
	lan743x_csr_write(adapter,
			  TX_HEAD_WRITEBACK_ADDRH(tx->channel_number),
			  DMA_ADDR_HIGH32(tx->head_dma_ptr));
	lan743x_csr_write(adapter,
			  TX_HEAD_WRITEBACK_ADDRL(tx->channel_number),
			  DMA_ADDR_LOW32(tx->head_dma_ptr));

	/* set last head */
	tx->last_head = lan743x_csr_read(adapter, TX_HEAD(tx->channel_number));

	/* write TX_TAIL */
	tx->last_tail = 0;
	lan743x_csr_write(adapter, TX_TAIL(tx->channel_number),
			  (u32)(tx->last_tail));
	tx->vector_flags = lan743x_intr_get_vector_flags(adapter,
							 INT_BIT_DMA_TX_
							 (tx->channel_number));
	netif_napi_add_tx_weight(adapter->netdev,
				 &tx->napi, lan743x_tx_napi_poll,
				 tx->ring_size - 1);
	napi_enable(&tx->napi);

	/* interrupt handling mode depends on the vector's capabilities */
	data = 0;
	if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_CLEAR)
		data |= TX_CFG_C_TX_TOP_INT_EN_AUTO_CLR_;
	if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_AUTO_CLEAR)
		data |= TX_CFG_C_TX_DMA_INT_STS_AUTO_CLR_;
	if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_R2C)
		data |= TX_CFG_C_TX_INT_STS_R2C_MODE_MASK_;
	if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_R2C)
		data |= TX_CFG_C_TX_INT_EN_R2C_;
	lan743x_csr_write(adapter, TX_CFG_C(tx->channel_number), data);

	if (!(tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET))
		lan743x_csr_write(adapter, INT_EN_SET,
				  INT_BIT_DMA_TX_(tx->channel_number));
	lan743x_csr_write(adapter, DMAC_INT_EN_SET,
			  DMAC_INT_BIT_TX_IOC_(tx->channel_number));

	/* start dmac channel */
	lan743x_csr_write(adapter, DMAC_CMD,
			  DMAC_CMD_START_T_(tx->channel_number));
	return 0;
}

/* Advance an rx ring index with wrap-around */
static int lan743x_rx_next_index(struct lan743x_rx *rx, int index)
{
	return ((++index) % rx->ring_size);
}

/* Notify the hardware of newly refilled rx descriptors */
static void lan743x_rx_update_tail(struct lan743x_rx *rx, int index)
{
	/* update the tail once per 8 descriptors */
	if ((index & 7) == 7)
		lan743x_csr_write(rx->adapter, RX_TAIL(rx->channel_number),
				  index);
}

/* Allocate and map a fresh receive skb for ring slot @index, releasing
 * any buffer previously mapped there, and hand the descriptor back to
 * the hardware.  Returns 0 or -ENOMEM (the old buffer is kept on
 * failure).
 */
static int lan743x_rx_init_ring_element(struct lan743x_rx *rx, int index,
					gfp_t gfp)
{
	struct net_device *netdev = rx->adapter->netdev;
	struct device *dev = &rx->adapter->pdev->dev;
	struct lan743x_rx_buffer_info *buffer_info;
	unsigned int buffer_length, used_length;
	struct lan743x_rx_descriptor *descriptor;
	struct sk_buff *skb;
	dma_addr_t dma_ptr;

	buffer_length = netdev->mtu + ETH_HLEN + ETH_FCS_LEN + RX_HEAD_PADDING;

	descriptor = &rx->ring_cpu_ptr[index];
	buffer_info = &rx->buffer_info[index];
	skb = __netdev_alloc_skb(netdev, buffer_length, gfp);
	if (!skb)
		return -ENOMEM;
	dma_ptr = dma_map_single(dev, skb->data, buffer_length, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, dma_ptr)) {
		dev_kfree_skb_any(skb);
		return -ENOMEM;
	}
	if (buffer_info->dma_ptr) {
		/* sync used area of buffer only */
		if (le32_to_cpu(descriptor->data0) & RX_DESC_DATA0_LS_)
			/* frame length is valid only if LS bit is set.
			 * it's a safe upper bound for the used area in this
			 * buffer.
2122 */ 2123 used_length = min(RX_DESC_DATA0_FRAME_LENGTH_GET_ 2124 (le32_to_cpu(descriptor->data0)), 2125 buffer_info->buffer_length); 2126 else 2127 used_length = buffer_info->buffer_length; 2128 dma_sync_single_for_cpu(dev, buffer_info->dma_ptr, 2129 used_length, 2130 DMA_FROM_DEVICE); 2131 dma_unmap_single_attrs(dev, buffer_info->dma_ptr, 2132 buffer_info->buffer_length, 2133 DMA_FROM_DEVICE, 2134 DMA_ATTR_SKIP_CPU_SYNC); 2135 } 2136 2137 buffer_info->skb = skb; 2138 buffer_info->dma_ptr = dma_ptr; 2139 buffer_info->buffer_length = buffer_length; 2140 descriptor->data1 = cpu_to_le32(DMA_ADDR_LOW32(buffer_info->dma_ptr)); 2141 descriptor->data2 = cpu_to_le32(DMA_ADDR_HIGH32(buffer_info->dma_ptr)); 2142 descriptor->data3 = 0; 2143 descriptor->data0 = cpu_to_le32((RX_DESC_DATA0_OWN_ | 2144 (buffer_length & RX_DESC_DATA0_BUF_LENGTH_MASK_))); 2145 lan743x_rx_update_tail(rx, index); 2146 2147 return 0; 2148 } 2149 2150 static void lan743x_rx_reuse_ring_element(struct lan743x_rx *rx, int index) 2151 { 2152 struct lan743x_rx_buffer_info *buffer_info; 2153 struct lan743x_rx_descriptor *descriptor; 2154 2155 descriptor = &rx->ring_cpu_ptr[index]; 2156 buffer_info = &rx->buffer_info[index]; 2157 2158 descriptor->data1 = cpu_to_le32(DMA_ADDR_LOW32(buffer_info->dma_ptr)); 2159 descriptor->data2 = cpu_to_le32(DMA_ADDR_HIGH32(buffer_info->dma_ptr)); 2160 descriptor->data3 = 0; 2161 descriptor->data0 = cpu_to_le32((RX_DESC_DATA0_OWN_ | 2162 ((buffer_info->buffer_length) & 2163 RX_DESC_DATA0_BUF_LENGTH_MASK_))); 2164 lan743x_rx_update_tail(rx, index); 2165 } 2166 2167 static void lan743x_rx_release_ring_element(struct lan743x_rx *rx, int index) 2168 { 2169 struct lan743x_rx_buffer_info *buffer_info; 2170 struct lan743x_rx_descriptor *descriptor; 2171 2172 descriptor = &rx->ring_cpu_ptr[index]; 2173 buffer_info = &rx->buffer_info[index]; 2174 2175 memset(descriptor, 0, sizeof(*descriptor)); 2176 2177 if (buffer_info->dma_ptr) { 2178 dma_unmap_single(&rx->adapter->pdev->dev, 2179 
				 buffer_info->dma_ptr,
				 buffer_info->buffer_length,
				 DMA_FROM_DEVICE);
		buffer_info->dma_ptr = 0;
	}

	if (buffer_info->skb) {
		dev_kfree_skb(buffer_info->skb);
		buffer_info->skb = NULL;
	}

	memset(buffer_info, 0, sizeof(*buffer_info));
}

/* Linearize @skb and trim it to @frame_length minus the trailing FCS.
 * Returns the trimmed skb, or NULL (skb freed) if linearization fails.
 */
static struct sk_buff *
lan743x_rx_trim_skb(struct sk_buff *skb, int frame_length)
{
	if (skb_linearize(skb)) {
		dev_kfree_skb_irq(skb);
		return NULL;
	}
	frame_length = max_t(int, 0, frame_length - ETH_FCS_LEN);
	if (skb->len > frame_length) {
		skb->tail -= skb->len - frame_length;
		skb->len = frame_length;
	}
	return skb;
}

/* Consume one completed RX descriptor at rx->last_head.
 *
 * Buffers of a multi-buffer frame are chained onto rx->skb_head via
 * frag_list; the frame is delivered to the stack (napi_gro_receive) once
 * the last-segment buffer has been processed. An optional extension
 * descriptor following the last segment carries the hardware receive
 * timestamp.
 *
 * Returns RX_PROCESS_RESULT_BUFFER_RECEIVED when a descriptor was
 * consumed, or RX_PROCESS_RESULT_NOTHING_TO_DO when the ring is empty or
 * a needed descriptor is still owned by hardware.
 */
static int lan743x_rx_process_buffer(struct lan743x_rx *rx)
{
	int current_head_index = le32_to_cpu(*rx->head_cpu_ptr);
	struct lan743x_rx_descriptor *descriptor, *desc_ext;
	struct net_device *netdev = rx->adapter->netdev;
	int result = RX_PROCESS_RESULT_NOTHING_TO_DO;
	struct lan743x_rx_buffer_info *buffer_info;
	int frame_length, buffer_length;
	int extension_index = -1;
	bool is_last, is_first;
	struct sk_buff *skb;

	/* sanity-check the head index written back by hardware */
	if (current_head_index < 0 || current_head_index >= rx->ring_size)
		goto done;

	if (rx->last_head < 0 || rx->last_head >= rx->ring_size)
		goto done;

	if (rx->last_head == current_head_index)
		goto done;

	descriptor = &rx->ring_cpu_ptr[rx->last_head];
	if (le32_to_cpu(descriptor->data0) & RX_DESC_DATA0_OWN_)
		goto done;
	buffer_info = &rx->buffer_info[rx->last_head];

	is_last = le32_to_cpu(descriptor->data0) & RX_DESC_DATA0_LS_;
	is_first = le32_to_cpu(descriptor->data0) & RX_DESC_DATA0_FS_;

	if (is_last && le32_to_cpu(descriptor->data0) & RX_DESC_DATA0_EXT_) {
		/* extension is expected to follow */
		int index = lan743x_rx_next_index(rx, rx->last_head);

		if (index == current_head_index)
			/* extension not yet available */
			goto done;
		desc_ext = &rx->ring_cpu_ptr[index];
		if (le32_to_cpu(desc_ext->data0) & RX_DESC_DATA0_OWN_)
			/* extension not yet available */
			goto done;
		if (!(le32_to_cpu(desc_ext->data0) & RX_DESC_DATA0_EXT_))
			goto move_forward;
		extension_index = index;
	}

	/* Only the last buffer in a multi-buffer frame contains the total frame
	 * length. The chip occasionally sends more buffers than strictly
	 * required to reach the total frame length.
	 * Handle this by adding all buffers to the skb in their entirety.
	 * Once the real frame length is known, trim the skb.
	 */
	frame_length =
		RX_DESC_DATA0_FRAME_LENGTH_GET_(le32_to_cpu(descriptor->data0));
	buffer_length = buffer_info->buffer_length;

	netdev_dbg(netdev, "%s%schunk: %d/%d",
		   is_first ? "first " : " ",
		   is_last ? "last " : " ",
		   frame_length, buffer_length);

	/* save existing skb, allocate new skb and map to dma */
	skb = buffer_info->skb;
	if (lan743x_rx_init_ring_element(rx, rx->last_head,
					 GFP_ATOMIC | GFP_DMA)) {
		/* failed to allocate next skb.
		 * Memory is very low.
		 * Drop this packet and reuse buffer.
		 */
		lan743x_rx_reuse_ring_element(rx, rx->last_head);
		/* drop packet that was being assembled */
		dev_kfree_skb_irq(rx->skb_head);
		rx->skb_head = NULL;
		goto process_extension;
	}

	/* add buffers to skb via skb->frag_list */
	if (is_first) {
		skb_reserve(skb, RX_HEAD_PADDING);
		skb_put(skb, buffer_length - RX_HEAD_PADDING);
		/* a stale in-progress frame means we never saw its last
		 * segment; discard it before starting the new frame
		 */
		if (rx->skb_head)
			dev_kfree_skb_irq(rx->skb_head);
		rx->skb_head = skb;
	} else if (rx->skb_head) {
		skb_put(skb, buffer_length);
		if (skb_shinfo(rx->skb_head)->frag_list)
			rx->skb_tail->next = skb;
		else
			skb_shinfo(rx->skb_head)->frag_list = skb;
		rx->skb_tail = skb;
		/* account the appended fragment in the head skb totals */
		rx->skb_head->len += skb->len;
		rx->skb_head->data_len += skb->len;
		rx->skb_head->truesize += skb->truesize;
	} else {
		/* packet to assemble has already been dropped because one or
		 * more of its buffers could not be allocated
		 */
		netdev_dbg(netdev, "drop buffer intended for dropped packet");
		dev_kfree_skb_irq(skb);
	}

process_extension:
	if (extension_index >= 0) {
		u32 ts_sec;
		u32 ts_nsec;

		/* extension descriptor carries the hardware RX timestamp */
		ts_sec = le32_to_cpu(desc_ext->data1);
		ts_nsec = (le32_to_cpu(desc_ext->data2) &
			  RX_DESC_DATA2_TS_NS_MASK_);
		if (rx->skb_head)
			skb_hwtstamps(rx->skb_head)->hwtstamp =
				ktime_set(ts_sec, ts_nsec);
		lan743x_rx_reuse_ring_element(rx, extension_index);
		rx->last_head = extension_index;
		netdev_dbg(netdev, "process extension");
	}

	if (is_last && rx->skb_head)
		rx->skb_head = lan743x_rx_trim_skb(rx->skb_head, frame_length);

	if (is_last && rx->skb_head) {
		rx->skb_head->protocol = eth_type_trans(rx->skb_head,
							rx->adapter->netdev);
		netdev_dbg(netdev, "sending %d byte frame to OS",
			   rx->skb_head->len);
		napi_gro_receive(&rx->napi, rx->skb_head);
		rx->skb_head = NULL;
	}

move_forward:
	/* push tail and head forward */
	rx->last_tail = rx->last_head;
	rx->last_head = lan743x_rx_next_index(rx, rx->last_head);
	result = RX_PROCESS_RESULT_BUFFER_RECEIVED;
done:
	return result;
}

/* NAPI poll handler for one RX channel.
 * Processes up to @weight descriptors; if the budget was exhausted or the
 * last iteration still consumed a buffer, more work may remain and the
 * full weight is returned to keep polling. Otherwise the poll is
 * completed and interrupts are re-armed (via RX_TAIL write on chip
 * variants that require it).
 */
static int lan743x_rx_napi_poll(struct napi_struct *napi, int weight)
{
	struct lan743x_rx *rx = container_of(napi, struct lan743x_rx, napi);
	struct lan743x_adapter *adapter = rx->adapter;
	int result = RX_PROCESS_RESULT_NOTHING_TO_DO;
	u32 rx_tail_flags = 0;
	int count;

	if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C) {
		/* clear int status bit before reading packet */
		lan743x_csr_write(adapter, DMAC_INT_STS,
				  DMAC_INT_BIT_RXFRM_(rx->channel_number));
	}
	for (count = 0; count < weight; count++) {
		result = lan743x_rx_process_buffer(rx);
		if (result == RX_PROCESS_RESULT_NOTHING_TO_DO)
			break;
	}
	rx->frame_count += count;
	if (count == weight || result == RX_PROCESS_RESULT_BUFFER_RECEIVED)
		return weight;

	if (!napi_complete_done(napi, count))
		return count;

	/* re-arm interrupts, must write to rx tail on some chip variants */
	if (rx->vector_flags & LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET)
		rx_tail_flags |= RX_TAIL_SET_TOP_INT_VEC_EN_;
	if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET) {
		rx_tail_flags |= RX_TAIL_SET_TOP_INT_EN_;
	} else {
		lan743x_csr_write(adapter, INT_EN_SET,
				  INT_BIT_DMA_RX_(rx->channel_number));
	}

	if (rx_tail_flags)
		lan743x_csr_write(adapter, RX_TAIL(rx->channel_number),
				  rx_tail_flags | rx->last_tail);

	return count;
}

/* Free all RX ring resources: per-slot buffers, the head-writeback word,
 * the bookkeeping array, and the descriptor ring itself. Safe to call on
 * a partially initialized ring (each step is guarded).
 */
static void lan743x_rx_ring_cleanup(struct lan743x_rx *rx)
{
	if (rx->buffer_info && rx->ring_cpu_ptr) {
		int index;

		for (index = 0; index < rx->ring_size; index++)
			lan743x_rx_release_ring_element(rx, index);
	}

	if (rx->head_cpu_ptr) {
		dma_free_coherent(&rx->adapter->pdev->dev,
				  sizeof(*rx->head_cpu_ptr), rx->head_cpu_ptr,
				  rx->head_dma_ptr);
		rx->head_cpu_ptr = NULL;
		rx->head_dma_ptr = 0;
	}

	kfree(rx->buffer_info);
	rx->buffer_info = NULL;

	if (rx->ring_cpu_ptr) {
		dma_free_coherent(&rx->adapter->pdev->dev,
				  rx->ring_allocation_size, rx->ring_cpu_ptr,
				  rx->ring_dma_ptr);
		rx->ring_allocation_size = 0;
		rx->ring_cpu_ptr = NULL;
		rx->ring_dma_ptr = 0;
	}

	rx->ring_size = 0;
	rx->last_head = 0;
}

/* Allocate and initialize the RX descriptor ring:
 * the coherent descriptor array (page-aligned), the per-slot bookkeeping
 * array, the coherent head-writeback word, and one mapped skb per slot.
 * Returns 0 on success or a negative errno; on failure everything already
 * allocated is released via lan743x_rx_ring_cleanup().
 */
static int lan743x_rx_ring_init(struct lan743x_rx *rx)
{
	size_t ring_allocation_size = 0;
	dma_addr_t dma_ptr = 0;
	void *cpu_ptr = NULL;
	int ret = -ENOMEM;
	int index = 0;

	rx->ring_size = LAN743X_RX_RING_SIZE;
	if (rx->ring_size <= 1) {
		ret = -EINVAL;
		goto cleanup;
	}
	/* ring length must fit in the RX_CFG_B length field */
	if (rx->ring_size & ~RX_CFG_B_RX_RING_LEN_MASK_) {
		ret = -EINVAL;
		goto cleanup;
	}
	if (dma_set_mask_and_coherent(&rx->adapter->pdev->dev,
				      DMA_BIT_MASK(64))) {
		dev_warn(&rx->adapter->pdev->dev,
			 "lan743x_: No suitable DMA available\n");
		ret = -ENOMEM;
		goto cleanup;
	}
	ring_allocation_size = ALIGN(rx->ring_size *
				     sizeof(struct lan743x_rx_descriptor),
				     PAGE_SIZE);
	dma_ptr = 0;
	cpu_ptr = dma_alloc_coherent(&rx->adapter->pdev->dev,
				     ring_allocation_size, &dma_ptr, GFP_KERNEL);
	if (!cpu_ptr) {
		ret = -ENOMEM;
		goto cleanup;
	}
	rx->ring_allocation_size = ring_allocation_size;
	rx->ring_cpu_ptr = (struct lan743x_rx_descriptor *)cpu_ptr;
	rx->ring_dma_ptr = dma_ptr;

	cpu_ptr = kcalloc(rx->ring_size, sizeof(*rx->buffer_info),
			  GFP_KERNEL);
	if (!cpu_ptr) {
		ret = -ENOMEM;
		goto cleanup;
	}
	rx->buffer_info = (struct lan743x_rx_buffer_info *)cpu_ptr;
	dma_ptr = 0;
	cpu_ptr = dma_alloc_coherent(&rx->adapter->pdev->dev,
				     sizeof(*rx->head_cpu_ptr), &dma_ptr,
				     GFP_KERNEL);
	if (!cpu_ptr) {
		ret = -ENOMEM;
		goto cleanup;
	}

	rx->head_cpu_ptr = cpu_ptr;
	rx->head_dma_ptr = dma_ptr;
	/* head writeback address must be 4-byte aligned for the hardware */
	if (rx->head_dma_ptr & 0x3) {
		ret = -ENOMEM;
		goto cleanup;
	}

	rx->last_head = 0;
	for (index = 0; index < rx->ring_size; index++) {
		ret = lan743x_rx_init_ring_element(rx, index, GFP_KERNEL);
		if (ret)
			goto cleanup;
	}
	return 0;

cleanup:
	netif_warn(rx->adapter, ifup, rx->adapter->netdev,
		   "Error allocating memory for LAN743x\n");

	lan743x_rx_ring_cleanup(rx);
	return ret;
}

/* Stop one RX channel: disable the FIFO, stop the DMA channel, mask its
 * interrupts, disable/delete NAPI, then free the ring. Ordering matters:
 * hardware is quiesced before the ring memory is released.
 */
static void lan743x_rx_close(struct lan743x_rx *rx)
{
	struct lan743x_adapter *adapter = rx->adapter;

	lan743x_csr_write(adapter, FCT_RX_CTL,
			  FCT_RX_CTL_DIS_(rx->channel_number));
	lan743x_csr_wait_for_bit(adapter, FCT_RX_CTL,
				 FCT_RX_CTL_EN_(rx->channel_number),
				 0, 1000, 20000, 100);

	lan743x_csr_write(adapter, DMAC_CMD,
			  DMAC_CMD_STOP_R_(rx->channel_number));
	lan743x_dmac_rx_wait_till_stopped(adapter, rx->channel_number);

	lan743x_csr_write(adapter, DMAC_INT_EN_CLR,
			  DMAC_INT_BIT_RXFRM_(rx->channel_number));
	lan743x_csr_write(adapter, INT_EN_CLR,
			  INT_BIT_DMA_RX_(rx->channel_number));
	napi_disable(&rx->napi);

	netif_napi_del(&rx->napi);

	lan743x_rx_ring_cleanup(rx);
}

/* Bring up one RX channel: allocate the ring, register NAPI, soft-reset
 * the DMA channel, then program ring/writeback addresses and RX_CFG_A/B/C
 * before starting DMA and enabling the FIFO.
 * Returns 0 on success or a negative errno (resources released on error).
 */
static int lan743x_rx_open(struct lan743x_rx *rx)
{
	struct lan743x_adapter *adapter = rx->adapter;
	u32 data = 0;
	int ret;

	rx->frame_count = 0;
	ret = lan743x_rx_ring_init(rx);
	if (ret)
		goto return_error;

	netif_napi_add(adapter->netdev,
		       &rx->napi, lan743x_rx_napi_poll,
		       NAPI_POLL_WEIGHT);

	lan743x_csr_write(adapter, DMAC_CMD,
			  DMAC_CMD_RX_SWR_(rx->channel_number));
	lan743x_csr_wait_for_bit(adapter, DMAC_CMD,
				 DMAC_CMD_RX_SWR_(rx->channel_number),
				 0, 1000, 20000, 100);

	/* set ring base address
	 */
	lan743x_csr_write(adapter,
			  RX_BASE_ADDRH(rx->channel_number),
			  DMA_ADDR_HIGH32(rx->ring_dma_ptr));
	lan743x_csr_write(adapter,
			  RX_BASE_ADDRL(rx->channel_number),
			  DMA_ADDR_LOW32(rx->ring_dma_ptr));

	/* set rx write back address */
	lan743x_csr_write(adapter,
			  RX_HEAD_WRITEBACK_ADDRH(rx->channel_number),
			  DMA_ADDR_HIGH32(rx->head_dma_ptr));
	lan743x_csr_write(adapter,
			  RX_HEAD_WRITEBACK_ADDRL(rx->channel_number),
			  DMA_ADDR_LOW32(rx->head_dma_ptr));
	data = RX_CFG_A_RX_HP_WB_EN_;
	/* extra prefetch/writeback thresholds only on post-A0 silicon */
	if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0)) {
		data |= (RX_CFG_A_RX_WB_ON_INT_TMR_ |
			RX_CFG_A_RX_WB_THRES_SET_(0x7) |
			RX_CFG_A_RX_PF_THRES_SET_(16) |
			RX_CFG_A_RX_PF_PRI_THRES_SET_(4));
	}

	/* set RX_CFG_A */
	lan743x_csr_write(adapter,
			  RX_CFG_A(rx->channel_number), data);

	/* set RX_CFG_B */
	data = lan743x_csr_read(adapter, RX_CFG_B(rx->channel_number));
	data &= ~RX_CFG_B_RX_PAD_MASK_;
	if (!RX_HEAD_PADDING)
		data |= RX_CFG_B_RX_PAD_0_;
	else
		data |= RX_CFG_B_RX_PAD_2_;
	data &= ~RX_CFG_B_RX_RING_LEN_MASK_;
	data |= ((rx->ring_size) & RX_CFG_B_RX_RING_LEN_MASK_);
	/* timestamp all received frames */
	data |= RX_CFG_B_TS_ALL_RX_;
	if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0))
		data |= RX_CFG_B_RDMABL_512_;

	lan743x_csr_write(adapter, RX_CFG_B(rx->channel_number), data);
	rx->vector_flags = lan743x_intr_get_vector_flags(adapter,
							 INT_BIT_DMA_RX_
							 (rx->channel_number));

	/* set RX_CFG_C */
	data = 0;
	if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_CLEAR)
		data |= RX_CFG_C_RX_TOP_INT_EN_AUTO_CLR_;
	if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_AUTO_CLEAR)
		data |= RX_CFG_C_RX_DMA_INT_STS_AUTO_CLR_;
	if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_R2C)
		data |= RX_CFG_C_RX_INT_STS_R2C_MODE_MASK_;
	if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_R2C)
		data |= RX_CFG_C_RX_INT_EN_R2C_;
	lan743x_csr_write(adapter, RX_CFG_C(rx->channel_number), data);

	rx->last_tail = ((u32)(rx->ring_size - 1));
	lan743x_csr_write(adapter, RX_TAIL(rx->channel_number),
			  rx->last_tail);
	/* hardware head must start at 0 after the soft reset above */
	rx->last_head = lan743x_csr_read(adapter, RX_HEAD(rx->channel_number));
	if (rx->last_head) {
		ret = -EIO;
		goto napi_delete;
	}

	napi_enable(&rx->napi);

	lan743x_csr_write(adapter, INT_EN_SET,
			  INT_BIT_DMA_RX_(rx->channel_number));
	lan743x_csr_write(adapter, DMAC_INT_STS,
			  DMAC_INT_BIT_RXFRM_(rx->channel_number));
	lan743x_csr_write(adapter, DMAC_INT_EN_SET,
			  DMAC_INT_BIT_RXFRM_(rx->channel_number));
	lan743x_csr_write(adapter, DMAC_CMD,
			  DMAC_CMD_START_R_(rx->channel_number));

	/* initialize fifo */
	lan743x_csr_write(adapter, FCT_RX_CTL,
			  FCT_RX_CTL_RESET_(rx->channel_number));
	lan743x_csr_wait_for_bit(adapter, FCT_RX_CTL,
				 FCT_RX_CTL_RESET_(rx->channel_number),
				 0, 1000, 20000, 100);
	lan743x_csr_write(adapter, FCT_FLOW(rx->channel_number),
			  FCT_FLOW_CTL_REQ_EN_ |
			  FCT_FLOW_CTL_ON_THRESHOLD_SET_(0x2A) |
			  FCT_FLOW_CTL_OFF_THRESHOLD_SET_(0xA));

	/* enable fifo */
	lan743x_csr_write(adapter, FCT_RX_CTL,
			  FCT_RX_CTL_EN_(rx->channel_number));
	return 0;

napi_delete:
	netif_napi_del(&rx->napi);
	lan743x_rx_ring_cleanup(rx);

return_error:
	return ret;
}

/* ndo_stop: tear down TX/RX channels, PTP, PHY, MAC and interrupts,
 * in the reverse order of lan743x_netdev_open().
 */
static int lan743x_netdev_close(struct net_device *netdev)
{
	struct lan743x_adapter *adapter = netdev_priv(netdev);
	int index;

	for (index = 0; index < adapter->used_tx_channels; index++)
		lan743x_tx_close(&adapter->tx[index]);

	for (index = 0; index < LAN743X_USED_RX_CHANNELS; index++)
		lan743x_rx_close(&adapter->rx[index]);

	lan743x_ptp_close(adapter);

	lan743x_phy_close(adapter);

	lan743x_mac_close(adapter);

	lan743x_intr_close(adapter);

	return 0;
}

/* ndo_open: bring up interrupts, MAC, PHY, PTP, RFE, then all RX and TX
 * channels. On failure each already-opened stage is unwound in reverse;
 * the ring_cpu_ptr checks skip channels that never got opened.
 */
static int lan743x_netdev_open(struct net_device *netdev)
{
	struct lan743x_adapter *adapter = netdev_priv(netdev);
	int index;
	int ret;

	ret = lan743x_intr_open(adapter);
	if (ret)
		goto return_error;

	ret = lan743x_mac_open(adapter);
	if (ret)
		goto close_intr;

	ret = lan743x_phy_open(adapter);
	if (ret)
		goto close_mac;

	ret = lan743x_ptp_open(adapter);
	if (ret)
		goto close_phy;

	lan743x_rfe_open(adapter);

	for (index = 0; index < LAN743X_USED_RX_CHANNELS; index++) {
		ret = lan743x_rx_open(&adapter->rx[index]);
		if (ret)
			goto close_rx;
	}

	for (index = 0; index < adapter->used_tx_channels; index++) {
		ret = lan743x_tx_open(&adapter->tx[index]);
		if (ret)
			goto close_tx;
	}
	return 0;

close_tx:
	for (index = 0; index < adapter->used_tx_channels; index++) {
		if (adapter->tx[index].ring_cpu_ptr)
			lan743x_tx_close(&adapter->tx[index]);
	}

close_rx:
	for (index = 0; index < LAN743X_USED_RX_CHANNELS; index++) {
		if (adapter->rx[index].ring_cpu_ptr)
			lan743x_rx_close(&adapter->rx[index]);
	}
	lan743x_ptp_close(adapter);

close_phy:
	lan743x_phy_close(adapter);

close_mac:
	lan743x_mac_close(adapter);

close_intr:
	lan743x_intr_close(adapter);

return_error:
	netif_warn(adapter, ifup, adapter->netdev,
		   "Error opening LAN743x\n");
	return ret;
}

/* ndo_start_xmit: map the skb's queue to a TX channel (PCI11x1x parts
 * have multiple TX channels; LAN743x uses channel 0) and hand it off.
 */
static netdev_tx_t lan743x_netdev_xmit_frame(struct sk_buff *skb,
					     struct net_device *netdev)
{
	struct lan743x_adapter *adapter = netdev_priv(netdev);
	u8 ch = 0;

	if (adapter->is_pci11x1x)
		ch = skb->queue_mapping % PCI11X1X_USED_TX_CHANNELS;

	return lan743x_tx_xmit_frame(&adapter->tx[ch], skb);
}

static int
lan743x_netdev_ioctl(struct net_device *netdev,
		     struct ifreq *ifr, int cmd)
{
	/* hardware timestamping config is handled by the PTP layer;
	 * everything else is forwarded to the PHY
	 */
	if (!netif_running(netdev))
		return -EINVAL;
	if (cmd == SIOCSHWTSTAMP)
		return lan743x_ptp_ioctl(netdev, ifr, cmd);
	return phy_mii_ioctl(netdev->phydev, ifr, cmd);
}

/* ndo_set_rx_mode: push the netdev's multicast/promisc settings into the
 * receive filtering engine.
 */
static void lan743x_netdev_set_multicast(struct net_device *netdev)
{
	struct lan743x_adapter *adapter = netdev_priv(netdev);

	lan743x_rfe_set_multicast(adapter);
}

/* ndo_change_mtu: program the MAC first; only update netdev->mtu if the
 * hardware accepted the new size.
 */
static int lan743x_netdev_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct lan743x_adapter *adapter = netdev_priv(netdev);
	int ret = 0;

	ret = lan743x_mac_set_mtu(adapter, new_mtu);
	if (!ret)
		netdev->mtu = new_mtu;
	return ret;
}

/* ndo_get_stats64: assemble stats from the hardware statistics counters.
 * NOTE(review): counters are read directly from CSRs each call; values
 * presumably reflect hardware-accumulated totals — confirm counter
 * width/rollover behavior against the datasheet.
 */
static void lan743x_netdev_get_stats64(struct net_device *netdev,
				       struct rtnl_link_stats64 *stats)
{
	struct lan743x_adapter *adapter = netdev_priv(netdev);

	stats->rx_packets = lan743x_csr_read(adapter, STAT_RX_TOTAL_FRAMES);
	stats->tx_packets = lan743x_csr_read(adapter, STAT_TX_TOTAL_FRAMES);
	stats->rx_bytes = lan743x_csr_read(adapter,
					   STAT_RX_UNICAST_BYTE_COUNT) +
			  lan743x_csr_read(adapter,
					   STAT_RX_BROADCAST_BYTE_COUNT) +
			  lan743x_csr_read(adapter,
					   STAT_RX_MULTICAST_BYTE_COUNT);
	stats->tx_bytes = lan743x_csr_read(adapter,
					   STAT_TX_UNICAST_BYTE_COUNT) +
			  lan743x_csr_read(adapter,
					   STAT_TX_BROADCAST_BYTE_COUNT) +
			  lan743x_csr_read(adapter,
					   STAT_TX_MULTICAST_BYTE_COUNT);
	stats->rx_errors = lan743x_csr_read(adapter, STAT_RX_FCS_ERRORS) +
			   lan743x_csr_read(adapter,
					    STAT_RX_ALIGNMENT_ERRORS) +
			   lan743x_csr_read(adapter, STAT_RX_JABBER_ERRORS) +
			   lan743x_csr_read(adapter,
					    STAT_RX_UNDERSIZE_FRAME_ERRORS) +
			   lan743x_csr_read(adapter,
					    STAT_RX_OVERSIZE_FRAME_ERRORS);
	stats->tx_errors = lan743x_csr_read(adapter, STAT_TX_FCS_ERRORS) +
			   lan743x_csr_read(adapter,
					    STAT_TX_EXCESS_DEFERRAL_ERRORS) +
			   lan743x_csr_read(adapter, STAT_TX_CARRIER_ERRORS);
	stats->rx_dropped = lan743x_csr_read(adapter,
					     STAT_RX_DROPPED_FRAMES);
	stats->tx_dropped = lan743x_csr_read(adapter,
					     STAT_TX_EXCESSIVE_COLLISION);
	stats->multicast = lan743x_csr_read(adapter,
					    STAT_RX_MULTICAST_FRAMES) +
			   lan743x_csr_read(adapter,
					    STAT_TX_MULTICAST_FRAMES);
	stats->collisions = lan743x_csr_read(adapter,
					     STAT_TX_SINGLE_COLLISIONS) +
			    lan743x_csr_read(adapter,
					     STAT_TX_MULTIPLE_COLLISIONS) +
			    lan743x_csr_read(adapter,
					     STAT_TX_LATE_COLLISIONS);
}

/* ndo_set_mac_address: validate the new address, then program both the
 * MAC address registers and the receive filtering engine.
 */
static int lan743x_netdev_set_mac_address(struct net_device *netdev,
					  void *addr)
{
	struct lan743x_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *sock_addr = addr;
	int ret;

	ret = eth_prepare_mac_addr_change(netdev, sock_addr);
	if (ret)
		return ret;
	eth_hw_addr_set(netdev, sock_addr->sa_data);
	lan743x_mac_set_address(adapter, sock_addr->sa_data);
	lan743x_rfe_update_mac_address(adapter);
	return 0;
}

static const struct net_device_ops lan743x_netdev_ops = {
	.ndo_open		= lan743x_netdev_open,
	.ndo_stop		= lan743x_netdev_close,
	.ndo_start_xmit		= lan743x_netdev_xmit_frame,
	.ndo_eth_ioctl		= lan743x_netdev_ioctl,
	.ndo_set_rx_mode	= lan743x_netdev_set_multicast,
	.ndo_change_mtu		= lan743x_netdev_change_mtu,
	.ndo_get_stats64	= lan743x_netdev_get_stats64,
	.ndo_set_mac_address	= lan743x_netdev_set_mac_address,
};

/* Mask all interrupt sources; the final hardware quiesce step. */
static void lan743x_hardware_cleanup(struct lan743x_adapter *adapter)
{
	lan743x_csr_write(adapter, INT_EN_CLR, 0xFFFFFFFF);
}

/* Unregister the MDIO bus (bus memory itself is devm-managed). */
static void lan743x_mdiobus_cleanup(struct lan743x_adapter *adapter)
{
	mdiobus_unregister(adapter->mdiobus);
}

/* Full teardown for remove: unregister netdev, then MDIO, hardware, PCI. */
static void lan743x_full_cleanup(struct lan743x_adapter *adapter)
{
	unregister_netdev(adapter->netdev);

	lan743x_mdiobus_cleanup(adapter);
	lan743x_hardware_cleanup(adapter);
	lan743x_pci_cleanup(adapter);
}

/* One-time hardware bring-up shared by probe and PM resume: detect the
 * chip variant, size the channel/vector tables, mask interrupts, then
 * initialize GPIO, MAC, PHY, PTP, RFE address filtering and the DMA
 * controller, and seed the per-channel rx/tx bookkeeping.
 * Returns 0 on success or the first sub-init's negative errno.
 */
static int lan743x_hardware_init(struct lan743x_adapter *adapter,
				 struct pci_dev *pdev)
{
	struct lan743x_tx *tx;
	int index;
	int ret;

	adapter->is_pci11x1x = is_pci11x1x_chip(adapter);
	if (adapter->is_pci11x1x) {
		adapter->max_tx_channels = PCI11X1X_MAX_TX_CHANNELS;
		adapter->used_tx_channels = PCI11X1X_USED_TX_CHANNELS;
		adapter->max_vector_count = PCI11X1X_MAX_VECTOR_COUNT;
		pci11x1x_strap_get_status(adapter);
		spin_lock_init(&adapter->eth_syslock_spinlock);
	} else {
		adapter->max_tx_channels = LAN743X_MAX_TX_CHANNELS;
		adapter->used_tx_channels = LAN743X_USED_TX_CHANNELS;
		adapter->max_vector_count = LAN743X_MAX_VECTOR_COUNT;
	}

	adapter->intr.irq = adapter->pdev->irq;
	lan743x_csr_write(adapter, INT_EN_CLR, 0xFFFFFFFF);

	ret = lan743x_gpio_init(adapter);
	if (ret)
		return ret;

	ret = lan743x_mac_init(adapter);
	if (ret)
		return ret;

	ret = lan743x_phy_init(adapter);
	if (ret)
		return ret;

	ret = lan743x_ptp_init(adapter);
	if (ret)
		return ret;

	lan743x_rfe_update_mac_address(adapter);

	ret = lan743x_dmac_init(adapter);
	if (ret)
		return ret;

	for (index = 0; index < LAN743X_USED_RX_CHANNELS; index++) {
		adapter->rx[index].adapter = adapter;
		adapter->rx[index].channel_number = index;
	}

	for (index = 0; index < adapter->used_tx_channels; index++) {
		tx = &adapter->tx[index];
		tx->adapter = adapter;
		tx->channel_number = index;
		spin_lock_init(&tx->ring_lock);
	}

	return 0;
}

/* Allocate and register the MDIO bus. On PCI11x1x parts the SGMII strap
 * selects between C45 (SGMII) and C22 (RGMII) accessors and powers the
 * SGMII block up or down accordingly; plain LAN743x always uses C22.
 * Returns 0 on success or a negative errno.
 */
static int lan743x_mdiobus_init(struct lan743x_adapter *adapter)
{
	u32 sgmii_ctl;
	int ret;

	adapter->mdiobus = devm_mdiobus_alloc(&adapter->pdev->dev);
	if (!(adapter->mdiobus)) {
		ret = -ENOMEM;
		goto return_error;
	}

	adapter->mdiobus->priv = (void *)adapter;
	if (adapter->is_pci11x1x) {
		if (adapter->is_sgmii_en) {
			/* enable and power up the SGMII block */
			sgmii_ctl = lan743x_csr_read(adapter, SGMII_CTL);
			sgmii_ctl |= SGMII_CTL_SGMII_ENABLE_;
			sgmii_ctl &= ~SGMII_CTL_SGMII_POWER_DN_;
			lan743x_csr_write(adapter, SGMII_CTL, sgmii_ctl);
			netif_dbg(adapter, drv, adapter->netdev,
				  "SGMII operation\n");
			adapter->mdiobus->probe_capabilities = MDIOBUS_C22_C45;
			adapter->mdiobus->read = lan743x_mdiobus_c45_read;
			adapter->mdiobus->write = lan743x_mdiobus_c45_write;
			adapter->mdiobus->name = "lan743x-mdiobus-c45";
			netif_dbg(adapter, drv, adapter->netdev,
				  "lan743x-mdiobus-c45\n");
		} else {
			/* disable and power down the unused SGMII block */
			sgmii_ctl = lan743x_csr_read(adapter, SGMII_CTL);
			sgmii_ctl &= ~SGMII_CTL_SGMII_ENABLE_;
			sgmii_ctl |= SGMII_CTL_SGMII_POWER_DN_;
			lan743x_csr_write(adapter, SGMII_CTL, sgmii_ctl);
			netif_dbg(adapter, drv, adapter->netdev,
				  "RGMII operation\n");
			// Only C22 support when RGMII I/F
			adapter->mdiobus->probe_capabilities = MDIOBUS_C22;
			adapter->mdiobus->read = lan743x_mdiobus_read;
			adapter->mdiobus->write = lan743x_mdiobus_write;
			adapter->mdiobus->name = "lan743x-mdiobus";
			netif_dbg(adapter, drv, adapter->netdev,
				  "lan743x-mdiobus\n");
		}
	} else {
		adapter->mdiobus->read = lan743x_mdiobus_read;
		adapter->mdiobus->write = lan743x_mdiobus_write;
		adapter->mdiobus->name = "lan743x-mdiobus";
		netif_dbg(adapter, drv, adapter->netdev, "lan743x-mdiobus\n");
	}

	snprintf(adapter->mdiobus->id, MII_BUS_ID_SIZE,
		 "pci-%s", pci_name(adapter->pdev));

	if ((adapter->csr.id_rev & ID_REV_ID_MASK_) == ID_REV_ID_LAN7430_)
		/* LAN7430 uses internal phy at address 1 */
		adapter->mdiobus->phy_mask = ~(u32)BIT(1);

	/* register mdiobus */
	ret = mdiobus_register(adapter->mdiobus);
	if (ret < 0)
		goto return_error;
	return 0;

return_error:
	return ret;
}

/* lan743x_pcidev_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @id: entry in lan743x_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int lan743x_pcidev_probe(struct pci_dev *pdev,
				const struct pci_device_id *id)
{
	struct lan743x_adapter *adapter = NULL;
	struct net_device *netdev = NULL;
	int ret = -ENODEV;

	/* PCI11x1x parts expose multiple TX queues; allocate accordingly */
	if (id->device == PCI_DEVICE_ID_SMSC_A011 ||
	    id->device == PCI_DEVICE_ID_SMSC_A041) {
		netdev = devm_alloc_etherdev_mqs(&pdev->dev,
						 sizeof(struct lan743x_adapter),
						 PCI11X1X_USED_TX_CHANNELS,
						 LAN743X_USED_RX_CHANNELS);
	} else {
		netdev = devm_alloc_etherdev(&pdev->dev,
					     sizeof(struct lan743x_adapter));
	}

	if (!netdev)
		goto return_error;

	SET_NETDEV_DEV(netdev, &pdev->dev);
	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE |
			      NETIF_MSG_LINK | NETIF_MSG_IFUP |
			      NETIF_MSG_IFDOWN | NETIF_MSG_TX_QUEUED;
	netdev->max_mtu = LAN743X_MAX_FRAME_SIZE;

	/* MAC address from device tree, if provided */
	of_get_mac_address(pdev->dev.of_node, adapter->mac_address);

	ret = lan743x_pci_init(adapter, pdev);
	if (ret)
		goto return_error;

	ret = lan743x_csr_init(adapter);
	if (ret)
		goto cleanup_pci;

	ret = lan743x_hardware_init(adapter, pdev);
	if (ret)
		goto cleanup_pci;

	ret = lan743x_mdiobus_init(adapter);
	if (ret)
		goto cleanup_hardware;

	adapter->netdev->netdev_ops = &lan743x_netdev_ops;
	adapter->netdev->ethtool_ops = &lan743x_ethtool_ops;
	adapter->netdev->features = NETIF_F_SG | NETIF_F_TSO | NETIF_F_HW_CSUM;
	adapter->netdev->hw_features = adapter->netdev->features;

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

	ret = register_netdev(adapter->netdev);
	if (ret < 0)
		goto cleanup_mdiobus;
	return 0;

cleanup_mdiobus:
	lan743x_mdiobus_cleanup(adapter);

cleanup_hardware:
	lan743x_hardware_cleanup(adapter);

cleanup_pci:
	lan743x_pci_cleanup(adapter);

return_error:
	pr_warn("Initialization failed\n");
	return ret;
}

/**
 * lan743x_pcidev_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * this is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void lan743x_pcidev_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct lan743x_adapter *adapter = netdev_priv(netdev);

	lan743x_full_cleanup(adapter);
}

/* PCI shutdown hook: detach and (if running) close the netdev under RTNL,
 * save PCI state for PM, then mask all interrupt sources.
 */
static void lan743x_pcidev_shutdown(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct lan743x_adapter *adapter = netdev_priv(netdev);

	rtnl_lock();
	netif_device_detach(netdev);

	/* close netdev when netdev is at running state.
	 * For instance, it is true when system goes to sleep by pm-suspend
	 * However, it is false when system goes to sleep by suspend GUI menu
	 */
	if (netif_running(netdev))
		lan743x_netdev_close(netdev);
	rtnl_unlock();

#ifdef CONFIG_PM
	pci_save_state(pdev);
#endif

	/* clean up lan743x portion */
	lan743x_hardware_cleanup(adapter);
}

#ifdef CONFIG_PM_SLEEP
/* CRC16 over a wakeup-frame prefix, bit-reversed to match the order the
 * MAC_WUF_CFG CRC16 field expects.
 */
static u16 lan743x_pm_wakeframe_crc16(const u8 *buf, int len)
{
	return bitrev16(crc16(0xFFFF, buf, len));
}

/* Program Wake-on-LAN per adapter->wolopts: clears all wakeup-frame
 * filters, then enables PHY / magic-packet / unicast / broadcast wake
 * bits and installs WUF filters for multicast (IPv4 + IPv6 prefixes) and
 * ARP frames. Finally commits MAC_WUCSR, PMT_CTL and MAC_RX in that
 * order so the receiver is only re-enabled after wake config is in place.
 */
static void lan743x_pm_set_wol(struct lan743x_adapter *adapter)
{
	/* filter prefixes: IPv4/IPv6 multicast MAC, ARP EtherType */
	const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
	const u8 ipv6_multicast[3] = { 0x33, 0x33 };
	const u8 arp_type[2] = { 0x08, 0x06 };
	int mask_index;
	u32 pmtctl;
	u32 wucsr;
	u32 macrx;
	u16 crc;

	for (mask_index = 0; mask_index < MAC_NUM_OF_WUF_CFG; mask_index++)
		lan743x_csr_write(adapter, MAC_WUF_CFG(mask_index), 0);

	/* clear wake settings */
	pmtctl = lan743x_csr_read(adapter, PMT_CTL);
	pmtctl |= PMT_CTL_WUPS_MASK_;
	pmtctl &= ~(PMT_CTL_GPIO_WAKEUP_EN_ | PMT_CTL_EEE_WAKEUP_EN_ |
		PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_ |
		PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_ | PMT_CTL_ETH_PHY_WAKE_EN_);

	macrx = lan743x_csr_read(adapter, MAC_RX);

	wucsr = 0;
	mask_index = 0;

	pmtctl |= PMT_CTL_ETH_PHY_D3_COLD_OVR_ | PMT_CTL_ETH_PHY_D3_OVR_;

	if (adapter->wolopts & WAKE_PHY) {
		pmtctl |= PMT_CTL_ETH_PHY_EDPD_PLL_CTL_;
		pmtctl |= PMT_CTL_ETH_PHY_WAKE_EN_;
	}
	if (adapter->wolopts & WAKE_MAGIC) {
		wucsr |= MAC_WUCSR_MPEN_;
		macrx |= MAC_RX_RXEN_;
		pmtctl |= PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_;
	}
	if (adapter->wolopts & WAKE_UCAST) {
		wucsr |= MAC_WUCSR_RFE_WAKE_EN_ | MAC_WUCSR_PFDA_EN_;
		macrx |= MAC_RX_RXEN_;
		pmtctl |= PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_;
		pmtctl |= PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_;
	}
	if (adapter->wolopts & WAKE_BCAST) {
		wucsr |= MAC_WUCSR_RFE_WAKE_EN_ | MAC_WUCSR_BCST_EN_;
		macrx |= MAC_RX_RXEN_;
		pmtctl |= PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_;
		pmtctl |= PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_;
	}
	if (adapter->wolopts & WAKE_MCAST) {
		/* IPv4 multicast */
		crc = lan743x_pm_wakeframe_crc16(ipv4_multicast, 3);
		lan743x_csr_write(adapter, MAC_WUF_CFG(mask_index),
				  MAC_WUF_CFG_EN_ | MAC_WUF_CFG_TYPE_MCAST_ |
				  (0 << MAC_WUF_CFG_OFFSET_SHIFT_) |
				  (crc & MAC_WUF_CFG_CRC16_MASK_));
		/* mask 0x7 = match first 3 bytes of the destination MAC */
		lan743x_csr_write(adapter, MAC_WUF_MASK0(mask_index), 7);
		lan743x_csr_write(adapter, MAC_WUF_MASK1(mask_index), 0);
		lan743x_csr_write(adapter, MAC_WUF_MASK2(mask_index), 0);
		lan743x_csr_write(adapter, MAC_WUF_MASK3(mask_index), 0);
		mask_index++;

		/* IPv6 multicast */
		crc = lan743x_pm_wakeframe_crc16(ipv6_multicast, 2);
		lan743x_csr_write(adapter, MAC_WUF_CFG(mask_index),
				  MAC_WUF_CFG_EN_ | MAC_WUF_CFG_TYPE_MCAST_ |
				  (0 << MAC_WUF_CFG_OFFSET_SHIFT_) |
				  (crc & MAC_WUF_CFG_CRC16_MASK_));
		/* mask 0x3 = match first 2 bytes of the destination MAC */
		lan743x_csr_write(adapter, MAC_WUF_MASK0(mask_index), 3);
		lan743x_csr_write(adapter, MAC_WUF_MASK1(mask_index), 0);
		lan743x_csr_write(adapter, MAC_WUF_MASK2(mask_index), 0);
		lan743x_csr_write(adapter, MAC_WUF_MASK3(mask_index), 0);
		mask_index++;

		wucsr |= MAC_WUCSR_RFE_WAKE_EN_ | MAC_WUCSR_WAKE_EN_;
		macrx |= MAC_RX_RXEN_;
		pmtctl |= PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_;
		pmtctl |= PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_;
	}
	if (adapter->wolopts & WAKE_ARP) {
		/* set MAC_WUF_CFG & WUF_MASK
		 * for packettype (offset 12,13) = ARP (0x0806)
		 */
		crc = lan743x_pm_wakeframe_crc16(arp_type, 2);
		lan743x_csr_write(adapter, MAC_WUF_CFG(mask_index),
				  MAC_WUF_CFG_EN_ | MAC_WUF_CFG_TYPE_ALL_ |
				  (0 << MAC_WUF_CFG_OFFSET_SHIFT_) |
				  (crc & MAC_WUF_CFG_CRC16_MASK_));
		/* mask 0x3000 = match bytes 12,13 (the EtherType field) */
		lan743x_csr_write(adapter, MAC_WUF_MASK0(mask_index), 0x3000);
		lan743x_csr_write(adapter, MAC_WUF_MASK1(mask_index), 0);
		lan743x_csr_write(adapter, MAC_WUF_MASK2(mask_index), 0);
		lan743x_csr_write(adapter, MAC_WUF_MASK3(mask_index), 0);
		mask_index++;

		wucsr |= MAC_WUCSR_RFE_WAKE_EN_ | MAC_WUCSR_WAKE_EN_;
		macrx |= MAC_RX_RXEN_;
		pmtctl |= PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_;
		pmtctl |= PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_;
	}

	lan743x_csr_write(adapter, MAC_WUCSR, wucsr);
	lan743x_csr_write(adapter, PMT_CTL, pmtctl);
	lan743x_csr_write(adapter, MAC_RX, macrx);
}

/* PM suspend: quiesce via the shutdown path, clear stale wake status,
 * arm Wake-on-LAN if requested, then let the PCI core set PME and enter
 * D3hot.
 */
static int lan743x_pm_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct lan743x_adapter *adapter = netdev_priv(netdev);

	lan743x_pcidev_shutdown(pdev);

	/* clear all wakes */
	lan743x_csr_write(adapter, MAC_WUCSR, 0);
	lan743x_csr_write(adapter, MAC_WUCSR2, 0);
	lan743x_csr_write(adapter, MAC_WK_SRC, 0xFFFFFFFF);

	if (adapter->wolopts)
		lan743x_pm_set_wol(adapter);

	/* Host sets PME_En, put D3hot */
	return pci_prepare_to_sleep(pdev);
}

/* PM resume: restore PCI state to D0 and re-run the full hardware init
 * (the chip lost its configuration across D3).
 */
static int lan743x_pm_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct lan743x_adapter *adapter = netdev_priv(netdev);
	int ret;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	ret = lan743x_hardware_init(adapter, pdev);
	if (ret) {
		netif_err(adapter, probe, adapter->netdev,
			  "lan743x_hardware_init returned %d\n", ret);
		lan743x_pci_cleanup(adapter);
		return ret;
	}

	/* open netdev when netdev is at running state while resume.
3266 * For instance, it is true when system wakesup after pm-suspend 3267 * However, it is false when system wakes up after suspend GUI menu 3268 */ 3269 if (netif_running(netdev)) 3270 lan743x_netdev_open(netdev); 3271 3272 netif_device_attach(netdev); 3273 3274 return 0; 3275 } 3276 3277 static const struct dev_pm_ops lan743x_pm_ops = { 3278 SET_SYSTEM_SLEEP_PM_OPS(lan743x_pm_suspend, lan743x_pm_resume) 3279 }; 3280 #endif /* CONFIG_PM_SLEEP */ 3281 3282 static const struct pci_device_id lan743x_pcidev_tbl[] = { 3283 { PCI_DEVICE(PCI_VENDOR_ID_SMSC, PCI_DEVICE_ID_SMSC_LAN7430) }, 3284 { PCI_DEVICE(PCI_VENDOR_ID_SMSC, PCI_DEVICE_ID_SMSC_LAN7431) }, 3285 { PCI_DEVICE(PCI_VENDOR_ID_SMSC, PCI_DEVICE_ID_SMSC_A011) }, 3286 { PCI_DEVICE(PCI_VENDOR_ID_SMSC, PCI_DEVICE_ID_SMSC_A041) }, 3287 { 0, } 3288 }; 3289 3290 MODULE_DEVICE_TABLE(pci, lan743x_pcidev_tbl); 3291 3292 static struct pci_driver lan743x_pcidev_driver = { 3293 .name = DRIVER_NAME, 3294 .id_table = lan743x_pcidev_tbl, 3295 .probe = lan743x_pcidev_probe, 3296 .remove = lan743x_pcidev_remove, 3297 #ifdef CONFIG_PM_SLEEP 3298 .driver.pm = &lan743x_pm_ops, 3299 #endif 3300 .shutdown = lan743x_pcidev_shutdown, 3301 }; 3302 3303 module_pci_driver(lan743x_pcidev_driver); 3304 3305 MODULE_AUTHOR(DRIVER_AUTHOR); 3306 MODULE_DESCRIPTION(DRIVER_DESC); 3307 MODULE_LICENSE("GPL"); 3308