/* SPDX-License-Identifier: GPL-2.0+ */
/* Copyright (C) 2018 Microchip Technology Inc. */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/crc32.h>
#include <linux/microchipphy.h>
#include <linux/net_tstamp.h>
#include <linux/phy.h>
#include <linux/rtnetlink.h>
#include <linux/iopoll.h>
#include <linux/crc16.h>
#include "lan743x_main.h"
#include "lan743x_ethtool.h"

static void lan743x_pci_cleanup(struct lan743x_adapter *adapter)
{
	pci_release_selected_regions(adapter->pdev,
				     pci_select_bars(adapter->pdev,
						     IORESOURCE_MEM));
	pci_disable_device(adapter->pdev);
}

static int lan743x_pci_init(struct lan743x_adapter *adapter,
			    struct pci_dev *pdev)
{
	unsigned long bars = 0;
	int ret;

	adapter->pdev = pdev;
	ret = pci_enable_device_mem(pdev);
	if (ret)
		goto return_error;

	netif_info(adapter, probe, adapter->netdev,
		   "PCI: Vendor ID = 0x%04X, Device ID = 0x%04X\n",
		   pdev->vendor, pdev->device);
	bars = pci_select_bars(pdev, IORESOURCE_MEM);
	if (!test_bit(0, &bars))
		goto disable_device;

	ret = pci_request_selected_regions(pdev, bars, DRIVER_NAME);
	if (ret)
		goto disable_device;

	pci_set_master(pdev);
	return 0;

disable_device:
	pci_disable_device(adapter->pdev);

return_error:
	return ret;
}

u32 lan743x_csr_read(struct lan743x_adapter *adapter, int offset)
{
	return ioread32(&adapter->csr.csr_address[offset]);
}

void lan743x_csr_write(struct lan743x_adapter *adapter, int offset,
		       u32 data)
{
	iowrite32(data, &adapter->csr.csr_address[offset]);
}

#define LAN743X_CSR_READ_OP(offset)	lan743x_csr_read(adapter, offset)

static int lan743x_csr_light_reset(struct lan743x_adapter *adapter)
{
	u32 data;

	data = lan743x_csr_read(adapter, HW_CFG);
	data |= HW_CFG_LRST_;
	lan743x_csr_write(adapter, HW_CFG, data);

	return readx_poll_timeout(LAN743X_CSR_READ_OP, HW_CFG, data,
				  !(data & HW_CFG_LRST_), 100000, 10000000);
}

static int lan743x_csr_wait_for_bit(struct lan743x_adapter *adapter,
				    int offset, u32 bit_mask,
				    int target_value, int usleep_min,
				    int usleep_max, int count)
{
	u32 data;

	return readx_poll_timeout(LAN743X_CSR_READ_OP, offset, data,
				  target_value == ((data & bit_mask) ?
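/* Illustrative sketch (editorial addition, not part of the driver):
 * LAN743X_CSR_READ_OP() exists so that readx_poll_timeout() from
 * <linux/iopoll.h> can poll a CSR until a condition holds.  Ignoring the
 * exact sleep granularity, a call such as the one in
 * lan743x_csr_light_reset() behaves roughly like this open-coded loop:
 *
 *	static int example_wait_lrst_clear(struct lan743x_adapter *adapter)
 *	{
 *		unsigned long timeout = jiffies + usecs_to_jiffies(10000000);
 *		u32 data;
 *
 *		for (;;) {
 *			data = lan743x_csr_read(adapter, HW_CFG);
 *			if (!(data & HW_CFG_LRST_))
 *				return 0;		// condition met
 *			if (time_after(jiffies, timeout))
 *				return -ETIMEDOUT;	// 10 s elapsed
 *			usleep_range(50000, 100000);	// sleep between reads
 *		}
 *	}
 */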
1 : 0), 92 usleep_max, usleep_min * count); 93 } 94 95 static int lan743x_csr_init(struct lan743x_adapter *adapter) 96 { 97 struct lan743x_csr *csr = &adapter->csr; 98 resource_size_t bar_start, bar_length; 99 int result; 100 101 bar_start = pci_resource_start(adapter->pdev, 0); 102 bar_length = pci_resource_len(adapter->pdev, 0); 103 csr->csr_address = devm_ioremap(&adapter->pdev->dev, 104 bar_start, bar_length); 105 if (!csr->csr_address) { 106 result = -ENOMEM; 107 goto clean_up; 108 } 109 110 csr->id_rev = lan743x_csr_read(adapter, ID_REV); 111 csr->fpga_rev = lan743x_csr_read(adapter, FPGA_REV); 112 netif_info(adapter, probe, adapter->netdev, 113 "ID_REV = 0x%08X, FPGA_REV = %d.%d\n", 114 csr->id_rev, FPGA_REV_GET_MAJOR_(csr->fpga_rev), 115 FPGA_REV_GET_MINOR_(csr->fpga_rev)); 116 if (!ID_REV_IS_VALID_CHIP_ID_(csr->id_rev)) { 117 result = -ENODEV; 118 goto clean_up; 119 } 120 121 csr->flags = LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR; 122 switch (csr->id_rev & ID_REV_CHIP_REV_MASK_) { 123 case ID_REV_CHIP_REV_A0_: 124 csr->flags |= LAN743X_CSR_FLAG_IS_A0; 125 csr->flags &= ~LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR; 126 break; 127 case ID_REV_CHIP_REV_B0_: 128 csr->flags |= LAN743X_CSR_FLAG_IS_B0; 129 break; 130 } 131 132 result = lan743x_csr_light_reset(adapter); 133 if (result) 134 goto clean_up; 135 return 0; 136 clean_up: 137 return result; 138 } 139 140 static void lan743x_intr_software_isr(void *context) 141 { 142 struct lan743x_adapter *adapter = context; 143 struct lan743x_intr *intr = &adapter->intr; 144 u32 int_sts; 145 146 int_sts = lan743x_csr_read(adapter, INT_STS); 147 if (int_sts & INT_BIT_SW_GP_) { 148 lan743x_csr_write(adapter, INT_STS, INT_BIT_SW_GP_); 149 intr->software_isr_flag = 1; 150 } 151 } 152 153 static void lan743x_tx_isr(void *context, u32 int_sts, u32 flags) 154 { 155 struct lan743x_tx *tx = context; 156 struct lan743x_adapter *adapter = tx->adapter; 157 bool enable_flag = true; 158 u32 int_en = 0; 159 160 int_en = lan743x_csr_read(adapter, INT_EN_SET); 161 if (flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR) { 162 lan743x_csr_write(adapter, INT_EN_CLR, 163 INT_BIT_DMA_TX_(tx->channel_number)); 164 } 165 166 if (int_sts & INT_BIT_DMA_TX_(tx->channel_number)) { 167 u32 ioc_bit = DMAC_INT_BIT_TX_IOC_(tx->channel_number); 168 u32 dmac_int_sts; 169 u32 dmac_int_en; 170 171 if (flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ) 172 dmac_int_sts = lan743x_csr_read(adapter, DMAC_INT_STS); 173 else 174 dmac_int_sts = ioc_bit; 175 if (flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK) 176 dmac_int_en = lan743x_csr_read(adapter, 177 DMAC_INT_EN_SET); 178 else 179 dmac_int_en = ioc_bit; 180 181 dmac_int_en &= ioc_bit; 182 dmac_int_sts &= dmac_int_en; 183 if (dmac_int_sts & ioc_bit) { 184 napi_schedule(&tx->napi); 185 enable_flag = false;/* poll func will enable later */ 186 } 187 } 188 189 if (enable_flag) 190 /* enable isr */ 191 lan743x_csr_write(adapter, INT_EN_SET, 192 INT_BIT_DMA_TX_(tx->channel_number)); 193 } 194 195 static void lan743x_rx_isr(void *context, u32 int_sts, u32 flags) 196 { 197 struct lan743x_rx *rx = context; 198 struct lan743x_adapter *adapter = rx->adapter; 199 bool enable_flag = true; 200 201 if (flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR) { 202 lan743x_csr_write(adapter, INT_EN_CLR, 203 INT_BIT_DMA_RX_(rx->channel_number)); 204 } 205 206 if (int_sts & INT_BIT_DMA_RX_(rx->channel_number)) { 207 u32 rx_frame_bit = DMAC_INT_BIT_RXFRM_(rx->channel_number); 208 u32 dmac_int_sts; 209 u32 dmac_int_en; 210 211 if (flags & 
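/* Illustrative sketch (editorial addition, not part of the driver):
 * lan743x_tx_isr()/lan743x_rx_isr() follow the usual NAPI pattern: mask
 * the per-channel interrupt, schedule the poll routine, and re-enable
 * the source only after the ring has been drained.  In outline (the
 * "ch" variable stands for the channel number):
 *
 *	// hard interrupt context
 *	lan743x_csr_write(adapter, INT_EN_CLR, INT_BIT_DMA_RX_(ch));
 *	napi_schedule(&rx->napi);
 *
 *	// later, at the end of the NAPI poll routine
 *	if (napi_complete_done(napi, work_done))
 *		lan743x_csr_write(adapter, INT_EN_SET, INT_BIT_DMA_RX_(ch));
 *
 * Keeping the source masked while NAPI runs avoids taking a hard
 * interrupt for every frame that arrives during the poll.
 */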
LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ) 212 dmac_int_sts = lan743x_csr_read(adapter, DMAC_INT_STS); 213 else 214 dmac_int_sts = rx_frame_bit; 215 if (flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK) 216 dmac_int_en = lan743x_csr_read(adapter, 217 DMAC_INT_EN_SET); 218 else 219 dmac_int_en = rx_frame_bit; 220 221 dmac_int_en &= rx_frame_bit; 222 dmac_int_sts &= dmac_int_en; 223 if (dmac_int_sts & rx_frame_bit) { 224 napi_schedule(&rx->napi); 225 enable_flag = false;/* poll funct will enable later */ 226 } 227 } 228 229 if (enable_flag) { 230 /* enable isr */ 231 lan743x_csr_write(adapter, INT_EN_SET, 232 INT_BIT_DMA_RX_(rx->channel_number)); 233 } 234 } 235 236 static void lan743x_intr_shared_isr(void *context, u32 int_sts, u32 flags) 237 { 238 struct lan743x_adapter *adapter = context; 239 unsigned int channel; 240 241 if (int_sts & INT_BIT_ALL_RX_) { 242 for (channel = 0; channel < LAN743X_USED_RX_CHANNELS; 243 channel++) { 244 u32 int_bit = INT_BIT_DMA_RX_(channel); 245 246 if (int_sts & int_bit) { 247 lan743x_rx_isr(&adapter->rx[channel], 248 int_bit, flags); 249 int_sts &= ~int_bit; 250 } 251 } 252 } 253 if (int_sts & INT_BIT_ALL_TX_) { 254 for (channel = 0; channel < LAN743X_USED_TX_CHANNELS; 255 channel++) { 256 u32 int_bit = INT_BIT_DMA_TX_(channel); 257 258 if (int_sts & int_bit) { 259 lan743x_tx_isr(&adapter->tx[channel], 260 int_bit, flags); 261 int_sts &= ~int_bit; 262 } 263 } 264 } 265 if (int_sts & INT_BIT_ALL_OTHER_) { 266 if (int_sts & INT_BIT_SW_GP_) { 267 lan743x_intr_software_isr(adapter); 268 int_sts &= ~INT_BIT_SW_GP_; 269 } 270 } 271 if (int_sts) 272 lan743x_csr_write(adapter, INT_EN_CLR, int_sts); 273 } 274 275 static irqreturn_t lan743x_intr_entry_isr(int irq, void *ptr) 276 { 277 struct lan743x_vector *vector = ptr; 278 struct lan743x_adapter *adapter = vector->adapter; 279 irqreturn_t result = IRQ_NONE; 280 u32 int_enables; 281 u32 int_sts; 282 283 if (vector->flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ) { 284 int_sts = lan743x_csr_read(adapter, INT_STS); 285 } else if (vector->flags & 286 (LAN743X_VECTOR_FLAG_SOURCE_STATUS_R2C | 287 LAN743X_VECTOR_FLAG_SOURCE_ENABLE_R2C)) { 288 int_sts = lan743x_csr_read(adapter, INT_STS_R2C); 289 } else { 290 /* use mask as implied status */ 291 int_sts = vector->int_mask | INT_BIT_MAS_; 292 } 293 294 if (!(int_sts & INT_BIT_MAS_)) 295 goto irq_done; 296 297 if (vector->flags & LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_CLEAR) 298 /* disable vector interrupt */ 299 lan743x_csr_write(adapter, 300 INT_VEC_EN_CLR, 301 INT_VEC_EN_(vector->vector_index)); 302 303 if (vector->flags & LAN743X_VECTOR_FLAG_MASTER_ENABLE_CLEAR) 304 /* disable master interrupt */ 305 lan743x_csr_write(adapter, INT_EN_CLR, INT_BIT_MAS_); 306 307 if (vector->flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK) { 308 int_enables = lan743x_csr_read(adapter, INT_EN_SET); 309 } else { 310 /* use vector mask as implied enable mask */ 311 int_enables = vector->int_mask; 312 } 313 314 int_sts &= int_enables; 315 int_sts &= vector->int_mask; 316 if (int_sts) { 317 if (vector->handler) { 318 vector->handler(vector->context, 319 int_sts, vector->flags); 320 } else { 321 /* disable interrupts on this vector */ 322 lan743x_csr_write(adapter, INT_EN_CLR, 323 vector->int_mask); 324 } 325 result = IRQ_HANDLED; 326 } 327 328 if (vector->flags & LAN743X_VECTOR_FLAG_MASTER_ENABLE_SET) 329 /* enable master interrupt */ 330 lan743x_csr_write(adapter, INT_EN_SET, INT_BIT_MAS_); 331 332 if (vector->flags & LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_SET) 333 /* enable vector interrupt */ 334 
lan743x_csr_write(adapter, 335 INT_VEC_EN_SET, 336 INT_VEC_EN_(vector->vector_index)); 337 irq_done: 338 return result; 339 } 340 341 static int lan743x_intr_test_isr(struct lan743x_adapter *adapter) 342 { 343 struct lan743x_intr *intr = &adapter->intr; 344 int result = -ENODEV; 345 int timeout = 10; 346 347 intr->software_isr_flag = 0; 348 349 /* enable interrupt */ 350 lan743x_csr_write(adapter, INT_EN_SET, INT_BIT_SW_GP_); 351 352 /* activate interrupt here */ 353 lan743x_csr_write(adapter, INT_SET, INT_BIT_SW_GP_); 354 while ((timeout > 0) && (!(intr->software_isr_flag))) { 355 usleep_range(1000, 20000); 356 timeout--; 357 } 358 359 if (intr->software_isr_flag) 360 result = 0; 361 362 /* disable interrupts */ 363 lan743x_csr_write(adapter, INT_EN_CLR, INT_BIT_SW_GP_); 364 return result; 365 } 366 367 static int lan743x_intr_register_isr(struct lan743x_adapter *adapter, 368 int vector_index, u32 flags, 369 u32 int_mask, 370 lan743x_vector_handler handler, 371 void *context) 372 { 373 struct lan743x_vector *vector = &adapter->intr.vector_list 374 [vector_index]; 375 int ret; 376 377 vector->adapter = adapter; 378 vector->flags = flags; 379 vector->vector_index = vector_index; 380 vector->int_mask = int_mask; 381 vector->handler = handler; 382 vector->context = context; 383 384 ret = request_irq(vector->irq, 385 lan743x_intr_entry_isr, 386 (flags & LAN743X_VECTOR_FLAG_IRQ_SHARED) ? 387 IRQF_SHARED : 0, DRIVER_NAME, vector); 388 if (ret) { 389 vector->handler = NULL; 390 vector->context = NULL; 391 vector->int_mask = 0; 392 vector->flags = 0; 393 } 394 return ret; 395 } 396 397 static void lan743x_intr_unregister_isr(struct lan743x_adapter *adapter, 398 int vector_index) 399 { 400 struct lan743x_vector *vector = &adapter->intr.vector_list 401 [vector_index]; 402 403 free_irq(vector->irq, vector); 404 vector->handler = NULL; 405 vector->context = NULL; 406 vector->int_mask = 0; 407 vector->flags = 0; 408 } 409 410 static u32 lan743x_intr_get_vector_flags(struct lan743x_adapter *adapter, 411 u32 int_mask) 412 { 413 int index; 414 415 for (index = 0; index < LAN743X_MAX_VECTOR_COUNT; index++) { 416 if (adapter->intr.vector_list[index].int_mask & int_mask) 417 return adapter->intr.vector_list[index].flags; 418 } 419 return 0; 420 } 421 422 static void lan743x_intr_close(struct lan743x_adapter *adapter) 423 { 424 struct lan743x_intr *intr = &adapter->intr; 425 int index = 0; 426 427 lan743x_csr_write(adapter, INT_EN_CLR, INT_BIT_MAS_); 428 lan743x_csr_write(adapter, INT_VEC_EN_CLR, 0x000000FF); 429 430 for (index = 0; index < LAN743X_MAX_VECTOR_COUNT; index++) { 431 if (intr->flags & INTR_FLAG_IRQ_REQUESTED(index)) { 432 lan743x_intr_unregister_isr(adapter, index); 433 intr->flags &= ~INTR_FLAG_IRQ_REQUESTED(index); 434 } 435 } 436 437 if (intr->flags & INTR_FLAG_MSI_ENABLED) { 438 pci_disable_msi(adapter->pdev); 439 intr->flags &= ~INTR_FLAG_MSI_ENABLED; 440 } 441 442 if (intr->flags & INTR_FLAG_MSIX_ENABLED) { 443 pci_disable_msix(adapter->pdev); 444 intr->flags &= ~INTR_FLAG_MSIX_ENABLED; 445 } 446 } 447 448 static int lan743x_intr_open(struct lan743x_adapter *adapter) 449 { 450 struct msix_entry msix_entries[LAN743X_MAX_VECTOR_COUNT]; 451 struct lan743x_intr *intr = &adapter->intr; 452 u32 int_vec_en_auto_clr = 0; 453 u32 int_vec_map0 = 0; 454 u32 int_vec_map1 = 0; 455 int ret = -ENODEV; 456 int index = 0; 457 u32 flags = 0; 458 459 intr->number_of_vectors = 0; 460 461 /* Try to set up MSIX interrupts */ 462 memset(&msix_entries[0], 0, 463 sizeof(struct msix_entry) * 
LAN743X_MAX_VECTOR_COUNT); 464 for (index = 0; index < LAN743X_MAX_VECTOR_COUNT; index++) 465 msix_entries[index].entry = index; 466 ret = pci_enable_msix_range(adapter->pdev, 467 msix_entries, 1, 468 1 + LAN743X_USED_TX_CHANNELS + 469 LAN743X_USED_RX_CHANNELS); 470 471 if (ret > 0) { 472 intr->flags |= INTR_FLAG_MSIX_ENABLED; 473 intr->number_of_vectors = ret; 474 intr->using_vectors = true; 475 for (index = 0; index < intr->number_of_vectors; index++) 476 intr->vector_list[index].irq = msix_entries 477 [index].vector; 478 netif_info(adapter, ifup, adapter->netdev, 479 "using MSIX interrupts, number of vectors = %d\n", 480 intr->number_of_vectors); 481 } 482 483 /* If MSIX failed try to setup using MSI interrupts */ 484 if (!intr->number_of_vectors) { 485 if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0)) { 486 if (!pci_enable_msi(adapter->pdev)) { 487 intr->flags |= INTR_FLAG_MSI_ENABLED; 488 intr->number_of_vectors = 1; 489 intr->using_vectors = true; 490 intr->vector_list[0].irq = 491 adapter->pdev->irq; 492 netif_info(adapter, ifup, adapter->netdev, 493 "using MSI interrupts, number of vectors = %d\n", 494 intr->number_of_vectors); 495 } 496 } 497 } 498 499 /* If MSIX, and MSI failed, setup using legacy interrupt */ 500 if (!intr->number_of_vectors) { 501 intr->number_of_vectors = 1; 502 intr->using_vectors = false; 503 intr->vector_list[0].irq = intr->irq; 504 netif_info(adapter, ifup, adapter->netdev, 505 "using legacy interrupts\n"); 506 } 507 508 /* At this point we must have at least one irq */ 509 lan743x_csr_write(adapter, INT_VEC_EN_CLR, 0xFFFFFFFF); 510 511 /* map all interrupts to vector 0 */ 512 lan743x_csr_write(adapter, INT_VEC_MAP0, 0x00000000); 513 lan743x_csr_write(adapter, INT_VEC_MAP1, 0x00000000); 514 lan743x_csr_write(adapter, INT_VEC_MAP2, 0x00000000); 515 flags = LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ | 516 LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C | 517 LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK | 518 LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR; 519 520 if (intr->using_vectors) { 521 flags |= LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_CLEAR | 522 LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_SET; 523 } else { 524 flags |= LAN743X_VECTOR_FLAG_MASTER_ENABLE_CLEAR | 525 LAN743X_VECTOR_FLAG_MASTER_ENABLE_SET | 526 LAN743X_VECTOR_FLAG_IRQ_SHARED; 527 } 528 529 if (adapter->csr.flags & LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR) { 530 flags &= ~LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ; 531 flags &= ~LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C; 532 flags &= ~LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR; 533 flags &= ~LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK; 534 flags |= LAN743X_VECTOR_FLAG_SOURCE_STATUS_R2C; 535 flags |= LAN743X_VECTOR_FLAG_SOURCE_ENABLE_R2C; 536 } 537 538 ret = lan743x_intr_register_isr(adapter, 0, flags, 539 INT_BIT_ALL_RX_ | INT_BIT_ALL_TX_ | 540 INT_BIT_ALL_OTHER_, 541 lan743x_intr_shared_isr, adapter); 542 if (ret) 543 goto clean_up; 544 intr->flags |= INTR_FLAG_IRQ_REQUESTED(0); 545 546 if (intr->using_vectors) 547 lan743x_csr_write(adapter, INT_VEC_EN_SET, 548 INT_VEC_EN_(0)); 549 550 if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0)) { 551 lan743x_csr_write(adapter, INT_MOD_CFG0, LAN743X_INT_MOD); 552 lan743x_csr_write(adapter, INT_MOD_CFG1, LAN743X_INT_MOD); 553 lan743x_csr_write(adapter, INT_MOD_CFG2, LAN743X_INT_MOD); 554 lan743x_csr_write(adapter, INT_MOD_CFG3, LAN743X_INT_MOD); 555 lan743x_csr_write(adapter, INT_MOD_CFG4, LAN743X_INT_MOD); 556 lan743x_csr_write(adapter, INT_MOD_CFG5, LAN743X_INT_MOD); 557 lan743x_csr_write(adapter, INT_MOD_CFG6, LAN743X_INT_MOD); 558 
lan743x_csr_write(adapter, INT_MOD_CFG7, LAN743X_INT_MOD); 559 lan743x_csr_write(adapter, INT_MOD_MAP0, 0x00005432); 560 lan743x_csr_write(adapter, INT_MOD_MAP1, 0x00000001); 561 lan743x_csr_write(adapter, INT_MOD_MAP2, 0x00FFFFFF); 562 } 563 564 /* enable interrupts */ 565 lan743x_csr_write(adapter, INT_EN_SET, INT_BIT_MAS_); 566 ret = lan743x_intr_test_isr(adapter); 567 if (ret) 568 goto clean_up; 569 570 if (intr->number_of_vectors > 1) { 571 int number_of_tx_vectors = intr->number_of_vectors - 1; 572 573 if (number_of_tx_vectors > LAN743X_USED_TX_CHANNELS) 574 number_of_tx_vectors = LAN743X_USED_TX_CHANNELS; 575 flags = LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ | 576 LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C | 577 LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK | 578 LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR | 579 LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_CLEAR | 580 LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_SET; 581 582 if (adapter->csr.flags & 583 LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR) { 584 flags = LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_CLEAR | 585 LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET | 586 LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET | 587 LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_CLEAR | 588 LAN743X_VECTOR_FLAG_SOURCE_STATUS_AUTO_CLEAR; 589 } 590 591 for (index = 0; index < number_of_tx_vectors; index++) { 592 u32 int_bit = INT_BIT_DMA_TX_(index); 593 int vector = index + 1; 594 595 /* map TX interrupt to vector */ 596 int_vec_map1 |= INT_VEC_MAP1_TX_VEC_(index, vector); 597 lan743x_csr_write(adapter, INT_VEC_MAP1, int_vec_map1); 598 if (flags & 599 LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_CLEAR) { 600 int_vec_en_auto_clr |= INT_VEC_EN_(vector); 601 lan743x_csr_write(adapter, INT_VEC_EN_AUTO_CLR, 602 int_vec_en_auto_clr); 603 } 604 605 /* Remove TX interrupt from shared mask */ 606 intr->vector_list[0].int_mask &= ~int_bit; 607 ret = lan743x_intr_register_isr(adapter, vector, flags, 608 int_bit, lan743x_tx_isr, 609 &adapter->tx[index]); 610 if (ret) 611 goto clean_up; 612 intr->flags |= INTR_FLAG_IRQ_REQUESTED(vector); 613 if (!(flags & 614 LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET)) 615 lan743x_csr_write(adapter, INT_VEC_EN_SET, 616 INT_VEC_EN_(vector)); 617 } 618 } 619 if ((intr->number_of_vectors - LAN743X_USED_TX_CHANNELS) > 1) { 620 int number_of_rx_vectors = intr->number_of_vectors - 621 LAN743X_USED_TX_CHANNELS - 1; 622 623 if (number_of_rx_vectors > LAN743X_USED_RX_CHANNELS) 624 number_of_rx_vectors = LAN743X_USED_RX_CHANNELS; 625 626 flags = LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ | 627 LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C | 628 LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK | 629 LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR | 630 LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_CLEAR | 631 LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_SET; 632 633 if (adapter->csr.flags & 634 LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR) { 635 flags = LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_CLEAR | 636 LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET | 637 LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET | 638 LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_CLEAR | 639 LAN743X_VECTOR_FLAG_SOURCE_STATUS_AUTO_CLEAR; 640 } 641 for (index = 0; index < number_of_rx_vectors; index++) { 642 int vector = index + 1 + LAN743X_USED_TX_CHANNELS; 643 u32 int_bit = INT_BIT_DMA_RX_(index); 644 645 /* map RX interrupt to vector */ 646 int_vec_map0 |= INT_VEC_MAP0_RX_VEC_(index, vector); 647 lan743x_csr_write(adapter, INT_VEC_MAP0, int_vec_map0); 648 if (flags & 649 LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_CLEAR) { 650 int_vec_en_auto_clr |= INT_VEC_EN_(vector); 651 
				lan743x_csr_write(adapter, INT_VEC_EN_AUTO_CLR,
						  int_vec_en_auto_clr);
			}

			/* Remove RX interrupt from shared mask */
			intr->vector_list[0].int_mask &= ~int_bit;
			ret = lan743x_intr_register_isr(adapter, vector, flags,
							int_bit, lan743x_rx_isr,
							&adapter->rx[index]);
			if (ret)
				goto clean_up;
			intr->flags |= INTR_FLAG_IRQ_REQUESTED(vector);

			lan743x_csr_write(adapter, INT_VEC_EN_SET,
					  INT_VEC_EN_(vector));
		}
	}
	return 0;

clean_up:
	lan743x_intr_close(adapter);
	return ret;
}

static int lan743x_dp_write(struct lan743x_adapter *adapter,
			    u32 select, u32 addr, u32 length, u32 *buf)
{
	int ret = -EIO;
	u32 dp_sel;
	int i;

	mutex_lock(&adapter->dp_lock);
	if (lan743x_csr_wait_for_bit(adapter, DP_SEL, DP_SEL_DPRDY_,
				     1, 40, 100, 100))
		goto unlock;
	dp_sel = lan743x_csr_read(adapter, DP_SEL);
	dp_sel &= ~DP_SEL_MASK_;
	dp_sel |= select;
	lan743x_csr_write(adapter, DP_SEL, dp_sel);

	for (i = 0; i < length; i++) {
		lan743x_csr_write(adapter, DP_ADDR, addr + i);
		lan743x_csr_write(adapter, DP_DATA_0, buf[i]);
		lan743x_csr_write(adapter, DP_CMD, DP_CMD_WRITE_);
		if (lan743x_csr_wait_for_bit(adapter, DP_SEL, DP_SEL_DPRDY_,
					     1, 40, 100, 100))
			goto unlock;
	}
	ret = 0;

unlock:
	mutex_unlock(&adapter->dp_lock);
	return ret;
}

static u32 lan743x_mac_mii_access(u16 id, u16 index, int read)
{
	u32 ret;

	ret = (id << MAC_MII_ACC_PHY_ADDR_SHIFT_) &
	      MAC_MII_ACC_PHY_ADDR_MASK_;
	ret |= (index << MAC_MII_ACC_MIIRINDA_SHIFT_) &
	       MAC_MII_ACC_MIIRINDA_MASK_;

	if (read)
		ret |= MAC_MII_ACC_MII_READ_;
	else
		ret |= MAC_MII_ACC_MII_WRITE_;
	ret |= MAC_MII_ACC_MII_BUSY_;

	return ret;
}

static int lan743x_mac_mii_wait_till_not_busy(struct lan743x_adapter *adapter)
{
	u32 data;

	return readx_poll_timeout(LAN743X_CSR_READ_OP, MAC_MII_ACC, data,
				  !(data & MAC_MII_ACC_MII_BUSY_), 0, 1000000);
}

static int lan743x_mdiobus_read(struct mii_bus *bus, int phy_id, int index)
{
	struct lan743x_adapter *adapter = bus->priv;
	u32 val, mii_access;
	int ret;

	/* confirm MII not busy */
	ret = lan743x_mac_mii_wait_till_not_busy(adapter);
	if (ret < 0)
		return ret;

	/* set the address, index & direction (read from PHY) */
	mii_access = lan743x_mac_mii_access(phy_id, index, MAC_MII_READ);
	lan743x_csr_write(adapter, MAC_MII_ACC, mii_access);
	ret = lan743x_mac_mii_wait_till_not_busy(adapter);
	if (ret < 0)
		return ret;

	val = lan743x_csr_read(adapter, MAC_MII_DATA);
	return (int)(val & 0xFFFF);
}

static int lan743x_mdiobus_write(struct mii_bus *bus,
				 int phy_id, int index, u16 regval)
{
	struct lan743x_adapter *adapter = bus->priv;
	u32 val, mii_access;
	int ret;

	/* confirm MII not busy */
	ret = lan743x_mac_mii_wait_till_not_busy(adapter);
	if (ret < 0)
		return ret;
	val = (u32)regval;
	lan743x_csr_write(adapter, MAC_MII_DATA, val);

	/* set the address, index & direction (write to PHY) */
	mii_access = lan743x_mac_mii_access(phy_id, index, MAC_MII_WRITE);
	lan743x_csr_write(adapter, MAC_MII_ACC, mii_access);
	ret = lan743x_mac_mii_wait_till_not_busy(adapter);
	return ret;
}

static void lan743x_mac_set_address(struct lan743x_adapter *adapter,
				    u8 *addr)
{
	u32 addr_lo, addr_hi;

	addr_lo = addr[0] |
		addr[1]
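/* Illustrative sketch (editorial addition, not part of the driver):
 * once an mii_bus built around lan743x_mdiobus_read()/_write() is
 * registered, phylib performs every PHY register access through these
 * callbacks.  Reading the PHY identifier registers directly would look
 * roughly like this (error handling omitted, phy_addr assumed known):
 *
 *	int id1 = mdiobus_read(adapter->mdiobus, phy_addr, MII_PHYSID1);
 *	int id2 = mdiobus_read(adapter->mdiobus, phy_addr, MII_PHYSID2);
 *	u32 phy_id = ((u32)id1 << 16) | (id2 & 0xFFFF);
 *
 * Each access builds a MAC_MII_ACC command word with the BUSY bit set
 * and then polls MAC_MII_ACC until the MAC clears BUSY again.
 */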
<< 8 | 782 addr[2] << 16 | 783 addr[3] << 24; 784 addr_hi = addr[4] | 785 addr[5] << 8; 786 lan743x_csr_write(adapter, MAC_RX_ADDRL, addr_lo); 787 lan743x_csr_write(adapter, MAC_RX_ADDRH, addr_hi); 788 789 ether_addr_copy(adapter->mac_address, addr); 790 netif_info(adapter, drv, adapter->netdev, 791 "MAC address set to %pM\n", addr); 792 } 793 794 static int lan743x_mac_init(struct lan743x_adapter *adapter) 795 { 796 bool mac_address_valid = true; 797 struct net_device *netdev; 798 u32 mac_addr_hi = 0; 799 u32 mac_addr_lo = 0; 800 u32 data; 801 int ret; 802 803 netdev = adapter->netdev; 804 lan743x_csr_write(adapter, MAC_CR, MAC_CR_RST_); 805 ret = lan743x_csr_wait_for_bit(adapter, MAC_CR, MAC_CR_RST_, 806 0, 1000, 20000, 100); 807 if (ret) 808 return ret; 809 810 /* setup auto duplex, and speed detection */ 811 data = lan743x_csr_read(adapter, MAC_CR); 812 data |= MAC_CR_ADD_ | MAC_CR_ASD_; 813 data |= MAC_CR_CNTR_RST_; 814 lan743x_csr_write(adapter, MAC_CR, data); 815 816 mac_addr_hi = lan743x_csr_read(adapter, MAC_RX_ADDRH); 817 mac_addr_lo = lan743x_csr_read(adapter, MAC_RX_ADDRL); 818 adapter->mac_address[0] = mac_addr_lo & 0xFF; 819 adapter->mac_address[1] = (mac_addr_lo >> 8) & 0xFF; 820 adapter->mac_address[2] = (mac_addr_lo >> 16) & 0xFF; 821 adapter->mac_address[3] = (mac_addr_lo >> 24) & 0xFF; 822 adapter->mac_address[4] = mac_addr_hi & 0xFF; 823 adapter->mac_address[5] = (mac_addr_hi >> 8) & 0xFF; 824 825 if (((mac_addr_hi & 0x0000FFFF) == 0x0000FFFF) && 826 mac_addr_lo == 0xFFFFFFFF) { 827 mac_address_valid = false; 828 } else if (!is_valid_ether_addr(adapter->mac_address)) { 829 mac_address_valid = false; 830 } 831 832 if (!mac_address_valid) 833 eth_random_addr(adapter->mac_address); 834 lan743x_mac_set_address(adapter, adapter->mac_address); 835 ether_addr_copy(netdev->dev_addr, adapter->mac_address); 836 return 0; 837 } 838 839 static int lan743x_mac_open(struct lan743x_adapter *adapter) 840 { 841 int ret = 0; 842 u32 temp; 843 844 temp = lan743x_csr_read(adapter, MAC_RX); 845 lan743x_csr_write(adapter, MAC_RX, temp | MAC_RX_RXEN_); 846 temp = lan743x_csr_read(adapter, MAC_TX); 847 lan743x_csr_write(adapter, MAC_TX, temp | MAC_TX_TXEN_); 848 return ret; 849 } 850 851 static void lan743x_mac_close(struct lan743x_adapter *adapter) 852 { 853 u32 temp; 854 855 temp = lan743x_csr_read(adapter, MAC_TX); 856 temp &= ~MAC_TX_TXEN_; 857 lan743x_csr_write(adapter, MAC_TX, temp); 858 lan743x_csr_wait_for_bit(adapter, MAC_TX, MAC_TX_TXD_, 859 1, 1000, 20000, 100); 860 861 temp = lan743x_csr_read(adapter, MAC_RX); 862 temp &= ~MAC_RX_RXEN_; 863 lan743x_csr_write(adapter, MAC_RX, temp); 864 lan743x_csr_wait_for_bit(adapter, MAC_RX, MAC_RX_RXD_, 865 1, 1000, 20000, 100); 866 } 867 868 static void lan743x_mac_flow_ctrl_set_enables(struct lan743x_adapter *adapter, 869 bool tx_enable, bool rx_enable) 870 { 871 u32 flow_setting = 0; 872 873 /* set maximum pause time because when fifo space frees 874 * up a zero value pause frame will be sent to release the pause 875 */ 876 flow_setting = MAC_FLOW_CR_FCPT_MASK_; 877 if (tx_enable) 878 flow_setting |= MAC_FLOW_CR_TX_FCEN_; 879 if (rx_enable) 880 flow_setting |= MAC_FLOW_CR_RX_FCEN_; 881 lan743x_csr_write(adapter, MAC_FLOW, flow_setting); 882 } 883 884 static int lan743x_mac_set_mtu(struct lan743x_adapter *adapter, int new_mtu) 885 { 886 int enabled = 0; 887 u32 mac_rx = 0; 888 889 mac_rx = lan743x_csr_read(adapter, MAC_RX); 890 if (mac_rx & MAC_RX_RXEN_) { 891 enabled = 1; 892 if (mac_rx & MAC_RX_RXD_) { 893 lan743x_csr_write(adapter, MAC_RX, 
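/* Illustrative sketch (editorial addition, not part of the driver):
 * MAC_RX_ADDRL/MAC_RX_ADDRH hold the station address as two words:
 * ADDRL carries bytes 0-3 and ADDRH carries bytes 4-5, least significant
 * byte first.  For example, 00:80:0f:12:34:56 is programmed as
 *
 *	addr_lo = 0x120f8000;	// 0x12<<24 | 0x0f<<16 | 0x80<<8 | 0x00
 *	addr_hi = 0x00005634;	// 0x56<<8  | 0x34
 *
 * lan743x_mac_init() applies the inverse unpacking to recover whatever
 * address the hardware left in these registers after reset, and falls
 * back to eth_random_addr() when that value is invalid or all-ones.
 */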
mac_rx); 894 mac_rx &= ~MAC_RX_RXD_; 895 } 896 mac_rx &= ~MAC_RX_RXEN_; 897 lan743x_csr_write(adapter, MAC_RX, mac_rx); 898 lan743x_csr_wait_for_bit(adapter, MAC_RX, MAC_RX_RXD_, 899 1, 1000, 20000, 100); 900 lan743x_csr_write(adapter, MAC_RX, mac_rx | MAC_RX_RXD_); 901 } 902 903 mac_rx &= ~(MAC_RX_MAX_SIZE_MASK_); 904 mac_rx |= (((new_mtu + ETH_HLEN + 4) << MAC_RX_MAX_SIZE_SHIFT_) & 905 MAC_RX_MAX_SIZE_MASK_); 906 lan743x_csr_write(adapter, MAC_RX, mac_rx); 907 908 if (enabled) { 909 mac_rx |= MAC_RX_RXEN_; 910 lan743x_csr_write(adapter, MAC_RX, mac_rx); 911 } 912 return 0; 913 } 914 915 /* PHY */ 916 static int lan743x_phy_reset(struct lan743x_adapter *adapter) 917 { 918 u32 data; 919 920 /* Only called with in probe, and before mdiobus_register */ 921 922 data = lan743x_csr_read(adapter, PMT_CTL); 923 data |= PMT_CTL_ETH_PHY_RST_; 924 lan743x_csr_write(adapter, PMT_CTL, data); 925 926 return readx_poll_timeout(LAN743X_CSR_READ_OP, PMT_CTL, data, 927 (!(data & PMT_CTL_ETH_PHY_RST_) && 928 (data & PMT_CTL_READY_)), 929 50000, 1000000); 930 } 931 932 static void lan743x_phy_update_flowcontrol(struct lan743x_adapter *adapter, 933 u8 duplex, u16 local_adv, 934 u16 remote_adv) 935 { 936 struct lan743x_phy *phy = &adapter->phy; 937 u8 cap; 938 939 if (phy->fc_autoneg) 940 cap = mii_resolve_flowctrl_fdx(local_adv, remote_adv); 941 else 942 cap = phy->fc_request_control; 943 944 lan743x_mac_flow_ctrl_set_enables(adapter, 945 cap & FLOW_CTRL_TX, 946 cap & FLOW_CTRL_RX); 947 } 948 949 static int lan743x_phy_init(struct lan743x_adapter *adapter) 950 { 951 return lan743x_phy_reset(adapter); 952 } 953 954 static void lan743x_phy_link_status_change(struct net_device *netdev) 955 { 956 struct lan743x_adapter *adapter = netdev_priv(netdev); 957 struct phy_device *phydev = netdev->phydev; 958 959 phy_print_status(phydev); 960 if (phydev->state == PHY_RUNNING) { 961 struct ethtool_link_ksettings ksettings; 962 int remote_advertisement = 0; 963 int local_advertisement = 0; 964 965 memset(&ksettings, 0, sizeof(ksettings)); 966 phy_ethtool_get_link_ksettings(netdev, &ksettings); 967 local_advertisement = phy_read(phydev, MII_ADVERTISE); 968 if (local_advertisement < 0) 969 return; 970 971 remote_advertisement = phy_read(phydev, MII_LPA); 972 if (remote_advertisement < 0) 973 return; 974 975 lan743x_phy_update_flowcontrol(adapter, 976 ksettings.base.duplex, 977 local_advertisement, 978 remote_advertisement); 979 } 980 } 981 982 static void lan743x_phy_close(struct lan743x_adapter *adapter) 983 { 984 struct net_device *netdev = adapter->netdev; 985 986 phy_stop(netdev->phydev); 987 phy_disconnect(netdev->phydev); 988 netdev->phydev = NULL; 989 } 990 991 static int lan743x_phy_open(struct lan743x_adapter *adapter) 992 { 993 struct lan743x_phy *phy = &adapter->phy; 994 struct phy_device *phydev; 995 struct net_device *netdev; 996 int ret = -EIO; 997 u32 mii_adv; 998 999 netdev = adapter->netdev; 1000 phydev = phy_find_first(adapter->mdiobus); 1001 if (!phydev) 1002 goto return_error; 1003 1004 ret = phy_connect_direct(netdev, phydev, 1005 lan743x_phy_link_status_change, 1006 PHY_INTERFACE_MODE_GMII); 1007 if (ret) 1008 goto return_error; 1009 1010 /* MAC doesn't support 1000T Half */ 1011 phydev->supported &= ~SUPPORTED_1000baseT_Half; 1012 1013 /* support both flow controls */ 1014 phy->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX); 1015 phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause); 1016 mii_adv = (u32)mii_advertise_flowctrl(phy->fc_request_control); 1017 phydev->advertising |= 
mii_adv_to_ethtool_adv_t(mii_adv); 1018 phy->fc_autoneg = phydev->autoneg; 1019 1020 phy_start(phydev); 1021 phy_start_aneg(phydev); 1022 return 0; 1023 1024 return_error: 1025 return ret; 1026 } 1027 1028 static void lan743x_rfe_open(struct lan743x_adapter *adapter) 1029 { 1030 lan743x_csr_write(adapter, RFE_RSS_CFG, 1031 RFE_RSS_CFG_UDP_IPV6_EX_ | 1032 RFE_RSS_CFG_TCP_IPV6_EX_ | 1033 RFE_RSS_CFG_IPV6_EX_ | 1034 RFE_RSS_CFG_UDP_IPV6_ | 1035 RFE_RSS_CFG_TCP_IPV6_ | 1036 RFE_RSS_CFG_IPV6_ | 1037 RFE_RSS_CFG_UDP_IPV4_ | 1038 RFE_RSS_CFG_TCP_IPV4_ | 1039 RFE_RSS_CFG_IPV4_ | 1040 RFE_RSS_CFG_VALID_HASH_BITS_ | 1041 RFE_RSS_CFG_RSS_QUEUE_ENABLE_ | 1042 RFE_RSS_CFG_RSS_HASH_STORE_ | 1043 RFE_RSS_CFG_RSS_ENABLE_); 1044 } 1045 1046 static void lan743x_rfe_update_mac_address(struct lan743x_adapter *adapter) 1047 { 1048 u8 *mac_addr; 1049 u32 mac_addr_hi = 0; 1050 u32 mac_addr_lo = 0; 1051 1052 /* Add mac address to perfect Filter */ 1053 mac_addr = adapter->mac_address; 1054 mac_addr_lo = ((((u32)(mac_addr[0])) << 0) | 1055 (((u32)(mac_addr[1])) << 8) | 1056 (((u32)(mac_addr[2])) << 16) | 1057 (((u32)(mac_addr[3])) << 24)); 1058 mac_addr_hi = ((((u32)(mac_addr[4])) << 0) | 1059 (((u32)(mac_addr[5])) << 8)); 1060 1061 lan743x_csr_write(adapter, RFE_ADDR_FILT_LO(0), mac_addr_lo); 1062 lan743x_csr_write(adapter, RFE_ADDR_FILT_HI(0), 1063 mac_addr_hi | RFE_ADDR_FILT_HI_VALID_); 1064 } 1065 1066 static void lan743x_rfe_set_multicast(struct lan743x_adapter *adapter) 1067 { 1068 struct net_device *netdev = adapter->netdev; 1069 u32 hash_table[DP_SEL_VHF_HASH_LEN]; 1070 u32 rfctl; 1071 u32 data; 1072 1073 rfctl = lan743x_csr_read(adapter, RFE_CTL); 1074 rfctl &= ~(RFE_CTL_AU_ | RFE_CTL_AM_ | 1075 RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_); 1076 rfctl |= RFE_CTL_AB_; 1077 if (netdev->flags & IFF_PROMISC) { 1078 rfctl |= RFE_CTL_AM_ | RFE_CTL_AU_; 1079 } else { 1080 if (netdev->flags & IFF_ALLMULTI) 1081 rfctl |= RFE_CTL_AM_; 1082 } 1083 1084 memset(hash_table, 0, DP_SEL_VHF_HASH_LEN * sizeof(u32)); 1085 if (netdev_mc_count(netdev)) { 1086 struct netdev_hw_addr *ha; 1087 int i; 1088 1089 rfctl |= RFE_CTL_DA_PERFECT_; 1090 i = 1; 1091 netdev_for_each_mc_addr(ha, netdev) { 1092 /* set first 32 into Perfect Filter */ 1093 if (i < 33) { 1094 lan743x_csr_write(adapter, 1095 RFE_ADDR_FILT_HI(i), 0); 1096 data = ha->addr[3]; 1097 data = ha->addr[2] | (data << 8); 1098 data = ha->addr[1] | (data << 8); 1099 data = ha->addr[0] | (data << 8); 1100 lan743x_csr_write(adapter, 1101 RFE_ADDR_FILT_LO(i), data); 1102 data = ha->addr[5]; 1103 data = ha->addr[4] | (data << 8); 1104 data |= RFE_ADDR_FILT_HI_VALID_; 1105 lan743x_csr_write(adapter, 1106 RFE_ADDR_FILT_HI(i), data); 1107 } else { 1108 u32 bitnum = (ether_crc(ETH_ALEN, ha->addr) >> 1109 23) & 0x1FF; 1110 hash_table[bitnum / 32] |= (1 << (bitnum % 32)); 1111 rfctl |= RFE_CTL_MCAST_HASH_; 1112 } 1113 i++; 1114 } 1115 } 1116 1117 lan743x_dp_write(adapter, DP_SEL_RFE_RAM, 1118 DP_SEL_VHF_VLAN_LEN, 1119 DP_SEL_VHF_HASH_LEN, hash_table); 1120 lan743x_csr_write(adapter, RFE_CTL, rfctl); 1121 } 1122 1123 static int lan743x_dmac_init(struct lan743x_adapter *adapter) 1124 { 1125 u32 data = 0; 1126 1127 lan743x_csr_write(adapter, DMAC_CMD, DMAC_CMD_SWR_); 1128 lan743x_csr_wait_for_bit(adapter, DMAC_CMD, DMAC_CMD_SWR_, 1129 0, 1000, 20000, 100); 1130 switch (DEFAULT_DMA_DESCRIPTOR_SPACING) { 1131 case DMA_DESCRIPTOR_SPACING_16: 1132 data = DMAC_CFG_MAX_DSPACE_16_; 1133 break; 1134 case DMA_DESCRIPTOR_SPACING_32: 1135 data = DMAC_CFG_MAX_DSPACE_32_; 1136 break; 1137 case 
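/* Illustrative sketch (editorial addition, not part of the driver):
 * multicast addresses that do not fit in the 32 perfect-filter slots
 * above fall back to a 512-bit hash filter: the top nine bits of the
 * Ethernet CRC-32 of the address pick one bit, and the resulting table
 * is uploaded to RFE RAM through lan743x_dp_write().  A stand-alone
 * helper doing the same arithmetic:
 *
 *	static void example_hash_set(u32 *hash_table, const u8 *mc_addr)
 *	{
 *		u32 bitnum = (ether_crc(ETH_ALEN, mc_addr) >> 23) & 0x1FF;
 *
 *		hash_table[bitnum / 32] |= 1 << (bitnum % 32);
 *	}
 *
 * Two addresses whose CRCs share the same top nine bits land in the
 * same bucket, so a hash hit is only a "may accept" indication.
 */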
DMA_DESCRIPTOR_SPACING_64: 1138 data = DMAC_CFG_MAX_DSPACE_64_; 1139 break; 1140 case DMA_DESCRIPTOR_SPACING_128: 1141 data = DMAC_CFG_MAX_DSPACE_128_; 1142 break; 1143 default: 1144 return -EPERM; 1145 } 1146 if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0)) 1147 data |= DMAC_CFG_COAL_EN_; 1148 data |= DMAC_CFG_CH_ARB_SEL_RX_HIGH_; 1149 data |= DMAC_CFG_MAX_READ_REQ_SET_(6); 1150 lan743x_csr_write(adapter, DMAC_CFG, data); 1151 data = DMAC_COAL_CFG_TIMER_LIMIT_SET_(1); 1152 data |= DMAC_COAL_CFG_TIMER_TX_START_; 1153 data |= DMAC_COAL_CFG_FLUSH_INTS_; 1154 data |= DMAC_COAL_CFG_INT_EXIT_COAL_; 1155 data |= DMAC_COAL_CFG_CSR_EXIT_COAL_; 1156 data |= DMAC_COAL_CFG_TX_THRES_SET_(0x0A); 1157 data |= DMAC_COAL_CFG_RX_THRES_SET_(0x0C); 1158 lan743x_csr_write(adapter, DMAC_COAL_CFG, data); 1159 data = DMAC_OBFF_TX_THRES_SET_(0x08); 1160 data |= DMAC_OBFF_RX_THRES_SET_(0x0A); 1161 lan743x_csr_write(adapter, DMAC_OBFF_CFG, data); 1162 return 0; 1163 } 1164 1165 static int lan743x_dmac_tx_get_state(struct lan743x_adapter *adapter, 1166 int tx_channel) 1167 { 1168 u32 dmac_cmd = 0; 1169 1170 dmac_cmd = lan743x_csr_read(adapter, DMAC_CMD); 1171 return DMAC_CHANNEL_STATE_SET((dmac_cmd & 1172 DMAC_CMD_START_T_(tx_channel)), 1173 (dmac_cmd & 1174 DMAC_CMD_STOP_T_(tx_channel))); 1175 } 1176 1177 static int lan743x_dmac_tx_wait_till_stopped(struct lan743x_adapter *adapter, 1178 int tx_channel) 1179 { 1180 int timeout = 100; 1181 int result = 0; 1182 1183 while (timeout && 1184 ((result = lan743x_dmac_tx_get_state(adapter, tx_channel)) == 1185 DMAC_CHANNEL_STATE_STOP_PENDING)) { 1186 usleep_range(1000, 20000); 1187 timeout--; 1188 } 1189 if (result == DMAC_CHANNEL_STATE_STOP_PENDING) 1190 result = -ENODEV; 1191 return result; 1192 } 1193 1194 static int lan743x_dmac_rx_get_state(struct lan743x_adapter *adapter, 1195 int rx_channel) 1196 { 1197 u32 dmac_cmd = 0; 1198 1199 dmac_cmd = lan743x_csr_read(adapter, DMAC_CMD); 1200 return DMAC_CHANNEL_STATE_SET((dmac_cmd & 1201 DMAC_CMD_START_R_(rx_channel)), 1202 (dmac_cmd & 1203 DMAC_CMD_STOP_R_(rx_channel))); 1204 } 1205 1206 static int lan743x_dmac_rx_wait_till_stopped(struct lan743x_adapter *adapter, 1207 int rx_channel) 1208 { 1209 int timeout = 100; 1210 int result = 0; 1211 1212 while (timeout && 1213 ((result = lan743x_dmac_rx_get_state(adapter, rx_channel)) == 1214 DMAC_CHANNEL_STATE_STOP_PENDING)) { 1215 usleep_range(1000, 20000); 1216 timeout--; 1217 } 1218 if (result == DMAC_CHANNEL_STATE_STOP_PENDING) 1219 result = -ENODEV; 1220 return result; 1221 } 1222 1223 static void lan743x_tx_release_desc(struct lan743x_tx *tx, 1224 int descriptor_index, bool cleanup) 1225 { 1226 struct lan743x_tx_buffer_info *buffer_info = NULL; 1227 struct lan743x_tx_descriptor *descriptor = NULL; 1228 u32 descriptor_type = 0; 1229 1230 descriptor = &tx->ring_cpu_ptr[descriptor_index]; 1231 buffer_info = &tx->buffer_info[descriptor_index]; 1232 if (!(buffer_info->flags & TX_BUFFER_INFO_FLAG_ACTIVE)) 1233 goto done; 1234 1235 descriptor_type = (descriptor->data0) & 1236 TX_DESC_DATA0_DTYPE_MASK_; 1237 if (descriptor_type == TX_DESC_DATA0_DTYPE_DATA_) 1238 goto clean_up_data_descriptor; 1239 else 1240 goto clear_active; 1241 1242 clean_up_data_descriptor: 1243 if (buffer_info->dma_ptr) { 1244 if (buffer_info->flags & 1245 TX_BUFFER_INFO_FLAG_SKB_FRAGMENT) { 1246 dma_unmap_page(&tx->adapter->pdev->dev, 1247 buffer_info->dma_ptr, 1248 buffer_info->buffer_length, 1249 DMA_TO_DEVICE); 1250 } else { 1251 dma_unmap_single(&tx->adapter->pdev->dev, 1252 buffer_info->dma_ptr, 1253 
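/* Illustrative sketch (editorial addition, not part of the driver):
 * the unmap call in lan743x_tx_release_desc() has to match how the
 * buffer was mapped.  The skb head is mapped with dma_map_single() in
 * lan743x_tx_frame_start(), while page fragments are mapped with
 * skb_frag_dma_map() (a page mapping) in lan743x_tx_frame_add_fragment();
 * TX_BUFFER_INFO_FLAG_SKB_FRAGMENT records which case applies:
 *
 *	mapped with				unmapped with
 *	dma_map_single(dev, skb->data, ...)	dma_unmap_single(dev, ...)
 *	skb_frag_dma_map(dev, frag, ...)	dma_unmap_page(dev, ...)
 *
 * Mismatched pairs may appear to work without an IOMMU but are not
 * correct in general.
 */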
buffer_info->buffer_length, 1254 DMA_TO_DEVICE); 1255 } 1256 buffer_info->dma_ptr = 0; 1257 buffer_info->buffer_length = 0; 1258 } 1259 if (buffer_info->skb) { 1260 dev_kfree_skb(buffer_info->skb); 1261 buffer_info->skb = NULL; 1262 } 1263 1264 clear_active: 1265 buffer_info->flags &= ~TX_BUFFER_INFO_FLAG_ACTIVE; 1266 1267 done: 1268 memset(buffer_info, 0, sizeof(*buffer_info)); 1269 memset(descriptor, 0, sizeof(*descriptor)); 1270 } 1271 1272 static int lan743x_tx_next_index(struct lan743x_tx *tx, int index) 1273 { 1274 return ((++index) % tx->ring_size); 1275 } 1276 1277 static void lan743x_tx_release_completed_descriptors(struct lan743x_tx *tx) 1278 { 1279 while ((*tx->head_cpu_ptr) != (tx->last_head)) { 1280 lan743x_tx_release_desc(tx, tx->last_head, false); 1281 tx->last_head = lan743x_tx_next_index(tx, tx->last_head); 1282 } 1283 } 1284 1285 static void lan743x_tx_release_all_descriptors(struct lan743x_tx *tx) 1286 { 1287 u32 original_head = 0; 1288 1289 original_head = tx->last_head; 1290 do { 1291 lan743x_tx_release_desc(tx, tx->last_head, true); 1292 tx->last_head = lan743x_tx_next_index(tx, tx->last_head); 1293 } while (tx->last_head != original_head); 1294 memset(tx->ring_cpu_ptr, 0, 1295 sizeof(*tx->ring_cpu_ptr) * (tx->ring_size)); 1296 memset(tx->buffer_info, 0, 1297 sizeof(*tx->buffer_info) * (tx->ring_size)); 1298 } 1299 1300 static int lan743x_tx_get_desc_cnt(struct lan743x_tx *tx, 1301 struct sk_buff *skb) 1302 { 1303 int result = 1; /* 1 for the main skb buffer */ 1304 int nr_frags = 0; 1305 1306 if (skb_is_gso(skb)) 1307 result++; /* requires an extension descriptor */ 1308 nr_frags = skb_shinfo(skb)->nr_frags; 1309 result += nr_frags; /* 1 for each fragment buffer */ 1310 return result; 1311 } 1312 1313 static int lan743x_tx_get_avail_desc(struct lan743x_tx *tx) 1314 { 1315 int last_head = tx->last_head; 1316 int last_tail = tx->last_tail; 1317 1318 if (last_tail >= last_head) 1319 return tx->ring_size - last_tail + last_head - 1; 1320 else 1321 return last_head - last_tail - 1; 1322 } 1323 1324 static int lan743x_tx_frame_start(struct lan743x_tx *tx, 1325 unsigned char *first_buffer, 1326 unsigned int first_buffer_length, 1327 unsigned int frame_length, 1328 bool check_sum) 1329 { 1330 /* called only from within lan743x_tx_xmit_frame. 1331 * assuming tx->ring_lock has already been acquired. 
1332 */ 1333 struct lan743x_tx_descriptor *tx_descriptor = NULL; 1334 struct lan743x_tx_buffer_info *buffer_info = NULL; 1335 struct lan743x_adapter *adapter = tx->adapter; 1336 struct device *dev = &adapter->pdev->dev; 1337 dma_addr_t dma_ptr; 1338 1339 tx->frame_flags |= TX_FRAME_FLAG_IN_PROGRESS; 1340 tx->frame_first = tx->last_tail; 1341 tx->frame_tail = tx->frame_first; 1342 1343 tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail]; 1344 buffer_info = &tx->buffer_info[tx->frame_tail]; 1345 dma_ptr = dma_map_single(dev, first_buffer, first_buffer_length, 1346 DMA_TO_DEVICE); 1347 if (dma_mapping_error(dev, dma_ptr)) 1348 return -ENOMEM; 1349 1350 tx_descriptor->data1 = DMA_ADDR_LOW32(dma_ptr); 1351 tx_descriptor->data2 = DMA_ADDR_HIGH32(dma_ptr); 1352 tx_descriptor->data3 = (frame_length << 16) & 1353 TX_DESC_DATA3_FRAME_LENGTH_MSS_MASK_; 1354 1355 buffer_info->skb = NULL; 1356 buffer_info->dma_ptr = dma_ptr; 1357 buffer_info->buffer_length = first_buffer_length; 1358 buffer_info->flags |= TX_BUFFER_INFO_FLAG_ACTIVE; 1359 1360 tx->frame_data0 = (first_buffer_length & 1361 TX_DESC_DATA0_BUF_LENGTH_MASK_) | 1362 TX_DESC_DATA0_DTYPE_DATA_ | 1363 TX_DESC_DATA0_FS_ | 1364 TX_DESC_DATA0_FCS_; 1365 1366 if (check_sum) 1367 tx->frame_data0 |= TX_DESC_DATA0_ICE_ | 1368 TX_DESC_DATA0_IPE_ | 1369 TX_DESC_DATA0_TPE_; 1370 1371 /* data0 will be programmed in one of other frame assembler functions */ 1372 return 0; 1373 } 1374 1375 static void lan743x_tx_frame_add_lso(struct lan743x_tx *tx, 1376 unsigned int frame_length) 1377 { 1378 /* called only from within lan743x_tx_xmit_frame. 1379 * assuming tx->ring_lock has already been acquired. 1380 */ 1381 struct lan743x_tx_descriptor *tx_descriptor = NULL; 1382 struct lan743x_tx_buffer_info *buffer_info = NULL; 1383 1384 /* wrap up previous descriptor */ 1385 tx->frame_data0 |= TX_DESC_DATA0_EXT_; 1386 tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail]; 1387 tx_descriptor->data0 = tx->frame_data0; 1388 1389 /* move to next descriptor */ 1390 tx->frame_tail = lan743x_tx_next_index(tx, tx->frame_tail); 1391 tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail]; 1392 buffer_info = &tx->buffer_info[tx->frame_tail]; 1393 1394 /* add extension descriptor */ 1395 tx_descriptor->data1 = 0; 1396 tx_descriptor->data2 = 0; 1397 tx_descriptor->data3 = 0; 1398 1399 buffer_info->skb = NULL; 1400 buffer_info->dma_ptr = 0; 1401 buffer_info->buffer_length = 0; 1402 buffer_info->flags |= TX_BUFFER_INFO_FLAG_ACTIVE; 1403 1404 tx->frame_data0 = (frame_length & TX_DESC_DATA0_EXT_PAY_LENGTH_MASK_) | 1405 TX_DESC_DATA0_DTYPE_EXT_ | 1406 TX_DESC_DATA0_EXT_LSO_; 1407 1408 /* data0 will be programmed in one of other frame assembler functions */ 1409 } 1410 1411 static int lan743x_tx_frame_add_fragment(struct lan743x_tx *tx, 1412 const struct skb_frag_struct *fragment, 1413 unsigned int frame_length) 1414 { 1415 /* called only from within lan743x_tx_xmit_frame 1416 * assuming tx->ring_lock has already been acquired 1417 */ 1418 struct lan743x_tx_descriptor *tx_descriptor = NULL; 1419 struct lan743x_tx_buffer_info *buffer_info = NULL; 1420 struct lan743x_adapter *adapter = tx->adapter; 1421 struct device *dev = &adapter->pdev->dev; 1422 unsigned int fragment_length = 0; 1423 dma_addr_t dma_ptr; 1424 1425 fragment_length = skb_frag_size(fragment); 1426 if (!fragment_length) 1427 return 0; 1428 1429 /* wrap up previous descriptor */ 1430 tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail]; 1431 tx_descriptor->data0 = tx->frame_data0; 1432 1433 /* move to next descriptor */ 1434 tx->frame_tail 
= lan743x_tx_next_index(tx, tx->frame_tail); 1435 tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail]; 1436 buffer_info = &tx->buffer_info[tx->frame_tail]; 1437 dma_ptr = skb_frag_dma_map(dev, fragment, 1438 0, fragment_length, 1439 DMA_TO_DEVICE); 1440 if (dma_mapping_error(dev, dma_ptr)) { 1441 int desc_index; 1442 1443 /* cleanup all previously setup descriptors */ 1444 desc_index = tx->frame_first; 1445 while (desc_index != tx->frame_tail) { 1446 lan743x_tx_release_desc(tx, desc_index, true); 1447 desc_index = lan743x_tx_next_index(tx, desc_index); 1448 } 1449 dma_wmb(); 1450 tx->frame_flags &= ~TX_FRAME_FLAG_IN_PROGRESS; 1451 tx->frame_first = 0; 1452 tx->frame_data0 = 0; 1453 tx->frame_tail = 0; 1454 return -ENOMEM; 1455 } 1456 1457 tx_descriptor->data1 = DMA_ADDR_LOW32(dma_ptr); 1458 tx_descriptor->data2 = DMA_ADDR_HIGH32(dma_ptr); 1459 tx_descriptor->data3 = (frame_length << 16) & 1460 TX_DESC_DATA3_FRAME_LENGTH_MSS_MASK_; 1461 1462 buffer_info->skb = NULL; 1463 buffer_info->dma_ptr = dma_ptr; 1464 buffer_info->buffer_length = fragment_length; 1465 buffer_info->flags |= TX_BUFFER_INFO_FLAG_ACTIVE; 1466 buffer_info->flags |= TX_BUFFER_INFO_FLAG_SKB_FRAGMENT; 1467 1468 tx->frame_data0 = (fragment_length & TX_DESC_DATA0_BUF_LENGTH_MASK_) | 1469 TX_DESC_DATA0_DTYPE_DATA_ | 1470 TX_DESC_DATA0_FCS_; 1471 1472 /* data0 will be programmed in one of other frame assembler functions */ 1473 return 0; 1474 } 1475 1476 static void lan743x_tx_frame_end(struct lan743x_tx *tx, 1477 struct sk_buff *skb, 1478 bool ignore_sync) 1479 { 1480 /* called only from within lan743x_tx_xmit_frame 1481 * assuming tx->ring_lock has already been acquired 1482 */ 1483 struct lan743x_tx_descriptor *tx_descriptor = NULL; 1484 struct lan743x_tx_buffer_info *buffer_info = NULL; 1485 struct lan743x_adapter *adapter = tx->adapter; 1486 u32 tx_tail_flags = 0; 1487 1488 /* wrap up previous descriptor */ 1489 tx->frame_data0 |= TX_DESC_DATA0_LS_; 1490 tx->frame_data0 |= TX_DESC_DATA0_IOC_; 1491 1492 tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail]; 1493 buffer_info = &tx->buffer_info[tx->frame_tail]; 1494 buffer_info->skb = skb; 1495 if (ignore_sync) 1496 buffer_info->flags |= TX_BUFFER_INFO_FLAG_IGNORE_SYNC; 1497 1498 tx_descriptor->data0 = tx->frame_data0; 1499 tx->frame_tail = lan743x_tx_next_index(tx, tx->frame_tail); 1500 tx->last_tail = tx->frame_tail; 1501 1502 dma_wmb(); 1503 1504 if (tx->vector_flags & LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET) 1505 tx_tail_flags |= TX_TAIL_SET_TOP_INT_VEC_EN_; 1506 if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET) 1507 tx_tail_flags |= TX_TAIL_SET_DMAC_INT_EN_ | 1508 TX_TAIL_SET_TOP_INT_EN_; 1509 1510 lan743x_csr_write(adapter, TX_TAIL(tx->channel_number), 1511 tx_tail_flags | tx->frame_tail); 1512 tx->frame_flags &= ~TX_FRAME_FLAG_IN_PROGRESS; 1513 } 1514 1515 static netdev_tx_t lan743x_tx_xmit_frame(struct lan743x_tx *tx, 1516 struct sk_buff *skb) 1517 { 1518 int required_number_of_descriptors = 0; 1519 unsigned int start_frame_length = 0; 1520 unsigned int frame_length = 0; 1521 unsigned int head_length = 0; 1522 unsigned long irq_flags = 0; 1523 bool ignore_sync = false; 1524 int nr_frags = 0; 1525 bool gso = false; 1526 int j; 1527 1528 required_number_of_descriptors = lan743x_tx_get_desc_cnt(tx, skb); 1529 1530 spin_lock_irqsave(&tx->ring_lock, irq_flags); 1531 if (required_number_of_descriptors > 1532 lan743x_tx_get_avail_desc(tx)) { 1533 if (required_number_of_descriptors > (tx->ring_size - 1)) { 1534 dev_kfree_skb(skb); 1535 } else { 1536 /* save to 
overflow buffer */ 1537 tx->overflow_skb = skb; 1538 netif_stop_queue(tx->adapter->netdev); 1539 } 1540 goto unlock; 1541 } 1542 1543 /* space available, transmit skb */ 1544 head_length = skb_headlen(skb); 1545 frame_length = skb_pagelen(skb); 1546 nr_frags = skb_shinfo(skb)->nr_frags; 1547 start_frame_length = frame_length; 1548 gso = skb_is_gso(skb); 1549 if (gso) { 1550 start_frame_length = max(skb_shinfo(skb)->gso_size, 1551 (unsigned short)8); 1552 } 1553 1554 if (lan743x_tx_frame_start(tx, 1555 skb->data, head_length, 1556 start_frame_length, 1557 skb->ip_summed == CHECKSUM_PARTIAL)) { 1558 dev_kfree_skb(skb); 1559 goto unlock; 1560 } 1561 1562 if (gso) 1563 lan743x_tx_frame_add_lso(tx, frame_length); 1564 1565 if (nr_frags <= 0) 1566 goto finish; 1567 1568 for (j = 0; j < nr_frags; j++) { 1569 const struct skb_frag_struct *frag; 1570 1571 frag = &(skb_shinfo(skb)->frags[j]); 1572 if (lan743x_tx_frame_add_fragment(tx, frag, frame_length)) { 1573 /* upon error no need to call 1574 * lan743x_tx_frame_end 1575 * frame assembler clean up was performed inside 1576 * lan743x_tx_frame_add_fragment 1577 */ 1578 dev_kfree_skb(skb); 1579 goto unlock; 1580 } 1581 } 1582 1583 finish: 1584 lan743x_tx_frame_end(tx, skb, ignore_sync); 1585 1586 unlock: 1587 spin_unlock_irqrestore(&tx->ring_lock, irq_flags); 1588 return NETDEV_TX_OK; 1589 } 1590 1591 static int lan743x_tx_napi_poll(struct napi_struct *napi, int weight) 1592 { 1593 struct lan743x_tx *tx = container_of(napi, struct lan743x_tx, napi); 1594 struct lan743x_adapter *adapter = tx->adapter; 1595 bool start_transmitter = false; 1596 unsigned long irq_flags = 0; 1597 u32 ioc_bit = 0; 1598 u32 int_sts = 0; 1599 1600 ioc_bit = DMAC_INT_BIT_TX_IOC_(tx->channel_number); 1601 int_sts = lan743x_csr_read(adapter, DMAC_INT_STS); 1602 if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C) 1603 lan743x_csr_write(adapter, DMAC_INT_STS, ioc_bit); 1604 spin_lock_irqsave(&tx->ring_lock, irq_flags); 1605 1606 /* clean up tx ring */ 1607 lan743x_tx_release_completed_descriptors(tx); 1608 if (netif_queue_stopped(adapter->netdev)) { 1609 if (tx->overflow_skb) { 1610 if (lan743x_tx_get_desc_cnt(tx, tx->overflow_skb) <= 1611 lan743x_tx_get_avail_desc(tx)) 1612 start_transmitter = true; 1613 } else { 1614 netif_wake_queue(adapter->netdev); 1615 } 1616 } 1617 spin_unlock_irqrestore(&tx->ring_lock, irq_flags); 1618 1619 if (start_transmitter) { 1620 /* space is now available, transmit overflow skb */ 1621 lan743x_tx_xmit_frame(tx, tx->overflow_skb); 1622 tx->overflow_skb = NULL; 1623 netif_wake_queue(adapter->netdev); 1624 } 1625 1626 if (!napi_complete_done(napi, weight)) 1627 goto done; 1628 1629 /* enable isr */ 1630 lan743x_csr_write(adapter, INT_EN_SET, 1631 INT_BIT_DMA_TX_(tx->channel_number)); 1632 lan743x_csr_read(adapter, INT_STS); 1633 1634 done: 1635 return weight; 1636 } 1637 1638 static void lan743x_tx_ring_cleanup(struct lan743x_tx *tx) 1639 { 1640 if (tx->head_cpu_ptr) { 1641 pci_free_consistent(tx->adapter->pdev, 1642 sizeof(*tx->head_cpu_ptr), 1643 (void *)(tx->head_cpu_ptr), 1644 tx->head_dma_ptr); 1645 tx->head_cpu_ptr = NULL; 1646 tx->head_dma_ptr = 0; 1647 } 1648 kfree(tx->buffer_info); 1649 tx->buffer_info = NULL; 1650 1651 if (tx->ring_cpu_ptr) { 1652 pci_free_consistent(tx->adapter->pdev, 1653 tx->ring_allocation_size, 1654 tx->ring_cpu_ptr, 1655 tx->ring_dma_ptr); 1656 tx->ring_allocation_size = 0; 1657 tx->ring_cpu_ptr = NULL; 1658 tx->ring_dma_ptr = 0; 1659 } 1660 tx->ring_size = 0; 1661 } 1662 1663 static int 
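/* Illustrative sketch (editorial addition, not part of the driver):
 * lan743x_tx_xmit_frame() above drives the frame assembler helpers,
 * each of which leaves its descriptor's data0 word in tx->frame_data0
 * so the next helper (or lan743x_tx_frame_end()) can commit it when it
 * wraps the previous descriptor up.  For a GSO skb with two page
 * fragments the sequence is:
 *
 *	lan743x_tx_frame_start(tx, skb->data, headlen, start_len, csum);
 *	lan743x_tx_frame_add_lso(tx, frame_length);	// extension desc
 *	lan743x_tx_frame_add_fragment(tx, frag0, frame_length);
 *	lan743x_tx_frame_add_fragment(tx, frag1, frame_length);
 *	lan743x_tx_frame_end(tx, skb, ignore_sync);	// LS + IOC, TX_TAIL
 *
 * Only the TX_TAIL write in lan743x_tx_frame_end() exposes the chain to
 * the DMA engine, which is why a mapping failure part-way through can
 * still unwind the frame without the hardware ever seeing it.
 */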
lan743x_tx_ring_init(struct lan743x_tx *tx) 1664 { 1665 size_t ring_allocation_size = 0; 1666 void *cpu_ptr = NULL; 1667 dma_addr_t dma_ptr; 1668 int ret = -ENOMEM; 1669 1670 tx->ring_size = LAN743X_TX_RING_SIZE; 1671 if (tx->ring_size & ~TX_CFG_B_TX_RING_LEN_MASK_) { 1672 ret = -EINVAL; 1673 goto cleanup; 1674 } 1675 ring_allocation_size = ALIGN(tx->ring_size * 1676 sizeof(struct lan743x_tx_descriptor), 1677 PAGE_SIZE); 1678 dma_ptr = 0; 1679 cpu_ptr = pci_zalloc_consistent(tx->adapter->pdev, 1680 ring_allocation_size, &dma_ptr); 1681 if (!cpu_ptr) { 1682 ret = -ENOMEM; 1683 goto cleanup; 1684 } 1685 1686 tx->ring_allocation_size = ring_allocation_size; 1687 tx->ring_cpu_ptr = (struct lan743x_tx_descriptor *)cpu_ptr; 1688 tx->ring_dma_ptr = dma_ptr; 1689 1690 cpu_ptr = kcalloc(tx->ring_size, sizeof(*tx->buffer_info), GFP_KERNEL); 1691 if (!cpu_ptr) { 1692 ret = -ENOMEM; 1693 goto cleanup; 1694 } 1695 tx->buffer_info = (struct lan743x_tx_buffer_info *)cpu_ptr; 1696 dma_ptr = 0; 1697 cpu_ptr = pci_zalloc_consistent(tx->adapter->pdev, 1698 sizeof(*tx->head_cpu_ptr), &dma_ptr); 1699 if (!cpu_ptr) { 1700 ret = -ENOMEM; 1701 goto cleanup; 1702 } 1703 1704 tx->head_cpu_ptr = cpu_ptr; 1705 tx->head_dma_ptr = dma_ptr; 1706 if (tx->head_dma_ptr & 0x3) { 1707 ret = -ENOMEM; 1708 goto cleanup; 1709 } 1710 1711 return 0; 1712 1713 cleanup: 1714 lan743x_tx_ring_cleanup(tx); 1715 return ret; 1716 } 1717 1718 static void lan743x_tx_close(struct lan743x_tx *tx) 1719 { 1720 struct lan743x_adapter *adapter = tx->adapter; 1721 1722 lan743x_csr_write(adapter, 1723 DMAC_CMD, 1724 DMAC_CMD_STOP_T_(tx->channel_number)); 1725 lan743x_dmac_tx_wait_till_stopped(adapter, tx->channel_number); 1726 1727 lan743x_csr_write(adapter, 1728 DMAC_INT_EN_CLR, 1729 DMAC_INT_BIT_TX_IOC_(tx->channel_number)); 1730 lan743x_csr_write(adapter, INT_EN_CLR, 1731 INT_BIT_DMA_TX_(tx->channel_number)); 1732 napi_disable(&tx->napi); 1733 netif_napi_del(&tx->napi); 1734 1735 lan743x_csr_write(adapter, FCT_TX_CTL, 1736 FCT_TX_CTL_DIS_(tx->channel_number)); 1737 lan743x_csr_wait_for_bit(adapter, FCT_TX_CTL, 1738 FCT_TX_CTL_EN_(tx->channel_number), 1739 0, 1000, 20000, 100); 1740 1741 lan743x_tx_release_all_descriptors(tx); 1742 1743 if (tx->overflow_skb) { 1744 dev_kfree_skb(tx->overflow_skb); 1745 tx->overflow_skb = NULL; 1746 } 1747 1748 lan743x_tx_ring_cleanup(tx); 1749 } 1750 1751 static int lan743x_tx_open(struct lan743x_tx *tx) 1752 { 1753 struct lan743x_adapter *adapter = NULL; 1754 u32 data = 0; 1755 int ret; 1756 1757 adapter = tx->adapter; 1758 ret = lan743x_tx_ring_init(tx); 1759 if (ret) 1760 return ret; 1761 1762 /* initialize fifo */ 1763 lan743x_csr_write(adapter, FCT_TX_CTL, 1764 FCT_TX_CTL_RESET_(tx->channel_number)); 1765 lan743x_csr_wait_for_bit(adapter, FCT_TX_CTL, 1766 FCT_TX_CTL_RESET_(tx->channel_number), 1767 0, 1000, 20000, 100); 1768 1769 /* enable fifo */ 1770 lan743x_csr_write(adapter, FCT_TX_CTL, 1771 FCT_TX_CTL_EN_(tx->channel_number)); 1772 1773 /* reset tx channel */ 1774 lan743x_csr_write(adapter, DMAC_CMD, 1775 DMAC_CMD_TX_SWR_(tx->channel_number)); 1776 lan743x_csr_wait_for_bit(adapter, DMAC_CMD, 1777 DMAC_CMD_TX_SWR_(tx->channel_number), 1778 0, 1000, 20000, 100); 1779 1780 /* Write TX_BASE_ADDR */ 1781 lan743x_csr_write(adapter, 1782 TX_BASE_ADDRH(tx->channel_number), 1783 DMA_ADDR_HIGH32(tx->ring_dma_ptr)); 1784 lan743x_csr_write(adapter, 1785 TX_BASE_ADDRL(tx->channel_number), 1786 DMA_ADDR_LOW32(tx->ring_dma_ptr)); 1787 1788 /* Write TX_CFG_B */ 1789 data = lan743x_csr_read(adapter, 
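/* Illustrative sketch (editorial addition, not part of the driver):
 * the descriptor ring and the head-writeback word allocated in
 * lan743x_tx_ring_init() come from coherent DMA memory, so CPU and
 * device see each other's updates without explicit sync operations.
 * pci_zalloc_consistent() is the legacy PCI wrapper; in the generic DMA
 * API the same allocation would look roughly like:
 *
 *	ring = dma_alloc_coherent(&pdev->dev, ring_allocation_size,
 *				  &ring_dma, GFP_KERNEL);
 *	...
 *	dma_free_coherent(&pdev->dev, ring_allocation_size, ring, ring_dma);
 *
 * The head-writeback address must also be at least 4-byte aligned,
 * which is what the (tx->head_dma_ptr & 0x3) check above enforces.
 */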
TX_CFG_B(tx->channel_number)); 1790 data &= ~TX_CFG_B_TX_RING_LEN_MASK_; 1791 data |= ((tx->ring_size) & TX_CFG_B_TX_RING_LEN_MASK_); 1792 if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0)) 1793 data |= TX_CFG_B_TDMABL_512_; 1794 lan743x_csr_write(adapter, TX_CFG_B(tx->channel_number), data); 1795 1796 /* Write TX_CFG_A */ 1797 data = TX_CFG_A_TX_TMR_HPWB_SEL_IOC_ | TX_CFG_A_TX_HP_WB_EN_; 1798 if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0)) { 1799 data |= TX_CFG_A_TX_HP_WB_ON_INT_TMR_; 1800 data |= TX_CFG_A_TX_PF_THRES_SET_(0x10); 1801 data |= TX_CFG_A_TX_PF_PRI_THRES_SET_(0x04); 1802 data |= TX_CFG_A_TX_HP_WB_THRES_SET_(0x07); 1803 } 1804 lan743x_csr_write(adapter, TX_CFG_A(tx->channel_number), data); 1805 1806 /* Write TX_HEAD_WRITEBACK_ADDR */ 1807 lan743x_csr_write(adapter, 1808 TX_HEAD_WRITEBACK_ADDRH(tx->channel_number), 1809 DMA_ADDR_HIGH32(tx->head_dma_ptr)); 1810 lan743x_csr_write(adapter, 1811 TX_HEAD_WRITEBACK_ADDRL(tx->channel_number), 1812 DMA_ADDR_LOW32(tx->head_dma_ptr)); 1813 1814 /* set last head */ 1815 tx->last_head = lan743x_csr_read(adapter, TX_HEAD(tx->channel_number)); 1816 1817 /* write TX_TAIL */ 1818 tx->last_tail = 0; 1819 lan743x_csr_write(adapter, TX_TAIL(tx->channel_number), 1820 (u32)(tx->last_tail)); 1821 tx->vector_flags = lan743x_intr_get_vector_flags(adapter, 1822 INT_BIT_DMA_TX_ 1823 (tx->channel_number)); 1824 netif_napi_add(adapter->netdev, 1825 &tx->napi, lan743x_tx_napi_poll, 1826 tx->ring_size - 1); 1827 napi_enable(&tx->napi); 1828 1829 data = 0; 1830 if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_CLEAR) 1831 data |= TX_CFG_C_TX_TOP_INT_EN_AUTO_CLR_; 1832 if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_AUTO_CLEAR) 1833 data |= TX_CFG_C_TX_DMA_INT_STS_AUTO_CLR_; 1834 if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_R2C) 1835 data |= TX_CFG_C_TX_INT_STS_R2C_MODE_MASK_; 1836 if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_R2C) 1837 data |= TX_CFG_C_TX_INT_EN_R2C_; 1838 lan743x_csr_write(adapter, TX_CFG_C(tx->channel_number), data); 1839 1840 if (!(tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET)) 1841 lan743x_csr_write(adapter, INT_EN_SET, 1842 INT_BIT_DMA_TX_(tx->channel_number)); 1843 lan743x_csr_write(adapter, DMAC_INT_EN_SET, 1844 DMAC_INT_BIT_TX_IOC_(tx->channel_number)); 1845 1846 /* start dmac channel */ 1847 lan743x_csr_write(adapter, DMAC_CMD, 1848 DMAC_CMD_START_T_(tx->channel_number)); 1849 return 0; 1850 } 1851 1852 static int lan743x_rx_next_index(struct lan743x_rx *rx, int index) 1853 { 1854 return ((++index) % rx->ring_size); 1855 } 1856 1857 static int lan743x_rx_allocate_ring_element(struct lan743x_rx *rx, int index) 1858 { 1859 struct lan743x_rx_buffer_info *buffer_info; 1860 struct lan743x_rx_descriptor *descriptor; 1861 int length = 0; 1862 1863 length = (LAN743X_MAX_FRAME_SIZE + ETH_HLEN + 4 + RX_HEAD_PADDING); 1864 descriptor = &rx->ring_cpu_ptr[index]; 1865 buffer_info = &rx->buffer_info[index]; 1866 buffer_info->skb = __netdev_alloc_skb(rx->adapter->netdev, 1867 length, 1868 GFP_ATOMIC | GFP_DMA); 1869 if (!(buffer_info->skb)) 1870 return -ENOMEM; 1871 buffer_info->dma_ptr = dma_map_single(&rx->adapter->pdev->dev, 1872 buffer_info->skb->data, 1873 length, 1874 DMA_FROM_DEVICE); 1875 if (dma_mapping_error(&rx->adapter->pdev->dev, 1876 buffer_info->dma_ptr)) { 1877 buffer_info->dma_ptr = 0; 1878 return -ENOMEM; 1879 } 1880 1881 buffer_info->buffer_length = length; 1882 descriptor->data1 = DMA_ADDR_LOW32(buffer_info->dma_ptr); 1883 descriptor->data2 = 
DMA_ADDR_HIGH32(buffer_info->dma_ptr); 1884 descriptor->data3 = 0; 1885 descriptor->data0 = (RX_DESC_DATA0_OWN_ | 1886 (length & RX_DESC_DATA0_BUF_LENGTH_MASK_)); 1887 skb_reserve(buffer_info->skb, RX_HEAD_PADDING); 1888 1889 return 0; 1890 } 1891 1892 static void lan743x_rx_reuse_ring_element(struct lan743x_rx *rx, int index) 1893 { 1894 struct lan743x_rx_buffer_info *buffer_info; 1895 struct lan743x_rx_descriptor *descriptor; 1896 1897 descriptor = &rx->ring_cpu_ptr[index]; 1898 buffer_info = &rx->buffer_info[index]; 1899 1900 descriptor->data1 = DMA_ADDR_LOW32(buffer_info->dma_ptr); 1901 descriptor->data2 = DMA_ADDR_HIGH32(buffer_info->dma_ptr); 1902 descriptor->data3 = 0; 1903 descriptor->data0 = (RX_DESC_DATA0_OWN_ | 1904 ((buffer_info->buffer_length) & 1905 RX_DESC_DATA0_BUF_LENGTH_MASK_)); 1906 } 1907 1908 static void lan743x_rx_release_ring_element(struct lan743x_rx *rx, int index) 1909 { 1910 struct lan743x_rx_buffer_info *buffer_info; 1911 struct lan743x_rx_descriptor *descriptor; 1912 1913 descriptor = &rx->ring_cpu_ptr[index]; 1914 buffer_info = &rx->buffer_info[index]; 1915 1916 memset(descriptor, 0, sizeof(*descriptor)); 1917 1918 if (buffer_info->dma_ptr) { 1919 dma_unmap_single(&rx->adapter->pdev->dev, 1920 buffer_info->dma_ptr, 1921 buffer_info->buffer_length, 1922 DMA_FROM_DEVICE); 1923 buffer_info->dma_ptr = 0; 1924 } 1925 1926 if (buffer_info->skb) { 1927 dev_kfree_skb(buffer_info->skb); 1928 buffer_info->skb = NULL; 1929 } 1930 1931 memset(buffer_info, 0, sizeof(*buffer_info)); 1932 } 1933 1934 static int lan743x_rx_process_packet(struct lan743x_rx *rx) 1935 { 1936 struct skb_shared_hwtstamps *hwtstamps = NULL; 1937 int result = RX_PROCESS_RESULT_NOTHING_TO_DO; 1938 struct lan743x_rx_buffer_info *buffer_info; 1939 struct lan743x_rx_descriptor *descriptor; 1940 int current_head_index = -1; 1941 int extension_index = -1; 1942 int first_index = -1; 1943 int last_index = -1; 1944 1945 current_head_index = *rx->head_cpu_ptr; 1946 if (current_head_index < 0 || current_head_index >= rx->ring_size) 1947 goto done; 1948 1949 if (rx->last_head < 0 || rx->last_head >= rx->ring_size) 1950 goto done; 1951 1952 if (rx->last_head != current_head_index) { 1953 descriptor = &rx->ring_cpu_ptr[rx->last_head]; 1954 if (descriptor->data0 & RX_DESC_DATA0_OWN_) 1955 goto done; 1956 1957 if (!(descriptor->data0 & RX_DESC_DATA0_FS_)) 1958 goto done; 1959 1960 first_index = rx->last_head; 1961 if (descriptor->data0 & RX_DESC_DATA0_LS_) { 1962 last_index = rx->last_head; 1963 } else { 1964 int index; 1965 1966 index = lan743x_rx_next_index(rx, first_index); 1967 while (index != current_head_index) { 1968 descriptor = &rx->ring_cpu_ptr[index]; 1969 if (descriptor->data0 & RX_DESC_DATA0_OWN_) 1970 goto done; 1971 1972 if (descriptor->data0 & RX_DESC_DATA0_LS_) { 1973 last_index = index; 1974 break; 1975 } 1976 index = lan743x_rx_next_index(rx, index); 1977 } 1978 } 1979 if (last_index >= 0) { 1980 descriptor = &rx->ring_cpu_ptr[last_index]; 1981 if (descriptor->data0 & RX_DESC_DATA0_EXT_) { 1982 /* extension is expected to follow */ 1983 int index = lan743x_rx_next_index(rx, 1984 last_index); 1985 if (index != current_head_index) { 1986 descriptor = &rx->ring_cpu_ptr[index]; 1987 if (descriptor->data0 & 1988 RX_DESC_DATA0_OWN_) { 1989 goto done; 1990 } 1991 if (descriptor->data0 & 1992 RX_DESC_DATA0_EXT_) { 1993 extension_index = index; 1994 } else { 1995 goto done; 1996 } 1997 } else { 1998 /* extension is not yet available */ 1999 /* prevent processing of this packet */ 2000 first_index = -1; 
2001 last_index = -1; 2002 } 2003 } 2004 } 2005 } 2006 if (first_index >= 0 && last_index >= 0) { 2007 int real_last_index = last_index; 2008 struct sk_buff *skb = NULL; 2009 u32 ts_sec = 0; 2010 u32 ts_nsec = 0; 2011 2012 /* packet is available */ 2013 if (first_index == last_index) { 2014 /* single buffer packet */ 2015 int packet_length; 2016 2017 buffer_info = &rx->buffer_info[first_index]; 2018 skb = buffer_info->skb; 2019 descriptor = &rx->ring_cpu_ptr[first_index]; 2020 2021 /* unmap from dma */ 2022 if (buffer_info->dma_ptr) { 2023 dma_unmap_single(&rx->adapter->pdev->dev, 2024 buffer_info->dma_ptr, 2025 buffer_info->buffer_length, 2026 DMA_FROM_DEVICE); 2027 buffer_info->dma_ptr = 0; 2028 buffer_info->buffer_length = 0; 2029 } 2030 buffer_info->skb = NULL; 2031 packet_length = RX_DESC_DATA0_FRAME_LENGTH_GET_ 2032 (descriptor->data0); 2033 skb_put(skb, packet_length - 4); 2034 skb->protocol = eth_type_trans(skb, 2035 rx->adapter->netdev); 2036 lan743x_rx_allocate_ring_element(rx, first_index); 2037 } else { 2038 int index = first_index; 2039 2040 /* multi buffer packet not supported */ 2041 /* this should not happen since 2042 * buffers are allocated to be at least jumbo size 2043 */ 2044 2045 /* clean up buffers */ 2046 if (first_index <= last_index) { 2047 while ((index >= first_index) && 2048 (index <= last_index)) { 2049 lan743x_rx_release_ring_element(rx, 2050 index); 2051 lan743x_rx_allocate_ring_element(rx, 2052 index); 2053 index = lan743x_rx_next_index(rx, 2054 index); 2055 } 2056 } else { 2057 while ((index >= first_index) || 2058 (index <= last_index)) { 2059 lan743x_rx_release_ring_element(rx, 2060 index); 2061 lan743x_rx_allocate_ring_element(rx, 2062 index); 2063 index = lan743x_rx_next_index(rx, 2064 index); 2065 } 2066 } 2067 } 2068 2069 if (extension_index >= 0) { 2070 descriptor = &rx->ring_cpu_ptr[extension_index]; 2071 buffer_info = &rx->buffer_info[extension_index]; 2072 2073 ts_sec = descriptor->data1; 2074 ts_nsec = (descriptor->data2 & 2075 RX_DESC_DATA2_TS_NS_MASK_); 2076 lan743x_rx_reuse_ring_element(rx, extension_index); 2077 real_last_index = extension_index; 2078 } 2079 2080 if (!skb) { 2081 result = RX_PROCESS_RESULT_PACKET_DROPPED; 2082 goto move_forward; 2083 } 2084 2085 if (extension_index < 0) 2086 goto pass_packet_to_os; 2087 hwtstamps = skb_hwtstamps(skb); 2088 if (hwtstamps) 2089 hwtstamps->hwtstamp = ktime_set(ts_sec, ts_nsec); 2090 2091 pass_packet_to_os: 2092 /* pass packet to OS */ 2093 napi_gro_receive(&rx->napi, skb); 2094 result = RX_PROCESS_RESULT_PACKET_RECEIVED; 2095 2096 move_forward: 2097 /* push tail and head forward */ 2098 rx->last_tail = real_last_index; 2099 rx->last_head = lan743x_rx_next_index(rx, real_last_index); 2100 } 2101 done: 2102 return result; 2103 } 2104 2105 static int lan743x_rx_napi_poll(struct napi_struct *napi, int weight) 2106 { 2107 struct lan743x_rx *rx = container_of(napi, struct lan743x_rx, napi); 2108 struct lan743x_adapter *adapter = rx->adapter; 2109 u32 rx_tail_flags = 0; 2110 int count; 2111 2112 if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C) { 2113 /* clear int status bit before reading packet */ 2114 lan743x_csr_write(adapter, DMAC_INT_STS, 2115 DMAC_INT_BIT_RXFRM_(rx->channel_number)); 2116 } 2117 count = 0; 2118 while (count < weight) { 2119 int rx_process_result = -1; 2120 2121 rx_process_result = lan743x_rx_process_packet(rx); 2122 if (rx_process_result == RX_PROCESS_RESULT_PACKET_RECEIVED) { 2123 count++; 2124 } else if (rx_process_result == 2125 RX_PROCESS_RESULT_NOTHING_TO_DO) { 
2126 break; 2127 } else if (rx_process_result == 2128 RX_PROCESS_RESULT_PACKET_DROPPED) { 2129 continue; 2130 } 2131 } 2132 rx->frame_count += count; 2133 if (count == weight) 2134 goto done; 2135 2136 if (!napi_complete_done(napi, count)) 2137 goto done; 2138 2139 if (rx->vector_flags & LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET) 2140 rx_tail_flags |= RX_TAIL_SET_TOP_INT_VEC_EN_; 2141 if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET) { 2142 rx_tail_flags |= RX_TAIL_SET_TOP_INT_EN_; 2143 } else { 2144 lan743x_csr_write(adapter, INT_EN_SET, 2145 INT_BIT_DMA_RX_(rx->channel_number)); 2146 } 2147 2148 /* update RX_TAIL */ 2149 lan743x_csr_write(adapter, RX_TAIL(rx->channel_number), 2150 rx_tail_flags | rx->last_tail); 2151 done: 2152 return count; 2153 } 2154 2155 static void lan743x_rx_ring_cleanup(struct lan743x_rx *rx) 2156 { 2157 if (rx->buffer_info && rx->ring_cpu_ptr) { 2158 int index; 2159 2160 for (index = 0; index < rx->ring_size; index++) 2161 lan743x_rx_release_ring_element(rx, index); 2162 } 2163 2164 if (rx->head_cpu_ptr) { 2165 pci_free_consistent(rx->adapter->pdev, 2166 sizeof(*rx->head_cpu_ptr), 2167 rx->head_cpu_ptr, 2168 rx->head_dma_ptr); 2169 rx->head_cpu_ptr = NULL; 2170 rx->head_dma_ptr = 0; 2171 } 2172 2173 kfree(rx->buffer_info); 2174 rx->buffer_info = NULL; 2175 2176 if (rx->ring_cpu_ptr) { 2177 pci_free_consistent(rx->adapter->pdev, 2178 rx->ring_allocation_size, 2179 rx->ring_cpu_ptr, 2180 rx->ring_dma_ptr); 2181 rx->ring_allocation_size = 0; 2182 rx->ring_cpu_ptr = NULL; 2183 rx->ring_dma_ptr = 0; 2184 } 2185 2186 rx->ring_size = 0; 2187 rx->last_head = 0; 2188 } 2189 2190 static int lan743x_rx_ring_init(struct lan743x_rx *rx) 2191 { 2192 size_t ring_allocation_size = 0; 2193 dma_addr_t dma_ptr = 0; 2194 void *cpu_ptr = NULL; 2195 int ret = -ENOMEM; 2196 int index = 0; 2197 2198 rx->ring_size = LAN743X_RX_RING_SIZE; 2199 if (rx->ring_size <= 1) { 2200 ret = -EINVAL; 2201 goto cleanup; 2202 } 2203 if (rx->ring_size & ~RX_CFG_B_RX_RING_LEN_MASK_) { 2204 ret = -EINVAL; 2205 goto cleanup; 2206 } 2207 ring_allocation_size = ALIGN(rx->ring_size * 2208 sizeof(struct lan743x_rx_descriptor), 2209 PAGE_SIZE); 2210 dma_ptr = 0; 2211 cpu_ptr = pci_zalloc_consistent(rx->adapter->pdev, 2212 ring_allocation_size, &dma_ptr); 2213 if (!cpu_ptr) { 2214 ret = -ENOMEM; 2215 goto cleanup; 2216 } 2217 rx->ring_allocation_size = ring_allocation_size; 2218 rx->ring_cpu_ptr = (struct lan743x_rx_descriptor *)cpu_ptr; 2219 rx->ring_dma_ptr = dma_ptr; 2220 2221 cpu_ptr = kcalloc(rx->ring_size, sizeof(*rx->buffer_info), 2222 GFP_KERNEL); 2223 if (!cpu_ptr) { 2224 ret = -ENOMEM; 2225 goto cleanup; 2226 } 2227 rx->buffer_info = (struct lan743x_rx_buffer_info *)cpu_ptr; 2228 dma_ptr = 0; 2229 cpu_ptr = pci_zalloc_consistent(rx->adapter->pdev, 2230 sizeof(*rx->head_cpu_ptr), &dma_ptr); 2231 if (!cpu_ptr) { 2232 ret = -ENOMEM; 2233 goto cleanup; 2234 } 2235 2236 rx->head_cpu_ptr = cpu_ptr; 2237 rx->head_dma_ptr = dma_ptr; 2238 if (rx->head_dma_ptr & 0x3) { 2239 ret = -ENOMEM; 2240 goto cleanup; 2241 } 2242 2243 rx->last_head = 0; 2244 for (index = 0; index < rx->ring_size; index++) { 2245 ret = lan743x_rx_allocate_ring_element(rx, index); 2246 if (ret) 2247 goto cleanup; 2248 } 2249 return 0; 2250 2251 cleanup: 2252 lan743x_rx_ring_cleanup(rx); 2253 return ret; 2254 } 2255 2256 static void lan743x_rx_close(struct lan743x_rx *rx) 2257 { 2258 struct lan743x_adapter *adapter = rx->adapter; 2259 2260 lan743x_csr_write(adapter, FCT_RX_CTL, 2261 FCT_RX_CTL_DIS_(rx->channel_number)); 2262 
lan743x_csr_wait_for_bit(adapter, FCT_RX_CTL, 2263 FCT_RX_CTL_EN_(rx->channel_number), 2264 0, 1000, 20000, 100); 2265 2266 lan743x_csr_write(adapter, DMAC_CMD, 2267 DMAC_CMD_STOP_R_(rx->channel_number)); 2268 lan743x_dmac_rx_wait_till_stopped(adapter, rx->channel_number); 2269 2270 lan743x_csr_write(adapter, DMAC_INT_EN_CLR, 2271 DMAC_INT_BIT_RXFRM_(rx->channel_number)); 2272 lan743x_csr_write(adapter, INT_EN_CLR, 2273 INT_BIT_DMA_RX_(rx->channel_number)); 2274 napi_disable(&rx->napi); 2275 2276 netif_napi_del(&rx->napi); 2277 2278 lan743x_rx_ring_cleanup(rx); 2279 } 2280 2281 static int lan743x_rx_open(struct lan743x_rx *rx) 2282 { 2283 struct lan743x_adapter *adapter = rx->adapter; 2284 u32 data = 0; 2285 int ret; 2286 2287 rx->frame_count = 0; 2288 ret = lan743x_rx_ring_init(rx); 2289 if (ret) 2290 goto return_error; 2291 2292 netif_napi_add(adapter->netdev, 2293 &rx->napi, lan743x_rx_napi_poll, 2294 rx->ring_size - 1); 2295 2296 lan743x_csr_write(adapter, DMAC_CMD, 2297 DMAC_CMD_RX_SWR_(rx->channel_number)); 2298 lan743x_csr_wait_for_bit(adapter, DMAC_CMD, 2299 DMAC_CMD_RX_SWR_(rx->channel_number), 2300 0, 1000, 20000, 100); 2301 2302 /* set ring base address */ 2303 lan743x_csr_write(adapter, 2304 RX_BASE_ADDRH(rx->channel_number), 2305 DMA_ADDR_HIGH32(rx->ring_dma_ptr)); 2306 lan743x_csr_write(adapter, 2307 RX_BASE_ADDRL(rx->channel_number), 2308 DMA_ADDR_LOW32(rx->ring_dma_ptr)); 2309 2310 /* set rx write back address */ 2311 lan743x_csr_write(adapter, 2312 RX_HEAD_WRITEBACK_ADDRH(rx->channel_number), 2313 DMA_ADDR_HIGH32(rx->head_dma_ptr)); 2314 lan743x_csr_write(adapter, 2315 RX_HEAD_WRITEBACK_ADDRL(rx->channel_number), 2316 DMA_ADDR_LOW32(rx->head_dma_ptr)); 2317 data = RX_CFG_A_RX_HP_WB_EN_; 2318 if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0)) { 2319 data |= (RX_CFG_A_RX_WB_ON_INT_TMR_ | 2320 RX_CFG_A_RX_WB_THRES_SET_(0x7) | 2321 RX_CFG_A_RX_PF_THRES_SET_(16) | 2322 RX_CFG_A_RX_PF_PRI_THRES_SET_(4)); 2323 } 2324 2325 /* set RX_CFG_A */ 2326 lan743x_csr_write(adapter, 2327 RX_CFG_A(rx->channel_number), data); 2328 2329 /* set RX_CFG_B */ 2330 data = lan743x_csr_read(adapter, RX_CFG_B(rx->channel_number)); 2331 data &= ~RX_CFG_B_RX_PAD_MASK_; 2332 if (!RX_HEAD_PADDING) 2333 data |= RX_CFG_B_RX_PAD_0_; 2334 else 2335 data |= RX_CFG_B_RX_PAD_2_; 2336 data &= ~RX_CFG_B_RX_RING_LEN_MASK_; 2337 data |= ((rx->ring_size) & RX_CFG_B_RX_RING_LEN_MASK_); 2338 data |= RX_CFG_B_TS_ALL_RX_; 2339 if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0)) 2340 data |= RX_CFG_B_RDMABL_512_; 2341 2342 lan743x_csr_write(adapter, RX_CFG_B(rx->channel_number), data); 2343 rx->vector_flags = lan743x_intr_get_vector_flags(adapter, 2344 INT_BIT_DMA_RX_ 2345 (rx->channel_number)); 2346 2347 /* set RX_CFG_C */ 2348 data = 0; 2349 if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_CLEAR) 2350 data |= RX_CFG_C_RX_TOP_INT_EN_AUTO_CLR_; 2351 if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_AUTO_CLEAR) 2352 data |= RX_CFG_C_RX_DMA_INT_STS_AUTO_CLR_; 2353 if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_R2C) 2354 data |= RX_CFG_C_RX_INT_STS_R2C_MODE_MASK_; 2355 if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_R2C) 2356 data |= RX_CFG_C_RX_INT_EN_R2C_; 2357 lan743x_csr_write(adapter, RX_CFG_C(rx->channel_number), data); 2358 2359 rx->last_tail = ((u32)(rx->ring_size - 1)); 2360 lan743x_csr_write(adapter, RX_TAIL(rx->channel_number), 2361 rx->last_tail); 2362 rx->last_head = lan743x_csr_read(adapter, RX_HEAD(rx->channel_number)); 2363 if (rx->last_head) { 2364 ret = -EIO; 2365 
goto napi_delete; 2366 } 2367 2368 napi_enable(&rx->napi); 2369 2370 lan743x_csr_write(adapter, INT_EN_SET, 2371 INT_BIT_DMA_RX_(rx->channel_number)); 2372 lan743x_csr_write(adapter, DMAC_INT_STS, 2373 DMAC_INT_BIT_RXFRM_(rx->channel_number)); 2374 lan743x_csr_write(adapter, DMAC_INT_EN_SET, 2375 DMAC_INT_BIT_RXFRM_(rx->channel_number)); 2376 lan743x_csr_write(adapter, DMAC_CMD, 2377 DMAC_CMD_START_R_(rx->channel_number)); 2378 2379 /* initialize fifo */ 2380 lan743x_csr_write(adapter, FCT_RX_CTL, 2381 FCT_RX_CTL_RESET_(rx->channel_number)); 2382 lan743x_csr_wait_for_bit(adapter, FCT_RX_CTL, 2383 FCT_RX_CTL_RESET_(rx->channel_number), 2384 0, 1000, 20000, 100); 2385 lan743x_csr_write(adapter, FCT_FLOW(rx->channel_number), 2386 FCT_FLOW_CTL_REQ_EN_ | 2387 FCT_FLOW_CTL_ON_THRESHOLD_SET_(0x2A) | 2388 FCT_FLOW_CTL_OFF_THRESHOLD_SET_(0xA)); 2389 2390 /* enable fifo */ 2391 lan743x_csr_write(adapter, FCT_RX_CTL, 2392 FCT_RX_CTL_EN_(rx->channel_number)); 2393 return 0; 2394 2395 napi_delete: 2396 netif_napi_del(&rx->napi); 2397 lan743x_rx_ring_cleanup(rx); 2398 2399 return_error: 2400 return ret; 2401 } 2402 2403 static int lan743x_netdev_close(struct net_device *netdev) 2404 { 2405 struct lan743x_adapter *adapter = netdev_priv(netdev); 2406 int index; 2407 2408 lan743x_tx_close(&adapter->tx[0]); 2409 2410 for (index = 0; index < LAN743X_USED_RX_CHANNELS; index++) 2411 lan743x_rx_close(&adapter->rx[index]); 2412 2413 lan743x_phy_close(adapter); 2414 2415 lan743x_mac_close(adapter); 2416 2417 lan743x_intr_close(adapter); 2418 2419 return 0; 2420 } 2421 2422 static int lan743x_netdev_open(struct net_device *netdev) 2423 { 2424 struct lan743x_adapter *adapter = netdev_priv(netdev); 2425 int index; 2426 int ret; 2427 2428 ret = lan743x_intr_open(adapter); 2429 if (ret) 2430 goto return_error; 2431 2432 ret = lan743x_mac_open(adapter); 2433 if (ret) 2434 goto close_intr; 2435 2436 ret = lan743x_phy_open(adapter); 2437 if (ret) 2438 goto close_mac; 2439 2440 lan743x_rfe_open(adapter); 2441 2442 for (index = 0; index < LAN743X_USED_RX_CHANNELS; index++) { 2443 ret = lan743x_rx_open(&adapter->rx[index]); 2444 if (ret) 2445 goto close_rx; 2446 } 2447 2448 ret = lan743x_tx_open(&adapter->tx[0]); 2449 if (ret) 2450 goto close_rx; 2451 2452 return 0; 2453 2454 close_rx: 2455 for (index = 0; index < LAN743X_USED_RX_CHANNELS; index++) { 2456 if (adapter->rx[index].ring_cpu_ptr) 2457 lan743x_rx_close(&adapter->rx[index]); 2458 } 2459 lan743x_phy_close(adapter); 2460 2461 close_mac: 2462 lan743x_mac_close(adapter); 2463 2464 close_intr: 2465 lan743x_intr_close(adapter); 2466 2467 return_error: 2468 netif_warn(adapter, ifup, adapter->netdev, 2469 "Error opening LAN743x\n"); 2470 return ret; 2471 } 2472 2473 static netdev_tx_t lan743x_netdev_xmit_frame(struct sk_buff *skb, 2474 struct net_device *netdev) 2475 { 2476 struct lan743x_adapter *adapter = netdev_priv(netdev); 2477 2478 return lan743x_tx_xmit_frame(&adapter->tx[0], skb); 2479 } 2480 2481 static int lan743x_netdev_ioctl(struct net_device *netdev, 2482 struct ifreq *ifr, int cmd) 2483 { 2484 if (!netif_running(netdev)) 2485 return -EINVAL; 2486 return phy_mii_ioctl(netdev->phydev, ifr, cmd); 2487 } 2488 2489 static void lan743x_netdev_set_multicast(struct net_device *netdev) 2490 { 2491 struct lan743x_adapter *adapter = netdev_priv(netdev); 2492 2493 lan743x_rfe_set_multicast(adapter); 2494 } 2495 2496 static int lan743x_netdev_change_mtu(struct net_device *netdev, int new_mtu) 2497 { 2498 struct lan743x_adapter *adapter = netdev_priv(netdev); 2499 int ret 
= 0; 2500 2501 ret = lan743x_mac_set_mtu(adapter, new_mtu); 2502 if (!ret) 2503 netdev->mtu = new_mtu; 2504 return ret; 2505 } 2506 2507 static void lan743x_netdev_get_stats64(struct net_device *netdev, 2508 struct rtnl_link_stats64 *stats) 2509 { 2510 struct lan743x_adapter *adapter = netdev_priv(netdev); 2511 2512 stats->rx_packets = lan743x_csr_read(adapter, STAT_RX_TOTAL_FRAMES); 2513 stats->tx_packets = lan743x_csr_read(adapter, STAT_TX_TOTAL_FRAMES); 2514 stats->rx_bytes = lan743x_csr_read(adapter, 2515 STAT_RX_UNICAST_BYTE_COUNT) + 2516 lan743x_csr_read(adapter, 2517 STAT_RX_BROADCAST_BYTE_COUNT) + 2518 lan743x_csr_read(adapter, 2519 STAT_RX_MULTICAST_BYTE_COUNT); 2520 stats->tx_bytes = lan743x_csr_read(adapter, 2521 STAT_TX_UNICAST_BYTE_COUNT) + 2522 lan743x_csr_read(adapter, 2523 STAT_TX_BROADCAST_BYTE_COUNT) + 2524 lan743x_csr_read(adapter, 2525 STAT_TX_MULTICAST_BYTE_COUNT); 2526 stats->rx_errors = lan743x_csr_read(adapter, STAT_RX_FCS_ERRORS) + 2527 lan743x_csr_read(adapter, 2528 STAT_RX_ALIGNMENT_ERRORS) + 2529 lan743x_csr_read(adapter, STAT_RX_JABBER_ERRORS) + 2530 lan743x_csr_read(adapter, 2531 STAT_RX_UNDERSIZE_FRAME_ERRORS) + 2532 lan743x_csr_read(adapter, 2533 STAT_RX_OVERSIZE_FRAME_ERRORS); 2534 stats->tx_errors = lan743x_csr_read(adapter, STAT_TX_FCS_ERRORS) + 2535 lan743x_csr_read(adapter, 2536 STAT_TX_EXCESS_DEFERRAL_ERRORS) + 2537 lan743x_csr_read(adapter, STAT_TX_CARRIER_ERRORS); 2538 stats->rx_dropped = lan743x_csr_read(adapter, 2539 STAT_RX_DROPPED_FRAMES); 2540 stats->tx_dropped = lan743x_csr_read(adapter, 2541 STAT_TX_EXCESSIVE_COLLISION); 2542 stats->multicast = lan743x_csr_read(adapter, 2543 STAT_RX_MULTICAST_FRAMES) + 2544 lan743x_csr_read(adapter, 2545 STAT_TX_MULTICAST_FRAMES); 2546 stats->collisions = lan743x_csr_read(adapter, 2547 STAT_TX_SINGLE_COLLISIONS) + 2548 lan743x_csr_read(adapter, 2549 STAT_TX_MULTIPLE_COLLISIONS) + 2550 lan743x_csr_read(adapter, 2551 STAT_TX_LATE_COLLISIONS); 2552 } 2553 2554 static int lan743x_netdev_set_mac_address(struct net_device *netdev, 2555 void *addr) 2556 { 2557 struct lan743x_adapter *adapter = netdev_priv(netdev); 2558 struct sockaddr *sock_addr = addr; 2559 int ret; 2560 2561 ret = eth_prepare_mac_addr_change(netdev, sock_addr); 2562 if (ret) 2563 return ret; 2564 ether_addr_copy(netdev->dev_addr, sock_addr->sa_data); 2565 lan743x_mac_set_address(adapter, sock_addr->sa_data); 2566 lan743x_rfe_update_mac_address(adapter); 2567 return 0; 2568 } 2569 2570 static const struct net_device_ops lan743x_netdev_ops = { 2571 .ndo_open = lan743x_netdev_open, 2572 .ndo_stop = lan743x_netdev_close, 2573 .ndo_start_xmit = lan743x_netdev_xmit_frame, 2574 .ndo_do_ioctl = lan743x_netdev_ioctl, 2575 .ndo_set_rx_mode = lan743x_netdev_set_multicast, 2576 .ndo_change_mtu = lan743x_netdev_change_mtu, 2577 .ndo_get_stats64 = lan743x_netdev_get_stats64, 2578 .ndo_set_mac_address = lan743x_netdev_set_mac_address, 2579 }; 2580 2581 static void lan743x_hardware_cleanup(struct lan743x_adapter *adapter) 2582 { 2583 lan743x_csr_write(adapter, INT_EN_CLR, 0xFFFFFFFF); 2584 } 2585 2586 static void lan743x_mdiobus_cleanup(struct lan743x_adapter *adapter) 2587 { 2588 mdiobus_unregister(adapter->mdiobus); 2589 } 2590 2591 static void lan743x_full_cleanup(struct lan743x_adapter *adapter) 2592 { 2593 unregister_netdev(adapter->netdev); 2594 2595 lan743x_mdiobus_cleanup(adapter); 2596 lan743x_hardware_cleanup(adapter); 2597 lan743x_pci_cleanup(adapter); 2598 } 2599 2600 static int lan743x_hardware_init(struct lan743x_adapter *adapter, 2601 struct pci_dev 
*pdev) 2602 { 2603 struct lan743x_tx *tx; 2604 int index; 2605 int ret; 2606 2607 adapter->intr.irq = adapter->pdev->irq; 2608 lan743x_csr_write(adapter, INT_EN_CLR, 0xFFFFFFFF); 2609 mutex_init(&adapter->dp_lock); 2610 ret = lan743x_mac_init(adapter); 2611 if (ret) 2612 return ret; 2613 2614 ret = lan743x_phy_init(adapter); 2615 if (ret) 2616 return ret; 2617 2618 lan743x_rfe_update_mac_address(adapter); 2619 2620 ret = lan743x_dmac_init(adapter); 2621 if (ret) 2622 return ret; 2623 2624 for (index = 0; index < LAN743X_USED_RX_CHANNELS; index++) { 2625 adapter->rx[index].adapter = adapter; 2626 adapter->rx[index].channel_number = index; 2627 } 2628 2629 tx = &adapter->tx[0]; 2630 tx->adapter = adapter; 2631 tx->channel_number = 0; 2632 spin_lock_init(&tx->ring_lock); 2633 return 0; 2634 } 2635 2636 static int lan743x_mdiobus_init(struct lan743x_adapter *adapter) 2637 { 2638 int ret; 2639 2640 adapter->mdiobus = devm_mdiobus_alloc(&adapter->pdev->dev); 2641 if (!(adapter->mdiobus)) { 2642 ret = -ENOMEM; 2643 goto return_error; 2644 } 2645 2646 adapter->mdiobus->priv = (void *)adapter; 2647 adapter->mdiobus->read = lan743x_mdiobus_read; 2648 adapter->mdiobus->write = lan743x_mdiobus_write; 2649 adapter->mdiobus->name = "lan743x-mdiobus"; 2650 snprintf(adapter->mdiobus->id, MII_BUS_ID_SIZE, 2651 "pci-%s", pci_name(adapter->pdev)); 2652 2653 /* set to internal PHY id */ 2654 adapter->mdiobus->phy_mask = ~(u32)BIT(1); 2655 2656 /* register mdiobus */ 2657 ret = mdiobus_register(adapter->mdiobus); 2658 if (ret < 0) 2659 goto return_error; 2660 return 0; 2661 2662 return_error: 2663 return ret; 2664 } 2665 2666 /* lan743x_pcidev_probe - Device Initialization Routine 2667 * @pdev: PCI device information struct 2668 * @id: entry in lan743x_pci_tbl 2669 * 2670 * Returns 0 on success, negative on failure 2671 * 2672 * initializes an adapter identified by a pci_dev structure. 2673 * The OS initialization, configuring of the adapter private structure, 2674 * and a hardware reset occur. 
2675 **/ 2676 static int lan743x_pcidev_probe(struct pci_dev *pdev, 2677 const struct pci_device_id *id) 2678 { 2679 struct lan743x_adapter *adapter = NULL; 2680 struct net_device *netdev = NULL; 2681 int ret = -ENODEV; 2682 2683 netdev = devm_alloc_etherdev(&pdev->dev, 2684 sizeof(struct lan743x_adapter)); 2685 if (!netdev) 2686 goto return_error; 2687 2688 SET_NETDEV_DEV(netdev, &pdev->dev); 2689 pci_set_drvdata(pdev, netdev); 2690 adapter = netdev_priv(netdev); 2691 adapter->netdev = netdev; 2692 adapter->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE | 2693 NETIF_MSG_LINK | NETIF_MSG_IFUP | 2694 NETIF_MSG_IFDOWN | NETIF_MSG_TX_QUEUED; 2695 netdev->max_mtu = LAN743X_MAX_FRAME_SIZE; 2696 2697 ret = lan743x_pci_init(adapter, pdev); 2698 if (ret) 2699 goto return_error; 2700 2701 ret = lan743x_csr_init(adapter); 2702 if (ret) 2703 goto cleanup_pci; 2704 2705 ret = lan743x_hardware_init(adapter, pdev); 2706 if (ret) 2707 goto cleanup_pci; 2708 2709 ret = lan743x_mdiobus_init(adapter); 2710 if (ret) 2711 goto cleanup_hardware; 2712 2713 adapter->netdev->netdev_ops = &lan743x_netdev_ops; 2714 adapter->netdev->ethtool_ops = &lan743x_ethtool_ops; 2715 adapter->netdev->features = NETIF_F_SG | NETIF_F_TSO | NETIF_F_HW_CSUM; 2716 adapter->netdev->hw_features = adapter->netdev->features; 2717 2718 /* carrier off reporting is important to ethtool even BEFORE open */ 2719 netif_carrier_off(netdev); 2720 2721 ret = register_netdev(adapter->netdev); 2722 if (ret < 0) 2723 goto cleanup_mdiobus; 2724 return 0; 2725 2726 cleanup_mdiobus: 2727 lan743x_mdiobus_cleanup(adapter); 2728 2729 cleanup_hardware: 2730 lan743x_hardware_cleanup(adapter); 2731 2732 cleanup_pci: 2733 lan743x_pci_cleanup(adapter); 2734 2735 return_error: 2736 pr_warn("Initialization failed\n"); 2737 return ret; 2738 } 2739 2740 /** 2741 * lan743x_pcidev_remove - Device Removal Routine 2742 * @pdev: PCI device information struct 2743 * 2744 * this is called by the PCI subsystem to alert the driver 2745 * that it should release a PCI device. This could be caused by a 2746 * Hot-Plug event, or because the driver is going to be removed from 2747 * memory. 2748 **/ 2749 static void lan743x_pcidev_remove(struct pci_dev *pdev) 2750 { 2751 struct net_device *netdev = pci_get_drvdata(pdev); 2752 struct lan743x_adapter *adapter = netdev_priv(netdev); 2753 2754 lan743x_full_cleanup(adapter); 2755 } 2756 2757 static void lan743x_pcidev_shutdown(struct pci_dev *pdev) 2758 { 2759 struct net_device *netdev = pci_get_drvdata(pdev); 2760 struct lan743x_adapter *adapter = netdev_priv(netdev); 2761 2762 rtnl_lock(); 2763 netif_device_detach(netdev); 2764 2765 /* close netdev when netdev is at running state. 
2766 * For instance, it is true when system goes to sleep by pm-suspend 2767 * However, it is false when system goes to sleep by suspend GUI menu 2768 */ 2769 if (netif_running(netdev)) 2770 lan743x_netdev_close(netdev); 2771 rtnl_unlock(); 2772 2773 #ifdef CONFIG_PM 2774 pci_save_state(pdev); 2775 #endif 2776 2777 /* clean up lan743x portion */ 2778 lan743x_hardware_cleanup(adapter); 2779 } 2780 2781 #ifdef CONFIG_PM 2782 static u16 lan743x_pm_wakeframe_crc16(const u8 *buf, int len) 2783 { 2784 return bitrev16(crc16(0xFFFF, buf, len)); 2785 } 2786 2787 static void lan743x_pm_set_wol(struct lan743x_adapter *adapter) 2788 { 2789 const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E }; 2790 const u8 ipv6_multicast[3] = { 0x33, 0x33 }; 2791 const u8 arp_type[2] = { 0x08, 0x06 }; 2792 int mask_index; 2793 u32 pmtctl; 2794 u32 wucsr; 2795 u32 macrx; 2796 u16 crc; 2797 2798 for (mask_index = 0; mask_index < MAC_NUM_OF_WUF_CFG; mask_index++) 2799 lan743x_csr_write(adapter, MAC_WUF_CFG(mask_index), 0); 2800 2801 /* clear wake settings */ 2802 pmtctl = lan743x_csr_read(adapter, PMT_CTL); 2803 pmtctl |= PMT_CTL_WUPS_MASK_; 2804 pmtctl &= ~(PMT_CTL_GPIO_WAKEUP_EN_ | PMT_CTL_EEE_WAKEUP_EN_ | 2805 PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_ | 2806 PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_ | PMT_CTL_ETH_PHY_WAKE_EN_); 2807 2808 macrx = lan743x_csr_read(adapter, MAC_RX); 2809 2810 wucsr = 0; 2811 mask_index = 0; 2812 2813 pmtctl |= PMT_CTL_ETH_PHY_D3_COLD_OVR_ | PMT_CTL_ETH_PHY_D3_OVR_; 2814 2815 if (adapter->wolopts & WAKE_PHY) { 2816 pmtctl |= PMT_CTL_ETH_PHY_EDPD_PLL_CTL_; 2817 pmtctl |= PMT_CTL_ETH_PHY_WAKE_EN_; 2818 } 2819 if (adapter->wolopts & WAKE_MAGIC) { 2820 wucsr |= MAC_WUCSR_MPEN_; 2821 macrx |= MAC_RX_RXEN_; 2822 pmtctl |= PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_; 2823 } 2824 if (adapter->wolopts & WAKE_UCAST) { 2825 wucsr |= MAC_WUCSR_RFE_WAKE_EN_ | MAC_WUCSR_PFDA_EN_; 2826 macrx |= MAC_RX_RXEN_; 2827 pmtctl |= PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_; 2828 pmtctl |= PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_; 2829 } 2830 if (adapter->wolopts & WAKE_BCAST) { 2831 wucsr |= MAC_WUCSR_RFE_WAKE_EN_ | MAC_WUCSR_BCST_EN_; 2832 macrx |= MAC_RX_RXEN_; 2833 pmtctl |= PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_; 2834 pmtctl |= PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_; 2835 } 2836 if (adapter->wolopts & WAKE_MCAST) { 2837 /* IPv4 multicast */ 2838 crc = lan743x_pm_wakeframe_crc16(ipv4_multicast, 3); 2839 lan743x_csr_write(adapter, MAC_WUF_CFG(mask_index), 2840 MAC_WUF_CFG_EN_ | MAC_WUF_CFG_TYPE_MCAST_ | 2841 (0 << MAC_WUF_CFG_OFFSET_SHIFT_) | 2842 (crc & MAC_WUF_CFG_CRC16_MASK_)); 2843 lan743x_csr_write(adapter, MAC_WUF_MASK0(mask_index), 7); 2844 lan743x_csr_write(adapter, MAC_WUF_MASK1(mask_index), 0); 2845 lan743x_csr_write(adapter, MAC_WUF_MASK2(mask_index), 0); 2846 lan743x_csr_write(adapter, MAC_WUF_MASK3(mask_index), 0); 2847 mask_index++; 2848 2849 /* IPv6 multicast */ 2850 crc = lan743x_pm_wakeframe_crc16(ipv6_multicast, 2); 2851 lan743x_csr_write(adapter, MAC_WUF_CFG(mask_index), 2852 MAC_WUF_CFG_EN_ | MAC_WUF_CFG_TYPE_MCAST_ | 2853 (0 << MAC_WUF_CFG_OFFSET_SHIFT_) | 2854 (crc & MAC_WUF_CFG_CRC16_MASK_)); 2855 lan743x_csr_write(adapter, MAC_WUF_MASK0(mask_index), 3); 2856 lan743x_csr_write(adapter, MAC_WUF_MASK1(mask_index), 0); 2857 lan743x_csr_write(adapter, MAC_WUF_MASK2(mask_index), 0); 2858 lan743x_csr_write(adapter, MAC_WUF_MASK3(mask_index), 0); 2859 mask_index++; 2860 2861 wucsr |= MAC_WUCSR_RFE_WAKE_EN_ | MAC_WUCSR_WAKE_EN_; 2862 macrx |= MAC_RX_RXEN_; 2863 pmtctl |= PMT_CTL_WOL_EN_ | 
PMT_CTL_MAC_D3_RX_CLK_OVR_; 2864 pmtctl |= PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_; 2865 } 2866 if (adapter->wolopts & WAKE_ARP) { 2867 /* set MAC_WUF_CFG & WUF_MASK 2868 * for packettype (offset 12,13) = ARP (0x0806) 2869 */ 2870 crc = lan743x_pm_wakeframe_crc16(arp_type, 2); 2871 lan743x_csr_write(adapter, MAC_WUF_CFG(mask_index), 2872 MAC_WUF_CFG_EN_ | MAC_WUF_CFG_TYPE_ALL_ | 2873 (0 << MAC_WUF_CFG_OFFSET_SHIFT_) | 2874 (crc & MAC_WUF_CFG_CRC16_MASK_)); 2875 lan743x_csr_write(adapter, MAC_WUF_MASK0(mask_index), 0x3000); 2876 lan743x_csr_write(adapter, MAC_WUF_MASK1(mask_index), 0); 2877 lan743x_csr_write(adapter, MAC_WUF_MASK2(mask_index), 0); 2878 lan743x_csr_write(adapter, MAC_WUF_MASK3(mask_index), 0); 2879 mask_index++; 2880 2881 wucsr |= MAC_WUCSR_RFE_WAKE_EN_ | MAC_WUCSR_WAKE_EN_; 2882 macrx |= MAC_RX_RXEN_; 2883 pmtctl |= PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_; 2884 pmtctl |= PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_; 2885 } 2886 2887 lan743x_csr_write(adapter, MAC_WUCSR, wucsr); 2888 lan743x_csr_write(adapter, PMT_CTL, pmtctl); 2889 lan743x_csr_write(adapter, MAC_RX, macrx); 2890 } 2891 2892 static int lan743x_pm_suspend(struct device *dev) 2893 { 2894 struct pci_dev *pdev = to_pci_dev(dev); 2895 struct net_device *netdev = pci_get_drvdata(pdev); 2896 struct lan743x_adapter *adapter = netdev_priv(netdev); 2897 int ret; 2898 2899 lan743x_pcidev_shutdown(pdev); 2900 2901 /* clear all wakes */ 2902 lan743x_csr_write(adapter, MAC_WUCSR, 0); 2903 lan743x_csr_write(adapter, MAC_WUCSR2, 0); 2904 lan743x_csr_write(adapter, MAC_WK_SRC, 0xFFFFFFFF); 2905 2906 if (adapter->wolopts) 2907 lan743x_pm_set_wol(adapter); 2908 2909 /* Host sets PME_En, put D3hot */ 2910 ret = pci_prepare_to_sleep(pdev); 2911 2912 return 0; 2913 } 2914 2915 static int lan743x_pm_resume(struct device *dev) 2916 { 2917 struct pci_dev *pdev = to_pci_dev(dev); 2918 struct net_device *netdev = pci_get_drvdata(pdev); 2919 struct lan743x_adapter *adapter = netdev_priv(netdev); 2920 int ret; 2921 2922 pci_set_power_state(pdev, PCI_D0); 2923 pci_restore_state(pdev); 2924 pci_save_state(pdev); 2925 2926 ret = lan743x_hardware_init(adapter, pdev); 2927 if (ret) { 2928 netif_err(adapter, probe, adapter->netdev, 2929 "lan743x_hardware_init returned %d\n", ret); 2930 } 2931 2932 /* open netdev when netdev is at running state while resume. 2933 * For instance, it is true when system wakesup after pm-suspend 2934 * However, it is false when system wakes up after suspend GUI menu 2935 */ 2936 if (netif_running(netdev)) 2937 lan743x_netdev_open(netdev); 2938 2939 netif_device_attach(netdev); 2940 2941 return 0; 2942 } 2943 2944 const struct dev_pm_ops lan743x_pm_ops = { 2945 SET_SYSTEM_SLEEP_PM_OPS(lan743x_pm_suspend, lan743x_pm_resume) 2946 }; 2947 #endif /*CONFIG_PM */ 2948 2949 static const struct pci_device_id lan743x_pcidev_tbl[] = { 2950 { PCI_DEVICE(PCI_VENDOR_ID_SMSC, PCI_DEVICE_ID_SMSC_LAN7430) }, 2951 { 0, } 2952 }; 2953 2954 static struct pci_driver lan743x_pcidev_driver = { 2955 .name = DRIVER_NAME, 2956 .id_table = lan743x_pcidev_tbl, 2957 .probe = lan743x_pcidev_probe, 2958 .remove = lan743x_pcidev_remove, 2959 #ifdef CONFIG_PM 2960 .driver.pm = &lan743x_pm_ops, 2961 #endif 2962 .shutdown = lan743x_pcidev_shutdown, 2963 }; 2964 2965 module_pci_driver(lan743x_pcidev_driver); 2966 2967 MODULE_AUTHOR(DRIVER_AUTHOR); 2968 MODULE_DESCRIPTION(DRIVER_DESC); 2969 MODULE_LICENSE("GPL"); 2970
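/* Suggested addition (not present in the file as shown above): the PCI ID
 * table lan743x_pcidev_tbl is defined but never exported with
 * MODULE_DEVICE_TABLE(), so userspace (udev/modprobe) cannot autoload this
 * module when a matching LAN7430 device is probed.  A minimal fix is the
 * single line below; MODULE_DEVICE_TABLE() only requires that the table be
 * defined earlier in this translation unit, so end-of-file placement is
 * assumed to be acceptable here.
 */
MODULE_DEVICE_TABLE(pci, lan743x_pcidev_tbl);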