1 /* 2 * FUJITSU Extended Socket Network Device driver 3 * Copyright (c) 2015 FUJITSU LIMITED 4 * 5 * This program is free software; you can redistribute it and/or modify it 6 * under the terms and conditions of the GNU General Public License, 7 * version 2, as published by the Free Software Foundation. 8 * 9 * This program is distributed in the hope it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 * more details. 13 * 14 * You should have received a copy of the GNU General Public License along with 15 * this program; if not, see <http://www.gnu.org/licenses/>. 16 * 17 * The full GNU General Public License is included in this distribution in 18 * the file called "COPYING". 19 * 20 */ 21 22 #include <linux/module.h> 23 #include <linux/types.h> 24 #include <linux/nls.h> 25 #include <linux/platform_device.h> 26 #include <linux/netdevice.h> 27 #include <linux/interrupt.h> 28 29 #include "fjes.h" 30 #include "fjes_trace.h" 31 32 #define MAJ 1 33 #define MIN 2 34 #define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) 35 #define DRV_NAME "fjes" 36 char fjes_driver_name[] = DRV_NAME; 37 char fjes_driver_version[] = DRV_VERSION; 38 static const char fjes_driver_string[] = 39 "FUJITSU Extended Socket Network Device Driver"; 40 static const char fjes_copyright[] = 41 "Copyright (c) 2015 FUJITSU LIMITED"; 42 43 MODULE_AUTHOR("Taku Izumi <izumi.taku@jp.fujitsu.com>"); 44 MODULE_DESCRIPTION("FUJITSU Extended Socket Network Device Driver"); 45 MODULE_LICENSE("GPL"); 46 MODULE_VERSION(DRV_VERSION); 47 48 #define ACPI_MOTHERBOARD_RESOURCE_HID "PNP0C02" 49 50 static int fjes_request_irq(struct fjes_adapter *); 51 static void fjes_free_irq(struct fjes_adapter *); 52 53 static int fjes_open(struct net_device *); 54 static int fjes_close(struct net_device *); 55 static int fjes_setup_resources(struct fjes_adapter *); 56 static void fjes_free_resources(struct fjes_adapter *); 57 static netdev_tx_t fjes_xmit_frame(struct sk_buff *, struct net_device *); 58 static void fjes_raise_intr_rxdata_task(struct work_struct *); 59 static void fjes_tx_stall_task(struct work_struct *); 60 static void fjes_force_close_task(struct work_struct *); 61 static irqreturn_t fjes_intr(int, void*); 62 static void fjes_get_stats64(struct net_device *, struct rtnl_link_stats64 *); 63 static int fjes_change_mtu(struct net_device *, int); 64 static int fjes_vlan_rx_add_vid(struct net_device *, __be16 proto, u16); 65 static int fjes_vlan_rx_kill_vid(struct net_device *, __be16 proto, u16); 66 static void fjes_tx_retry(struct net_device *); 67 68 static int fjes_acpi_add(struct acpi_device *); 69 static int fjes_acpi_remove(struct acpi_device *); 70 static acpi_status fjes_get_acpi_resource(struct acpi_resource *, void*); 71 72 static int fjes_probe(struct platform_device *); 73 static int fjes_remove(struct platform_device *); 74 75 static int fjes_sw_init(struct fjes_adapter *); 76 static void fjes_netdev_setup(struct net_device *); 77 static void fjes_irq_watch_task(struct work_struct *); 78 static void fjes_watch_unshare_task(struct work_struct *); 79 static void fjes_rx_irq(struct fjes_adapter *, int); 80 static int fjes_poll(struct napi_struct *, int); 81 82 static const struct acpi_device_id fjes_acpi_ids[] = { 83 {ACPI_MOTHERBOARD_RESOURCE_HID, 0}, 84 {"", 0}, 85 }; 86 MODULE_DEVICE_TABLE(acpi, fjes_acpi_ids); 87 88 static struct acpi_driver fjes_acpi_driver = { 89 .name = DRV_NAME, 90 .class = DRV_NAME, 91 .owner = THIS_MODULE, 92 .ids = fjes_acpi_ids, 93 .ops = { 94 .add = fjes_acpi_add, 95 .remove = fjes_acpi_remove, 96 }, 97 }; 98 99 static struct platform_driver fjes_driver = { 100 .driver = { 101 .name = DRV_NAME, 102 }, 103 .probe = fjes_probe, 104 .remove = fjes_remove, 105 }; 106 107 static struct resource fjes_resource[] = { 108 { 109 .flags = IORESOURCE_MEM, 110 .start = 0, 111 .end = 0, 112 }, 113 { 114 .flags = IORESOURCE_IRQ, 115 .start = 0, 116 .end = 0, 117 }, 118 }; 119 120 static bool is_extended_socket_device(struct acpi_device *device) 121 { 122 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL}; 123 char str_buf[sizeof(FJES_ACPI_SYMBOL) + 1]; 124 union acpi_object *str; 125 acpi_status status; 126 int result; 127 128 status = acpi_evaluate_object(device->handle, "_STR", NULL, &buffer); 129 if (ACPI_FAILURE(status)) 130 return false; 131 132 str = buffer.pointer; 133 result = utf16s_to_utf8s((wchar_t *)str->string.pointer, 134 str->string.length, UTF16_LITTLE_ENDIAN, 135 str_buf, sizeof(str_buf) - 1); 136 str_buf[result] = 0; 137 138 if (strncmp(FJES_ACPI_SYMBOL, str_buf, strlen(FJES_ACPI_SYMBOL)) != 0) { 139 kfree(buffer.pointer); 140 return false; 141 } 142 kfree(buffer.pointer); 143 144 return true; 145 } 146 147 static int acpi_check_extended_socket_status(struct acpi_device *device) 148 { 149 unsigned long long sta; 150 acpi_status status; 151 152 status = acpi_evaluate_integer(device->handle, "_STA", NULL, &sta); 153 if (ACPI_FAILURE(status)) 154 return -ENODEV; 155 156 if (!((sta & ACPI_STA_DEVICE_PRESENT) && 157 (sta & ACPI_STA_DEVICE_ENABLED) && 158 (sta & ACPI_STA_DEVICE_UI) && 159 (sta & ACPI_STA_DEVICE_FUNCTIONING))) 160 return -ENODEV; 161 162 return 0; 163 } 164 165 static int fjes_acpi_add(struct acpi_device *device) 166 { 167 struct platform_device *plat_dev; 168 acpi_status status; 169 170 if (!is_extended_socket_device(device)) 171 return -ENODEV; 172 173 if (acpi_check_extended_socket_status(device)) 174 return -ENODEV; 175 176 status = acpi_walk_resources(device->handle, METHOD_NAME__CRS, 177 fjes_get_acpi_resource, fjes_resource); 178 if (ACPI_FAILURE(status)) 179 return -ENODEV; 180 181 /* create platform_device */ 182 plat_dev = platform_device_register_simple(DRV_NAME, 0, fjes_resource, 183 ARRAY_SIZE(fjes_resource)); 184 device->driver_data = plat_dev; 185 186 return 0; 187 } 188 189 static int fjes_acpi_remove(struct acpi_device *device) 190 { 191 struct platform_device *plat_dev; 192 193 plat_dev = (struct platform_device *)acpi_driver_data(device); 194 platform_device_unregister(plat_dev); 195 196 return 0; 197 } 198 199 static acpi_status 200 fjes_get_acpi_resource(struct acpi_resource *acpi_res, void *data) 201 { 202 struct acpi_resource_address32 *addr; 203 struct acpi_resource_irq *irq; 204 struct resource *res = data; 205 206 switch (acpi_res->type) { 207 case ACPI_RESOURCE_TYPE_ADDRESS32: 208 addr = &acpi_res->data.address32; 209 res[0].start = addr->address.minimum; 210 res[0].end = addr->address.minimum + 211 addr->address.address_length - 1; 212 break; 213 214 case ACPI_RESOURCE_TYPE_IRQ: 215 irq = &acpi_res->data.irq; 216 if (irq->interrupt_count != 1) 217 return AE_ERROR; 218 res[1].start = irq->interrupts[0]; 219 res[1].end = irq->interrupts[0]; 220 break; 221 222 default: 223 break; 224 } 225 226 return AE_OK; 227 } 228 229 static int fjes_request_irq(struct fjes_adapter *adapter) 230 { 231 struct net_device *netdev = adapter->netdev; 232 int result = -1; 233 234 adapter->interrupt_watch_enable = true; 235 if (!delayed_work_pending(&adapter->interrupt_watch_task)) { 236 queue_delayed_work(adapter->control_wq, 237 &adapter->interrupt_watch_task, 238 FJES_IRQ_WATCH_DELAY); 239 } 240 241 if (!adapter->irq_registered) { 242 result = request_irq(adapter->hw.hw_res.irq, fjes_intr, 243 IRQF_SHARED, netdev->name, adapter); 244 if (result) 245 adapter->irq_registered = false; 246 else 247 adapter->irq_registered = true; 248 } 249 250 return result; 251 } 252 253 static void fjes_free_irq(struct fjes_adapter *adapter) 254 { 255 struct fjes_hw *hw = &adapter->hw; 256 257 adapter->interrupt_watch_enable = false; 258 cancel_delayed_work_sync(&adapter->interrupt_watch_task); 259 260 fjes_hw_set_irqmask(hw, REG_ICTL_MASK_ALL, true); 261 262 if (adapter->irq_registered) { 263 free_irq(adapter->hw.hw_res.irq, adapter); 264 adapter->irq_registered = false; 265 } 266 } 267 268 static const struct net_device_ops fjes_netdev_ops = { 269 .ndo_open = fjes_open, 270 .ndo_stop = fjes_close, 271 .ndo_start_xmit = fjes_xmit_frame, 272 .ndo_get_stats64 = fjes_get_stats64, 273 .ndo_change_mtu = fjes_change_mtu, 274 .ndo_tx_timeout = fjes_tx_retry, 275 .ndo_vlan_rx_add_vid = fjes_vlan_rx_add_vid, 276 .ndo_vlan_rx_kill_vid = fjes_vlan_rx_kill_vid, 277 }; 278 279 /* fjes_open - Called when a network interface is made active */ 280 static int fjes_open(struct net_device *netdev) 281 { 282 struct fjes_adapter *adapter = netdev_priv(netdev); 283 struct fjes_hw *hw = &adapter->hw; 284 int result; 285 286 if (adapter->open_guard) 287 return -ENXIO; 288 289 result = fjes_setup_resources(adapter); 290 if (result) 291 goto err_setup_res; 292 293 hw->txrx_stop_req_bit = 0; 294 hw->epstop_req_bit = 0; 295 296 napi_enable(&adapter->napi); 297 298 fjes_hw_capture_interrupt_status(hw); 299 300 result = fjes_request_irq(adapter); 301 if (result) 302 goto err_req_irq; 303 304 fjes_hw_set_irqmask(hw, REG_ICTL_MASK_ALL, false); 305 306 netif_tx_start_all_queues(netdev); 307 netif_carrier_on(netdev); 308 309 return 0; 310 311 err_req_irq: 312 fjes_free_irq(adapter); 313 napi_disable(&adapter->napi); 314 315 err_setup_res: 316 fjes_free_resources(adapter); 317 return result; 318 } 319 320 /* fjes_close - Disables a network interface */ 321 static int fjes_close(struct net_device *netdev) 322 { 323 struct fjes_adapter *adapter = netdev_priv(netdev); 324 struct fjes_hw *hw = &adapter->hw; 325 unsigned long flags; 326 int epidx; 327 328 netif_tx_stop_all_queues(netdev); 329 netif_carrier_off(netdev); 330 331 fjes_hw_raise_epstop(hw); 332 333 napi_disable(&adapter->napi); 334 335 spin_lock_irqsave(&hw->rx_status_lock, flags); 336 for (epidx = 0; epidx < hw->max_epid; epidx++) { 337 if (epidx == hw->my_epid) 338 continue; 339 340 if (fjes_hw_get_partner_ep_status(hw, epidx) == 341 EP_PARTNER_SHARED) 342 adapter->hw.ep_shm_info[epidx] 343 .tx.info->v1i.rx_status &= 344 ~FJES_RX_POLL_WORK; 345 } 346 spin_unlock_irqrestore(&hw->rx_status_lock, flags); 347 348 fjes_free_irq(adapter); 349 350 cancel_delayed_work_sync(&adapter->interrupt_watch_task); 351 cancel_work_sync(&adapter->unshare_watch_task); 352 adapter->unshare_watch_bitmask = 0; 353 cancel_work_sync(&adapter->raise_intr_rxdata_task); 354 cancel_work_sync(&adapter->tx_stall_task); 355 356 cancel_work_sync(&hw->update_zone_task); 357 cancel_work_sync(&hw->epstop_task); 358 359 fjes_hw_wait_epstop(hw); 360 361 fjes_free_resources(adapter); 362 363 return 0; 364 } 365 366 static int fjes_setup_resources(struct fjes_adapter *adapter) 367 { 368 struct net_device *netdev = adapter->netdev; 369 struct ep_share_mem_info *buf_pair; 370 struct fjes_hw *hw = &adapter->hw; 371 unsigned long flags; 372 int result; 373 int epidx; 374 375 mutex_lock(&hw->hw_info.lock); 376 result = fjes_hw_request_info(hw); 377 switch (result) { 378 case 0: 379 for (epidx = 0; epidx < hw->max_epid; epidx++) { 380 hw->ep_shm_info[epidx].es_status = 381 hw->hw_info.res_buf->info.info[epidx].es_status; 382 hw->ep_shm_info[epidx].zone = 383 hw->hw_info.res_buf->info.info[epidx].zone; 384 } 385 break; 386 default: 387 case -ENOMSG: 388 case -EBUSY: 389 adapter->force_reset = true; 390 391 mutex_unlock(&hw->hw_info.lock); 392 return result; 393 } 394 mutex_unlock(&hw->hw_info.lock); 395 396 for (epidx = 0; epidx < (hw->max_epid); epidx++) { 397 if ((epidx != hw->my_epid) && 398 (hw->ep_shm_info[epidx].es_status == 399 FJES_ZONING_STATUS_ENABLE)) { 400 fjes_hw_raise_interrupt(hw, epidx, 401 REG_ICTL_MASK_INFO_UPDATE); 402 hw->ep_shm_info[epidx].ep_stats 403 .send_intr_zoneupdate += 1; 404 } 405 } 406 407 msleep(FJES_OPEN_ZONE_UPDATE_WAIT * hw->max_epid); 408 409 for (epidx = 0; epidx < (hw->max_epid); epidx++) { 410 if (epidx == hw->my_epid) 411 continue; 412 413 buf_pair = &hw->ep_shm_info[epidx]; 414 415 spin_lock_irqsave(&hw->rx_status_lock, flags); 416 fjes_hw_setup_epbuf(&buf_pair->tx, netdev->dev_addr, 417 netdev->mtu); 418 spin_unlock_irqrestore(&hw->rx_status_lock, flags); 419 420 if (fjes_hw_epid_is_same_zone(hw, epidx)) { 421 mutex_lock(&hw->hw_info.lock); 422 result = 423 fjes_hw_register_buff_addr(hw, epidx, buf_pair); 424 mutex_unlock(&hw->hw_info.lock); 425 426 switch (result) { 427 case 0: 428 break; 429 case -ENOMSG: 430 case -EBUSY: 431 default: 432 adapter->force_reset = true; 433 return result; 434 } 435 436 hw->ep_shm_info[epidx].ep_stats 437 .com_regist_buf_exec += 1; 438 } 439 } 440 441 return 0; 442 } 443 444 static void fjes_free_resources(struct fjes_adapter *adapter) 445 { 446 struct net_device *netdev = adapter->netdev; 447 struct fjes_device_command_param param; 448 struct ep_share_mem_info *buf_pair; 449 struct fjes_hw *hw = &adapter->hw; 450 bool reset_flag = false; 451 unsigned long flags; 452 int result; 453 int epidx; 454 455 for (epidx = 0; epidx < hw->max_epid; epidx++) { 456 if (epidx == hw->my_epid) 457 continue; 458 459 mutex_lock(&hw->hw_info.lock); 460 result = fjes_hw_unregister_buff_addr(hw, epidx); 461 mutex_unlock(&hw->hw_info.lock); 462 463 hw->ep_shm_info[epidx].ep_stats.com_unregist_buf_exec += 1; 464 465 if (result) 466 reset_flag = true; 467 468 buf_pair = &hw->ep_shm_info[epidx]; 469 470 spin_lock_irqsave(&hw->rx_status_lock, flags); 471 fjes_hw_setup_epbuf(&buf_pair->tx, 472 netdev->dev_addr, netdev->mtu); 473 spin_unlock_irqrestore(&hw->rx_status_lock, flags); 474 475 clear_bit(epidx, &hw->txrx_stop_req_bit); 476 } 477 478 if (reset_flag || adapter->force_reset) { 479 result = fjes_hw_reset(hw); 480 481 adapter->force_reset = false; 482 483 if (result) 484 adapter->open_guard = true; 485 486 hw->hw_info.buffer_share_bit = 0; 487 488 memset((void *)¶m, 0, sizeof(param)); 489 490 param.req_len = hw->hw_info.req_buf_size; 491 param.req_start = __pa(hw->hw_info.req_buf); 492 param.res_len = hw->hw_info.res_buf_size; 493 param.res_start = __pa(hw->hw_info.res_buf); 494 param.share_start = __pa(hw->hw_info.share->ep_status); 495 496 fjes_hw_init_command_registers(hw, ¶m); 497 } 498 } 499 500 static void fjes_tx_stall_task(struct work_struct *work) 501 { 502 struct fjes_adapter *adapter = container_of(work, 503 struct fjes_adapter, tx_stall_task); 504 struct net_device *netdev = adapter->netdev; 505 struct fjes_hw *hw = &adapter->hw; 506 int all_queue_available, sendable; 507 enum ep_partner_status pstatus; 508 int max_epid, my_epid, epid; 509 union ep_buffer_info *info; 510 int i; 511 512 if (((long)jiffies - 513 dev_trans_start(netdev)) > FJES_TX_TX_STALL_TIMEOUT) { 514 netif_wake_queue(netdev); 515 return; 516 } 517 518 my_epid = hw->my_epid; 519 max_epid = hw->max_epid; 520 521 for (i = 0; i < 5; i++) { 522 all_queue_available = 1; 523 524 for (epid = 0; epid < max_epid; epid++) { 525 if (my_epid == epid) 526 continue; 527 528 pstatus = fjes_hw_get_partner_ep_status(hw, epid); 529 sendable = (pstatus == EP_PARTNER_SHARED); 530 if (!sendable) 531 continue; 532 533 info = adapter->hw.ep_shm_info[epid].tx.info; 534 535 if (!(info->v1i.rx_status & FJES_RX_MTU_CHANGING_DONE)) 536 return; 537 538 if (EP_RING_FULL(info->v1i.head, info->v1i.tail, 539 info->v1i.count_max)) { 540 all_queue_available = 0; 541 break; 542 } 543 } 544 545 if (all_queue_available) { 546 netif_wake_queue(netdev); 547 return; 548 } 549 } 550 551 usleep_range(50, 100); 552 553 queue_work(adapter->txrx_wq, &adapter->tx_stall_task); 554 } 555 556 static void fjes_force_close_task(struct work_struct *work) 557 { 558 struct fjes_adapter *adapter = container_of(work, 559 struct fjes_adapter, force_close_task); 560 struct net_device *netdev = adapter->netdev; 561 562 rtnl_lock(); 563 dev_close(netdev); 564 rtnl_unlock(); 565 } 566 567 static void fjes_raise_intr_rxdata_task(struct work_struct *work) 568 { 569 struct fjes_adapter *adapter = container_of(work, 570 struct fjes_adapter, raise_intr_rxdata_task); 571 struct fjes_hw *hw = &adapter->hw; 572 enum ep_partner_status pstatus; 573 int max_epid, my_epid, epid; 574 575 my_epid = hw->my_epid; 576 max_epid = hw->max_epid; 577 578 for (epid = 0; epid < max_epid; epid++) 579 hw->ep_shm_info[epid].tx_status_work = 0; 580 581 for (epid = 0; epid < max_epid; epid++) { 582 if (epid == my_epid) 583 continue; 584 585 pstatus = fjes_hw_get_partner_ep_status(hw, epid); 586 if (pstatus == EP_PARTNER_SHARED) { 587 hw->ep_shm_info[epid].tx_status_work = 588 hw->ep_shm_info[epid].tx.info->v1i.tx_status; 589 590 if (hw->ep_shm_info[epid].tx_status_work == 591 FJES_TX_DELAY_SEND_PENDING) { 592 hw->ep_shm_info[epid].tx.info->v1i.tx_status = 593 FJES_TX_DELAY_SEND_NONE; 594 } 595 } 596 } 597 598 for (epid = 0; epid < max_epid; epid++) { 599 if (epid == my_epid) 600 continue; 601 602 pstatus = fjes_hw_get_partner_ep_status(hw, epid); 603 if ((hw->ep_shm_info[epid].tx_status_work == 604 FJES_TX_DELAY_SEND_PENDING) && 605 (pstatus == EP_PARTNER_SHARED) && 606 !(hw->ep_shm_info[epid].rx.info->v1i.rx_status & 607 FJES_RX_POLL_WORK)) { 608 fjes_hw_raise_interrupt(hw, epid, 609 REG_ICTL_MASK_RX_DATA); 610 hw->ep_shm_info[epid].ep_stats.send_intr_rx += 1; 611 } 612 } 613 614 usleep_range(500, 1000); 615 } 616 617 static int fjes_tx_send(struct fjes_adapter *adapter, int dest, 618 void *data, size_t len) 619 { 620 int retval; 621 622 retval = fjes_hw_epbuf_tx_pkt_send(&adapter->hw.ep_shm_info[dest].tx, 623 data, len); 624 if (retval) 625 return retval; 626 627 adapter->hw.ep_shm_info[dest].tx.info->v1i.tx_status = 628 FJES_TX_DELAY_SEND_PENDING; 629 if (!work_pending(&adapter->raise_intr_rxdata_task)) 630 queue_work(adapter->txrx_wq, 631 &adapter->raise_intr_rxdata_task); 632 633 retval = 0; 634 return retval; 635 } 636 637 static netdev_tx_t 638 fjes_xmit_frame(struct sk_buff *skb, struct net_device *netdev) 639 { 640 struct fjes_adapter *adapter = netdev_priv(netdev); 641 struct fjes_hw *hw = &adapter->hw; 642 643 int max_epid, my_epid, dest_epid; 644 enum ep_partner_status pstatus; 645 struct netdev_queue *cur_queue; 646 char shortpkt[VLAN_ETH_HLEN]; 647 bool is_multi, vlan; 648 struct ethhdr *eth; 649 u16 queue_no = 0; 650 u16 vlan_id = 0; 651 netdev_tx_t ret; 652 char *data; 653 int len; 654 655 ret = NETDEV_TX_OK; 656 is_multi = false; 657 cur_queue = netdev_get_tx_queue(netdev, queue_no); 658 659 eth = (struct ethhdr *)skb->data; 660 my_epid = hw->my_epid; 661 662 vlan = (vlan_get_tag(skb, &vlan_id) == 0) ? true : false; 663 664 data = skb->data; 665 len = skb->len; 666 667 if (is_multicast_ether_addr(eth->h_dest)) { 668 dest_epid = 0; 669 max_epid = hw->max_epid; 670 is_multi = true; 671 } else if (is_local_ether_addr(eth->h_dest)) { 672 dest_epid = eth->h_dest[ETH_ALEN - 1]; 673 max_epid = dest_epid + 1; 674 675 if ((eth->h_dest[0] == 0x02) && 676 (0x00 == (eth->h_dest[1] | eth->h_dest[2] | 677 eth->h_dest[3] | eth->h_dest[4])) && 678 (dest_epid < hw->max_epid)) { 679 ; 680 } else { 681 dest_epid = 0; 682 max_epid = 0; 683 ret = NETDEV_TX_OK; 684 685 adapter->stats64.tx_packets += 1; 686 hw->ep_shm_info[my_epid].net_stats.tx_packets += 1; 687 adapter->stats64.tx_bytes += len; 688 hw->ep_shm_info[my_epid].net_stats.tx_bytes += len; 689 } 690 } else { 691 dest_epid = 0; 692 max_epid = 0; 693 ret = NETDEV_TX_OK; 694 695 adapter->stats64.tx_packets += 1; 696 hw->ep_shm_info[my_epid].net_stats.tx_packets += 1; 697 adapter->stats64.tx_bytes += len; 698 hw->ep_shm_info[my_epid].net_stats.tx_bytes += len; 699 } 700 701 for (; dest_epid < max_epid; dest_epid++) { 702 if (my_epid == dest_epid) 703 continue; 704 705 pstatus = fjes_hw_get_partner_ep_status(hw, dest_epid); 706 if (pstatus != EP_PARTNER_SHARED) { 707 if (!is_multi) 708 hw->ep_shm_info[dest_epid].ep_stats 709 .tx_dropped_not_shared += 1; 710 ret = NETDEV_TX_OK; 711 } else if (!fjes_hw_check_epbuf_version( 712 &adapter->hw.ep_shm_info[dest_epid].rx, 0)) { 713 /* version is NOT 0 */ 714 adapter->stats64.tx_carrier_errors += 1; 715 hw->ep_shm_info[dest_epid].net_stats 716 .tx_carrier_errors += 1; 717 hw->ep_shm_info[dest_epid].ep_stats 718 .tx_dropped_ver_mismatch += 1; 719 720 ret = NETDEV_TX_OK; 721 } else if (!fjes_hw_check_mtu( 722 &adapter->hw.ep_shm_info[dest_epid].rx, 723 netdev->mtu)) { 724 adapter->stats64.tx_dropped += 1; 725 hw->ep_shm_info[dest_epid].net_stats.tx_dropped += 1; 726 adapter->stats64.tx_errors += 1; 727 hw->ep_shm_info[dest_epid].net_stats.tx_errors += 1; 728 hw->ep_shm_info[dest_epid].ep_stats 729 .tx_dropped_buf_size_mismatch += 1; 730 731 ret = NETDEV_TX_OK; 732 } else if (vlan && 733 !fjes_hw_check_vlan_id( 734 &adapter->hw.ep_shm_info[dest_epid].rx, 735 vlan_id)) { 736 hw->ep_shm_info[dest_epid].ep_stats 737 .tx_dropped_vlanid_mismatch += 1; 738 ret = NETDEV_TX_OK; 739 } else { 740 if (len < VLAN_ETH_HLEN) { 741 memset(shortpkt, 0, VLAN_ETH_HLEN); 742 memcpy(shortpkt, skb->data, skb->len); 743 len = VLAN_ETH_HLEN; 744 data = shortpkt; 745 } 746 747 if (adapter->tx_retry_count == 0) { 748 adapter->tx_start_jiffies = jiffies; 749 adapter->tx_retry_count = 1; 750 } else { 751 adapter->tx_retry_count++; 752 } 753 754 if (fjes_tx_send(adapter, dest_epid, data, len)) { 755 if (is_multi) { 756 ret = NETDEV_TX_OK; 757 } else if ( 758 ((long)jiffies - 759 (long)adapter->tx_start_jiffies) >= 760 FJES_TX_RETRY_TIMEOUT) { 761 adapter->stats64.tx_fifo_errors += 1; 762 hw->ep_shm_info[dest_epid].net_stats 763 .tx_fifo_errors += 1; 764 adapter->stats64.tx_errors += 1; 765 hw->ep_shm_info[dest_epid].net_stats 766 .tx_errors += 1; 767 768 ret = NETDEV_TX_OK; 769 } else { 770 netif_trans_update(netdev); 771 hw->ep_shm_info[dest_epid].ep_stats 772 .tx_buffer_full += 1; 773 netif_tx_stop_queue(cur_queue); 774 775 if (!work_pending(&adapter->tx_stall_task)) 776 queue_work(adapter->txrx_wq, 777 &adapter->tx_stall_task); 778 779 ret = NETDEV_TX_BUSY; 780 } 781 } else { 782 if (!is_multi) { 783 adapter->stats64.tx_packets += 1; 784 hw->ep_shm_info[dest_epid].net_stats 785 .tx_packets += 1; 786 adapter->stats64.tx_bytes += len; 787 hw->ep_shm_info[dest_epid].net_stats 788 .tx_bytes += len; 789 } 790 791 adapter->tx_retry_count = 0; 792 ret = NETDEV_TX_OK; 793 } 794 } 795 } 796 797 if (ret == NETDEV_TX_OK) { 798 dev_kfree_skb(skb); 799 if (is_multi) { 800 adapter->stats64.tx_packets += 1; 801 hw->ep_shm_info[my_epid].net_stats.tx_packets += 1; 802 adapter->stats64.tx_bytes += 1; 803 hw->ep_shm_info[my_epid].net_stats.tx_bytes += len; 804 } 805 } 806 807 return ret; 808 } 809 810 static void fjes_tx_retry(struct net_device *netdev) 811 { 812 struct netdev_queue *queue = netdev_get_tx_queue(netdev, 0); 813 814 netif_tx_wake_queue(queue); 815 } 816 817 static void 818 fjes_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) 819 { 820 struct fjes_adapter *adapter = netdev_priv(netdev); 821 822 memcpy(stats, &adapter->stats64, sizeof(struct rtnl_link_stats64)); 823 } 824 825 static int fjes_change_mtu(struct net_device *netdev, int new_mtu) 826 { 827 struct fjes_adapter *adapter = netdev_priv(netdev); 828 bool running = netif_running(netdev); 829 struct fjes_hw *hw = &adapter->hw; 830 unsigned long flags; 831 int ret = -EINVAL; 832 int idx, epidx; 833 834 for (idx = 0; fjes_support_mtu[idx] != 0; idx++) { 835 if (new_mtu <= fjes_support_mtu[idx]) { 836 new_mtu = fjes_support_mtu[idx]; 837 if (new_mtu == netdev->mtu) 838 return 0; 839 840 ret = 0; 841 break; 842 } 843 } 844 845 if (ret) 846 return ret; 847 848 if (running) { 849 spin_lock_irqsave(&hw->rx_status_lock, flags); 850 for (epidx = 0; epidx < hw->max_epid; epidx++) { 851 if (epidx == hw->my_epid) 852 continue; 853 hw->ep_shm_info[epidx].tx.info->v1i.rx_status &= 854 ~FJES_RX_MTU_CHANGING_DONE; 855 } 856 spin_unlock_irqrestore(&hw->rx_status_lock, flags); 857 858 netif_tx_stop_all_queues(netdev); 859 netif_carrier_off(netdev); 860 cancel_work_sync(&adapter->tx_stall_task); 861 napi_disable(&adapter->napi); 862 863 msleep(1000); 864 865 netif_tx_stop_all_queues(netdev); 866 } 867 868 netdev->mtu = new_mtu; 869 870 if (running) { 871 for (epidx = 0; epidx < hw->max_epid; epidx++) { 872 if (epidx == hw->my_epid) 873 continue; 874 875 spin_lock_irqsave(&hw->rx_status_lock, flags); 876 fjes_hw_setup_epbuf(&hw->ep_shm_info[epidx].tx, 877 netdev->dev_addr, 878 netdev->mtu); 879 880 hw->ep_shm_info[epidx].tx.info->v1i.rx_status |= 881 FJES_RX_MTU_CHANGING_DONE; 882 spin_unlock_irqrestore(&hw->rx_status_lock, flags); 883 } 884 885 netif_tx_wake_all_queues(netdev); 886 netif_carrier_on(netdev); 887 napi_enable(&adapter->napi); 888 napi_schedule(&adapter->napi); 889 } 890 891 return ret; 892 } 893 894 static int fjes_vlan_rx_add_vid(struct net_device *netdev, 895 __be16 proto, u16 vid) 896 { 897 struct fjes_adapter *adapter = netdev_priv(netdev); 898 bool ret = true; 899 int epid; 900 901 for (epid = 0; epid < adapter->hw.max_epid; epid++) { 902 if (epid == adapter->hw.my_epid) 903 continue; 904 905 if (!fjes_hw_check_vlan_id( 906 &adapter->hw.ep_shm_info[epid].tx, vid)) 907 ret = fjes_hw_set_vlan_id( 908 &adapter->hw.ep_shm_info[epid].tx, vid); 909 } 910 911 return ret ? 0 : -ENOSPC; 912 } 913 914 static int fjes_vlan_rx_kill_vid(struct net_device *netdev, 915 __be16 proto, u16 vid) 916 { 917 struct fjes_adapter *adapter = netdev_priv(netdev); 918 int epid; 919 920 for (epid = 0; epid < adapter->hw.max_epid; epid++) { 921 if (epid == adapter->hw.my_epid) 922 continue; 923 924 fjes_hw_del_vlan_id(&adapter->hw.ep_shm_info[epid].tx, vid); 925 } 926 927 return 0; 928 } 929 930 static void fjes_txrx_stop_req_irq(struct fjes_adapter *adapter, 931 int src_epid) 932 { 933 struct fjes_hw *hw = &adapter->hw; 934 enum ep_partner_status status; 935 unsigned long flags; 936 937 status = fjes_hw_get_partner_ep_status(hw, src_epid); 938 trace_fjes_txrx_stop_req_irq_pre(hw, src_epid, status); 939 switch (status) { 940 case EP_PARTNER_UNSHARE: 941 case EP_PARTNER_COMPLETE: 942 default: 943 break; 944 case EP_PARTNER_WAITING: 945 if (src_epid < hw->my_epid) { 946 spin_lock_irqsave(&hw->rx_status_lock, flags); 947 hw->ep_shm_info[src_epid].tx.info->v1i.rx_status |= 948 FJES_RX_STOP_REQ_DONE; 949 spin_unlock_irqrestore(&hw->rx_status_lock, flags); 950 951 clear_bit(src_epid, &hw->txrx_stop_req_bit); 952 set_bit(src_epid, &adapter->unshare_watch_bitmask); 953 954 if (!work_pending(&adapter->unshare_watch_task)) 955 queue_work(adapter->control_wq, 956 &adapter->unshare_watch_task); 957 } 958 break; 959 case EP_PARTNER_SHARED: 960 if (hw->ep_shm_info[src_epid].rx.info->v1i.rx_status & 961 FJES_RX_STOP_REQ_REQUEST) { 962 set_bit(src_epid, &hw->epstop_req_bit); 963 if (!work_pending(&hw->epstop_task)) 964 queue_work(adapter->control_wq, 965 &hw->epstop_task); 966 } 967 break; 968 } 969 trace_fjes_txrx_stop_req_irq_post(hw, src_epid); 970 } 971 972 static void fjes_stop_req_irq(struct fjes_adapter *adapter, int src_epid) 973 { 974 struct fjes_hw *hw = &adapter->hw; 975 enum ep_partner_status status; 976 unsigned long flags; 977 978 set_bit(src_epid, &hw->hw_info.buffer_unshare_reserve_bit); 979 980 status = fjes_hw_get_partner_ep_status(hw, src_epid); 981 trace_fjes_stop_req_irq_pre(hw, src_epid, status); 982 switch (status) { 983 case EP_PARTNER_WAITING: 984 spin_lock_irqsave(&hw->rx_status_lock, flags); 985 hw->ep_shm_info[src_epid].tx.info->v1i.rx_status |= 986 FJES_RX_STOP_REQ_DONE; 987 spin_unlock_irqrestore(&hw->rx_status_lock, flags); 988 clear_bit(src_epid, &hw->txrx_stop_req_bit); 989 /* fall through */ 990 case EP_PARTNER_UNSHARE: 991 case EP_PARTNER_COMPLETE: 992 default: 993 set_bit(src_epid, &adapter->unshare_watch_bitmask); 994 if (!work_pending(&adapter->unshare_watch_task)) 995 queue_work(adapter->control_wq, 996 &adapter->unshare_watch_task); 997 break; 998 case EP_PARTNER_SHARED: 999 set_bit(src_epid, &hw->epstop_req_bit); 1000 1001 if (!work_pending(&hw->epstop_task)) 1002 queue_work(adapter->control_wq, &hw->epstop_task); 1003 break; 1004 } 1005 trace_fjes_stop_req_irq_post(hw, src_epid); 1006 } 1007 1008 static void fjes_update_zone_irq(struct fjes_adapter *adapter, 1009 int src_epid) 1010 { 1011 struct fjes_hw *hw = &adapter->hw; 1012 1013 if (!work_pending(&hw->update_zone_task)) 1014 queue_work(adapter->control_wq, &hw->update_zone_task); 1015 } 1016 1017 static irqreturn_t fjes_intr(int irq, void *data) 1018 { 1019 struct fjes_adapter *adapter = data; 1020 struct fjes_hw *hw = &adapter->hw; 1021 irqreturn_t ret; 1022 u32 icr; 1023 1024 icr = fjes_hw_capture_interrupt_status(hw); 1025 1026 if (icr & REG_IS_MASK_IS_ASSERT) { 1027 if (icr & REG_ICTL_MASK_RX_DATA) { 1028 fjes_rx_irq(adapter, icr & REG_IS_MASK_EPID); 1029 hw->ep_shm_info[icr & REG_IS_MASK_EPID].ep_stats 1030 .recv_intr_rx += 1; 1031 } 1032 1033 if (icr & REG_ICTL_MASK_DEV_STOP_REQ) { 1034 fjes_stop_req_irq(adapter, icr & REG_IS_MASK_EPID); 1035 hw->ep_shm_info[icr & REG_IS_MASK_EPID].ep_stats 1036 .recv_intr_stop += 1; 1037 } 1038 1039 if (icr & REG_ICTL_MASK_TXRX_STOP_REQ) { 1040 fjes_txrx_stop_req_irq(adapter, icr & REG_IS_MASK_EPID); 1041 hw->ep_shm_info[icr & REG_IS_MASK_EPID].ep_stats 1042 .recv_intr_unshare += 1; 1043 } 1044 1045 if (icr & REG_ICTL_MASK_TXRX_STOP_DONE) 1046 fjes_hw_set_irqmask(hw, 1047 REG_ICTL_MASK_TXRX_STOP_DONE, true); 1048 1049 if (icr & REG_ICTL_MASK_INFO_UPDATE) { 1050 fjes_update_zone_irq(adapter, icr & REG_IS_MASK_EPID); 1051 hw->ep_shm_info[icr & REG_IS_MASK_EPID].ep_stats 1052 .recv_intr_zoneupdate += 1; 1053 } 1054 1055 ret = IRQ_HANDLED; 1056 } else { 1057 ret = IRQ_NONE; 1058 } 1059 1060 return ret; 1061 } 1062 1063 static int fjes_rxframe_search_exist(struct fjes_adapter *adapter, 1064 int start_epid) 1065 { 1066 struct fjes_hw *hw = &adapter->hw; 1067 enum ep_partner_status pstatus; 1068 int max_epid, cur_epid; 1069 int i; 1070 1071 max_epid = hw->max_epid; 1072 start_epid = (start_epid + 1 + max_epid) % max_epid; 1073 1074 for (i = 0; i < max_epid; i++) { 1075 cur_epid = (start_epid + i) % max_epid; 1076 if (cur_epid == hw->my_epid) 1077 continue; 1078 1079 pstatus = fjes_hw_get_partner_ep_status(hw, cur_epid); 1080 if (pstatus == EP_PARTNER_SHARED) { 1081 if (!fjes_hw_epbuf_rx_is_empty( 1082 &hw->ep_shm_info[cur_epid].rx)) 1083 return cur_epid; 1084 } 1085 } 1086 return -1; 1087 } 1088 1089 static void *fjes_rxframe_get(struct fjes_adapter *adapter, size_t *psize, 1090 int *cur_epid) 1091 { 1092 void *frame; 1093 1094 *cur_epid = fjes_rxframe_search_exist(adapter, *cur_epid); 1095 if (*cur_epid < 0) 1096 return NULL; 1097 1098 frame = 1099 fjes_hw_epbuf_rx_curpkt_get_addr( 1100 &adapter->hw.ep_shm_info[*cur_epid].rx, psize); 1101 1102 return frame; 1103 } 1104 1105 static void fjes_rxframe_release(struct fjes_adapter *adapter, int cur_epid) 1106 { 1107 fjes_hw_epbuf_rx_curpkt_drop(&adapter->hw.ep_shm_info[cur_epid].rx); 1108 } 1109 1110 static void fjes_rx_irq(struct fjes_adapter *adapter, int src_epid) 1111 { 1112 struct fjes_hw *hw = &adapter->hw; 1113 1114 fjes_hw_set_irqmask(hw, REG_ICTL_MASK_RX_DATA, true); 1115 1116 adapter->unset_rx_last = true; 1117 napi_schedule(&adapter->napi); 1118 } 1119 1120 static int fjes_poll(struct napi_struct *napi, int budget) 1121 { 1122 struct fjes_adapter *adapter = 1123 container_of(napi, struct fjes_adapter, napi); 1124 struct net_device *netdev = napi->dev; 1125 struct fjes_hw *hw = &adapter->hw; 1126 struct sk_buff *skb; 1127 int work_done = 0; 1128 int cur_epid = 0; 1129 int epidx; 1130 size_t frame_len; 1131 void *frame; 1132 1133 spin_lock(&hw->rx_status_lock); 1134 for (epidx = 0; epidx < hw->max_epid; epidx++) { 1135 if (epidx == hw->my_epid) 1136 continue; 1137 1138 if (fjes_hw_get_partner_ep_status(hw, epidx) == 1139 EP_PARTNER_SHARED) 1140 adapter->hw.ep_shm_info[epidx] 1141 .tx.info->v1i.rx_status |= FJES_RX_POLL_WORK; 1142 } 1143 spin_unlock(&hw->rx_status_lock); 1144 1145 while (work_done < budget) { 1146 prefetch(&adapter->hw); 1147 frame = fjes_rxframe_get(adapter, &frame_len, &cur_epid); 1148 1149 if (frame) { 1150 skb = napi_alloc_skb(napi, frame_len); 1151 if (!skb) { 1152 adapter->stats64.rx_dropped += 1; 1153 hw->ep_shm_info[cur_epid].net_stats 1154 .rx_dropped += 1; 1155 adapter->stats64.rx_errors += 1; 1156 hw->ep_shm_info[cur_epid].net_stats 1157 .rx_errors += 1; 1158 } else { 1159 skb_put_data(skb, frame, frame_len); 1160 skb->protocol = eth_type_trans(skb, netdev); 1161 skb->ip_summed = CHECKSUM_UNNECESSARY; 1162 1163 netif_receive_skb(skb); 1164 1165 work_done++; 1166 1167 adapter->stats64.rx_packets += 1; 1168 hw->ep_shm_info[cur_epid].net_stats 1169 .rx_packets += 1; 1170 adapter->stats64.rx_bytes += frame_len; 1171 hw->ep_shm_info[cur_epid].net_stats 1172 .rx_bytes += frame_len; 1173 1174 if (is_multicast_ether_addr( 1175 ((struct ethhdr *)frame)->h_dest)) { 1176 adapter->stats64.multicast += 1; 1177 hw->ep_shm_info[cur_epid].net_stats 1178 .multicast += 1; 1179 } 1180 } 1181 1182 fjes_rxframe_release(adapter, cur_epid); 1183 adapter->unset_rx_last = true; 1184 } else { 1185 break; 1186 } 1187 } 1188 1189 if (work_done < budget) { 1190 napi_complete_done(napi, work_done); 1191 1192 if (adapter->unset_rx_last) { 1193 adapter->rx_last_jiffies = jiffies; 1194 adapter->unset_rx_last = false; 1195 } 1196 1197 if (((long)jiffies - (long)adapter->rx_last_jiffies) < 3) { 1198 napi_reschedule(napi); 1199 } else { 1200 spin_lock(&hw->rx_status_lock); 1201 for (epidx = 0; epidx < hw->max_epid; epidx++) { 1202 if (epidx == hw->my_epid) 1203 continue; 1204 if (fjes_hw_get_partner_ep_status(hw, epidx) == 1205 EP_PARTNER_SHARED) 1206 adapter->hw.ep_shm_info[epidx].tx 1207 .info->v1i.rx_status &= 1208 ~FJES_RX_POLL_WORK; 1209 } 1210 spin_unlock(&hw->rx_status_lock); 1211 1212 fjes_hw_set_irqmask(hw, REG_ICTL_MASK_RX_DATA, false); 1213 } 1214 } 1215 1216 return work_done; 1217 } 1218 1219 /* fjes_probe - Device Initialization Routine */ 1220 static int fjes_probe(struct platform_device *plat_dev) 1221 { 1222 struct fjes_adapter *adapter; 1223 struct net_device *netdev; 1224 struct resource *res; 1225 struct fjes_hw *hw; 1226 int err; 1227 1228 err = -ENOMEM; 1229 netdev = alloc_netdev_mq(sizeof(struct fjes_adapter), "es%d", 1230 NET_NAME_UNKNOWN, fjes_netdev_setup, 1231 FJES_MAX_QUEUES); 1232 1233 if (!netdev) 1234 goto err_out; 1235 1236 SET_NETDEV_DEV(netdev, &plat_dev->dev); 1237 1238 dev_set_drvdata(&plat_dev->dev, netdev); 1239 adapter = netdev_priv(netdev); 1240 adapter->netdev = netdev; 1241 adapter->plat_dev = plat_dev; 1242 hw = &adapter->hw; 1243 hw->back = adapter; 1244 1245 /* setup the private structure */ 1246 err = fjes_sw_init(adapter); 1247 if (err) 1248 goto err_free_netdev; 1249 1250 INIT_WORK(&adapter->force_close_task, fjes_force_close_task); 1251 adapter->force_reset = false; 1252 adapter->open_guard = false; 1253 1254 adapter->txrx_wq = alloc_workqueue(DRV_NAME "/txrx", WQ_MEM_RECLAIM, 0); 1255 adapter->control_wq = alloc_workqueue(DRV_NAME "/control", 1256 WQ_MEM_RECLAIM, 0); 1257 1258 INIT_WORK(&adapter->tx_stall_task, fjes_tx_stall_task); 1259 INIT_WORK(&adapter->raise_intr_rxdata_task, 1260 fjes_raise_intr_rxdata_task); 1261 INIT_WORK(&adapter->unshare_watch_task, fjes_watch_unshare_task); 1262 adapter->unshare_watch_bitmask = 0; 1263 1264 INIT_DELAYED_WORK(&adapter->interrupt_watch_task, fjes_irq_watch_task); 1265 adapter->interrupt_watch_enable = false; 1266 1267 res = platform_get_resource(plat_dev, IORESOURCE_MEM, 0); 1268 hw->hw_res.start = res->start; 1269 hw->hw_res.size = resource_size(res); 1270 hw->hw_res.irq = platform_get_irq(plat_dev, 0); 1271 err = fjes_hw_init(&adapter->hw); 1272 if (err) 1273 goto err_free_netdev; 1274 1275 /* setup MAC address (02:00:00:00:00:[epid])*/ 1276 netdev->dev_addr[0] = 2; 1277 netdev->dev_addr[1] = 0; 1278 netdev->dev_addr[2] = 0; 1279 netdev->dev_addr[3] = 0; 1280 netdev->dev_addr[4] = 0; 1281 netdev->dev_addr[5] = hw->my_epid; /* EPID */ 1282 1283 err = register_netdev(netdev); 1284 if (err) 1285 goto err_hw_exit; 1286 1287 netif_carrier_off(netdev); 1288 1289 fjes_dbg_adapter_init(adapter); 1290 1291 return 0; 1292 1293 err_hw_exit: 1294 fjes_hw_exit(&adapter->hw); 1295 err_free_netdev: 1296 free_netdev(netdev); 1297 err_out: 1298 return err; 1299 } 1300 1301 /* fjes_remove - Device Removal Routine */ 1302 static int fjes_remove(struct platform_device *plat_dev) 1303 { 1304 struct net_device *netdev = dev_get_drvdata(&plat_dev->dev); 1305 struct fjes_adapter *adapter = netdev_priv(netdev); 1306 struct fjes_hw *hw = &adapter->hw; 1307 1308 fjes_dbg_adapter_exit(adapter); 1309 1310 cancel_delayed_work_sync(&adapter->interrupt_watch_task); 1311 cancel_work_sync(&adapter->unshare_watch_task); 1312 cancel_work_sync(&adapter->raise_intr_rxdata_task); 1313 cancel_work_sync(&adapter->tx_stall_task); 1314 if (adapter->control_wq) 1315 destroy_workqueue(adapter->control_wq); 1316 if (adapter->txrx_wq) 1317 destroy_workqueue(adapter->txrx_wq); 1318 1319 unregister_netdev(netdev); 1320 1321 fjes_hw_exit(hw); 1322 1323 netif_napi_del(&adapter->napi); 1324 1325 free_netdev(netdev); 1326 1327 return 0; 1328 } 1329 1330 static int fjes_sw_init(struct fjes_adapter *adapter) 1331 { 1332 struct net_device *netdev = adapter->netdev; 1333 1334 netif_napi_add(netdev, &adapter->napi, fjes_poll, 64); 1335 1336 return 0; 1337 } 1338 1339 /* fjes_netdev_setup - netdevice initialization routine */ 1340 static void fjes_netdev_setup(struct net_device *netdev) 1341 { 1342 ether_setup(netdev); 1343 1344 netdev->watchdog_timeo = FJES_TX_RETRY_INTERVAL; 1345 netdev->netdev_ops = &fjes_netdev_ops; 1346 fjes_set_ethtool_ops(netdev); 1347 netdev->mtu = fjes_support_mtu[3]; 1348 netdev->min_mtu = fjes_support_mtu[0]; 1349 netdev->max_mtu = fjes_support_mtu[3]; 1350 netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; 1351 } 1352 1353 static void fjes_irq_watch_task(struct work_struct *work) 1354 { 1355 struct fjes_adapter *adapter = container_of(to_delayed_work(work), 1356 struct fjes_adapter, interrupt_watch_task); 1357 1358 local_irq_disable(); 1359 fjes_intr(adapter->hw.hw_res.irq, adapter); 1360 local_irq_enable(); 1361 1362 if (fjes_rxframe_search_exist(adapter, 0) >= 0) 1363 napi_schedule(&adapter->napi); 1364 1365 if (adapter->interrupt_watch_enable) { 1366 if (!delayed_work_pending(&adapter->interrupt_watch_task)) 1367 queue_delayed_work(adapter->control_wq, 1368 &adapter->interrupt_watch_task, 1369 FJES_IRQ_WATCH_DELAY); 1370 } 1371 } 1372 1373 static void fjes_watch_unshare_task(struct work_struct *work) 1374 { 1375 struct fjes_adapter *adapter = 1376 container_of(work, struct fjes_adapter, unshare_watch_task); 1377 1378 struct net_device *netdev = adapter->netdev; 1379 struct fjes_hw *hw = &adapter->hw; 1380 1381 int unshare_watch, unshare_reserve; 1382 int max_epid, my_epid, epidx; 1383 int stop_req, stop_req_done; 1384 ulong unshare_watch_bitmask; 1385 unsigned long flags; 1386 int wait_time = 0; 1387 int is_shared; 1388 int ret; 1389 1390 my_epid = hw->my_epid; 1391 max_epid = hw->max_epid; 1392 1393 unshare_watch_bitmask = adapter->unshare_watch_bitmask; 1394 adapter->unshare_watch_bitmask = 0; 1395 1396 while ((unshare_watch_bitmask || hw->txrx_stop_req_bit) && 1397 (wait_time < 3000)) { 1398 for (epidx = 0; epidx < hw->max_epid; epidx++) { 1399 if (epidx == hw->my_epid) 1400 continue; 1401 1402 is_shared = fjes_hw_epid_is_shared(hw->hw_info.share, 1403 epidx); 1404 1405 stop_req = test_bit(epidx, &hw->txrx_stop_req_bit); 1406 1407 stop_req_done = hw->ep_shm_info[epidx].rx.info->v1i.rx_status & 1408 FJES_RX_STOP_REQ_DONE; 1409 1410 unshare_watch = test_bit(epidx, &unshare_watch_bitmask); 1411 1412 unshare_reserve = test_bit(epidx, 1413 &hw->hw_info.buffer_unshare_reserve_bit); 1414 1415 if ((!stop_req || 1416 (is_shared && (!is_shared || !stop_req_done))) && 1417 (is_shared || !unshare_watch || !unshare_reserve)) 1418 continue; 1419 1420 mutex_lock(&hw->hw_info.lock); 1421 ret = fjes_hw_unregister_buff_addr(hw, epidx); 1422 switch (ret) { 1423 case 0: 1424 break; 1425 case -ENOMSG: 1426 case -EBUSY: 1427 default: 1428 if (!work_pending( 1429 &adapter->force_close_task)) { 1430 adapter->force_reset = true; 1431 schedule_work( 1432 &adapter->force_close_task); 1433 } 1434 break; 1435 } 1436 mutex_unlock(&hw->hw_info.lock); 1437 hw->ep_shm_info[epidx].ep_stats 1438 .com_unregist_buf_exec += 1; 1439 1440 spin_lock_irqsave(&hw->rx_status_lock, flags); 1441 fjes_hw_setup_epbuf(&hw->ep_shm_info[epidx].tx, 1442 netdev->dev_addr, netdev->mtu); 1443 spin_unlock_irqrestore(&hw->rx_status_lock, flags); 1444 1445 clear_bit(epidx, &hw->txrx_stop_req_bit); 1446 clear_bit(epidx, &unshare_watch_bitmask); 1447 clear_bit(epidx, 1448 &hw->hw_info.buffer_unshare_reserve_bit); 1449 } 1450 1451 msleep(100); 1452 wait_time += 100; 1453 } 1454 1455 if (hw->hw_info.buffer_unshare_reserve_bit) { 1456 for (epidx = 0; epidx < hw->max_epid; epidx++) { 1457 if (epidx == hw->my_epid) 1458 continue; 1459 1460 if (test_bit(epidx, 1461 &hw->hw_info.buffer_unshare_reserve_bit)) { 1462 mutex_lock(&hw->hw_info.lock); 1463 1464 ret = fjes_hw_unregister_buff_addr(hw, epidx); 1465 switch (ret) { 1466 case 0: 1467 break; 1468 case -ENOMSG: 1469 case -EBUSY: 1470 default: 1471 if (!work_pending( 1472 &adapter->force_close_task)) { 1473 adapter->force_reset = true; 1474 schedule_work( 1475 &adapter->force_close_task); 1476 } 1477 break; 1478 } 1479 mutex_unlock(&hw->hw_info.lock); 1480 1481 hw->ep_shm_info[epidx].ep_stats 1482 .com_unregist_buf_exec += 1; 1483 1484 spin_lock_irqsave(&hw->rx_status_lock, flags); 1485 fjes_hw_setup_epbuf( 1486 &hw->ep_shm_info[epidx].tx, 1487 netdev->dev_addr, netdev->mtu); 1488 spin_unlock_irqrestore(&hw->rx_status_lock, 1489 flags); 1490 1491 clear_bit(epidx, &hw->txrx_stop_req_bit); 1492 clear_bit(epidx, &unshare_watch_bitmask); 1493 clear_bit(epidx, &hw->hw_info.buffer_unshare_reserve_bit); 1494 } 1495 1496 if (test_bit(epidx, &unshare_watch_bitmask)) { 1497 spin_lock_irqsave(&hw->rx_status_lock, flags); 1498 hw->ep_shm_info[epidx].tx.info->v1i.rx_status &= 1499 ~FJES_RX_STOP_REQ_DONE; 1500 spin_unlock_irqrestore(&hw->rx_status_lock, 1501 flags); 1502 } 1503 } 1504 } 1505 } 1506 1507 static acpi_status 1508 acpi_find_extended_socket_device(acpi_handle obj_handle, u32 level, 1509 void *context, void **return_value) 1510 { 1511 struct acpi_device *device; 1512 bool *found = context; 1513 int result; 1514 1515 result = acpi_bus_get_device(obj_handle, &device); 1516 if (result) 1517 return AE_OK; 1518 1519 if (strcmp(acpi_device_hid(device), ACPI_MOTHERBOARD_RESOURCE_HID)) 1520 return AE_OK; 1521 1522 if (!is_extended_socket_device(device)) 1523 return AE_OK; 1524 1525 if (acpi_check_extended_socket_status(device)) 1526 return AE_OK; 1527 1528 *found = true; 1529 return AE_CTRL_TERMINATE; 1530 } 1531 1532 /* fjes_init_module - Driver Registration Routine */ 1533 static int __init fjes_init_module(void) 1534 { 1535 bool found = false; 1536 int result; 1537 1538 acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX, 1539 acpi_find_extended_socket_device, NULL, &found, 1540 NULL); 1541 1542 if (!found) 1543 return -ENODEV; 1544 1545 pr_info("%s - version %s - %s\n", 1546 fjes_driver_string, fjes_driver_version, fjes_copyright); 1547 1548 fjes_dbg_init(); 1549 1550 result = platform_driver_register(&fjes_driver); 1551 if (result < 0) { 1552 fjes_dbg_exit(); 1553 return result; 1554 } 1555 1556 result = acpi_bus_register_driver(&fjes_acpi_driver); 1557 if (result < 0) 1558 goto fail_acpi_driver; 1559 1560 return 0; 1561 1562 fail_acpi_driver: 1563 platform_driver_unregister(&fjes_driver); 1564 fjes_dbg_exit(); 1565 return result; 1566 } 1567 1568 module_init(fjes_init_module); 1569 1570 /* fjes_exit_module - Driver Exit Cleanup Routine */ 1571 static void __exit fjes_exit_module(void) 1572 { 1573 acpi_bus_unregister_driver(&fjes_acpi_driver); 1574 platform_driver_unregister(&fjes_driver); 1575 fjes_dbg_exit(); 1576 } 1577 1578 module_exit(fjes_exit_module); 1579