/*
 *  FUJITSU Extended Socket Network Device driver
 *  Copyright (c) 2015 FUJITSU LIMITED
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/nls.h>
#include <linux/platform_device.h>
#include <linux/netdevice.h>
#include <linux/interrupt.h>

#include "fjes.h"

#define MAJ 1
#define MIN 1
#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN)
#define DRV_NAME	"fjes"
char fjes_driver_name[] = DRV_NAME;
char fjes_driver_version[] = DRV_VERSION;
static const char fjes_driver_string[] =
		"FUJITSU Extended Socket Network Device Driver";
static const char fjes_copyright[] =
		"Copyright (c) 2015 FUJITSU LIMITED";

MODULE_AUTHOR("Taku Izumi <izumi.taku@jp.fujitsu.com>");
MODULE_DESCRIPTION("FUJITSU Extended Socket Network Device Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static int fjes_request_irq(struct fjes_adapter *);
static void fjes_free_irq(struct fjes_adapter *);

static int fjes_open(struct net_device *);
static int fjes_close(struct net_device *);
static int fjes_setup_resources(struct fjes_adapter *);
static void fjes_free_resources(struct fjes_adapter *);
static netdev_tx_t fjes_xmit_frame(struct sk_buff *, struct net_device *);
static void fjes_raise_intr_rxdata_task(struct work_struct *);
static void fjes_tx_stall_task(struct work_struct *);
static void fjes_force_close_task(struct work_struct *);
static irqreturn_t fjes_intr(int, void *);
static struct rtnl_link_stats64 *
fjes_get_stats64(struct net_device *, struct rtnl_link_stats64 *);
static int fjes_change_mtu(struct net_device *, int);
static int fjes_vlan_rx_add_vid(struct net_device *, __be16 proto, u16);
static int fjes_vlan_rx_kill_vid(struct net_device *, __be16 proto, u16);
static void fjes_tx_retry(struct net_device *);

static int fjes_acpi_add(struct acpi_device *);
static int fjes_acpi_remove(struct acpi_device *);
static acpi_status fjes_get_acpi_resource(struct acpi_resource *, void *);

static int fjes_probe(struct platform_device *);
static int fjes_remove(struct platform_device *);

static int fjes_sw_init(struct fjes_adapter *);
static void fjes_netdev_setup(struct net_device *);
static void fjes_irq_watch_task(struct work_struct *);
static void fjes_watch_unshare_task(struct work_struct *);
static void fjes_rx_irq(struct fjes_adapter *, int);
static int fjes_poll(struct napi_struct *, int);

static const struct acpi_device_id fjes_acpi_ids[] = {
	{"PNP0C02", 0},
	{"", 0},
};
MODULE_DEVICE_TABLE(acpi, fjes_acpi_ids);

static struct acpi_driver fjes_acpi_driver = {
	.name = DRV_NAME,
	.class = DRV_NAME,
	.owner = THIS_MODULE,
	.ids = fjes_acpi_ids,
	.ops = {
		.add = fjes_acpi_add,
		.remove = fjes_acpi_remove,
	},
};

static struct platform_driver fjes_driver = {
	.driver = {
		.name = DRV_NAME,
		.owner = THIS_MODULE,
	},
	.probe = fjes_probe,
	.remove = fjes_remove,
};

static struct resource fjes_resource[] = {
	{
		.flags = IORESOURCE_MEM,
		.start = 0,
		.end = 0,
	},
	{
		.flags = IORESOURCE_IRQ,
		.start = 0,
		.end = 0,
	},
};

static int fjes_acpi_add(struct acpi_device *device)
{
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL};
	char str_buf[sizeof(FJES_ACPI_SYMBOL) + 1];
	struct platform_device *plat_dev;
	union acpi_object *str;
	acpi_status status;
	int result;

	status = acpi_evaluate_object(device->handle, "_STR", NULL, &buffer);
	if (ACPI_FAILURE(status))
		return -ENODEV;

	str = buffer.pointer;
	result = utf16s_to_utf8s((wchar_t *)str->string.pointer,
				 str->string.length, UTF16_LITTLE_ENDIAN,
				 str_buf, sizeof(str_buf) - 1);
	str_buf[result] = 0;

	if (strncmp(FJES_ACPI_SYMBOL, str_buf, strlen(FJES_ACPI_SYMBOL)) != 0) {
		kfree(buffer.pointer);
		return -ENODEV;
	}
	kfree(buffer.pointer);

	status = acpi_walk_resources(device->handle, METHOD_NAME__CRS,
				     fjes_get_acpi_resource, fjes_resource);
	if (ACPI_FAILURE(status))
		return -ENODEV;

	/* create platform_device */
	plat_dev = platform_device_register_simple(DRV_NAME, 0, fjes_resource,
						   ARRAY_SIZE(fjes_resource));
	device->driver_data = plat_dev;

	return 0;
}

static int fjes_acpi_remove(struct acpi_device *device)
{
	struct platform_device *plat_dev;

	plat_dev = (struct platform_device *)acpi_driver_data(device);
	platform_device_unregister(plat_dev);

	return 0;
}

static acpi_status
fjes_get_acpi_resource(struct acpi_resource *acpi_res, void *data)
{
	struct acpi_resource_address32 *addr;
	struct acpi_resource_irq *irq;
	struct resource *res = data;

	switch (acpi_res->type) {
	case ACPI_RESOURCE_TYPE_ADDRESS32:
		addr = &acpi_res->data.address32;
		res[0].start = addr->address.minimum;
		res[0].end = addr->address.minimum +
			addr->address.address_length - 1;
		break;

	case ACPI_RESOURCE_TYPE_IRQ:
		irq = &acpi_res->data.irq;
		if (irq->interrupt_count != 1)
			return AE_ERROR;
		res[1].start = irq->interrupts[0];
		res[1].end = irq->interrupts[0];
		break;

	default:
		break;
	}

	return AE_OK;
}

static int fjes_request_irq(struct fjes_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int result = -1;

	adapter->interrupt_watch_enable = true;
	if (!delayed_work_pending(&adapter->interrupt_watch_task)) {
		queue_delayed_work(adapter->control_wq,
				   &adapter->interrupt_watch_task,
				   FJES_IRQ_WATCH_DELAY);
	}

	if (!adapter->irq_registered) {
		result = request_irq(adapter->hw.hw_res.irq, fjes_intr,
				     IRQF_SHARED, netdev->name, adapter);
		if (result)
			adapter->irq_registered = false;
		else
			adapter->irq_registered = true;
	}

	return result;
}

static void fjes_free_irq(struct fjes_adapter *adapter)
{
	struct fjes_hw *hw = &adapter->hw;

	adapter->interrupt_watch_enable = false;
	cancel_delayed_work_sync(&adapter->interrupt_watch_task);

	fjes_hw_set_irqmask(hw, REG_ICTL_MASK_ALL, true);

	if (adapter->irq_registered) {
		free_irq(adapter->hw.hw_res.irq, adapter);
		adapter->irq_registered = false;
	}
}

static const struct net_device_ops fjes_netdev_ops = {
	.ndo_open		= fjes_open,
	.ndo_stop		= fjes_close,
	.ndo_start_xmit		= fjes_xmit_frame,
	.ndo_get_stats64	= fjes_get_stats64,
	.ndo_change_mtu		= fjes_change_mtu,
	.ndo_tx_timeout		= fjes_tx_retry,
	.ndo_vlan_rx_add_vid	= fjes_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= fjes_vlan_rx_kill_vid,
};

/* fjes_open - Called when a network interface is made active */
static int fjes_open(struct net_device *netdev)
{
	struct fjes_adapter *adapter = netdev_priv(netdev);
	struct fjes_hw *hw = &adapter->hw;
	int result;

	if (adapter->open_guard)
		return -ENXIO;

	result = fjes_setup_resources(adapter);
	if (result)
		goto err_setup_res;

	hw->txrx_stop_req_bit = 0;
	hw->epstop_req_bit = 0;

	napi_enable(&adapter->napi);

	fjes_hw_capture_interrupt_status(hw);

	result = fjes_request_irq(adapter);
	if (result)
		goto err_req_irq;

	fjes_hw_set_irqmask(hw, REG_ICTL_MASK_ALL, false);

	netif_tx_start_all_queues(netdev);
	netif_carrier_on(netdev);

	return 0;

err_req_irq:
	fjes_free_irq(adapter);
	napi_disable(&adapter->napi);

err_setup_res:
	fjes_free_resources(adapter);
	return result;
}

/* fjes_close - Disables a network interface */
static int fjes_close(struct net_device *netdev)
{
	struct fjes_adapter *adapter = netdev_priv(netdev);
	struct fjes_hw *hw = &adapter->hw;
	unsigned long flags;
	int epidx;

	netif_tx_stop_all_queues(netdev);
	netif_carrier_off(netdev);

	fjes_hw_raise_epstop(hw);

	napi_disable(&adapter->napi);

	spin_lock_irqsave(&hw->rx_status_lock, flags);
	for (epidx = 0; epidx < hw->max_epid; epidx++) {
		if (epidx == hw->my_epid)
			continue;

		if (fjes_hw_get_partner_ep_status(hw, epidx) ==
		    EP_PARTNER_SHARED)
			adapter->hw.ep_shm_info[epidx]
				   .tx.info->v1i.rx_status &=
				~FJES_RX_POLL_WORK;
	}
	spin_unlock_irqrestore(&hw->rx_status_lock, flags);

	fjes_free_irq(adapter);

	cancel_delayed_work_sync(&adapter->interrupt_watch_task);
	cancel_work_sync(&adapter->unshare_watch_task);
	adapter->unshare_watch_bitmask = 0;
	cancel_work_sync(&adapter->raise_intr_rxdata_task);
	cancel_work_sync(&adapter->tx_stall_task);

	cancel_work_sync(&hw->update_zone_task);
	cancel_work_sync(&hw->epstop_task);

	fjes_hw_wait_epstop(hw);

	fjes_free_resources(adapter);

	return 0;
}

static int fjes_setup_resources(struct fjes_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ep_share_mem_info *buf_pair;
	struct fjes_hw *hw = &adapter->hw;
	unsigned long flags;
	int result;
	int epidx;

	mutex_lock(&hw->hw_info.lock);
	result = fjes_hw_request_info(hw);
	switch (result) {
	case 0:
		for (epidx = 0; epidx < hw->max_epid; epidx++) {
			hw->ep_shm_info[epidx].es_status =
			    hw->hw_info.res_buf->info.info[epidx].es_status;
			hw->ep_shm_info[epidx].zone =
			    hw->hw_info.res_buf->info.info[epidx].zone;
		}
		break;
	default:
	case -ENOMSG:
	case -EBUSY:
		adapter->force_reset = true;

		mutex_unlock(&hw->hw_info.lock);
		return result;
	}
	mutex_unlock(&hw->hw_info.lock);

	for (epidx = 0; epidx < (hw->max_epid); epidx++) {
		if ((epidx != hw->my_epid) &&
		    (hw->ep_shm_info[epidx].es_status ==
		     FJES_ZONING_STATUS_ENABLE)) {
			fjes_hw_raise_interrupt(hw, epidx,
						REG_ICTL_MASK_INFO_UPDATE);
		}
	}

	msleep(FJES_OPEN_ZONE_UPDATE_WAIT * hw->max_epid);

	for (epidx = 0; epidx < (hw->max_epid); epidx++) {
		if (epidx == hw->my_epid)
			continue;

		buf_pair = &hw->ep_shm_info[epidx];

		spin_lock_irqsave(&hw->rx_status_lock, flags);
		fjes_hw_setup_epbuf(&buf_pair->tx, netdev->dev_addr,
				    netdev->mtu);
		spin_unlock_irqrestore(&hw->rx_status_lock, flags);

		if (fjes_hw_epid_is_same_zone(hw, epidx)) {
			mutex_lock(&hw->hw_info.lock);
			result =
			fjes_hw_register_buff_addr(hw, epidx, buf_pair);
			mutex_unlock(&hw->hw_info.lock);

			switch (result) {
			case 0:
				break;
			case -ENOMSG:
			case -EBUSY:
			default:
				adapter->force_reset = true;
				return result;
			}
		}
	}

	return 0;
}

static void fjes_free_resources(struct fjes_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct fjes_device_command_param param;
	struct ep_share_mem_info *buf_pair;
	struct fjes_hw *hw = &adapter->hw;
	bool reset_flag = false;
	unsigned long flags;
	int result;
	int epidx;

	for (epidx = 0; epidx < hw->max_epid; epidx++) {
		if (epidx == hw->my_epid)
			continue;

		mutex_lock(&hw->hw_info.lock);
		result = fjes_hw_unregister_buff_addr(hw, epidx);
		mutex_unlock(&hw->hw_info.lock);

		if (result)
			reset_flag = true;

		buf_pair = &hw->ep_shm_info[epidx];

		spin_lock_irqsave(&hw->rx_status_lock, flags);
		fjes_hw_setup_epbuf(&buf_pair->tx,
				    netdev->dev_addr, netdev->mtu);
		spin_unlock_irqrestore(&hw->rx_status_lock, flags);

		clear_bit(epidx, &hw->txrx_stop_req_bit);
	}

	if (reset_flag || adapter->force_reset) {
		result = fjes_hw_reset(hw);

		adapter->force_reset = false;

		if (result)
			adapter->open_guard = true;

		hw->hw_info.buffer_share_bit = 0;

		memset((void *)&param, 0, sizeof(param));

		param.req_len = hw->hw_info.req_buf_size;
		param.req_start = __pa(hw->hw_info.req_buf);
		param.res_len = hw->hw_info.res_buf_size;
		param.res_start = __pa(hw->hw_info.res_buf);
		param.share_start = __pa(hw->hw_info.share->ep_status);

		fjes_hw_init_command_registers(hw, &param);
	}
}

/* fjes_tx_stall_task - Wake a stalled TX queue once peer buffers have room */
static void fjes_tx_stall_task(struct work_struct *work)
{
	struct fjes_adapter *adapter = container_of(work,
			struct fjes_adapter, tx_stall_task);
	struct net_device *netdev = adapter->netdev;
	struct fjes_hw *hw = &adapter->hw;
	int all_queue_available, sendable;
	enum ep_partner_status pstatus;
	int max_epid, my_epid, epid;
	union ep_buffer_info *info;
	int i;

	if (((long)jiffies -
		dev_trans_start(netdev)) > FJES_TX_TX_STALL_TIMEOUT) {
		netif_wake_queue(netdev);
		return;
	}

	my_epid = hw->my_epid;
	max_epid = hw->max_epid;

	for (i = 0; i < 5; i++) {
		all_queue_available = 1;

		for (epid = 0; epid < max_epid; epid++) {
			if (my_epid == epid)
				continue;

			pstatus = fjes_hw_get_partner_ep_status(hw, epid);
			sendable = (pstatus == EP_PARTNER_SHARED);
			if (!sendable)
				continue;

			info = adapter->hw.ep_shm_info[epid].tx.info;

			if (!(info->v1i.rx_status & FJES_RX_MTU_CHANGING_DONE))
				return;

			if (EP_RING_FULL(info->v1i.head, info->v1i.tail,
					 info->v1i.count_max)) {
				all_queue_available = 0;
				break;
			}
		}

		if (all_queue_available) {
			netif_wake_queue(netdev);
			return;
		}
	}

	usleep_range(50, 100);

	queue_work(adapter->txrx_wq, &adapter->tx_stall_task);
}

static void fjes_force_close_task(struct work_struct *work)
{
	struct fjes_adapter *adapter = container_of(work,
			struct fjes_adapter, force_close_task);
	struct net_device *netdev = adapter->netdev;

	rtnl_lock();
	dev_close(netdev);
	rtnl_unlock();
}

/* fjes_raise_intr_rxdata_task - Notify peers that RX data is pending */
static void fjes_raise_intr_rxdata_task(struct work_struct *work)
{
	struct fjes_adapter *adapter = container_of(work,
			struct fjes_adapter, raise_intr_rxdata_task);
	struct fjes_hw *hw = &adapter->hw;
	enum ep_partner_status pstatus;
	int max_epid, my_epid, epid;

	my_epid = hw->my_epid;
	max_epid = hw->max_epid;

	for (epid = 0; epid < max_epid; epid++)
		hw->ep_shm_info[epid].tx_status_work = 0;

	for (epid = 0; epid < max_epid; epid++) {
		if (epid == my_epid)
			continue;

		pstatus = fjes_hw_get_partner_ep_status(hw, epid);
		if (pstatus == EP_PARTNER_SHARED) {
			hw->ep_shm_info[epid].tx_status_work =
				hw->ep_shm_info[epid].tx.info->v1i.tx_status;

			if (hw->ep_shm_info[epid].tx_status_work ==
				FJES_TX_DELAY_SEND_PENDING) {
				hw->ep_shm_info[epid].tx.info->v1i.tx_status =
					FJES_TX_DELAY_SEND_NONE;
			}
		}
	}

	for (epid = 0; epid < max_epid; epid++) {
		if (epid == my_epid)
			continue;

		pstatus = fjes_hw_get_partner_ep_status(hw, epid);
		if ((hw->ep_shm_info[epid].tx_status_work ==
		     FJES_TX_DELAY_SEND_PENDING) &&
		    (pstatus == EP_PARTNER_SHARED) &&
		    !(hw->ep_shm_info[epid].rx.info->v1i.rx_status &
		      FJES_RX_POLL_WORK)) {
			fjes_hw_raise_interrupt(hw, epid,
						REG_ICTL_MASK_RX_DATA);
		}
	}

	usleep_range(500, 1000);
}

static int fjes_tx_send(struct fjes_adapter *adapter, int dest,
			void *data, size_t len)
{
	int retval;

	retval = fjes_hw_epbuf_tx_pkt_send(&adapter->hw.ep_shm_info[dest].tx,
					   data, len);
	if (retval)
		return retval;

	adapter->hw.ep_shm_info[dest].tx.info->v1i.tx_status =
		FJES_TX_DELAY_SEND_PENDING;
	if (!work_pending(&adapter->raise_intr_rxdata_task))
		queue_work(adapter->txrx_wq,
			   &adapter->raise_intr_rxdata_task);

	retval = 0;
	return retval;
}

/* fjes_xmit_frame - Transmit a frame via the shared extended-socket buffers */
static netdev_tx_t
fjes_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct fjes_adapter *adapter = netdev_priv(netdev);
	struct fjes_hw *hw = &adapter->hw;

	int max_epid, my_epid, dest_epid;
	enum ep_partner_status pstatus;
	struct netdev_queue *cur_queue;
	char shortpkt[VLAN_ETH_HLEN];
	bool is_multi, vlan;
	struct ethhdr *eth;
	u16 queue_no = 0;
	u16 vlan_id = 0;
	netdev_tx_t ret;
	char *data;
	int len;

	ret = NETDEV_TX_OK;
	is_multi = false;
	cur_queue = netdev_get_tx_queue(netdev, queue_no);

	eth = (struct ethhdr *)skb->data;
	my_epid = hw->my_epid;

	vlan = (vlan_get_tag(skb, &vlan_id) == 0) ?
		true : false;

	data = skb->data;
	len = skb->len;

	if (is_multicast_ether_addr(eth->h_dest)) {
		dest_epid = 0;
		max_epid = hw->max_epid;
		is_multi = true;
	} else if (is_local_ether_addr(eth->h_dest)) {
		dest_epid = eth->h_dest[ETH_ALEN - 1];
		max_epid = dest_epid + 1;

		if ((eth->h_dest[0] == 0x02) &&
		    (0x00 == (eth->h_dest[1] | eth->h_dest[2] |
			      eth->h_dest[3] | eth->h_dest[4])) &&
		    (dest_epid < hw->max_epid)) {
			;
		} else {
			dest_epid = 0;
			max_epid = 0;
			ret = NETDEV_TX_OK;

			adapter->stats64.tx_packets += 1;
			hw->ep_shm_info[my_epid].net_stats.tx_packets += 1;
			adapter->stats64.tx_bytes += len;
			hw->ep_shm_info[my_epid].net_stats.tx_bytes += len;
		}
	} else {
		dest_epid = 0;
		max_epid = 0;
		ret = NETDEV_TX_OK;

		adapter->stats64.tx_packets += 1;
		hw->ep_shm_info[my_epid].net_stats.tx_packets += 1;
		adapter->stats64.tx_bytes += len;
		hw->ep_shm_info[my_epid].net_stats.tx_bytes += len;
	}

	for (; dest_epid < max_epid; dest_epid++) {
		if (my_epid == dest_epid)
			continue;

		pstatus = fjes_hw_get_partner_ep_status(hw, dest_epid);
		if (pstatus != EP_PARTNER_SHARED) {
			ret = NETDEV_TX_OK;
		} else if (!fjes_hw_check_epbuf_version(
				&adapter->hw.ep_shm_info[dest_epid].rx, 0)) {
			/* version is NOT 0 */
			adapter->stats64.tx_carrier_errors += 1;
			hw->ep_shm_info[dest_epid].net_stats
						  .tx_carrier_errors += 1;

			ret = NETDEV_TX_OK;
		} else if (!fjes_hw_check_mtu(
				&adapter->hw.ep_shm_info[dest_epid].rx,
				netdev->mtu)) {
			adapter->stats64.tx_dropped += 1;
			hw->ep_shm_info[dest_epid].net_stats.tx_dropped += 1;
			adapter->stats64.tx_errors += 1;
			hw->ep_shm_info[dest_epid].net_stats.tx_errors += 1;

			ret = NETDEV_TX_OK;
		} else if (vlan &&
			   !fjes_hw_check_vlan_id(
				&adapter->hw.ep_shm_info[dest_epid].rx,
				vlan_id)) {
			ret = NETDEV_TX_OK;
		} else {
			if (len < VLAN_ETH_HLEN) {
				memset(shortpkt, 0, VLAN_ETH_HLEN);
				memcpy(shortpkt, skb->data, skb->len);
				len = VLAN_ETH_HLEN;
				data = shortpkt;
			}

			if (adapter->tx_retry_count == 0) {
				adapter->tx_start_jiffies = jiffies;
				adapter->tx_retry_count = 1;
			} else {
				adapter->tx_retry_count++;
			}

			if (fjes_tx_send(adapter, dest_epid, data, len)) {
				if (is_multi) {
					ret = NETDEV_TX_OK;
				} else if (
					   ((long)jiffies -
					    (long)adapter->tx_start_jiffies) >=
					    FJES_TX_RETRY_TIMEOUT) {
					adapter->stats64.tx_fifo_errors += 1;
					hw->ep_shm_info[dest_epid].net_stats
							.tx_fifo_errors += 1;
					adapter->stats64.tx_errors += 1;
					hw->ep_shm_info[dest_epid].net_stats
							.tx_errors += 1;

					ret = NETDEV_TX_OK;
				} else {
					netif_trans_update(netdev);
					netif_tx_stop_queue(cur_queue);

					if (!work_pending(&adapter->tx_stall_task))
						queue_work(adapter->txrx_wq,
							   &adapter->tx_stall_task);

					ret = NETDEV_TX_BUSY;
				}
			} else {
				if (!is_multi) {
					adapter->stats64.tx_packets += 1;
					hw->ep_shm_info[dest_epid].net_stats
							.tx_packets += 1;
					adapter->stats64.tx_bytes += len;
					hw->ep_shm_info[dest_epid].net_stats
							.tx_bytes += len;
				}

				adapter->tx_retry_count = 0;
				ret = NETDEV_TX_OK;
			}
		}
	}

	if (ret == NETDEV_TX_OK) {
		dev_kfree_skb(skb);
		if (is_multi) {
			adapter->stats64.tx_packets += 1;
			hw->ep_shm_info[my_epid].net_stats.tx_packets += 1;
			adapter->stats64.tx_bytes += len;
			hw->ep_shm_info[my_epid].net_stats.tx_bytes += len;
		}
	}

	return ret;
}

static void fjes_tx_retry(struct net_device *netdev)
{
	struct netdev_queue *queue = netdev_get_tx_queue(netdev, 0);

	netif_tx_wake_queue(queue);
}

static struct rtnl_link_stats64 *
fjes_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
{
	struct fjes_adapter *adapter = netdev_priv(netdev);

	memcpy(stats, &adapter->stats64, sizeof(struct rtnl_link_stats64));

	return stats;
}

static int fjes_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct fjes_adapter *adapter = netdev_priv(netdev);
	bool running = netif_running(netdev);
	struct fjes_hw *hw = &adapter->hw;
	unsigned long flags;
	int ret = -EINVAL;
	int idx, epidx;

	for (idx = 0; fjes_support_mtu[idx] != 0; idx++) {
		if (new_mtu <= fjes_support_mtu[idx]) {
			new_mtu = fjes_support_mtu[idx];
			if (new_mtu == netdev->mtu)
				return 0;

			ret = 0;
			break;
		}
	}

	if (ret)
		return ret;

	if (running) {
		spin_lock_irqsave(&hw->rx_status_lock, flags);
		for (epidx = 0; epidx < hw->max_epid; epidx++) {
			if (epidx == hw->my_epid)
				continue;
			hw->ep_shm_info[epidx].tx.info->v1i.rx_status &=
				~FJES_RX_MTU_CHANGING_DONE;
		}
		spin_unlock_irqrestore(&hw->rx_status_lock, flags);

		netif_tx_stop_all_queues(netdev);
		netif_carrier_off(netdev);
		cancel_work_sync(&adapter->tx_stall_task);
		napi_disable(&adapter->napi);

		msleep(1000);

		netif_tx_stop_all_queues(netdev);
	}

	netdev->mtu = new_mtu;

	if (running) {
		for (epidx = 0; epidx < hw->max_epid; epidx++) {
			if (epidx == hw->my_epid)
				continue;

			spin_lock_irqsave(&hw->rx_status_lock, flags);
			fjes_hw_setup_epbuf(&hw->ep_shm_info[epidx].tx,
					    netdev->dev_addr,
					    netdev->mtu);

			hw->ep_shm_info[epidx].tx.info->v1i.rx_status |=
				FJES_RX_MTU_CHANGING_DONE;
			spin_unlock_irqrestore(&hw->rx_status_lock, flags);
		}

		netif_tx_wake_all_queues(netdev);
		netif_carrier_on(netdev);
		napi_enable(&adapter->napi);
		napi_schedule(&adapter->napi);
	}

	return ret;
}

static int fjes_vlan_rx_add_vid(struct net_device *netdev,
				__be16 proto, u16 vid)
{
	struct fjes_adapter *adapter = netdev_priv(netdev);
	bool ret = true;
	int epid;

	for (epid = 0; epid < adapter->hw.max_epid; epid++) {
		if (epid == adapter->hw.my_epid)
			continue;

		if (!fjes_hw_check_vlan_id(
			&adapter->hw.ep_shm_info[epid].tx, vid))
			ret = fjes_hw_set_vlan_id(
				&adapter->hw.ep_shm_info[epid].tx, vid);
	}

	return ret ?
		0 : -ENOSPC;
}

static int fjes_vlan_rx_kill_vid(struct net_device *netdev,
				 __be16 proto, u16 vid)
{
	struct fjes_adapter *adapter = netdev_priv(netdev);
	int epid;

	for (epid = 0; epid < adapter->hw.max_epid; epid++) {
		if (epid == adapter->hw.my_epid)
			continue;

		fjes_hw_del_vlan_id(&adapter->hw.ep_shm_info[epid].tx, vid);
	}

	return 0;
}

static void fjes_txrx_stop_req_irq(struct fjes_adapter *adapter,
				   int src_epid)
{
	struct fjes_hw *hw = &adapter->hw;
	enum ep_partner_status status;
	unsigned long flags;

	status = fjes_hw_get_partner_ep_status(hw, src_epid);
	switch (status) {
	case EP_PARTNER_UNSHARE:
	case EP_PARTNER_COMPLETE:
	default:
		break;
	case EP_PARTNER_WAITING:
		if (src_epid < hw->my_epid) {
			spin_lock_irqsave(&hw->rx_status_lock, flags);
			hw->ep_shm_info[src_epid].tx.info->v1i.rx_status |=
				FJES_RX_STOP_REQ_DONE;
			spin_unlock_irqrestore(&hw->rx_status_lock, flags);

			clear_bit(src_epid, &hw->txrx_stop_req_bit);
			set_bit(src_epid, &adapter->unshare_watch_bitmask);

			if (!work_pending(&adapter->unshare_watch_task))
				queue_work(adapter->control_wq,
					   &adapter->unshare_watch_task);
		}
		break;
	case EP_PARTNER_SHARED:
		if (hw->ep_shm_info[src_epid].rx.info->v1i.rx_status &
		    FJES_RX_STOP_REQ_REQUEST) {
			set_bit(src_epid, &hw->epstop_req_bit);
			if (!work_pending(&hw->epstop_task))
				queue_work(adapter->control_wq,
					   &hw->epstop_task);
		}
		break;
	}
}

static void fjes_stop_req_irq(struct fjes_adapter *adapter, int src_epid)
{
	struct fjes_hw *hw = &adapter->hw;
	enum ep_partner_status status;
	unsigned long flags;

	set_bit(src_epid, &hw->hw_info.buffer_unshare_reserve_bit);

	status = fjes_hw_get_partner_ep_status(hw, src_epid);
	switch (status) {
	case EP_PARTNER_WAITING:
		spin_lock_irqsave(&hw->rx_status_lock, flags);
		hw->ep_shm_info[src_epid].tx.info->v1i.rx_status |=
				FJES_RX_STOP_REQ_DONE;
		spin_unlock_irqrestore(&hw->rx_status_lock, flags);
		clear_bit(src_epid, &hw->txrx_stop_req_bit);
		/* fall through */
	case EP_PARTNER_UNSHARE:
	case EP_PARTNER_COMPLETE:
	default:
		set_bit(src_epid, &adapter->unshare_watch_bitmask);
		if (!work_pending(&adapter->unshare_watch_task))
			queue_work(adapter->control_wq,
				   &adapter->unshare_watch_task);
		break;
	case EP_PARTNER_SHARED:
		set_bit(src_epid, &hw->epstop_req_bit);

		if (!work_pending(&hw->epstop_task))
			queue_work(adapter->control_wq, &hw->epstop_task);
		break;
	}
}

static void fjes_update_zone_irq(struct fjes_adapter *adapter,
				 int src_epid)
{
	struct fjes_hw *hw = &adapter->hw;

	if (!work_pending(&hw->update_zone_task))
		queue_work(adapter->control_wq, &hw->update_zone_task);
}

static irqreturn_t fjes_intr(int irq, void *data)
{
	struct fjes_adapter *adapter = data;
	struct fjes_hw *hw = &adapter->hw;
	irqreturn_t ret;
	u32 icr;

	icr = fjes_hw_capture_interrupt_status(hw);

	if (icr & REG_IS_MASK_IS_ASSERT) {
		if (icr & REG_ICTL_MASK_RX_DATA)
			fjes_rx_irq(adapter, icr & REG_IS_MASK_EPID);

		if (icr & REG_ICTL_MASK_DEV_STOP_REQ)
			fjes_stop_req_irq(adapter, icr & REG_IS_MASK_EPID);

		if (icr & REG_ICTL_MASK_TXRX_STOP_REQ)
			fjes_txrx_stop_req_irq(adapter, icr & REG_IS_MASK_EPID);

		if (icr & REG_ICTL_MASK_TXRX_STOP_DONE)
			fjes_hw_set_irqmask(hw,
					    REG_ICTL_MASK_TXRX_STOP_DONE, true);

		if (icr & REG_ICTL_MASK_INFO_UPDATE)
			fjes_update_zone_irq(adapter, icr & REG_IS_MASK_EPID);

		ret = IRQ_HANDLED;
	} else {
		ret = IRQ_NONE;
	}

	return ret;
}

static int fjes_rxframe_search_exist(struct fjes_adapter *adapter,
				     int start_epid)
{
	struct fjes_hw *hw = &adapter->hw;
	enum ep_partner_status pstatus;
	int max_epid, cur_epid;
	int i;

	max_epid = hw->max_epid;
	start_epid = (start_epid + 1 + max_epid) % max_epid;

	for (i = 0; i < max_epid; i++) {
		cur_epid = (start_epid + i) % max_epid;
		if (cur_epid == hw->my_epid)
			continue;

		pstatus = fjes_hw_get_partner_ep_status(hw, cur_epid);
		if (pstatus == EP_PARTNER_SHARED) {
			if (!fjes_hw_epbuf_rx_is_empty(
				&hw->ep_shm_info[cur_epid].rx))
				return cur_epid;
		}
	}
	return -1;
}

static void *fjes_rxframe_get(struct fjes_adapter *adapter, size_t *psize,
			      int *cur_epid)
{
	void *frame;

	*cur_epid = fjes_rxframe_search_exist(adapter, *cur_epid);
	if (*cur_epid < 0)
		return NULL;

	frame =
	 fjes_hw_epbuf_rx_curpkt_get_addr(
		&adapter->hw.ep_shm_info[*cur_epid].rx, psize);

	return frame;
}

static void fjes_rxframe_release(struct fjes_adapter *adapter, int cur_epid)
{
	fjes_hw_epbuf_rx_curpkt_drop(&adapter->hw.ep_shm_info[cur_epid].rx);
}

static void fjes_rx_irq(struct fjes_adapter *adapter, int src_epid)
{
	struct fjes_hw *hw = &adapter->hw;

	fjes_hw_set_irqmask(hw, REG_ICTL_MASK_RX_DATA, true);

	adapter->unset_rx_last = true;
	napi_schedule(&adapter->napi);
}

/* fjes_poll - NAPI polling routine */
static int fjes_poll(struct napi_struct *napi, int budget)
{
	struct fjes_adapter *adapter =
			container_of(napi, struct fjes_adapter, napi);
	struct net_device *netdev = napi->dev;
	struct fjes_hw *hw = &adapter->hw;
	struct sk_buff *skb;
	int work_done = 0;
	int cur_epid = 0;
	int epidx;
	size_t frame_len;
	void *frame;

	spin_lock(&hw->rx_status_lock);
	for (epidx = 0; epidx < hw->max_epid; epidx++) {
		if (epidx == hw->my_epid)
			continue;

		if (fjes_hw_get_partner_ep_status(hw, epidx) ==
		    EP_PARTNER_SHARED)
			adapter->hw.ep_shm_info[epidx]
				   .tx.info->v1i.rx_status |= FJES_RX_POLL_WORK;
	}
	spin_unlock(&hw->rx_status_lock);

	while (work_done < budget) {
		prefetch(&adapter->hw);
		frame = fjes_rxframe_get(adapter, &frame_len, &cur_epid);

		if (frame) {
			skb = napi_alloc_skb(napi, frame_len);
			if (!skb) {
				adapter->stats64.rx_dropped += 1;
				hw->ep_shm_info[cur_epid].net_stats
							 .rx_dropped += 1;
				adapter->stats64.rx_errors += 1;
				hw->ep_shm_info[cur_epid].net_stats
							 .rx_errors += 1;
			} else {
				memcpy(skb_put(skb, frame_len),
				       frame, frame_len);
				skb->protocol = eth_type_trans(skb, netdev);
				skb->ip_summed = CHECKSUM_UNNECESSARY;

				netif_receive_skb(skb);

				work_done++;

				adapter->stats64.rx_packets += 1;
				hw->ep_shm_info[cur_epid].net_stats
							 .rx_packets += 1;
				adapter->stats64.rx_bytes += frame_len;
				hw->ep_shm_info[cur_epid].net_stats
							 .rx_bytes += frame_len;

				if (is_multicast_ether_addr(
					((struct ethhdr *)frame)->h_dest)) {
					adapter->stats64.multicast += 1;
					hw->ep_shm_info[cur_epid].net_stats
							 .multicast += 1;
				}
			}

			fjes_rxframe_release(adapter, cur_epid);
			adapter->unset_rx_last = true;
		} else {
			break;
		}
	}

	if (work_done < budget) {
		napi_complete(napi);

		if (adapter->unset_rx_last) {
			adapter->rx_last_jiffies = jiffies;
			adapter->unset_rx_last = false;
		}

		if (((long)jiffies - (long)adapter->rx_last_jiffies) < 3) {
			napi_reschedule(napi);
		} else {
			spin_lock(&hw->rx_status_lock);
			for (epidx = 0; epidx < hw->max_epid; epidx++) {
				if (epidx == hw->my_epid)
					continue;
				if (fjes_hw_get_partner_ep_status(hw, epidx) ==
				    EP_PARTNER_SHARED)
					adapter->hw.ep_shm_info[epidx].tx
						   .info->v1i.rx_status &=
						~FJES_RX_POLL_WORK;
			}
			spin_unlock(&hw->rx_status_lock);

			fjes_hw_set_irqmask(hw, REG_ICTL_MASK_RX_DATA, false);
		}
	}

	return work_done;
}

/* fjes_probe - Device Initialization Routine */
static int fjes_probe(struct platform_device *plat_dev)
{
	struct fjes_adapter *adapter;
	struct net_device *netdev;
	struct resource *res;
	struct fjes_hw *hw;
	int err;

	err = -ENOMEM;
	netdev = alloc_netdev_mq(sizeof(struct fjes_adapter), "es%d",
				 NET_NAME_UNKNOWN, fjes_netdev_setup,
				 FJES_MAX_QUEUES);

	if (!netdev)
		goto err_out;

	SET_NETDEV_DEV(netdev, &plat_dev->dev);

	dev_set_drvdata(&plat_dev->dev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->plat_dev = plat_dev;
	hw = &adapter->hw;
	hw->back = adapter;

	/* setup the private structure */
	err = fjes_sw_init(adapter);
	if (err)
		goto err_free_netdev;

	INIT_WORK(&adapter->force_close_task, fjes_force_close_task);
	adapter->force_reset = false;
	adapter->open_guard = false;

	adapter->txrx_wq = create_workqueue(DRV_NAME "/txrx");
	adapter->control_wq = create_workqueue(DRV_NAME "/control");

	INIT_WORK(&adapter->tx_stall_task, fjes_tx_stall_task);
	INIT_WORK(&adapter->raise_intr_rxdata_task,
		  fjes_raise_intr_rxdata_task);
	INIT_WORK(&adapter->unshare_watch_task, fjes_watch_unshare_task);
	adapter->unshare_watch_bitmask = 0;

	INIT_DELAYED_WORK(&adapter->interrupt_watch_task, fjes_irq_watch_task);
	adapter->interrupt_watch_enable = false;

	res = platform_get_resource(plat_dev, IORESOURCE_MEM, 0);
	hw->hw_res.start = res->start;
	hw->hw_res.size = resource_size(res);
	hw->hw_res.irq = platform_get_irq(plat_dev, 0);
	err = fjes_hw_init(&adapter->hw);
	if (err)
		goto err_free_netdev;

	/* setup MAC address (02:00:00:00:00:[epid])*/
	netdev->dev_addr[0] = 2;
	netdev->dev_addr[1] = 0;
	netdev->dev_addr[2] = 0;
	netdev->dev_addr[3] = 0;
	netdev->dev_addr[4] = 0;
	netdev->dev_addr[5] = hw->my_epid; /* EPID */

	err = register_netdev(netdev);
	if (err)
		goto err_hw_exit;

	netif_carrier_off(netdev);

	return 0;

err_hw_exit:
	fjes_hw_exit(&adapter->hw);
err_free_netdev:
	free_netdev(netdev);
err_out:
	return err;
}

/* fjes_remove - Device Removal Routine */
static int fjes_remove(struct platform_device *plat_dev)
{
	struct net_device *netdev = dev_get_drvdata(&plat_dev->dev);
	struct fjes_adapter *adapter = netdev_priv(netdev);
	struct fjes_hw *hw = &adapter->hw;

	cancel_delayed_work_sync(&adapter->interrupt_watch_task);
	cancel_work_sync(&adapter->unshare_watch_task);
	cancel_work_sync(&adapter->raise_intr_rxdata_task);
	cancel_work_sync(&adapter->tx_stall_task);
	if (adapter->control_wq)
		destroy_workqueue(adapter->control_wq);
	if (adapter->txrx_wq)
		destroy_workqueue(adapter->txrx_wq);

	unregister_netdev(netdev);

	fjes_hw_exit(hw);

	netif_napi_del(&adapter->napi);

	free_netdev(netdev);

	return 0;
}

static int fjes_sw_init(struct fjes_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	netif_napi_add(netdev, &adapter->napi, fjes_poll, 64);

	return 0;
}

/* fjes_netdev_setup - netdevice initialization routine */
static void fjes_netdev_setup(struct net_device *netdev)
{
	ether_setup(netdev);

	netdev->watchdog_timeo = FJES_TX_RETRY_INTERVAL;
	netdev->netdev_ops = &fjes_netdev_ops;
	fjes_set_ethtool_ops(netdev);
	netdev->mtu = fjes_support_mtu[3];
	netdev->flags |= IFF_BROADCAST;
	netdev->features |= NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_FILTER;
}

static void fjes_irq_watch_task(struct work_struct *work)
{
	struct fjes_adapter *adapter = container_of(to_delayed_work(work),
			struct fjes_adapter, interrupt_watch_task);

	local_irq_disable();
	fjes_intr(adapter->hw.hw_res.irq, adapter);
	local_irq_enable();

	if (fjes_rxframe_search_exist(adapter, 0) >= 0)
		napi_schedule(&adapter->napi);

	if (adapter->interrupt_watch_enable) {
		if (!delayed_work_pending(&adapter->interrupt_watch_task))
			queue_delayed_work(adapter->control_wq,
					   &adapter->interrupt_watch_task,
					   FJES_IRQ_WATCH_DELAY);
	}
}

static void fjes_watch_unshare_task(struct work_struct *work)
{
	struct fjes_adapter *adapter =
	container_of(work, struct fjes_adapter, unshare_watch_task);

	struct net_device *netdev = adapter->netdev;
	struct fjes_hw *hw = &adapter->hw;

	int unshare_watch, unshare_reserve;
	int max_epid, my_epid, epidx;
	int stop_req, stop_req_done;
	ulong unshare_watch_bitmask;
	unsigned long flags;
	int wait_time = 0;
	int is_shared;
	int ret;

	my_epid = hw->my_epid;
	max_epid = hw->max_epid;

	unshare_watch_bitmask = adapter->unshare_watch_bitmask;
	adapter->unshare_watch_bitmask = 0;

	while ((unshare_watch_bitmask || hw->txrx_stop_req_bit) &&
	       (wait_time < 3000)) {
		for (epidx = 0; epidx < hw->max_epid; epidx++) {
			if (epidx == hw->my_epid)
				continue;

			is_shared = fjes_hw_epid_is_shared(hw->hw_info.share,
							   epidx);

			stop_req = test_bit(epidx, &hw->txrx_stop_req_bit);

			stop_req_done = hw->ep_shm_info[epidx].rx.info->v1i.rx_status &
					FJES_RX_STOP_REQ_DONE;

			unshare_watch = test_bit(epidx, &unshare_watch_bitmask);

			unshare_reserve = test_bit(epidx,
						   &hw->hw_info.buffer_unshare_reserve_bit);

			if ((!stop_req ||
			     (is_shared && (!is_shared || !stop_req_done))) &&
			    (is_shared || !unshare_watch || !unshare_reserve))
				continue;

			mutex_lock(&hw->hw_info.lock);
			ret = fjes_hw_unregister_buff_addr(hw, epidx);
			switch (ret) {
			case 0:
				break;
			case -ENOMSG:
			case -EBUSY:
			default:
				if (!work_pending(
					&adapter->force_close_task)) {
					adapter->force_reset = true;
					schedule_work(
						&adapter->force_close_task);
				}
				break;
			}
			mutex_unlock(&hw->hw_info.lock);

			spin_lock_irqsave(&hw->rx_status_lock, flags);
			fjes_hw_setup_epbuf(&hw->ep_shm_info[epidx].tx,
					    netdev->dev_addr, netdev->mtu);
			spin_unlock_irqrestore(&hw->rx_status_lock, flags);

			clear_bit(epidx, &hw->txrx_stop_req_bit);
			clear_bit(epidx, &unshare_watch_bitmask);
			clear_bit(epidx,
				  &hw->hw_info.buffer_unshare_reserve_bit);
		}

		msleep(100);
		wait_time += 100;
	}

	if (hw->hw_info.buffer_unshare_reserve_bit) {
		for (epidx = 0; epidx < hw->max_epid; epidx++) {
			if (epidx == hw->my_epid)
				continue;

			if (test_bit(epidx,
				     &hw->hw_info.buffer_unshare_reserve_bit)) {
				mutex_lock(&hw->hw_info.lock);

				ret = fjes_hw_unregister_buff_addr(hw, epidx);
				switch (ret) {
				case 0:
					break;
				case -ENOMSG:
				case -EBUSY:
				default:
					if (!work_pending(
						&adapter->force_close_task)) {
						adapter->force_reset = true;
						schedule_work(
							&adapter->force_close_task);
					}
					break;
				}
				mutex_unlock(&hw->hw_info.lock);

				spin_lock_irqsave(&hw->rx_status_lock, flags);
				fjes_hw_setup_epbuf(
					&hw->ep_shm_info[epidx].tx,
					netdev->dev_addr, netdev->mtu);
				spin_unlock_irqrestore(&hw->rx_status_lock,
						       flags);

				clear_bit(epidx, &hw->txrx_stop_req_bit);
				clear_bit(epidx, &unshare_watch_bitmask);
				clear_bit(epidx,
					  &hw->hw_info.buffer_unshare_reserve_bit);
			}

			if (test_bit(epidx, &unshare_watch_bitmask)) {
				spin_lock_irqsave(&hw->rx_status_lock, flags);
				hw->ep_shm_info[epidx].tx.info->v1i.rx_status &=
					~FJES_RX_STOP_REQ_DONE;
				spin_unlock_irqrestore(&hw->rx_status_lock,
						       flags);
			}
		}
	}
}

/* fjes_init_module - Driver Registration Routine */
static int __init fjes_init_module(void)
{
	int result;

	pr_info("%s - version %s - %s\n",
		fjes_driver_string, fjes_driver_version, fjes_copyright);

	result = platform_driver_register(&fjes_driver);
	if (result < 0)
		return result;

	result = acpi_bus_register_driver(&fjes_acpi_driver);
	if (result < 0)
		goto fail_acpi_driver;

	return 0;

fail_acpi_driver:
	platform_driver_unregister(&fjes_driver);
	return result;
}

module_init(fjes_init_module);

/* fjes_exit_module - Driver Exit Cleanup Routine */
static void __exit fjes_exit_module(void)
{
	acpi_bus_unregister_driver(&fjes_acpi_driver);
	platform_driver_unregister(&fjes_driver);
}

module_exit(fjes_exit_module);