1 // SPDX-License-Identifier: GPL-2.0+ 2 /* 3 * Copyright (C) 2015 Microchip Technology 4 */ 5 #include <linux/version.h> 6 #include <linux/module.h> 7 #include <linux/netdevice.h> 8 #include <linux/etherdevice.h> 9 #include <linux/ethtool.h> 10 #include <linux/usb.h> 11 #include <linux/crc32.h> 12 #include <linux/signal.h> 13 #include <linux/slab.h> 14 #include <linux/if_vlan.h> 15 #include <linux/uaccess.h> 16 #include <linux/list.h> 17 #include <linux/ip.h> 18 #include <linux/ipv6.h> 19 #include <linux/mdio.h> 20 #include <linux/phy.h> 21 #include <net/ip6_checksum.h> 22 #include <linux/interrupt.h> 23 #include <linux/irqdomain.h> 24 #include <linux/irq.h> 25 #include <linux/irqchip/chained_irq.h> 26 #include <linux/microchipphy.h> 27 #include <linux/phy_fixed.h> 28 #include <linux/of_mdio.h> 29 #include <linux/of_net.h> 30 #include "lan78xx.h" 31 32 #define DRIVER_AUTHOR "WOOJUNG HUH <woojung.huh@microchip.com>" 33 #define DRIVER_DESC "LAN78XX USB 3.0 Gigabit Ethernet Devices" 34 #define DRIVER_NAME "lan78xx" 35 36 #define TX_TIMEOUT_JIFFIES (5 * HZ) 37 #define THROTTLE_JIFFIES (HZ / 8) 38 #define UNLINK_TIMEOUT_MS 3 39 40 #define RX_MAX_QUEUE_MEMORY (60 * 1518) 41 42 #define SS_USB_PKT_SIZE (1024) 43 #define HS_USB_PKT_SIZE (512) 44 #define FS_USB_PKT_SIZE (64) 45 46 #define MAX_RX_FIFO_SIZE (12 * 1024) 47 #define MAX_TX_FIFO_SIZE (12 * 1024) 48 #define DEFAULT_BURST_CAP_SIZE (MAX_TX_FIFO_SIZE) 49 #define DEFAULT_BULK_IN_DELAY (0x0800) 50 #define MAX_SINGLE_PACKET_SIZE (9000) 51 #define DEFAULT_TX_CSUM_ENABLE (true) 52 #define DEFAULT_RX_CSUM_ENABLE (true) 53 #define DEFAULT_TSO_CSUM_ENABLE (true) 54 #define DEFAULT_VLAN_FILTER_ENABLE (true) 55 #define DEFAULT_VLAN_RX_OFFLOAD (true) 56 #define TX_OVERHEAD (8) 57 #define RXW_PADDING 2 58 59 #define LAN78XX_USB_VENDOR_ID (0x0424) 60 #define LAN7800_USB_PRODUCT_ID (0x7800) 61 #define LAN7850_USB_PRODUCT_ID (0x7850) 62 #define LAN7801_USB_PRODUCT_ID (0x7801) 63 #define LAN78XX_EEPROM_MAGIC (0x78A5) 64 #define LAN78XX_OTP_MAGIC (0x78F3) 65 66 #define MII_READ 1 67 #define MII_WRITE 0 68 69 #define EEPROM_INDICATOR (0xA5) 70 #define EEPROM_MAC_OFFSET (0x01) 71 #define MAX_EEPROM_SIZE 512 72 #define OTP_INDICATOR_1 (0xF3) 73 #define OTP_INDICATOR_2 (0xF7) 74 75 #define WAKE_ALL (WAKE_PHY | WAKE_UCAST | \ 76 WAKE_MCAST | WAKE_BCAST | \ 77 WAKE_ARP | WAKE_MAGIC) 78 79 /* USB related defines */ 80 #define BULK_IN_PIPE 1 81 #define BULK_OUT_PIPE 2 82 83 /* default autosuspend delay (mSec)*/ 84 #define DEFAULT_AUTOSUSPEND_DELAY (10 * 1000) 85 86 /* statistic update interval (mSec) */ 87 #define STAT_UPDATE_TIMER (1 * 1000) 88 89 /* defines interrupts from interrupt EP */ 90 #define MAX_INT_EP (32) 91 #define INT_EP_INTEP (31) 92 #define INT_EP_OTP_WR_DONE (28) 93 #define INT_EP_EEE_TX_LPI_START (26) 94 #define INT_EP_EEE_TX_LPI_STOP (25) 95 #define INT_EP_EEE_RX_LPI (24) 96 #define INT_EP_MAC_RESET_TIMEOUT (23) 97 #define INT_EP_RDFO (22) 98 #define INT_EP_TXE (21) 99 #define INT_EP_USB_STATUS (20) 100 #define INT_EP_TX_DIS (19) 101 #define INT_EP_RX_DIS (18) 102 #define INT_EP_PHY (17) 103 #define INT_EP_DP (16) 104 #define INT_EP_MAC_ERR (15) 105 #define INT_EP_TDFU (14) 106 #define INT_EP_TDFO (13) 107 #define INT_EP_UTX (12) 108 #define INT_EP_GPIO_11 (11) 109 #define INT_EP_GPIO_10 (10) 110 #define INT_EP_GPIO_9 (9) 111 #define INT_EP_GPIO_8 (8) 112 #define INT_EP_GPIO_7 (7) 113 #define INT_EP_GPIO_6 (6) 114 #define INT_EP_GPIO_5 (5) 115 #define INT_EP_GPIO_4 (4) 116 #define INT_EP_GPIO_3 (3) 117 #define INT_EP_GPIO_2 (2) 118 #define INT_EP_GPIO_1 (1) 119 #define INT_EP_GPIO_0 (0) 120 121 static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = { 122 "RX FCS Errors", 123 "RX Alignment Errors", 124 "Rx Fragment Errors", 125 "RX Jabber Errors", 126 "RX Undersize Frame Errors", 127 "RX Oversize Frame Errors", 128 "RX Dropped Frames", 129 "RX Unicast Byte Count", 130 "RX Broadcast Byte Count", 131 "RX Multicast Byte Count", 132 "RX Unicast Frames", 133 "RX Broadcast Frames", 134 "RX Multicast Frames", 135 "RX Pause Frames", 136 "RX 64 Byte Frames", 137 "RX 65 - 127 Byte Frames", 138 "RX 128 - 255 Byte Frames", 139 "RX 256 - 511 Bytes Frames", 140 "RX 512 - 1023 Byte Frames", 141 "RX 1024 - 1518 Byte Frames", 142 "RX Greater 1518 Byte Frames", 143 "EEE RX LPI Transitions", 144 "EEE RX LPI Time", 145 "TX FCS Errors", 146 "TX Excess Deferral Errors", 147 "TX Carrier Errors", 148 "TX Bad Byte Count", 149 "TX Single Collisions", 150 "TX Multiple Collisions", 151 "TX Excessive Collision", 152 "TX Late Collisions", 153 "TX Unicast Byte Count", 154 "TX Broadcast Byte Count", 155 "TX Multicast Byte Count", 156 "TX Unicast Frames", 157 "TX Broadcast Frames", 158 "TX Multicast Frames", 159 "TX Pause Frames", 160 "TX 64 Byte Frames", 161 "TX 65 - 127 Byte Frames", 162 "TX 128 - 255 Byte Frames", 163 "TX 256 - 511 Bytes Frames", 164 "TX 512 - 1023 Byte Frames", 165 "TX 1024 - 1518 Byte Frames", 166 "TX Greater 1518 Byte Frames", 167 "EEE TX LPI Transitions", 168 "EEE TX LPI Time", 169 }; 170 171 struct lan78xx_statstage { 172 u32 rx_fcs_errors; 173 u32 rx_alignment_errors; 174 u32 rx_fragment_errors; 175 u32 rx_jabber_errors; 176 u32 rx_undersize_frame_errors; 177 u32 rx_oversize_frame_errors; 178 u32 rx_dropped_frames; 179 u32 rx_unicast_byte_count; 180 u32 rx_broadcast_byte_count; 181 u32 rx_multicast_byte_count; 182 u32 rx_unicast_frames; 183 u32 rx_broadcast_frames; 184 u32 rx_multicast_frames; 185 u32 rx_pause_frames; 186 u32 rx_64_byte_frames; 187 u32 rx_65_127_byte_frames; 188 u32 rx_128_255_byte_frames; 189 u32 rx_256_511_bytes_frames; 190 u32 rx_512_1023_byte_frames; 191 u32 rx_1024_1518_byte_frames; 192 u32 rx_greater_1518_byte_frames; 193 u32 eee_rx_lpi_transitions; 194 u32 eee_rx_lpi_time; 195 u32 tx_fcs_errors; 196 u32 tx_excess_deferral_errors; 197 u32 tx_carrier_errors; 198 u32 tx_bad_byte_count; 199 u32 tx_single_collisions; 200 u32 tx_multiple_collisions; 201 u32 tx_excessive_collision; 202 u32 tx_late_collisions; 203 u32 tx_unicast_byte_count; 204 u32 tx_broadcast_byte_count; 205 u32 tx_multicast_byte_count; 206 u32 tx_unicast_frames; 207 u32 tx_broadcast_frames; 208 u32 tx_multicast_frames; 209 u32 tx_pause_frames; 210 u32 tx_64_byte_frames; 211 u32 tx_65_127_byte_frames; 212 u32 tx_128_255_byte_frames; 213 u32 tx_256_511_bytes_frames; 214 u32 tx_512_1023_byte_frames; 215 u32 tx_1024_1518_byte_frames; 216 u32 tx_greater_1518_byte_frames; 217 u32 eee_tx_lpi_transitions; 218 u32 eee_tx_lpi_time; 219 }; 220 221 struct lan78xx_statstage64 { 222 u64 rx_fcs_errors; 223 u64 rx_alignment_errors; 224 u64 rx_fragment_errors; 225 u64 rx_jabber_errors; 226 u64 rx_undersize_frame_errors; 227 u64 rx_oversize_frame_errors; 228 u64 rx_dropped_frames; 229 u64 rx_unicast_byte_count; 230 u64 rx_broadcast_byte_count; 231 u64 rx_multicast_byte_count; 232 u64 rx_unicast_frames; 233 u64 rx_broadcast_frames; 234 u64 rx_multicast_frames; 235 u64 rx_pause_frames; 236 u64 rx_64_byte_frames; 237 u64 rx_65_127_byte_frames; 238 u64 rx_128_255_byte_frames; 239 u64 rx_256_511_bytes_frames; 240 u64 rx_512_1023_byte_frames; 241 u64 rx_1024_1518_byte_frames; 242 u64 rx_greater_1518_byte_frames; 243 u64 eee_rx_lpi_transitions; 244 u64 eee_rx_lpi_time; 245 u64 tx_fcs_errors; 246 u64 tx_excess_deferral_errors; 247 u64 tx_carrier_errors; 248 u64 tx_bad_byte_count; 249 u64 tx_single_collisions; 250 u64 tx_multiple_collisions; 251 u64 tx_excessive_collision; 252 u64 tx_late_collisions; 253 u64 tx_unicast_byte_count; 254 u64 tx_broadcast_byte_count; 255 u64 tx_multicast_byte_count; 256 u64 tx_unicast_frames; 257 u64 tx_broadcast_frames; 258 u64 tx_multicast_frames; 259 u64 tx_pause_frames; 260 u64 tx_64_byte_frames; 261 u64 tx_65_127_byte_frames; 262 u64 tx_128_255_byte_frames; 263 u64 tx_256_511_bytes_frames; 264 u64 tx_512_1023_byte_frames; 265 u64 tx_1024_1518_byte_frames; 266 u64 tx_greater_1518_byte_frames; 267 u64 eee_tx_lpi_transitions; 268 u64 eee_tx_lpi_time; 269 }; 270 271 static u32 lan78xx_regs[] = { 272 ID_REV, 273 INT_STS, 274 HW_CFG, 275 PMT_CTL, 276 E2P_CMD, 277 E2P_DATA, 278 USB_STATUS, 279 VLAN_TYPE, 280 MAC_CR, 281 MAC_RX, 282 MAC_TX, 283 FLOW, 284 ERR_STS, 285 MII_ACC, 286 MII_DATA, 287 EEE_TX_LPI_REQ_DLY, 288 EEE_TW_TX_SYS, 289 EEE_TX_LPI_REM_DLY, 290 WUCSR 291 }; 292 293 #define PHY_REG_SIZE (32 * sizeof(u32)) 294 295 struct lan78xx_net; 296 297 struct lan78xx_priv { 298 struct lan78xx_net *dev; 299 u32 rfe_ctl; 300 u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicat hash table */ 301 u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table */ 302 u32 vlan_table[DP_SEL_VHF_VLAN_LEN]; 303 struct mutex dataport_mutex; /* for dataport access */ 304 spinlock_t rfe_ctl_lock; /* for rfe register access */ 305 struct work_struct set_multicast; 306 struct work_struct set_vlan; 307 u32 wol; 308 }; 309 310 enum skb_state { 311 illegal = 0, 312 tx_start, 313 tx_done, 314 rx_start, 315 rx_done, 316 rx_cleanup, 317 unlink_start 318 }; 319 320 struct skb_data { /* skb->cb is one of these */ 321 struct urb *urb; 322 struct lan78xx_net *dev; 323 enum skb_state state; 324 size_t length; 325 int num_of_packet; 326 }; 327 328 struct usb_context { 329 struct usb_ctrlrequest req; 330 struct lan78xx_net *dev; 331 }; 332 333 #define EVENT_TX_HALT 0 334 #define EVENT_RX_HALT 1 335 #define EVENT_RX_MEMORY 2 336 #define EVENT_STS_SPLIT 3 337 #define EVENT_LINK_RESET 4 338 #define EVENT_RX_PAUSED 5 339 #define EVENT_DEV_WAKING 6 340 #define EVENT_DEV_ASLEEP 7 341 #define EVENT_DEV_OPEN 8 342 #define EVENT_STAT_UPDATE 9 343 344 struct statstage { 345 struct mutex access_lock; /* for stats access */ 346 struct lan78xx_statstage saved; 347 struct lan78xx_statstage rollover_count; 348 struct lan78xx_statstage rollover_max; 349 struct lan78xx_statstage64 curr_stat; 350 }; 351 352 struct irq_domain_data { 353 struct irq_domain *irqdomain; 354 unsigned int phyirq; 355 struct irq_chip *irqchip; 356 irq_flow_handler_t irq_handler; 357 u32 irqenable; 358 struct mutex irq_lock; /* for irq bus access */ 359 }; 360 361 struct lan78xx_net { 362 struct net_device *net; 363 struct usb_device *udev; 364 struct usb_interface *intf; 365 void *driver_priv; 366 367 int rx_qlen; 368 int tx_qlen; 369 struct sk_buff_head rxq; 370 struct sk_buff_head txq; 371 struct sk_buff_head done; 372 struct sk_buff_head rxq_pause; 373 struct sk_buff_head txq_pend; 374 375 struct tasklet_struct bh; 376 struct delayed_work wq; 377 378 struct usb_host_endpoint *ep_blkin; 379 struct usb_host_endpoint *ep_blkout; 380 struct usb_host_endpoint *ep_intr; 381 382 int msg_enable; 383 384 struct urb *urb_intr; 385 struct usb_anchor deferred; 386 387 struct mutex phy_mutex; /* for phy access */ 388 unsigned pipe_in, pipe_out, pipe_intr; 389 390 u32 hard_mtu; /* count any extra framing */ 391 size_t rx_urb_size; /* size for rx urbs */ 392 393 unsigned long flags; 394 395 wait_queue_head_t *wait; 396 unsigned char suspend_count; 397 398 unsigned maxpacket; 399 struct timer_list delay; 400 struct timer_list stat_monitor; 401 402 unsigned long data[5]; 403 404 int link_on; 405 u8 mdix_ctrl; 406 407 u32 chipid; 408 u32 chiprev; 409 struct mii_bus *mdiobus; 410 phy_interface_t interface; 411 412 int fc_autoneg; 413 u8 fc_request_control; 414 415 int delta; 416 struct statstage stats; 417 418 struct irq_domain_data domain_data; 419 }; 420 421 /* define external phy id */ 422 #define PHY_LAN8835 (0x0007C130) 423 #define PHY_KSZ9031RNX (0x00221620) 424 425 /* use ethtool to change the level for any given device */ 426 static int msg_level = -1; 427 module_param(msg_level, int, 0); 428 MODULE_PARM_DESC(msg_level, "Override default message level"); 429 430 static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data) 431 { 432 u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL); 433 int ret; 434 435 if (!buf) 436 return -ENOMEM; 437 438 ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0), 439 USB_VENDOR_REQUEST_READ_REGISTER, 440 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 441 0, index, buf, 4, USB_CTRL_GET_TIMEOUT); 442 if (likely(ret >= 0)) { 443 le32_to_cpus(buf); 444 *data = *buf; 445 } else { 446 netdev_warn(dev->net, 447 "Failed to read register index 0x%08x. ret = %d", 448 index, ret); 449 } 450 451 kfree(buf); 452 453 return ret; 454 } 455 456 static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data) 457 { 458 u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL); 459 int ret; 460 461 if (!buf) 462 return -ENOMEM; 463 464 *buf = data; 465 cpu_to_le32s(buf); 466 467 ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0), 468 USB_VENDOR_REQUEST_WRITE_REGISTER, 469 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 470 0, index, buf, 4, USB_CTRL_SET_TIMEOUT); 471 if (unlikely(ret < 0)) { 472 netdev_warn(dev->net, 473 "Failed to write register index 0x%08x. ret = %d", 474 index, ret); 475 } 476 477 kfree(buf); 478 479 return ret; 480 } 481 482 static int lan78xx_read_stats(struct lan78xx_net *dev, 483 struct lan78xx_statstage *data) 484 { 485 int ret = 0; 486 int i; 487 struct lan78xx_statstage *stats; 488 u32 *src; 489 u32 *dst; 490 491 stats = kmalloc(sizeof(*stats), GFP_KERNEL); 492 if (!stats) 493 return -ENOMEM; 494 495 ret = usb_control_msg(dev->udev, 496 usb_rcvctrlpipe(dev->udev, 0), 497 USB_VENDOR_REQUEST_GET_STATS, 498 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 499 0, 500 0, 501 (void *)stats, 502 sizeof(*stats), 503 USB_CTRL_SET_TIMEOUT); 504 if (likely(ret >= 0)) { 505 src = (u32 *)stats; 506 dst = (u32 *)data; 507 for (i = 0; i < sizeof(*stats)/sizeof(u32); i++) { 508 le32_to_cpus(&src[i]); 509 dst[i] = src[i]; 510 } 511 } else { 512 netdev_warn(dev->net, 513 "Failed to read stat ret = 0x%x", ret); 514 } 515 516 kfree(stats); 517 518 return ret; 519 } 520 521 #define check_counter_rollover(struct1, dev_stats, member) { \ 522 if (struct1->member < dev_stats.saved.member) \ 523 dev_stats.rollover_count.member++; \ 524 } 525 526 static void lan78xx_check_stat_rollover(struct lan78xx_net *dev, 527 struct lan78xx_statstage *stats) 528 { 529 check_counter_rollover(stats, dev->stats, rx_fcs_errors); 530 check_counter_rollover(stats, dev->stats, rx_alignment_errors); 531 check_counter_rollover(stats, dev->stats, rx_fragment_errors); 532 check_counter_rollover(stats, dev->stats, rx_jabber_errors); 533 check_counter_rollover(stats, dev->stats, rx_undersize_frame_errors); 534 check_counter_rollover(stats, dev->stats, rx_oversize_frame_errors); 535 check_counter_rollover(stats, dev->stats, rx_dropped_frames); 536 check_counter_rollover(stats, dev->stats, rx_unicast_byte_count); 537 check_counter_rollover(stats, dev->stats, rx_broadcast_byte_count); 538 check_counter_rollover(stats, dev->stats, rx_multicast_byte_count); 539 check_counter_rollover(stats, dev->stats, rx_unicast_frames); 540 check_counter_rollover(stats, dev->stats, rx_broadcast_frames); 541 check_counter_rollover(stats, dev->stats, rx_multicast_frames); 542 check_counter_rollover(stats, dev->stats, rx_pause_frames); 543 check_counter_rollover(stats, dev->stats, rx_64_byte_frames); 544 check_counter_rollover(stats, dev->stats, rx_65_127_byte_frames); 545 check_counter_rollover(stats, dev->stats, rx_128_255_byte_frames); 546 check_counter_rollover(stats, dev->stats, rx_256_511_bytes_frames); 547 check_counter_rollover(stats, dev->stats, rx_512_1023_byte_frames); 548 check_counter_rollover(stats, dev->stats, rx_1024_1518_byte_frames); 549 check_counter_rollover(stats, dev->stats, rx_greater_1518_byte_frames); 550 check_counter_rollover(stats, dev->stats, eee_rx_lpi_transitions); 551 check_counter_rollover(stats, dev->stats, eee_rx_lpi_time); 552 check_counter_rollover(stats, dev->stats, tx_fcs_errors); 553 check_counter_rollover(stats, dev->stats, tx_excess_deferral_errors); 554 check_counter_rollover(stats, dev->stats, tx_carrier_errors); 555 check_counter_rollover(stats, dev->stats, tx_bad_byte_count); 556 check_counter_rollover(stats, dev->stats, tx_single_collisions); 557 check_counter_rollover(stats, dev->stats, tx_multiple_collisions); 558 check_counter_rollover(stats, dev->stats, tx_excessive_collision); 559 check_counter_rollover(stats, dev->stats, tx_late_collisions); 560 check_counter_rollover(stats, dev->stats, tx_unicast_byte_count); 561 check_counter_rollover(stats, dev->stats, tx_broadcast_byte_count); 562 check_counter_rollover(stats, dev->stats, tx_multicast_byte_count); 563 check_counter_rollover(stats, dev->stats, tx_unicast_frames); 564 check_counter_rollover(stats, dev->stats, tx_broadcast_frames); 565 check_counter_rollover(stats, dev->stats, tx_multicast_frames); 566 check_counter_rollover(stats, dev->stats, tx_pause_frames); 567 check_counter_rollover(stats, dev->stats, tx_64_byte_frames); 568 check_counter_rollover(stats, dev->stats, tx_65_127_byte_frames); 569 check_counter_rollover(stats, dev->stats, tx_128_255_byte_frames); 570 check_counter_rollover(stats, dev->stats, tx_256_511_bytes_frames); 571 check_counter_rollover(stats, dev->stats, tx_512_1023_byte_frames); 572 check_counter_rollover(stats, dev->stats, tx_1024_1518_byte_frames); 573 check_counter_rollover(stats, dev->stats, tx_greater_1518_byte_frames); 574 check_counter_rollover(stats, dev->stats, eee_tx_lpi_transitions); 575 check_counter_rollover(stats, dev->stats, eee_tx_lpi_time); 576 577 memcpy(&dev->stats.saved, stats, sizeof(struct lan78xx_statstage)); 578 } 579 580 static void lan78xx_update_stats(struct lan78xx_net *dev) 581 { 582 u32 *p, *count, *max; 583 u64 *data; 584 int i; 585 struct lan78xx_statstage lan78xx_stats; 586 587 if (usb_autopm_get_interface(dev->intf) < 0) 588 return; 589 590 p = (u32 *)&lan78xx_stats; 591 count = (u32 *)&dev->stats.rollover_count; 592 max = (u32 *)&dev->stats.rollover_max; 593 data = (u64 *)&dev->stats.curr_stat; 594 595 mutex_lock(&dev->stats.access_lock); 596 597 if (lan78xx_read_stats(dev, &lan78xx_stats) > 0) 598 lan78xx_check_stat_rollover(dev, &lan78xx_stats); 599 600 for (i = 0; i < (sizeof(lan78xx_stats) / (sizeof(u32))); i++) 601 data[i] = (u64)p[i] + ((u64)count[i] * ((u64)max[i] + 1)); 602 603 mutex_unlock(&dev->stats.access_lock); 604 605 usb_autopm_put_interface(dev->intf); 606 } 607 608 /* Loop until the read is completed with timeout called with phy_mutex held */ 609 static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev) 610 { 611 unsigned long start_time = jiffies; 612 u32 val; 613 int ret; 614 615 do { 616 ret = lan78xx_read_reg(dev, MII_ACC, &val); 617 if (unlikely(ret < 0)) 618 return -EIO; 619 620 if (!(val & MII_ACC_MII_BUSY_)) 621 return 0; 622 } while (!time_after(jiffies, start_time + HZ)); 623 624 return -EIO; 625 } 626 627 static inline u32 mii_access(int id, int index, int read) 628 { 629 u32 ret; 630 631 ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_; 632 ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_; 633 if (read) 634 ret |= MII_ACC_MII_READ_; 635 else 636 ret |= MII_ACC_MII_WRITE_; 637 ret |= MII_ACC_MII_BUSY_; 638 639 return ret; 640 } 641 642 static int lan78xx_wait_eeprom(struct lan78xx_net *dev) 643 { 644 unsigned long start_time = jiffies; 645 u32 val; 646 int ret; 647 648 do { 649 ret = lan78xx_read_reg(dev, E2P_CMD, &val); 650 if (unlikely(ret < 0)) 651 return -EIO; 652 653 if (!(val & E2P_CMD_EPC_BUSY_) || 654 (val & E2P_CMD_EPC_TIMEOUT_)) 655 break; 656 usleep_range(40, 100); 657 } while (!time_after(jiffies, start_time + HZ)); 658 659 if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) { 660 netdev_warn(dev->net, "EEPROM read operation timeout"); 661 return -EIO; 662 } 663 664 return 0; 665 } 666 667 static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev) 668 { 669 unsigned long start_time = jiffies; 670 u32 val; 671 int ret; 672 673 do { 674 ret = lan78xx_read_reg(dev, E2P_CMD, &val); 675 if (unlikely(ret < 0)) 676 return -EIO; 677 678 if (!(val & E2P_CMD_EPC_BUSY_)) 679 return 0; 680 681 usleep_range(40, 100); 682 } while (!time_after(jiffies, start_time + HZ)); 683 684 netdev_warn(dev->net, "EEPROM is busy"); 685 return -EIO; 686 } 687 688 static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset, 689 u32 length, u8 *data) 690 { 691 u32 val; 692 u32 saved; 693 int i, ret; 694 int retval; 695 696 /* depends on chip, some EEPROM pins are muxed with LED function. 697 * disable & restore LED function to access EEPROM. 698 */ 699 ret = lan78xx_read_reg(dev, HW_CFG, &val); 700 saved = val; 701 if (dev->chipid == ID_REV_CHIP_ID_7800_) { 702 val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_); 703 ret = lan78xx_write_reg(dev, HW_CFG, val); 704 } 705 706 retval = lan78xx_eeprom_confirm_not_busy(dev); 707 if (retval) 708 return retval; 709 710 for (i = 0; i < length; i++) { 711 val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_; 712 val |= (offset & E2P_CMD_EPC_ADDR_MASK_); 713 ret = lan78xx_write_reg(dev, E2P_CMD, val); 714 if (unlikely(ret < 0)) { 715 retval = -EIO; 716 goto exit; 717 } 718 719 retval = lan78xx_wait_eeprom(dev); 720 if (retval < 0) 721 goto exit; 722 723 ret = lan78xx_read_reg(dev, E2P_DATA, &val); 724 if (unlikely(ret < 0)) { 725 retval = -EIO; 726 goto exit; 727 } 728 729 data[i] = val & 0xFF; 730 offset++; 731 } 732 733 retval = 0; 734 exit: 735 if (dev->chipid == ID_REV_CHIP_ID_7800_) 736 ret = lan78xx_write_reg(dev, HW_CFG, saved); 737 738 return retval; 739 } 740 741 static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset, 742 u32 length, u8 *data) 743 { 744 u8 sig; 745 int ret; 746 747 ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig); 748 if ((ret == 0) && (sig == EEPROM_INDICATOR)) 749 ret = lan78xx_read_raw_eeprom(dev, offset, length, data); 750 else 751 ret = -EINVAL; 752 753 return ret; 754 } 755 756 static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset, 757 u32 length, u8 *data) 758 { 759 u32 val; 760 u32 saved; 761 int i, ret; 762 int retval; 763 764 /* depends on chip, some EEPROM pins are muxed with LED function. 765 * disable & restore LED function to access EEPROM. 766 */ 767 ret = lan78xx_read_reg(dev, HW_CFG, &val); 768 saved = val; 769 if (dev->chipid == ID_REV_CHIP_ID_7800_) { 770 val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_); 771 ret = lan78xx_write_reg(dev, HW_CFG, val); 772 } 773 774 retval = lan78xx_eeprom_confirm_not_busy(dev); 775 if (retval) 776 goto exit; 777 778 /* Issue write/erase enable command */ 779 val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_; 780 ret = lan78xx_write_reg(dev, E2P_CMD, val); 781 if (unlikely(ret < 0)) { 782 retval = -EIO; 783 goto exit; 784 } 785 786 retval = lan78xx_wait_eeprom(dev); 787 if (retval < 0) 788 goto exit; 789 790 for (i = 0; i < length; i++) { 791 /* Fill data register */ 792 val = data[i]; 793 ret = lan78xx_write_reg(dev, E2P_DATA, val); 794 if (ret < 0) { 795 retval = -EIO; 796 goto exit; 797 } 798 799 /* Send "write" command */ 800 val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_; 801 val |= (offset & E2P_CMD_EPC_ADDR_MASK_); 802 ret = lan78xx_write_reg(dev, E2P_CMD, val); 803 if (ret < 0) { 804 retval = -EIO; 805 goto exit; 806 } 807 808 retval = lan78xx_wait_eeprom(dev); 809 if (retval < 0) 810 goto exit; 811 812 offset++; 813 } 814 815 retval = 0; 816 exit: 817 if (dev->chipid == ID_REV_CHIP_ID_7800_) 818 ret = lan78xx_write_reg(dev, HW_CFG, saved); 819 820 return retval; 821 } 822 823 static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset, 824 u32 length, u8 *data) 825 { 826 int i; 827 int ret; 828 u32 buf; 829 unsigned long timeout; 830 831 ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf); 832 833 if (buf & OTP_PWR_DN_PWRDN_N_) { 834 /* clear it and wait to be cleared */ 835 ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0); 836 837 timeout = jiffies + HZ; 838 do { 839 usleep_range(1, 10); 840 ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf); 841 if (time_after(jiffies, timeout)) { 842 netdev_warn(dev->net, 843 "timeout on OTP_PWR_DN"); 844 return -EIO; 845 } 846 } while (buf & OTP_PWR_DN_PWRDN_N_); 847 } 848 849 for (i = 0; i < length; i++) { 850 ret = lan78xx_write_reg(dev, OTP_ADDR1, 851 ((offset + i) >> 8) & OTP_ADDR1_15_11); 852 ret = lan78xx_write_reg(dev, OTP_ADDR2, 853 ((offset + i) & OTP_ADDR2_10_3)); 854 855 ret = lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_); 856 ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_); 857 858 timeout = jiffies + HZ; 859 do { 860 udelay(1); 861 ret = lan78xx_read_reg(dev, OTP_STATUS, &buf); 862 if (time_after(jiffies, timeout)) { 863 netdev_warn(dev->net, 864 "timeout on OTP_STATUS"); 865 return -EIO; 866 } 867 } while (buf & OTP_STATUS_BUSY_); 868 869 ret = lan78xx_read_reg(dev, OTP_RD_DATA, &buf); 870 871 data[i] = (u8)(buf & 0xFF); 872 } 873 874 return 0; 875 } 876 877 static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset, 878 u32 length, u8 *data) 879 { 880 int i; 881 int ret; 882 u32 buf; 883 unsigned long timeout; 884 885 ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf); 886 887 if (buf & OTP_PWR_DN_PWRDN_N_) { 888 /* clear it and wait to be cleared */ 889 ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0); 890 891 timeout = jiffies + HZ; 892 do { 893 udelay(1); 894 ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf); 895 if (time_after(jiffies, timeout)) { 896 netdev_warn(dev->net, 897 "timeout on OTP_PWR_DN completion"); 898 return -EIO; 899 } 900 } while (buf & OTP_PWR_DN_PWRDN_N_); 901 } 902 903 /* set to BYTE program mode */ 904 ret = lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_); 905 906 for (i = 0; i < length; i++) { 907 ret = lan78xx_write_reg(dev, OTP_ADDR1, 908 ((offset + i) >> 8) & OTP_ADDR1_15_11); 909 ret = lan78xx_write_reg(dev, OTP_ADDR2, 910 ((offset + i) & OTP_ADDR2_10_3)); 911 ret = lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]); 912 ret = lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_); 913 ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_); 914 915 timeout = jiffies + HZ; 916 do { 917 udelay(1); 918 ret = lan78xx_read_reg(dev, OTP_STATUS, &buf); 919 if (time_after(jiffies, timeout)) { 920 netdev_warn(dev->net, 921 "Timeout on OTP_STATUS completion"); 922 return -EIO; 923 } 924 } while (buf & OTP_STATUS_BUSY_); 925 } 926 927 return 0; 928 } 929 930 static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset, 931 u32 length, u8 *data) 932 { 933 u8 sig; 934 int ret; 935 936 ret = lan78xx_read_raw_otp(dev, 0, 1, &sig); 937 938 if (ret == 0) { 939 if (sig == OTP_INDICATOR_1) 940 offset = offset; 941 else if (sig == OTP_INDICATOR_2) 942 offset += 0x100; 943 else 944 ret = -EINVAL; 945 if (!ret) 946 ret = lan78xx_read_raw_otp(dev, offset, length, data); 947 } 948 949 return ret; 950 } 951 952 static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev) 953 { 954 int i, ret; 955 956 for (i = 0; i < 100; i++) { 957 u32 dp_sel; 958 959 ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel); 960 if (unlikely(ret < 0)) 961 return -EIO; 962 963 if (dp_sel & DP_SEL_DPRDY_) 964 return 0; 965 966 usleep_range(40, 100); 967 } 968 969 netdev_warn(dev->net, "lan78xx_dataport_wait_not_busy timed out"); 970 971 return -EIO; 972 } 973 974 static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select, 975 u32 addr, u32 length, u32 *buf) 976 { 977 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]); 978 u32 dp_sel; 979 int i, ret; 980 981 if (usb_autopm_get_interface(dev->intf) < 0) 982 return 0; 983 984 mutex_lock(&pdata->dataport_mutex); 985 986 ret = lan78xx_dataport_wait_not_busy(dev); 987 if (ret < 0) 988 goto done; 989 990 ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel); 991 992 dp_sel &= ~DP_SEL_RSEL_MASK_; 993 dp_sel |= ram_select; 994 ret = lan78xx_write_reg(dev, DP_SEL, dp_sel); 995 996 for (i = 0; i < length; i++) { 997 ret = lan78xx_write_reg(dev, DP_ADDR, addr + i); 998 999 ret = lan78xx_write_reg(dev, DP_DATA, buf[i]); 1000 1001 ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_); 1002 1003 ret = lan78xx_dataport_wait_not_busy(dev); 1004 if (ret < 0) 1005 goto done; 1006 } 1007 1008 done: 1009 mutex_unlock(&pdata->dataport_mutex); 1010 usb_autopm_put_interface(dev->intf); 1011 1012 return ret; 1013 } 1014 1015 static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata, 1016 int index, u8 addr[ETH_ALEN]) 1017 { 1018 u32 temp; 1019 1020 if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) { 1021 temp = addr[3]; 1022 temp = addr[2] | (temp << 8); 1023 temp = addr[1] | (temp << 8); 1024 temp = addr[0] | (temp << 8); 1025 pdata->pfilter_table[index][1] = temp; 1026 temp = addr[5]; 1027 temp = addr[4] | (temp << 8); 1028 temp |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_; 1029 pdata->pfilter_table[index][0] = temp; 1030 } 1031 } 1032 1033 /* returns hash bit number for given MAC address */ 1034 static inline u32 lan78xx_hash(char addr[ETH_ALEN]) 1035 { 1036 return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff; 1037 } 1038 1039 static void lan78xx_deferred_multicast_write(struct work_struct *param) 1040 { 1041 struct lan78xx_priv *pdata = 1042 container_of(param, struct lan78xx_priv, set_multicast); 1043 struct lan78xx_net *dev = pdata->dev; 1044 int i; 1045 int ret; 1046 1047 netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n", 1048 pdata->rfe_ctl); 1049 1050 lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, DP_SEL_VHF_VLAN_LEN, 1051 DP_SEL_VHF_HASH_LEN, pdata->mchash_table); 1052 1053 for (i = 1; i < NUM_OF_MAF; i++) { 1054 ret = lan78xx_write_reg(dev, MAF_HI(i), 0); 1055 ret = lan78xx_write_reg(dev, MAF_LO(i), 1056 pdata->pfilter_table[i][1]); 1057 ret = lan78xx_write_reg(dev, MAF_HI(i), 1058 pdata->pfilter_table[i][0]); 1059 } 1060 1061 ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl); 1062 } 1063 1064 static void lan78xx_set_multicast(struct net_device *netdev) 1065 { 1066 struct lan78xx_net *dev = netdev_priv(netdev); 1067 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]); 1068 unsigned long flags; 1069 int i; 1070 1071 spin_lock_irqsave(&pdata->rfe_ctl_lock, flags); 1072 1073 pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ | 1074 RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_); 1075 1076 for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++) 1077 pdata->mchash_table[i] = 0; 1078 /* pfilter_table[0] has own HW address */ 1079 for (i = 1; i < NUM_OF_MAF; i++) { 1080 pdata->pfilter_table[i][0] = 1081 pdata->pfilter_table[i][1] = 0; 1082 } 1083 1084 pdata->rfe_ctl |= RFE_CTL_BCAST_EN_; 1085 1086 if (dev->net->flags & IFF_PROMISC) { 1087 netif_dbg(dev, drv, dev->net, "promiscuous mode enabled"); 1088 pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_; 1089 } else { 1090 if (dev->net->flags & IFF_ALLMULTI) { 1091 netif_dbg(dev, drv, dev->net, 1092 "receive all multicast enabled"); 1093 pdata->rfe_ctl |= RFE_CTL_MCAST_EN_; 1094 } 1095 } 1096 1097 if (netdev_mc_count(dev->net)) { 1098 struct netdev_hw_addr *ha; 1099 int i; 1100 1101 netif_dbg(dev, drv, dev->net, "receive multicast hash filter"); 1102 1103 pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_; 1104 1105 i = 1; 1106 netdev_for_each_mc_addr(ha, netdev) { 1107 /* set first 32 into Perfect Filter */ 1108 if (i < 33) { 1109 lan78xx_set_addr_filter(pdata, i, ha->addr); 1110 } else { 1111 u32 bitnum = lan78xx_hash(ha->addr); 1112 1113 pdata->mchash_table[bitnum / 32] |= 1114 (1 << (bitnum % 32)); 1115 pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_; 1116 } 1117 i++; 1118 } 1119 } 1120 1121 spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags); 1122 1123 /* defer register writes to a sleepable context */ 1124 schedule_work(&pdata->set_multicast); 1125 } 1126 1127 static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex, 1128 u16 lcladv, u16 rmtadv) 1129 { 1130 u32 flow = 0, fct_flow = 0; 1131 int ret; 1132 u8 cap; 1133 1134 if (dev->fc_autoneg) 1135 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv); 1136 else 1137 cap = dev->fc_request_control; 1138 1139 if (cap & FLOW_CTRL_TX) 1140 flow |= (FLOW_CR_TX_FCEN_ | 0xFFFF); 1141 1142 if (cap & FLOW_CTRL_RX) 1143 flow |= FLOW_CR_RX_FCEN_; 1144 1145 if (dev->udev->speed == USB_SPEED_SUPER) 1146 fct_flow = 0x817; 1147 else if (dev->udev->speed == USB_SPEED_HIGH) 1148 fct_flow = 0x211; 1149 1150 netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s", 1151 (cap & FLOW_CTRL_RX ? "enabled" : "disabled"), 1152 (cap & FLOW_CTRL_TX ? "enabled" : "disabled")); 1153 1154 ret = lan78xx_write_reg(dev, FCT_FLOW, fct_flow); 1155 1156 /* threshold value should be set before enabling flow */ 1157 ret = lan78xx_write_reg(dev, FLOW, flow); 1158 1159 return 0; 1160 } 1161 1162 static int lan78xx_link_reset(struct lan78xx_net *dev) 1163 { 1164 struct phy_device *phydev = dev->net->phydev; 1165 struct ethtool_link_ksettings ecmd; 1166 int ladv, radv, ret; 1167 u32 buf; 1168 1169 /* clear LAN78xx interrupt status */ 1170 ret = lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_); 1171 if (unlikely(ret < 0)) 1172 return -EIO; 1173 1174 phy_read_status(phydev); 1175 1176 if (!phydev->link && dev->link_on) { 1177 dev->link_on = false; 1178 1179 /* reset MAC */ 1180 ret = lan78xx_read_reg(dev, MAC_CR, &buf); 1181 if (unlikely(ret < 0)) 1182 return -EIO; 1183 buf |= MAC_CR_RST_; 1184 ret = lan78xx_write_reg(dev, MAC_CR, buf); 1185 if (unlikely(ret < 0)) 1186 return -EIO; 1187 1188 del_timer(&dev->stat_monitor); 1189 } else if (phydev->link && !dev->link_on) { 1190 dev->link_on = true; 1191 1192 phy_ethtool_ksettings_get(phydev, &ecmd); 1193 1194 if (dev->udev->speed == USB_SPEED_SUPER) { 1195 if (ecmd.base.speed == 1000) { 1196 /* disable U2 */ 1197 ret = lan78xx_read_reg(dev, USB_CFG1, &buf); 1198 buf &= ~USB_CFG1_DEV_U2_INIT_EN_; 1199 ret = lan78xx_write_reg(dev, USB_CFG1, buf); 1200 /* enable U1 */ 1201 ret = lan78xx_read_reg(dev, USB_CFG1, &buf); 1202 buf |= USB_CFG1_DEV_U1_INIT_EN_; 1203 ret = lan78xx_write_reg(dev, USB_CFG1, buf); 1204 } else { 1205 /* enable U1 & U2 */ 1206 ret = lan78xx_read_reg(dev, USB_CFG1, &buf); 1207 buf |= USB_CFG1_DEV_U2_INIT_EN_; 1208 buf |= USB_CFG1_DEV_U1_INIT_EN_; 1209 ret = lan78xx_write_reg(dev, USB_CFG1, buf); 1210 } 1211 } 1212 1213 ladv = phy_read(phydev, MII_ADVERTISE); 1214 if (ladv < 0) 1215 return ladv; 1216 1217 radv = phy_read(phydev, MII_LPA); 1218 if (radv < 0) 1219 return radv; 1220 1221 netif_dbg(dev, link, dev->net, 1222 "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x", 1223 ecmd.base.speed, ecmd.base.duplex, ladv, radv); 1224 1225 ret = lan78xx_update_flowcontrol(dev, ecmd.base.duplex, ladv, 1226 radv); 1227 1228 if (!timer_pending(&dev->stat_monitor)) { 1229 dev->delta = 1; 1230 mod_timer(&dev->stat_monitor, 1231 jiffies + STAT_UPDATE_TIMER); 1232 } 1233 1234 tasklet_schedule(&dev->bh); 1235 } 1236 1237 return ret; 1238 } 1239 1240 /* some work can't be done in tasklets, so we use keventd 1241 * 1242 * NOTE: annoying asymmetry: if it's active, schedule_work() fails, 1243 * but tasklet_schedule() doesn't. hope the failure is rare. 1244 */ 1245 static void lan78xx_defer_kevent(struct lan78xx_net *dev, int work) 1246 { 1247 set_bit(work, &dev->flags); 1248 if (!schedule_delayed_work(&dev->wq, 0)) 1249 netdev_err(dev->net, "kevent %d may have been dropped\n", work); 1250 } 1251 1252 static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb) 1253 { 1254 u32 intdata; 1255 1256 if (urb->actual_length != 4) { 1257 netdev_warn(dev->net, 1258 "unexpected urb length %d", urb->actual_length); 1259 return; 1260 } 1261 1262 memcpy(&intdata, urb->transfer_buffer, 4); 1263 le32_to_cpus(&intdata); 1264 1265 if (intdata & INT_ENP_PHY_INT) { 1266 netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata); 1267 lan78xx_defer_kevent(dev, EVENT_LINK_RESET); 1268 1269 if (dev->domain_data.phyirq > 0) 1270 generic_handle_irq(dev->domain_data.phyirq); 1271 } else 1272 netdev_warn(dev->net, 1273 "unexpected interrupt: 0x%08x\n", intdata); 1274 } 1275 1276 static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev) 1277 { 1278 return MAX_EEPROM_SIZE; 1279 } 1280 1281 static int lan78xx_ethtool_get_eeprom(struct net_device *netdev, 1282 struct ethtool_eeprom *ee, u8 *data) 1283 { 1284 struct lan78xx_net *dev = netdev_priv(netdev); 1285 int ret; 1286 1287 ret = usb_autopm_get_interface(dev->intf); 1288 if (ret) 1289 return ret; 1290 1291 ee->magic = LAN78XX_EEPROM_MAGIC; 1292 1293 ret = lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data); 1294 1295 usb_autopm_put_interface(dev->intf); 1296 1297 return ret; 1298 } 1299 1300 static int lan78xx_ethtool_set_eeprom(struct net_device *netdev, 1301 struct ethtool_eeprom *ee, u8 *data) 1302 { 1303 struct lan78xx_net *dev = netdev_priv(netdev); 1304 int ret; 1305 1306 ret = usb_autopm_get_interface(dev->intf); 1307 if (ret) 1308 return ret; 1309 1310 /* Invalid EEPROM_INDICATOR at offset zero will result in a failure 1311 * to load data from EEPROM 1312 */ 1313 if (ee->magic == LAN78XX_EEPROM_MAGIC) 1314 ret = lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data); 1315 else if ((ee->magic == LAN78XX_OTP_MAGIC) && 1316 (ee->offset == 0) && 1317 (ee->len == 512) && 1318 (data[0] == OTP_INDICATOR_1)) 1319 ret = lan78xx_write_raw_otp(dev, ee->offset, ee->len, data); 1320 1321 usb_autopm_put_interface(dev->intf); 1322 1323 return ret; 1324 } 1325 1326 static void lan78xx_get_strings(struct net_device *netdev, u32 stringset, 1327 u8 *data) 1328 { 1329 if (stringset == ETH_SS_STATS) 1330 memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings)); 1331 } 1332 1333 static int lan78xx_get_sset_count(struct net_device *netdev, int sset) 1334 { 1335 if (sset == ETH_SS_STATS) 1336 return ARRAY_SIZE(lan78xx_gstrings); 1337 else 1338 return -EOPNOTSUPP; 1339 } 1340 1341 static void lan78xx_get_stats(struct net_device *netdev, 1342 struct ethtool_stats *stats, u64 *data) 1343 { 1344 struct lan78xx_net *dev = netdev_priv(netdev); 1345 1346 lan78xx_update_stats(dev); 1347 1348 mutex_lock(&dev->stats.access_lock); 1349 memcpy(data, &dev->stats.curr_stat, sizeof(dev->stats.curr_stat)); 1350 mutex_unlock(&dev->stats.access_lock); 1351 } 1352 1353 static void lan78xx_get_wol(struct net_device *netdev, 1354 struct ethtool_wolinfo *wol) 1355 { 1356 struct lan78xx_net *dev = netdev_priv(netdev); 1357 int ret; 1358 u32 buf; 1359 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]); 1360 1361 if (usb_autopm_get_interface(dev->intf) < 0) 1362 return; 1363 1364 ret = lan78xx_read_reg(dev, USB_CFG0, &buf); 1365 if (unlikely(ret < 0)) { 1366 wol->supported = 0; 1367 wol->wolopts = 0; 1368 } else { 1369 if (buf & USB_CFG_RMT_WKP_) { 1370 wol->supported = WAKE_ALL; 1371 wol->wolopts = pdata->wol; 1372 } else { 1373 wol->supported = 0; 1374 wol->wolopts = 0; 1375 } 1376 } 1377 1378 usb_autopm_put_interface(dev->intf); 1379 } 1380 1381 static int lan78xx_set_wol(struct net_device *netdev, 1382 struct ethtool_wolinfo *wol) 1383 { 1384 struct lan78xx_net *dev = netdev_priv(netdev); 1385 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]); 1386 int ret; 1387 1388 ret = usb_autopm_get_interface(dev->intf); 1389 if (ret < 0) 1390 return ret; 1391 1392 pdata->wol = 0; 1393 if (wol->wolopts & WAKE_UCAST) 1394 pdata->wol |= WAKE_UCAST; 1395 if (wol->wolopts & WAKE_MCAST) 1396 pdata->wol |= WAKE_MCAST; 1397 if (wol->wolopts & WAKE_BCAST) 1398 pdata->wol |= WAKE_BCAST; 1399 if (wol->wolopts & WAKE_MAGIC) 1400 pdata->wol |= WAKE_MAGIC; 1401 if (wol->wolopts & WAKE_PHY) 1402 pdata->wol |= WAKE_PHY; 1403 if (wol->wolopts & WAKE_ARP) 1404 pdata->wol |= WAKE_ARP; 1405 1406 device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts); 1407 1408 phy_ethtool_set_wol(netdev->phydev, wol); 1409 1410 usb_autopm_put_interface(dev->intf); 1411 1412 return ret; 1413 } 1414 1415 static int lan78xx_get_eee(struct net_device *net, struct ethtool_eee *edata) 1416 { 1417 struct lan78xx_net *dev = netdev_priv(net); 1418 struct phy_device *phydev = net->phydev; 1419 int ret; 1420 u32 buf; 1421 1422 ret = usb_autopm_get_interface(dev->intf); 1423 if (ret < 0) 1424 return ret; 1425 1426 ret = phy_ethtool_get_eee(phydev, edata); 1427 if (ret < 0) 1428 goto exit; 1429 1430 ret = lan78xx_read_reg(dev, MAC_CR, &buf); 1431 if (buf & MAC_CR_EEE_EN_) { 1432 edata->eee_enabled = true; 1433 edata->eee_active = !!(edata->advertised & 1434 edata->lp_advertised); 1435 edata->tx_lpi_enabled = true; 1436 /* EEE_TX_LPI_REQ_DLY & tx_lpi_timer are same uSec unit */ 1437 ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf); 1438 edata->tx_lpi_timer = buf; 1439 } else { 1440 edata->eee_enabled = false; 1441 edata->eee_active = false; 1442 edata->tx_lpi_enabled = false; 1443 edata->tx_lpi_timer = 0; 1444 } 1445 1446 ret = 0; 1447 exit: 1448 usb_autopm_put_interface(dev->intf); 1449 1450 return ret; 1451 } 1452 1453 static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata) 1454 { 1455 struct lan78xx_net *dev = netdev_priv(net); 1456 int ret; 1457 u32 buf; 1458 1459 ret = usb_autopm_get_interface(dev->intf); 1460 if (ret < 0) 1461 return ret; 1462 1463 if (edata->eee_enabled) { 1464 ret = lan78xx_read_reg(dev, MAC_CR, &buf); 1465 buf |= MAC_CR_EEE_EN_; 1466 ret = lan78xx_write_reg(dev, MAC_CR, buf); 1467 1468 phy_ethtool_set_eee(net->phydev, edata); 1469 1470 buf = (u32)edata->tx_lpi_timer; 1471 ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, buf); 1472 } else { 1473 ret = lan78xx_read_reg(dev, MAC_CR, &buf); 1474 buf &= ~MAC_CR_EEE_EN_; 1475 ret = lan78xx_write_reg(dev, MAC_CR, buf); 1476 } 1477 1478 usb_autopm_put_interface(dev->intf); 1479 1480 return 0; 1481 } 1482 1483 static u32 lan78xx_get_link(struct net_device *net) 1484 { 1485 phy_read_status(net->phydev); 1486 1487 return net->phydev->link; 1488 } 1489 1490 static void lan78xx_get_drvinfo(struct net_device *net, 1491 struct ethtool_drvinfo *info) 1492 { 1493 struct lan78xx_net *dev = netdev_priv(net); 1494 1495 strncpy(info->driver, DRIVER_NAME, sizeof(info->driver)); 1496 usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info)); 1497 } 1498 1499 static u32 lan78xx_get_msglevel(struct net_device *net) 1500 { 1501 struct lan78xx_net *dev = netdev_priv(net); 1502 1503 return dev->msg_enable; 1504 } 1505 1506 static void lan78xx_set_msglevel(struct net_device *net, u32 level) 1507 { 1508 struct lan78xx_net *dev = netdev_priv(net); 1509 1510 dev->msg_enable = level; 1511 } 1512 1513 static int lan78xx_get_link_ksettings(struct net_device *net, 1514 struct ethtool_link_ksettings *cmd) 1515 { 1516 struct lan78xx_net *dev = netdev_priv(net); 1517 struct phy_device *phydev = net->phydev; 1518 int ret; 1519 1520 ret = usb_autopm_get_interface(dev->intf); 1521 if (ret < 0) 1522 return ret; 1523 1524 phy_ethtool_ksettings_get(phydev, cmd); 1525 1526 usb_autopm_put_interface(dev->intf); 1527 1528 return ret; 1529 } 1530 1531 static int lan78xx_set_link_ksettings(struct net_device *net, 1532 const struct ethtool_link_ksettings *cmd) 1533 { 1534 struct lan78xx_net *dev = netdev_priv(net); 1535 struct phy_device *phydev = net->phydev; 1536 int ret = 0; 1537 int temp; 1538 1539 ret = usb_autopm_get_interface(dev->intf); 1540 if (ret < 0) 1541 return ret; 1542 1543 /* change speed & duplex */ 1544 ret = phy_ethtool_ksettings_set(phydev, cmd); 1545 1546 if (!cmd->base.autoneg) { 1547 /* force link down */ 1548 temp = phy_read(phydev, MII_BMCR); 1549 phy_write(phydev, MII_BMCR, temp | BMCR_LOOPBACK); 1550 mdelay(1); 1551 phy_write(phydev, MII_BMCR, temp); 1552 } 1553 1554 usb_autopm_put_interface(dev->intf); 1555 1556 return ret; 1557 } 1558 1559 static void lan78xx_get_pause(struct net_device *net, 1560 struct ethtool_pauseparam *pause) 1561 { 1562 struct lan78xx_net *dev = netdev_priv(net); 1563 struct phy_device *phydev = net->phydev; 1564 struct ethtool_link_ksettings ecmd; 1565 1566 phy_ethtool_ksettings_get(phydev, &ecmd); 1567 1568 pause->autoneg = dev->fc_autoneg; 1569 1570 if (dev->fc_request_control & FLOW_CTRL_TX) 1571 pause->tx_pause = 1; 1572 1573 if (dev->fc_request_control & FLOW_CTRL_RX) 1574 pause->rx_pause = 1; 1575 } 1576 1577 static int lan78xx_set_pause(struct net_device *net, 1578 struct ethtool_pauseparam *pause) 1579 { 1580 struct lan78xx_net *dev = netdev_priv(net); 1581 struct phy_device *phydev = net->phydev; 1582 struct ethtool_link_ksettings ecmd; 1583 int ret; 1584 1585 phy_ethtool_ksettings_get(phydev, &ecmd); 1586 1587 if (pause->autoneg && !ecmd.base.autoneg) { 1588 ret = -EINVAL; 1589 goto exit; 1590 } 1591 1592 dev->fc_request_control = 0; 1593 if (pause->rx_pause) 1594 dev->fc_request_control |= FLOW_CTRL_RX; 1595 1596 if (pause->tx_pause) 1597 dev->fc_request_control |= FLOW_CTRL_TX; 1598 1599 if (ecmd.base.autoneg) { 1600 u32 mii_adv; 1601 u32 advertising; 1602 1603 ethtool_convert_link_mode_to_legacy_u32( 1604 &advertising, ecmd.link_modes.advertising); 1605 1606 advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause); 1607 mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control); 1608 advertising |= mii_adv_to_ethtool_adv_t(mii_adv); 1609 1610 ethtool_convert_legacy_u32_to_link_mode( 1611 ecmd.link_modes.advertising, advertising); 1612 1613 phy_ethtool_ksettings_set(phydev, &ecmd); 1614 } 1615 1616 dev->fc_autoneg = pause->autoneg; 1617 1618 ret = 0; 1619 exit: 1620 return ret; 1621 } 1622 1623 static int lan78xx_get_regs_len(struct net_device *netdev) 1624 { 1625 if (!netdev->phydev) 1626 return (sizeof(lan78xx_regs)); 1627 else 1628 return (sizeof(lan78xx_regs) + PHY_REG_SIZE); 1629 } 1630 1631 static void 1632 lan78xx_get_regs(struct net_device *netdev, struct ethtool_regs *regs, 1633 void *buf) 1634 { 1635 u32 *data = buf; 1636 int i, j; 1637 struct lan78xx_net *dev = netdev_priv(netdev); 1638 1639 /* Read Device/MAC registers */ 1640 for (i = 0; i < ARRAY_SIZE(lan78xx_regs); i++) 1641 lan78xx_read_reg(dev, lan78xx_regs[i], &data[i]); 1642 1643 if (!netdev->phydev) 1644 return; 1645 1646 /* Read PHY registers */ 1647 for (j = 0; j < 32; i++, j++) 1648 data[i] = phy_read(netdev->phydev, j); 1649 } 1650 1651 static const struct ethtool_ops lan78xx_ethtool_ops = { 1652 .get_link = lan78xx_get_link, 1653 .nway_reset = phy_ethtool_nway_reset, 1654 .get_drvinfo = lan78xx_get_drvinfo, 1655 .get_msglevel = lan78xx_get_msglevel, 1656 .set_msglevel = lan78xx_set_msglevel, 1657 .get_eeprom_len = lan78xx_ethtool_get_eeprom_len, 1658 .get_eeprom = lan78xx_ethtool_get_eeprom, 1659 .set_eeprom = lan78xx_ethtool_set_eeprom, 1660 .get_ethtool_stats = lan78xx_get_stats, 1661 .get_sset_count = lan78xx_get_sset_count, 1662 .get_strings = lan78xx_get_strings, 1663 .get_wol = lan78xx_get_wol, 1664 .set_wol = lan78xx_set_wol, 1665 .get_eee = lan78xx_get_eee, 1666 .set_eee = lan78xx_set_eee, 1667 .get_pauseparam = lan78xx_get_pause, 1668 .set_pauseparam = lan78xx_set_pause, 1669 .get_link_ksettings = lan78xx_get_link_ksettings, 1670 .set_link_ksettings = lan78xx_set_link_ksettings, 1671 .get_regs_len = lan78xx_get_regs_len, 1672 .get_regs = lan78xx_get_regs, 1673 }; 1674 1675 static int lan78xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd) 1676 { 1677 if (!netif_running(netdev)) 1678 return -EINVAL; 1679 1680 return phy_mii_ioctl(netdev->phydev, rq, cmd); 1681 } 1682 1683 static void lan78xx_init_mac_address(struct lan78xx_net *dev) 1684 { 1685 u32 addr_lo, addr_hi; 1686 int ret; 1687 u8 addr[6]; 1688 1689 ret = lan78xx_read_reg(dev, RX_ADDRL, &addr_lo); 1690 ret = lan78xx_read_reg(dev, RX_ADDRH, &addr_hi); 1691 1692 addr[0] = addr_lo & 0xFF; 1693 addr[1] = (addr_lo >> 8) & 0xFF; 1694 addr[2] = (addr_lo >> 16) & 0xFF; 1695 addr[3] = (addr_lo >> 24) & 0xFF; 1696 addr[4] = addr_hi & 0xFF; 1697 addr[5] = (addr_hi >> 8) & 0xFF; 1698 1699 if (!is_valid_ether_addr(addr)) { 1700 if (!eth_platform_get_mac_address(&dev->udev->dev, addr)) { 1701 /* valid address present in Device Tree */ 1702 netif_dbg(dev, ifup, dev->net, 1703 "MAC address read from Device Tree"); 1704 } else if (((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET, 1705 ETH_ALEN, addr) == 0) || 1706 (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET, 1707 ETH_ALEN, addr) == 0)) && 1708 is_valid_ether_addr(addr)) { 1709 /* eeprom values are valid so use them */ 1710 netif_dbg(dev, ifup, dev->net, 1711 "MAC address read from EEPROM"); 1712 } else { 1713 /* generate random MAC */ 1714 eth_random_addr(addr); 1715 netif_dbg(dev, ifup, dev->net, 1716 "MAC address set to random addr"); 1717 } 1718 1719 addr_lo = addr[0] | (addr[1] << 8) | 1720 (addr[2] << 16) | (addr[3] << 24); 1721 addr_hi = addr[4] | (addr[5] << 8); 1722 1723 ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo); 1724 ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi); 1725 } 1726 1727 ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo); 1728 ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_); 1729 1730 ether_addr_copy(dev->net->dev_addr, addr); 1731 } 1732 1733 /* MDIO read and write wrappers for phylib */ 1734 static int lan78xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx) 1735 { 1736 struct lan78xx_net *dev = bus->priv; 1737 u32 val, addr; 1738 int ret; 1739 1740 ret = usb_autopm_get_interface(dev->intf); 1741 if (ret < 0) 1742 return ret; 1743 1744 mutex_lock(&dev->phy_mutex); 1745 1746 /* confirm MII not busy */ 1747 ret = lan78xx_phy_wait_not_busy(dev); 1748 if (ret < 0) 1749 goto done; 1750 1751 /* set the address, index & direction (read from PHY) */ 1752 addr = mii_access(phy_id, idx, MII_READ); 1753 ret = lan78xx_write_reg(dev, MII_ACC, addr); 1754 1755 ret = lan78xx_phy_wait_not_busy(dev); 1756 if (ret < 0) 1757 goto done; 1758 1759 ret = lan78xx_read_reg(dev, MII_DATA, &val); 1760 1761 ret = (int)(val & 0xFFFF); 1762 1763 done: 1764 mutex_unlock(&dev->phy_mutex); 1765 usb_autopm_put_interface(dev->intf); 1766 1767 return ret; 1768 } 1769 1770 static int lan78xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx, 1771 u16 regval) 1772 { 1773 struct lan78xx_net *dev = bus->priv; 1774 u32 val, addr; 1775 int ret; 1776 1777 ret = usb_autopm_get_interface(dev->intf); 1778 if (ret < 0) 1779 return ret; 1780 1781 mutex_lock(&dev->phy_mutex); 1782 1783 /* confirm MII not busy */ 1784 ret = lan78xx_phy_wait_not_busy(dev); 1785 if (ret < 0) 1786 goto done; 1787 1788 val = (u32)regval; 1789 ret = lan78xx_write_reg(dev, MII_DATA, val); 1790 1791 /* set the address, index & direction (write to PHY) */ 1792 addr = mii_access(phy_id, idx, MII_WRITE); 1793 ret = lan78xx_write_reg(dev, MII_ACC, addr); 1794 1795 ret = lan78xx_phy_wait_not_busy(dev); 1796 if (ret < 0) 1797 goto done; 1798 1799 done: 1800 mutex_unlock(&dev->phy_mutex); 1801 usb_autopm_put_interface(dev->intf); 1802 return 0; 1803 } 1804 1805 static int lan78xx_mdio_init(struct lan78xx_net *dev) 1806 { 1807 struct device_node *node; 1808 int ret; 1809 1810 dev->mdiobus = mdiobus_alloc(); 1811 if (!dev->mdiobus) { 1812 netdev_err(dev->net, "can't allocate MDIO bus\n"); 1813 return -ENOMEM; 1814 } 1815 1816 dev->mdiobus->priv = (void *)dev; 1817 dev->mdiobus->read = lan78xx_mdiobus_read; 1818 dev->mdiobus->write = lan78xx_mdiobus_write; 1819 dev->mdiobus->name = "lan78xx-mdiobus"; 1820 1821 snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d", 1822 dev->udev->bus->busnum, dev->udev->devnum); 1823 1824 switch (dev->chipid) { 1825 case ID_REV_CHIP_ID_7800_: 1826 case ID_REV_CHIP_ID_7850_: 1827 /* set to internal PHY id */ 1828 dev->mdiobus->phy_mask = ~(1 << 1); 1829 break; 1830 case ID_REV_CHIP_ID_7801_: 1831 /* scan thru PHYAD[2..0] */ 1832 dev->mdiobus->phy_mask = ~(0xFF); 1833 break; 1834 } 1835 1836 node = of_get_child_by_name(dev->udev->dev.of_node, "mdio"); 1837 ret = of_mdiobus_register(dev->mdiobus, node); 1838 if (node) 1839 of_node_put(node); 1840 if (ret) { 1841 netdev_err(dev->net, "can't register MDIO bus\n"); 1842 goto exit1; 1843 } 1844 1845 netdev_dbg(dev->net, "registered mdiobus bus %s\n", dev->mdiobus->id); 1846 return 0; 1847 exit1: 1848 mdiobus_free(dev->mdiobus); 1849 return ret; 1850 } 1851 1852 static void lan78xx_remove_mdio(struct lan78xx_net *dev) 1853 { 1854 mdiobus_unregister(dev->mdiobus); 1855 mdiobus_free(dev->mdiobus); 1856 } 1857 1858 static void lan78xx_link_status_change(struct net_device *net) 1859 { 1860 struct phy_device *phydev = net->phydev; 1861 int ret, temp; 1862 1863 /* At forced 100 F/H mode, chip may fail to set mode correctly 1864 * when cable is switched between long(~50+m) and short one. 1865 * As workaround, set to 10 before setting to 100 1866 * at forced 100 F/H mode. 1867 */ 1868 if (!phydev->autoneg && (phydev->speed == 100)) { 1869 /* disable phy interrupt */ 1870 temp = phy_read(phydev, LAN88XX_INT_MASK); 1871 temp &= ~LAN88XX_INT_MASK_MDINTPIN_EN_; 1872 ret = phy_write(phydev, LAN88XX_INT_MASK, temp); 1873 1874 temp = phy_read(phydev, MII_BMCR); 1875 temp &= ~(BMCR_SPEED100 | BMCR_SPEED1000); 1876 phy_write(phydev, MII_BMCR, temp); /* set to 10 first */ 1877 temp |= BMCR_SPEED100; 1878 phy_write(phydev, MII_BMCR, temp); /* set to 100 later */ 1879 1880 /* clear pending interrupt generated while workaround */ 1881 temp = phy_read(phydev, LAN88XX_INT_STS); 1882 1883 /* enable phy interrupt back */ 1884 temp = phy_read(phydev, LAN88XX_INT_MASK); 1885 temp |= LAN88XX_INT_MASK_MDINTPIN_EN_; 1886 ret = phy_write(phydev, LAN88XX_INT_MASK, temp); 1887 } 1888 } 1889 1890 static int irq_map(struct irq_domain *d, unsigned int irq, 1891 irq_hw_number_t hwirq) 1892 { 1893 struct irq_domain_data *data = d->host_data; 1894 1895 irq_set_chip_data(irq, data); 1896 irq_set_chip_and_handler(irq, data->irqchip, data->irq_handler); 1897 irq_set_noprobe(irq); 1898 1899 return 0; 1900 } 1901 1902 static void irq_unmap(struct irq_domain *d, unsigned int irq) 1903 { 1904 irq_set_chip_and_handler(irq, NULL, NULL); 1905 irq_set_chip_data(irq, NULL); 1906 } 1907 1908 static const struct irq_domain_ops chip_domain_ops = { 1909 .map = irq_map, 1910 .unmap = irq_unmap, 1911 }; 1912 1913 static void lan78xx_irq_mask(struct irq_data *irqd) 1914 { 1915 struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd); 1916 1917 data->irqenable &= ~BIT(irqd_to_hwirq(irqd)); 1918 } 1919 1920 static void lan78xx_irq_unmask(struct irq_data *irqd) 1921 { 1922 struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd); 1923 1924 data->irqenable |= BIT(irqd_to_hwirq(irqd)); 1925 } 1926 1927 static void lan78xx_irq_bus_lock(struct irq_data *irqd) 1928 { 1929 struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd); 1930 1931 mutex_lock(&data->irq_lock); 1932 } 1933 1934 static void lan78xx_irq_bus_sync_unlock(struct irq_data *irqd) 1935 { 1936 struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd); 1937 struct lan78xx_net *dev = 1938 container_of(data, struct lan78xx_net, domain_data); 1939 u32 buf; 1940 int ret; 1941 1942 /* call register access here because irq_bus_lock & irq_bus_sync_unlock 1943 * are only two callbacks executed in non-atomic contex. 1944 */ 1945 ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf); 1946 if (buf != data->irqenable) 1947 ret = lan78xx_write_reg(dev, INT_EP_CTL, data->irqenable); 1948 1949 mutex_unlock(&data->irq_lock); 1950 } 1951 1952 static struct irq_chip lan78xx_irqchip = { 1953 .name = "lan78xx-irqs", 1954 .irq_mask = lan78xx_irq_mask, 1955 .irq_unmask = lan78xx_irq_unmask, 1956 .irq_bus_lock = lan78xx_irq_bus_lock, 1957 .irq_bus_sync_unlock = lan78xx_irq_bus_sync_unlock, 1958 }; 1959 1960 static int lan78xx_setup_irq_domain(struct lan78xx_net *dev) 1961 { 1962 struct device_node *of_node; 1963 struct irq_domain *irqdomain; 1964 unsigned int irqmap = 0; 1965 u32 buf; 1966 int ret = 0; 1967 1968 of_node = dev->udev->dev.parent->of_node; 1969 1970 mutex_init(&dev->domain_data.irq_lock); 1971 1972 lan78xx_read_reg(dev, INT_EP_CTL, &buf); 1973 dev->domain_data.irqenable = buf; 1974 1975 dev->domain_data.irqchip = &lan78xx_irqchip; 1976 dev->domain_data.irq_handler = handle_simple_irq; 1977 1978 irqdomain = irq_domain_add_simple(of_node, MAX_INT_EP, 0, 1979 &chip_domain_ops, &dev->domain_data); 1980 if (irqdomain) { 1981 /* create mapping for PHY interrupt */ 1982 irqmap = irq_create_mapping(irqdomain, INT_EP_PHY); 1983 if (!irqmap) { 1984 irq_domain_remove(irqdomain); 1985 1986 irqdomain = NULL; 1987 ret = -EINVAL; 1988 } 1989 } else { 1990 ret = -EINVAL; 1991 } 1992 1993 dev->domain_data.irqdomain = irqdomain; 1994 dev->domain_data.phyirq = irqmap; 1995 1996 return ret; 1997 } 1998 1999 static void lan78xx_remove_irq_domain(struct lan78xx_net *dev) 2000 { 2001 if (dev->domain_data.phyirq > 0) { 2002 irq_dispose_mapping(dev->domain_data.phyirq); 2003 2004 if (dev->domain_data.irqdomain) 2005 irq_domain_remove(dev->domain_data.irqdomain); 2006 } 2007 dev->domain_data.phyirq = 0; 2008 dev->domain_data.irqdomain = NULL; 2009 } 2010 2011 static int lan8835_fixup(struct phy_device *phydev) 2012 { 2013 int buf; 2014 int ret; 2015 struct lan78xx_net *dev = netdev_priv(phydev->attached_dev); 2016 2017 /* LED2/PME_N/IRQ_N/RGMII_ID pin to IRQ_N mode */ 2018 buf = phy_read_mmd(phydev, MDIO_MMD_PCS, 0x8010); 2019 buf &= ~0x1800; 2020 buf |= 0x0800; 2021 phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8010, buf); 2022 2023 /* RGMII MAC TXC Delay Enable */ 2024 ret = lan78xx_write_reg(dev, MAC_RGMII_ID, 2025 MAC_RGMII_ID_TXC_DELAY_EN_); 2026 2027 /* RGMII TX DLL Tune Adjust */ 2028 ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00); 2029 2030 dev->interface = PHY_INTERFACE_MODE_RGMII_TXID; 2031 2032 return 1; 2033 } 2034 2035 static int ksz9031rnx_fixup(struct phy_device *phydev) 2036 { 2037 struct lan78xx_net *dev = netdev_priv(phydev->attached_dev); 2038 2039 /* Micrel9301RNX PHY configuration */ 2040 /* RGMII Control Signal Pad Skew */ 2041 phy_write_mmd(phydev, MDIO_MMD_WIS, 4, 0x0077); 2042 /* RGMII RX Data Pad Skew */ 2043 phy_write_mmd(phydev, MDIO_MMD_WIS, 5, 0x7777); 2044 /* RGMII RX Clock Pad Skew */ 2045 phy_write_mmd(phydev, MDIO_MMD_WIS, 8, 0x1FF); 2046 2047 dev->interface = PHY_INTERFACE_MODE_RGMII_RXID; 2048 2049 return 1; 2050 } 2051 2052 static struct phy_device *lan7801_phy_init(struct lan78xx_net *dev) 2053 { 2054 u32 buf; 2055 int ret; 2056 struct fixed_phy_status fphy_status = { 2057 .link = 1, 2058 .speed = SPEED_1000, 2059 .duplex = DUPLEX_FULL, 2060 }; 2061 struct phy_device *phydev; 2062 2063 phydev = phy_find_first(dev->mdiobus); 2064 if (!phydev) { 2065 netdev_dbg(dev->net, "PHY Not Found!! Registering Fixed PHY\n"); 2066 phydev = fixed_phy_register(PHY_POLL, &fphy_status, -1, 2067 NULL); 2068 if (IS_ERR(phydev)) { 2069 netdev_err(dev->net, "No PHY/fixed_PHY found\n"); 2070 return NULL; 2071 } 2072 netdev_dbg(dev->net, "Registered FIXED PHY\n"); 2073 dev->interface = PHY_INTERFACE_MODE_RGMII; 2074 ret = lan78xx_write_reg(dev, MAC_RGMII_ID, 2075 MAC_RGMII_ID_TXC_DELAY_EN_); 2076 ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00); 2077 ret = lan78xx_read_reg(dev, HW_CFG, &buf); 2078 buf |= HW_CFG_CLK125_EN_; 2079 buf |= HW_CFG_REFCLK25_EN_; 2080 ret = lan78xx_write_reg(dev, HW_CFG, buf); 2081 } else { 2082 if (!phydev->drv) { 2083 netdev_err(dev->net, "no PHY driver found\n"); 2084 return NULL; 2085 } 2086 dev->interface = PHY_INTERFACE_MODE_RGMII; 2087 /* external PHY fixup for KSZ9031RNX */ 2088 ret = phy_register_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0, 2089 ksz9031rnx_fixup); 2090 if (ret < 0) { 2091 netdev_err(dev->net, "Failed to register fixup for PHY_KSZ9031RNX\n"); 2092 return NULL; 2093 } 2094 /* external PHY fixup for LAN8835 */ 2095 ret = phy_register_fixup_for_uid(PHY_LAN8835, 0xfffffff0, 2096 lan8835_fixup); 2097 if (ret < 0) { 2098 netdev_err(dev->net, "Failed to register fixup for PHY_LAN8835\n"); 2099 return NULL; 2100 } 2101 /* add more external PHY fixup here if needed */ 2102 2103 phydev->is_internal = false; 2104 } 2105 return phydev; 2106 } 2107 2108 static int lan78xx_phy_init(struct lan78xx_net *dev) 2109 { 2110 int ret; 2111 u32 mii_adv; 2112 struct phy_device *phydev; 2113 2114 switch (dev->chipid) { 2115 case ID_REV_CHIP_ID_7801_: 2116 phydev = lan7801_phy_init(dev); 2117 if (!phydev) { 2118 netdev_err(dev->net, "lan7801: PHY Init Failed"); 2119 return -EIO; 2120 } 2121 break; 2122 2123 case ID_REV_CHIP_ID_7800_: 2124 case ID_REV_CHIP_ID_7850_: 2125 phydev = phy_find_first(dev->mdiobus); 2126 if (!phydev) { 2127 netdev_err(dev->net, "no PHY found\n"); 2128 return -EIO; 2129 } 2130 phydev->is_internal = true; 2131 dev->interface = PHY_INTERFACE_MODE_GMII; 2132 break; 2133 2134 default: 2135 netdev_err(dev->net, "Unknown CHIP ID found\n"); 2136 return -EIO; 2137 } 2138 2139 /* if phyirq is not set, use polling mode in phylib */ 2140 if (dev->domain_data.phyirq > 0) 2141 phydev->irq = dev->domain_data.phyirq; 2142 else 2143 phydev->irq = 0; 2144 netdev_dbg(dev->net, "phydev->irq = %d\n", phydev->irq); 2145 2146 /* set to AUTOMDIX */ 2147 phydev->mdix = ETH_TP_MDI_AUTO; 2148 2149 ret = phy_connect_direct(dev->net, phydev, 2150 lan78xx_link_status_change, 2151 dev->interface); 2152 if (ret) { 2153 netdev_err(dev->net, "can't attach PHY to %s\n", 2154 dev->mdiobus->id); 2155 if (dev->chipid == ID_REV_CHIP_ID_7801_) { 2156 if (phy_is_pseudo_fixed_link(phydev)) { 2157 fixed_phy_unregister(phydev); 2158 } else { 2159 phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 2160 0xfffffff0); 2161 phy_unregister_fixup_for_uid(PHY_LAN8835, 2162 0xfffffff0); 2163 } 2164 } 2165 return -EIO; 2166 } 2167 2168 /* MAC doesn't support 1000T Half */ 2169 phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT); 2170 2171 /* support both flow controls */ 2172 dev->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX); 2173 phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause); 2174 mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control); 2175 phydev->advertising |= mii_adv_to_ethtool_adv_t(mii_adv); 2176 2177 if (phydev->mdio.dev.of_node) { 2178 u32 reg; 2179 int len; 2180 2181 len = of_property_count_elems_of_size(phydev->mdio.dev.of_node, 2182 "microchip,led-modes", 2183 sizeof(u32)); 2184 if (len >= 0) { 2185 /* Ensure the appropriate LEDs are enabled */ 2186 lan78xx_read_reg(dev, HW_CFG, ®); 2187 reg &= ~(HW_CFG_LED0_EN_ | 2188 HW_CFG_LED1_EN_ | 2189 HW_CFG_LED2_EN_ | 2190 HW_CFG_LED3_EN_); 2191 reg |= (len > 0) * HW_CFG_LED0_EN_ | 2192 (len > 1) * HW_CFG_LED1_EN_ | 2193 (len > 2) * HW_CFG_LED2_EN_ | 2194 (len > 3) * HW_CFG_LED3_EN_; 2195 lan78xx_write_reg(dev, HW_CFG, reg); 2196 } 2197 } 2198 2199 genphy_config_aneg(phydev); 2200 2201 dev->fc_autoneg = phydev->autoneg; 2202 2203 return 0; 2204 } 2205 2206 static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size) 2207 { 2208 int ret = 0; 2209 u32 buf; 2210 bool rxenabled; 2211 2212 ret = lan78xx_read_reg(dev, MAC_RX, &buf); 2213 2214 rxenabled = ((buf & MAC_RX_RXEN_) != 0); 2215 2216 if (rxenabled) { 2217 buf &= ~MAC_RX_RXEN_; 2218 ret = lan78xx_write_reg(dev, MAC_RX, buf); 2219 } 2220 2221 /* add 4 to size for FCS */ 2222 buf &= ~MAC_RX_MAX_SIZE_MASK_; 2223 buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_); 2224 2225 ret = lan78xx_write_reg(dev, MAC_RX, buf); 2226 2227 if (rxenabled) { 2228 buf |= MAC_RX_RXEN_; 2229 ret = lan78xx_write_reg(dev, MAC_RX, buf); 2230 } 2231 2232 return 0; 2233 } 2234 2235 static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q) 2236 { 2237 struct sk_buff *skb; 2238 unsigned long flags; 2239 int count = 0; 2240 2241 spin_lock_irqsave(&q->lock, flags); 2242 while (!skb_queue_empty(q)) { 2243 struct skb_data *entry; 2244 struct urb *urb; 2245 int ret; 2246 2247 skb_queue_walk(q, skb) { 2248 entry = (struct skb_data *)skb->cb; 2249 if (entry->state != unlink_start) 2250 goto found; 2251 } 2252 break; 2253 found: 2254 entry->state = unlink_start; 2255 urb = entry->urb; 2256 2257 /* Get reference count of the URB to avoid it to be 2258 * freed during usb_unlink_urb, which may trigger 2259 * use-after-free problem inside usb_unlink_urb since 2260 * usb_unlink_urb is always racing with .complete 2261 * handler(include defer_bh). 2262 */ 2263 usb_get_urb(urb); 2264 spin_unlock_irqrestore(&q->lock, flags); 2265 /* during some PM-driven resume scenarios, 2266 * these (async) unlinks complete immediately 2267 */ 2268 ret = usb_unlink_urb(urb); 2269 if (ret != -EINPROGRESS && ret != 0) 2270 netdev_dbg(dev->net, "unlink urb err, %d\n", ret); 2271 else 2272 count++; 2273 usb_put_urb(urb); 2274 spin_lock_irqsave(&q->lock, flags); 2275 } 2276 spin_unlock_irqrestore(&q->lock, flags); 2277 return count; 2278 } 2279 2280 static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu) 2281 { 2282 struct lan78xx_net *dev = netdev_priv(netdev); 2283 int ll_mtu = new_mtu + netdev->hard_header_len; 2284 int old_hard_mtu = dev->hard_mtu; 2285 int old_rx_urb_size = dev->rx_urb_size; 2286 int ret; 2287 2288 /* no second zero-length packet read wanted after mtu-sized packets */ 2289 if ((ll_mtu % dev->maxpacket) == 0) 2290 return -EDOM; 2291 2292 ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + VLAN_ETH_HLEN); 2293 2294 netdev->mtu = new_mtu; 2295 2296 dev->hard_mtu = netdev->mtu + netdev->hard_header_len; 2297 if (dev->rx_urb_size == old_hard_mtu) { 2298 dev->rx_urb_size = dev->hard_mtu; 2299 if (dev->rx_urb_size > old_rx_urb_size) { 2300 if (netif_running(dev->net)) { 2301 unlink_urbs(dev, &dev->rxq); 2302 tasklet_schedule(&dev->bh); 2303 } 2304 } 2305 } 2306 2307 return 0; 2308 } 2309 2310 static int lan78xx_set_mac_addr(struct net_device *netdev, void *p) 2311 { 2312 struct lan78xx_net *dev = netdev_priv(netdev); 2313 struct sockaddr *addr = p; 2314 u32 addr_lo, addr_hi; 2315 int ret; 2316 2317 if (netif_running(netdev)) 2318 return -EBUSY; 2319 2320 if (!is_valid_ether_addr(addr->sa_data)) 2321 return -EADDRNOTAVAIL; 2322 2323 ether_addr_copy(netdev->dev_addr, addr->sa_data); 2324 2325 addr_lo = netdev->dev_addr[0] | 2326 netdev->dev_addr[1] << 8 | 2327 netdev->dev_addr[2] << 16 | 2328 netdev->dev_addr[3] << 24; 2329 addr_hi = netdev->dev_addr[4] | 2330 netdev->dev_addr[5] << 8; 2331 2332 ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo); 2333 ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi); 2334 2335 return 0; 2336 } 2337 2338 /* Enable or disable Rx checksum offload engine */ 2339 static int lan78xx_set_features(struct net_device *netdev, 2340 netdev_features_t features) 2341 { 2342 struct lan78xx_net *dev = netdev_priv(netdev); 2343 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]); 2344 unsigned long flags; 2345 int ret; 2346 2347 spin_lock_irqsave(&pdata->rfe_ctl_lock, flags); 2348 2349 if (features & NETIF_F_RXCSUM) { 2350 pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_; 2351 pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_; 2352 } else { 2353 pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_); 2354 pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_); 2355 } 2356 2357 if (features & NETIF_F_HW_VLAN_CTAG_RX) 2358 pdata->rfe_ctl |= RFE_CTL_VLAN_STRIP_; 2359 else 2360 pdata->rfe_ctl &= ~RFE_CTL_VLAN_STRIP_; 2361 2362 if (features & NETIF_F_HW_VLAN_CTAG_FILTER) 2363 pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_; 2364 else 2365 pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_; 2366 2367 spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags); 2368 2369 ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl); 2370 2371 return 0; 2372 } 2373 2374 static void lan78xx_deferred_vlan_write(struct work_struct *param) 2375 { 2376 struct lan78xx_priv *pdata = 2377 container_of(param, struct lan78xx_priv, set_vlan); 2378 struct lan78xx_net *dev = pdata->dev; 2379 2380 lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0, 2381 DP_SEL_VHF_VLAN_LEN, pdata->vlan_table); 2382 } 2383 2384 static int lan78xx_vlan_rx_add_vid(struct net_device *netdev, 2385 __be16 proto, u16 vid) 2386 { 2387 struct lan78xx_net *dev = netdev_priv(netdev); 2388 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]); 2389 u16 vid_bit_index; 2390 u16 vid_dword_index; 2391 2392 vid_dword_index = (vid >> 5) & 0x7F; 2393 vid_bit_index = vid & 0x1F; 2394 2395 pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index); 2396 2397 /* defer register writes to a sleepable context */ 2398 schedule_work(&pdata->set_vlan); 2399 2400 return 0; 2401 } 2402 2403 static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev, 2404 __be16 proto, u16 vid) 2405 { 2406 struct lan78xx_net *dev = netdev_priv(netdev); 2407 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]); 2408 u16 vid_bit_index; 2409 u16 vid_dword_index; 2410 2411 vid_dword_index = (vid >> 5) & 0x7F; 2412 vid_bit_index = vid & 0x1F; 2413 2414 pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index); 2415 2416 /* defer register writes to a sleepable context */ 2417 schedule_work(&pdata->set_vlan); 2418 2419 return 0; 2420 } 2421 2422 static void lan78xx_init_ltm(struct lan78xx_net *dev) 2423 { 2424 int ret; 2425 u32 buf; 2426 u32 regs[6] = { 0 }; 2427 2428 ret = lan78xx_read_reg(dev, USB_CFG1, &buf); 2429 if (buf & USB_CFG1_LTM_ENABLE_) { 2430 u8 temp[2]; 2431 /* Get values from EEPROM first */ 2432 if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) { 2433 if (temp[0] == 24) { 2434 ret = lan78xx_read_raw_eeprom(dev, 2435 temp[1] * 2, 2436 24, 2437 (u8 *)regs); 2438 if (ret < 0) 2439 return; 2440 } 2441 } else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) { 2442 if (temp[0] == 24) { 2443 ret = lan78xx_read_raw_otp(dev, 2444 temp[1] * 2, 2445 24, 2446 (u8 *)regs); 2447 if (ret < 0) 2448 return; 2449 } 2450 } 2451 } 2452 2453 lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]); 2454 lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]); 2455 lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]); 2456 lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]); 2457 lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]); 2458 lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]); 2459 } 2460 2461 static int lan78xx_reset(struct lan78xx_net *dev) 2462 { 2463 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]); 2464 u32 buf; 2465 int ret = 0; 2466 unsigned long timeout; 2467 u8 sig; 2468 2469 ret = lan78xx_read_reg(dev, HW_CFG, &buf); 2470 buf |= HW_CFG_LRST_; 2471 ret = lan78xx_write_reg(dev, HW_CFG, buf); 2472 2473 timeout = jiffies + HZ; 2474 do { 2475 mdelay(1); 2476 ret = lan78xx_read_reg(dev, HW_CFG, &buf); 2477 if (time_after(jiffies, timeout)) { 2478 netdev_warn(dev->net, 2479 "timeout on completion of LiteReset"); 2480 return -EIO; 2481 } 2482 } while (buf & HW_CFG_LRST_); 2483 2484 lan78xx_init_mac_address(dev); 2485 2486 /* save DEVID for later usage */ 2487 ret = lan78xx_read_reg(dev, ID_REV, &buf); 2488 dev->chipid = (buf & ID_REV_CHIP_ID_MASK_) >> 16; 2489 dev->chiprev = buf & ID_REV_CHIP_REV_MASK_; 2490 2491 /* Respond to the IN token with a NAK */ 2492 ret = lan78xx_read_reg(dev, USB_CFG0, &buf); 2493 buf |= USB_CFG_BIR_; 2494 ret = lan78xx_write_reg(dev, USB_CFG0, buf); 2495 2496 /* Init LTM */ 2497 lan78xx_init_ltm(dev); 2498 2499 if (dev->udev->speed == USB_SPEED_SUPER) { 2500 buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE; 2501 dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE; 2502 dev->rx_qlen = 4; 2503 dev->tx_qlen = 4; 2504 } else if (dev->udev->speed == USB_SPEED_HIGH) { 2505 buf = DEFAULT_BURST_CAP_SIZE / HS_USB_PKT_SIZE; 2506 dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE; 2507 dev->rx_qlen = RX_MAX_QUEUE_MEMORY / dev->rx_urb_size; 2508 dev->tx_qlen = RX_MAX_QUEUE_MEMORY / dev->hard_mtu; 2509 } else { 2510 buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE; 2511 dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE; 2512 dev->rx_qlen = 4; 2513 dev->tx_qlen = 4; 2514 } 2515 2516 ret = lan78xx_write_reg(dev, BURST_CAP, buf); 2517 ret = lan78xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY); 2518 2519 ret = lan78xx_read_reg(dev, HW_CFG, &buf); 2520 buf |= HW_CFG_MEF_; 2521 ret = lan78xx_write_reg(dev, HW_CFG, buf); 2522 2523 ret = lan78xx_read_reg(dev, USB_CFG0, &buf); 2524 buf |= USB_CFG_BCE_; 2525 ret = lan78xx_write_reg(dev, USB_CFG0, buf); 2526 2527 /* set FIFO sizes */ 2528 buf = (MAX_RX_FIFO_SIZE - 512) / 512; 2529 ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf); 2530 2531 buf = (MAX_TX_FIFO_SIZE - 512) / 512; 2532 ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf); 2533 2534 ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_); 2535 ret = lan78xx_write_reg(dev, FLOW, 0); 2536 ret = lan78xx_write_reg(dev, FCT_FLOW, 0); 2537 2538 /* Don't need rfe_ctl_lock during initialisation */ 2539 ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl); 2540 pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_; 2541 ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl); 2542 2543 /* Enable or disable checksum offload engines */ 2544 lan78xx_set_features(dev->net, dev->net->features); 2545 2546 lan78xx_set_multicast(dev->net); 2547 2548 /* reset PHY */ 2549 ret = lan78xx_read_reg(dev, PMT_CTL, &buf); 2550 buf |= PMT_CTL_PHY_RST_; 2551 ret = lan78xx_write_reg(dev, PMT_CTL, buf); 2552 2553 timeout = jiffies + HZ; 2554 do { 2555 mdelay(1); 2556 ret = lan78xx_read_reg(dev, PMT_CTL, &buf); 2557 if (time_after(jiffies, timeout)) { 2558 netdev_warn(dev->net, "timeout waiting for PHY Reset"); 2559 return -EIO; 2560 } 2561 } while ((buf & PMT_CTL_PHY_RST_) || !(buf & PMT_CTL_READY_)); 2562 2563 ret = lan78xx_read_reg(dev, MAC_CR, &buf); 2564 /* LAN7801 only has RGMII mode */ 2565 if (dev->chipid == ID_REV_CHIP_ID_7801_) 2566 buf &= ~MAC_CR_GMII_EN_; 2567 2568 if (dev->chipid == ID_REV_CHIP_ID_7800_) { 2569 ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig); 2570 if (!ret && sig != EEPROM_INDICATOR) { 2571 /* Implies there is no external eeprom. Set mac speed */ 2572 netdev_info(dev->net, "No External EEPROM. Setting MAC Speed\n"); 2573 buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_; 2574 } 2575 } 2576 ret = lan78xx_write_reg(dev, MAC_CR, buf); 2577 2578 ret = lan78xx_read_reg(dev, MAC_TX, &buf); 2579 buf |= MAC_TX_TXEN_; 2580 ret = lan78xx_write_reg(dev, MAC_TX, buf); 2581 2582 ret = lan78xx_read_reg(dev, FCT_TX_CTL, &buf); 2583 buf |= FCT_TX_CTL_EN_; 2584 ret = lan78xx_write_reg(dev, FCT_TX_CTL, buf); 2585 2586 ret = lan78xx_set_rx_max_frame_length(dev, 2587 dev->net->mtu + VLAN_ETH_HLEN); 2588 2589 ret = lan78xx_read_reg(dev, MAC_RX, &buf); 2590 buf |= MAC_RX_RXEN_; 2591 ret = lan78xx_write_reg(dev, MAC_RX, buf); 2592 2593 ret = lan78xx_read_reg(dev, FCT_RX_CTL, &buf); 2594 buf |= FCT_RX_CTL_EN_; 2595 ret = lan78xx_write_reg(dev, FCT_RX_CTL, buf); 2596 2597 return 0; 2598 } 2599 2600 static void lan78xx_init_stats(struct lan78xx_net *dev) 2601 { 2602 u32 *p; 2603 int i; 2604 2605 /* initialize for stats update 2606 * some counters are 20bits and some are 32bits 2607 */ 2608 p = (u32 *)&dev->stats.rollover_max; 2609 for (i = 0; i < (sizeof(dev->stats.rollover_max) / (sizeof(u32))); i++) 2610 p[i] = 0xFFFFF; 2611 2612 dev->stats.rollover_max.rx_unicast_byte_count = 0xFFFFFFFF; 2613 dev->stats.rollover_max.rx_broadcast_byte_count = 0xFFFFFFFF; 2614 dev->stats.rollover_max.rx_multicast_byte_count = 0xFFFFFFFF; 2615 dev->stats.rollover_max.eee_rx_lpi_transitions = 0xFFFFFFFF; 2616 dev->stats.rollover_max.eee_rx_lpi_time = 0xFFFFFFFF; 2617 dev->stats.rollover_max.tx_unicast_byte_count = 0xFFFFFFFF; 2618 dev->stats.rollover_max.tx_broadcast_byte_count = 0xFFFFFFFF; 2619 dev->stats.rollover_max.tx_multicast_byte_count = 0xFFFFFFFF; 2620 dev->stats.rollover_max.eee_tx_lpi_transitions = 0xFFFFFFFF; 2621 dev->stats.rollover_max.eee_tx_lpi_time = 0xFFFFFFFF; 2622 2623 set_bit(EVENT_STAT_UPDATE, &dev->flags); 2624 } 2625 2626 static int lan78xx_open(struct net_device *net) 2627 { 2628 struct lan78xx_net *dev = netdev_priv(net); 2629 int ret; 2630 2631 ret = usb_autopm_get_interface(dev->intf); 2632 if (ret < 0) 2633 goto out; 2634 2635 phy_start(net->phydev); 2636 2637 netif_dbg(dev, ifup, dev->net, "phy initialised successfully"); 2638 2639 /* for Link Check */ 2640 if (dev->urb_intr) { 2641 ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL); 2642 if (ret < 0) { 2643 netif_err(dev, ifup, dev->net, 2644 "intr submit %d\n", ret); 2645 goto done; 2646 } 2647 } 2648 2649 lan78xx_init_stats(dev); 2650 2651 set_bit(EVENT_DEV_OPEN, &dev->flags); 2652 2653 netif_start_queue(net); 2654 2655 dev->link_on = false; 2656 2657 lan78xx_defer_kevent(dev, EVENT_LINK_RESET); 2658 done: 2659 usb_autopm_put_interface(dev->intf); 2660 2661 out: 2662 return ret; 2663 } 2664 2665 static void lan78xx_terminate_urbs(struct lan78xx_net *dev) 2666 { 2667 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup); 2668 DECLARE_WAITQUEUE(wait, current); 2669 int temp; 2670 2671 /* ensure there are no more active urbs */ 2672 add_wait_queue(&unlink_wakeup, &wait); 2673 set_current_state(TASK_UNINTERRUPTIBLE); 2674 dev->wait = &unlink_wakeup; 2675 temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq); 2676 2677 /* maybe wait for deletions to finish. */ 2678 while (!skb_queue_empty(&dev->rxq) && 2679 !skb_queue_empty(&dev->txq) && 2680 !skb_queue_empty(&dev->done)) { 2681 schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS)); 2682 set_current_state(TASK_UNINTERRUPTIBLE); 2683 netif_dbg(dev, ifdown, dev->net, 2684 "waited for %d urb completions\n", temp); 2685 } 2686 set_current_state(TASK_RUNNING); 2687 dev->wait = NULL; 2688 remove_wait_queue(&unlink_wakeup, &wait); 2689 } 2690 2691 static int lan78xx_stop(struct net_device *net) 2692 { 2693 struct lan78xx_net *dev = netdev_priv(net); 2694 2695 if (timer_pending(&dev->stat_monitor)) 2696 del_timer_sync(&dev->stat_monitor); 2697 2698 if (net->phydev) 2699 phy_stop(net->phydev); 2700 2701 clear_bit(EVENT_DEV_OPEN, &dev->flags); 2702 netif_stop_queue(net); 2703 2704 netif_info(dev, ifdown, dev->net, 2705 "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n", 2706 net->stats.rx_packets, net->stats.tx_packets, 2707 net->stats.rx_errors, net->stats.tx_errors); 2708 2709 lan78xx_terminate_urbs(dev); 2710 2711 usb_kill_urb(dev->urb_intr); 2712 2713 skb_queue_purge(&dev->rxq_pause); 2714 2715 /* deferred work (task, timer, softirq) must also stop. 2716 * can't flush_scheduled_work() until we drop rtnl (later), 2717 * else workers could deadlock; so make workers a NOP. 2718 */ 2719 dev->flags = 0; 2720 cancel_delayed_work_sync(&dev->wq); 2721 tasklet_kill(&dev->bh); 2722 2723 usb_autopm_put_interface(dev->intf); 2724 2725 return 0; 2726 } 2727 2728 static int lan78xx_linearize(struct sk_buff *skb) 2729 { 2730 return skb_linearize(skb); 2731 } 2732 2733 static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev, 2734 struct sk_buff *skb, gfp_t flags) 2735 { 2736 u32 tx_cmd_a, tx_cmd_b; 2737 2738 if (skb_cow_head(skb, TX_OVERHEAD)) { 2739 dev_kfree_skb_any(skb); 2740 return NULL; 2741 } 2742 2743 if (lan78xx_linearize(skb) < 0) 2744 return NULL; 2745 2746 tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_; 2747 2748 if (skb->ip_summed == CHECKSUM_PARTIAL) 2749 tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_; 2750 2751 tx_cmd_b = 0; 2752 if (skb_is_gso(skb)) { 2753 u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_); 2754 2755 tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_; 2756 2757 tx_cmd_a |= TX_CMD_A_LSO_; 2758 } 2759 2760 if (skb_vlan_tag_present(skb)) { 2761 tx_cmd_a |= TX_CMD_A_IVTG_; 2762 tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_; 2763 } 2764 2765 skb_push(skb, 4); 2766 cpu_to_le32s(&tx_cmd_b); 2767 memcpy(skb->data, &tx_cmd_b, 4); 2768 2769 skb_push(skb, 4); 2770 cpu_to_le32s(&tx_cmd_a); 2771 memcpy(skb->data, &tx_cmd_a, 4); 2772 2773 return skb; 2774 } 2775 2776 static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb, 2777 struct sk_buff_head *list, enum skb_state state) 2778 { 2779 unsigned long flags; 2780 enum skb_state old_state; 2781 struct skb_data *entry = (struct skb_data *)skb->cb; 2782 2783 spin_lock_irqsave(&list->lock, flags); 2784 old_state = entry->state; 2785 entry->state = state; 2786 2787 __skb_unlink(skb, list); 2788 spin_unlock(&list->lock); 2789 spin_lock(&dev->done.lock); 2790 2791 __skb_queue_tail(&dev->done, skb); 2792 if (skb_queue_len(&dev->done) == 1) 2793 tasklet_schedule(&dev->bh); 2794 spin_unlock_irqrestore(&dev->done.lock, flags); 2795 2796 return old_state; 2797 } 2798 2799 static void tx_complete(struct urb *urb) 2800 { 2801 struct sk_buff *skb = (struct sk_buff *)urb->context; 2802 struct skb_data *entry = (struct skb_data *)skb->cb; 2803 struct lan78xx_net *dev = entry->dev; 2804 2805 if (urb->status == 0) { 2806 dev->net->stats.tx_packets += entry->num_of_packet; 2807 dev->net->stats.tx_bytes += entry->length; 2808 } else { 2809 dev->net->stats.tx_errors++; 2810 2811 switch (urb->status) { 2812 case -EPIPE: 2813 lan78xx_defer_kevent(dev, EVENT_TX_HALT); 2814 break; 2815 2816 /* software-driven interface shutdown */ 2817 case -ECONNRESET: 2818 case -ESHUTDOWN: 2819 break; 2820 2821 case -EPROTO: 2822 case -ETIME: 2823 case -EILSEQ: 2824 netif_stop_queue(dev->net); 2825 break; 2826 default: 2827 netif_dbg(dev, tx_err, dev->net, 2828 "tx err %d\n", entry->urb->status); 2829 break; 2830 } 2831 } 2832 2833 usb_autopm_put_interface_async(dev->intf); 2834 2835 defer_bh(dev, skb, &dev->txq, tx_done); 2836 } 2837 2838 static void lan78xx_queue_skb(struct sk_buff_head *list, 2839 struct sk_buff *newsk, enum skb_state state) 2840 { 2841 struct skb_data *entry = (struct skb_data *)newsk->cb; 2842 2843 __skb_queue_tail(list, newsk); 2844 entry->state = state; 2845 } 2846 2847 static netdev_tx_t 2848 lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net) 2849 { 2850 struct lan78xx_net *dev = netdev_priv(net); 2851 struct sk_buff *skb2 = NULL; 2852 2853 if (skb) { 2854 skb_tx_timestamp(skb); 2855 skb2 = lan78xx_tx_prep(dev, skb, GFP_ATOMIC); 2856 } 2857 2858 if (skb2) { 2859 skb_queue_tail(&dev->txq_pend, skb2); 2860 2861 /* throttle TX patch at slower than SUPER SPEED USB */ 2862 if ((dev->udev->speed < USB_SPEED_SUPER) && 2863 (skb_queue_len(&dev->txq_pend) > 10)) 2864 netif_stop_queue(net); 2865 } else { 2866 netif_dbg(dev, tx_err, dev->net, 2867 "lan78xx_tx_prep return NULL\n"); 2868 dev->net->stats.tx_errors++; 2869 dev->net->stats.tx_dropped++; 2870 } 2871 2872 tasklet_schedule(&dev->bh); 2873 2874 return NETDEV_TX_OK; 2875 } 2876 2877 static int 2878 lan78xx_get_endpoints(struct lan78xx_net *dev, struct usb_interface *intf) 2879 { 2880 int tmp; 2881 struct usb_host_interface *alt = NULL; 2882 struct usb_host_endpoint *in = NULL, *out = NULL; 2883 struct usb_host_endpoint *status = NULL; 2884 2885 for (tmp = 0; tmp < intf->num_altsetting; tmp++) { 2886 unsigned ep; 2887 2888 in = NULL; 2889 out = NULL; 2890 status = NULL; 2891 alt = intf->altsetting + tmp; 2892 2893 for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) { 2894 struct usb_host_endpoint *e; 2895 int intr = 0; 2896 2897 e = alt->endpoint + ep; 2898 switch (e->desc.bmAttributes) { 2899 case USB_ENDPOINT_XFER_INT: 2900 if (!usb_endpoint_dir_in(&e->desc)) 2901 continue; 2902 intr = 1; 2903 /* FALLTHROUGH */ 2904 case USB_ENDPOINT_XFER_BULK: 2905 break; 2906 default: 2907 continue; 2908 } 2909 if (usb_endpoint_dir_in(&e->desc)) { 2910 if (!intr && !in) 2911 in = e; 2912 else if (intr && !status) 2913 status = e; 2914 } else { 2915 if (!out) 2916 out = e; 2917 } 2918 } 2919 if (in && out) 2920 break; 2921 } 2922 if (!alt || !in || !out) 2923 return -EINVAL; 2924 2925 dev->pipe_in = usb_rcvbulkpipe(dev->udev, 2926 in->desc.bEndpointAddress & 2927 USB_ENDPOINT_NUMBER_MASK); 2928 dev->pipe_out = usb_sndbulkpipe(dev->udev, 2929 out->desc.bEndpointAddress & 2930 USB_ENDPOINT_NUMBER_MASK); 2931 dev->ep_intr = status; 2932 2933 return 0; 2934 } 2935 2936 static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf) 2937 { 2938 struct lan78xx_priv *pdata = NULL; 2939 int ret; 2940 int i; 2941 2942 ret = lan78xx_get_endpoints(dev, intf); 2943 if (ret) { 2944 netdev_warn(dev->net, "lan78xx_get_endpoints failed: %d\n", 2945 ret); 2946 return ret; 2947 } 2948 2949 dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL); 2950 2951 pdata = (struct lan78xx_priv *)(dev->data[0]); 2952 if (!pdata) { 2953 netdev_warn(dev->net, "Unable to allocate lan78xx_priv"); 2954 return -ENOMEM; 2955 } 2956 2957 pdata->dev = dev; 2958 2959 spin_lock_init(&pdata->rfe_ctl_lock); 2960 mutex_init(&pdata->dataport_mutex); 2961 2962 INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write); 2963 2964 for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++) 2965 pdata->vlan_table[i] = 0; 2966 2967 INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write); 2968 2969 dev->net->features = 0; 2970 2971 if (DEFAULT_TX_CSUM_ENABLE) 2972 dev->net->features |= NETIF_F_HW_CSUM; 2973 2974 if (DEFAULT_RX_CSUM_ENABLE) 2975 dev->net->features |= NETIF_F_RXCSUM; 2976 2977 if (DEFAULT_TSO_CSUM_ENABLE) 2978 dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG; 2979 2980 if (DEFAULT_VLAN_RX_OFFLOAD) 2981 dev->net->features |= NETIF_F_HW_VLAN_CTAG_RX; 2982 2983 if (DEFAULT_VLAN_FILTER_ENABLE) 2984 dev->net->features |= NETIF_F_HW_VLAN_CTAG_FILTER; 2985 2986 dev->net->hw_features = dev->net->features; 2987 2988 ret = lan78xx_setup_irq_domain(dev); 2989 if (ret < 0) { 2990 netdev_warn(dev->net, 2991 "lan78xx_setup_irq_domain() failed : %d", ret); 2992 goto out1; 2993 } 2994 2995 dev->net->hard_header_len += TX_OVERHEAD; 2996 dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len; 2997 2998 /* Init all registers */ 2999 ret = lan78xx_reset(dev); 3000 if (ret) { 3001 netdev_warn(dev->net, "Registers INIT FAILED...."); 3002 goto out2; 3003 } 3004 3005 ret = lan78xx_mdio_init(dev); 3006 if (ret) { 3007 netdev_warn(dev->net, "MDIO INIT FAILED....."); 3008 goto out2; 3009 } 3010 3011 dev->net->flags |= IFF_MULTICAST; 3012 3013 pdata->wol = WAKE_MAGIC; 3014 3015 return ret; 3016 3017 out2: 3018 lan78xx_remove_irq_domain(dev); 3019 3020 out1: 3021 netdev_warn(dev->net, "Bind routine FAILED"); 3022 cancel_work_sync(&pdata->set_multicast); 3023 cancel_work_sync(&pdata->set_vlan); 3024 kfree(pdata); 3025 return ret; 3026 } 3027 3028 static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf) 3029 { 3030 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]); 3031 3032 lan78xx_remove_irq_domain(dev); 3033 3034 lan78xx_remove_mdio(dev); 3035 3036 if (pdata) { 3037 cancel_work_sync(&pdata->set_multicast); 3038 cancel_work_sync(&pdata->set_vlan); 3039 netif_dbg(dev, ifdown, dev->net, "free pdata"); 3040 kfree(pdata); 3041 pdata = NULL; 3042 dev->data[0] = 0; 3043 } 3044 } 3045 3046 static void lan78xx_rx_csum_offload(struct lan78xx_net *dev, 3047 struct sk_buff *skb, 3048 u32 rx_cmd_a, u32 rx_cmd_b) 3049 { 3050 /* HW Checksum offload appears to be flawed if used when not stripping 3051 * VLAN headers. Drop back to S/W checksums under these conditions. 3052 */ 3053 if (!(dev->net->features & NETIF_F_RXCSUM) || 3054 unlikely(rx_cmd_a & RX_CMD_A_ICSM_) || 3055 ((rx_cmd_a & RX_CMD_A_FVTG_) && 3056 !(dev->net->features & NETIF_F_HW_VLAN_CTAG_RX))) { 3057 skb->ip_summed = CHECKSUM_NONE; 3058 } else { 3059 skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_)); 3060 skb->ip_summed = CHECKSUM_COMPLETE; 3061 } 3062 } 3063 3064 static void lan78xx_rx_vlan_offload(struct lan78xx_net *dev, 3065 struct sk_buff *skb, 3066 u32 rx_cmd_a, u32 rx_cmd_b) 3067 { 3068 if ((dev->net->features & NETIF_F_HW_VLAN_CTAG_RX) && 3069 (rx_cmd_a & RX_CMD_A_FVTG_)) 3070 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), 3071 (rx_cmd_b & 0xffff)); 3072 } 3073 3074 static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb) 3075 { 3076 int status; 3077 3078 if (test_bit(EVENT_RX_PAUSED, &dev->flags)) { 3079 skb_queue_tail(&dev->rxq_pause, skb); 3080 return; 3081 } 3082 3083 dev->net->stats.rx_packets++; 3084 dev->net->stats.rx_bytes += skb->len; 3085 3086 skb->protocol = eth_type_trans(skb, dev->net); 3087 3088 netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n", 3089 skb->len + sizeof(struct ethhdr), skb->protocol); 3090 memset(skb->cb, 0, sizeof(struct skb_data)); 3091 3092 if (skb_defer_rx_timestamp(skb)) 3093 return; 3094 3095 status = netif_rx(skb); 3096 if (status != NET_RX_SUCCESS) 3097 netif_dbg(dev, rx_err, dev->net, 3098 "netif_rx status %d\n", status); 3099 } 3100 3101 static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb) 3102 { 3103 if (skb->len < dev->net->hard_header_len) 3104 return 0; 3105 3106 while (skb->len > 0) { 3107 u32 rx_cmd_a, rx_cmd_b, align_count, size; 3108 u16 rx_cmd_c; 3109 struct sk_buff *skb2; 3110 unsigned char *packet; 3111 3112 memcpy(&rx_cmd_a, skb->data, sizeof(rx_cmd_a)); 3113 le32_to_cpus(&rx_cmd_a); 3114 skb_pull(skb, sizeof(rx_cmd_a)); 3115 3116 memcpy(&rx_cmd_b, skb->data, sizeof(rx_cmd_b)); 3117 le32_to_cpus(&rx_cmd_b); 3118 skb_pull(skb, sizeof(rx_cmd_b)); 3119 3120 memcpy(&rx_cmd_c, skb->data, sizeof(rx_cmd_c)); 3121 le16_to_cpus(&rx_cmd_c); 3122 skb_pull(skb, sizeof(rx_cmd_c)); 3123 3124 packet = skb->data; 3125 3126 /* get the packet length */ 3127 size = (rx_cmd_a & RX_CMD_A_LEN_MASK_); 3128 align_count = (4 - ((size + RXW_PADDING) % 4)) % 4; 3129 3130 if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) { 3131 netif_dbg(dev, rx_err, dev->net, 3132 "Error rx_cmd_a=0x%08x", rx_cmd_a); 3133 } else { 3134 /* last frame in this batch */ 3135 if (skb->len == size) { 3136 lan78xx_rx_csum_offload(dev, skb, 3137 rx_cmd_a, rx_cmd_b); 3138 lan78xx_rx_vlan_offload(dev, skb, 3139 rx_cmd_a, rx_cmd_b); 3140 3141 skb_trim(skb, skb->len - 4); /* remove fcs */ 3142 skb->truesize = size + sizeof(struct sk_buff); 3143 3144 return 1; 3145 } 3146 3147 skb2 = skb_clone(skb, GFP_ATOMIC); 3148 if (unlikely(!skb2)) { 3149 netdev_warn(dev->net, "Error allocating skb"); 3150 return 0; 3151 } 3152 3153 skb2->len = size; 3154 skb2->data = packet; 3155 skb_set_tail_pointer(skb2, size); 3156 3157 lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b); 3158 lan78xx_rx_vlan_offload(dev, skb2, rx_cmd_a, rx_cmd_b); 3159 3160 skb_trim(skb2, skb2->len - 4); /* remove fcs */ 3161 skb2->truesize = size + sizeof(struct sk_buff); 3162 3163 lan78xx_skb_return(dev, skb2); 3164 } 3165 3166 skb_pull(skb, size); 3167 3168 /* padding bytes before the next frame starts */ 3169 if (skb->len) 3170 skb_pull(skb, align_count); 3171 } 3172 3173 return 1; 3174 } 3175 3176 static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb) 3177 { 3178 if (!lan78xx_rx(dev, skb)) { 3179 dev->net->stats.rx_errors++; 3180 goto done; 3181 } 3182 3183 if (skb->len) { 3184 lan78xx_skb_return(dev, skb); 3185 return; 3186 } 3187 3188 netif_dbg(dev, rx_err, dev->net, "drop\n"); 3189 dev->net->stats.rx_errors++; 3190 done: 3191 skb_queue_tail(&dev->done, skb); 3192 } 3193 3194 static void rx_complete(struct urb *urb); 3195 3196 static int rx_submit(struct lan78xx_net *dev, struct urb *urb, gfp_t flags) 3197 { 3198 struct sk_buff *skb; 3199 struct skb_data *entry; 3200 unsigned long lockflags; 3201 size_t size = dev->rx_urb_size; 3202 int ret = 0; 3203 3204 skb = netdev_alloc_skb_ip_align(dev->net, size); 3205 if (!skb) { 3206 usb_free_urb(urb); 3207 return -ENOMEM; 3208 } 3209 3210 entry = (struct skb_data *)skb->cb; 3211 entry->urb = urb; 3212 entry->dev = dev; 3213 entry->length = 0; 3214 3215 usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in, 3216 skb->data, size, rx_complete, skb); 3217 3218 spin_lock_irqsave(&dev->rxq.lock, lockflags); 3219 3220 if (netif_device_present(dev->net) && 3221 netif_running(dev->net) && 3222 !test_bit(EVENT_RX_HALT, &dev->flags) && 3223 !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) { 3224 ret = usb_submit_urb(urb, GFP_ATOMIC); 3225 switch (ret) { 3226 case 0: 3227 lan78xx_queue_skb(&dev->rxq, skb, rx_start); 3228 break; 3229 case -EPIPE: 3230 lan78xx_defer_kevent(dev, EVENT_RX_HALT); 3231 break; 3232 case -ENODEV: 3233 netif_dbg(dev, ifdown, dev->net, "device gone\n"); 3234 netif_device_detach(dev->net); 3235 break; 3236 case -EHOSTUNREACH: 3237 ret = -ENOLINK; 3238 break; 3239 default: 3240 netif_dbg(dev, rx_err, dev->net, 3241 "rx submit, %d\n", ret); 3242 tasklet_schedule(&dev->bh); 3243 } 3244 } else { 3245 netif_dbg(dev, ifdown, dev->net, "rx: stopped\n"); 3246 ret = -ENOLINK; 3247 } 3248 spin_unlock_irqrestore(&dev->rxq.lock, lockflags); 3249 if (ret) { 3250 dev_kfree_skb_any(skb); 3251 usb_free_urb(urb); 3252 } 3253 return ret; 3254 } 3255 3256 static void rx_complete(struct urb *urb) 3257 { 3258 struct sk_buff *skb = (struct sk_buff *)urb->context; 3259 struct skb_data *entry = (struct skb_data *)skb->cb; 3260 struct lan78xx_net *dev = entry->dev; 3261 int urb_status = urb->status; 3262 enum skb_state state; 3263 3264 skb_put(skb, urb->actual_length); 3265 state = rx_done; 3266 entry->urb = NULL; 3267 3268 switch (urb_status) { 3269 case 0: 3270 if (skb->len < dev->net->hard_header_len) { 3271 state = rx_cleanup; 3272 dev->net->stats.rx_errors++; 3273 dev->net->stats.rx_length_errors++; 3274 netif_dbg(dev, rx_err, dev->net, 3275 "rx length %d\n", skb->len); 3276 } 3277 usb_mark_last_busy(dev->udev); 3278 break; 3279 case -EPIPE: 3280 dev->net->stats.rx_errors++; 3281 lan78xx_defer_kevent(dev, EVENT_RX_HALT); 3282 /* FALLTHROUGH */ 3283 case -ECONNRESET: /* async unlink */ 3284 case -ESHUTDOWN: /* hardware gone */ 3285 netif_dbg(dev, ifdown, dev->net, 3286 "rx shutdown, code %d\n", urb_status); 3287 state = rx_cleanup; 3288 entry->urb = urb; 3289 urb = NULL; 3290 break; 3291 case -EPROTO: 3292 case -ETIME: 3293 case -EILSEQ: 3294 dev->net->stats.rx_errors++; 3295 state = rx_cleanup; 3296 entry->urb = urb; 3297 urb = NULL; 3298 break; 3299 3300 /* data overrun ... flush fifo? */ 3301 case -EOVERFLOW: 3302 dev->net->stats.rx_over_errors++; 3303 /* FALLTHROUGH */ 3304 3305 default: 3306 state = rx_cleanup; 3307 dev->net->stats.rx_errors++; 3308 netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status); 3309 break; 3310 } 3311 3312 state = defer_bh(dev, skb, &dev->rxq, state); 3313 3314 if (urb) { 3315 if (netif_running(dev->net) && 3316 !test_bit(EVENT_RX_HALT, &dev->flags) && 3317 state != unlink_start) { 3318 rx_submit(dev, urb, GFP_ATOMIC); 3319 return; 3320 } 3321 usb_free_urb(urb); 3322 } 3323 netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n"); 3324 } 3325 3326 static void lan78xx_tx_bh(struct lan78xx_net *dev) 3327 { 3328 int length; 3329 struct urb *urb = NULL; 3330 struct skb_data *entry; 3331 unsigned long flags; 3332 struct sk_buff_head *tqp = &dev->txq_pend; 3333 struct sk_buff *skb, *skb2; 3334 int ret; 3335 int count, pos; 3336 int skb_totallen, pkt_cnt; 3337 3338 skb_totallen = 0; 3339 pkt_cnt = 0; 3340 count = 0; 3341 length = 0; 3342 spin_lock_irqsave(&tqp->lock, flags); 3343 skb_queue_walk(tqp, skb) { 3344 if (skb_is_gso(skb)) { 3345 if (!skb_queue_is_first(tqp, skb)) { 3346 /* handle previous packets first */ 3347 break; 3348 } 3349 count = 1; 3350 length = skb->len - TX_OVERHEAD; 3351 __skb_unlink(skb, tqp); 3352 spin_unlock_irqrestore(&tqp->lock, flags); 3353 goto gso_skb; 3354 } 3355 3356 if ((skb_totallen + skb->len) > MAX_SINGLE_PACKET_SIZE) 3357 break; 3358 skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32)); 3359 pkt_cnt++; 3360 } 3361 spin_unlock_irqrestore(&tqp->lock, flags); 3362 3363 /* copy to a single skb */ 3364 skb = alloc_skb(skb_totallen, GFP_ATOMIC); 3365 if (!skb) 3366 goto drop; 3367 3368 skb_put(skb, skb_totallen); 3369 3370 for (count = pos = 0; count < pkt_cnt; count++) { 3371 skb2 = skb_dequeue(tqp); 3372 if (skb2) { 3373 length += (skb2->len - TX_OVERHEAD); 3374 memcpy(skb->data + pos, skb2->data, skb2->len); 3375 pos += roundup(skb2->len, sizeof(u32)); 3376 dev_kfree_skb(skb2); 3377 } 3378 } 3379 3380 gso_skb: 3381 urb = usb_alloc_urb(0, GFP_ATOMIC); 3382 if (!urb) 3383 goto drop; 3384 3385 entry = (struct skb_data *)skb->cb; 3386 entry->urb = urb; 3387 entry->dev = dev; 3388 entry->length = length; 3389 entry->num_of_packet = count; 3390 3391 spin_lock_irqsave(&dev->txq.lock, flags); 3392 ret = usb_autopm_get_interface_async(dev->intf); 3393 if (ret < 0) { 3394 spin_unlock_irqrestore(&dev->txq.lock, flags); 3395 goto drop; 3396 } 3397 3398 usb_fill_bulk_urb(urb, dev->udev, dev->pipe_out, 3399 skb->data, skb->len, tx_complete, skb); 3400 3401 if (length % dev->maxpacket == 0) { 3402 /* send USB_ZERO_PACKET */ 3403 urb->transfer_flags |= URB_ZERO_PACKET; 3404 } 3405 3406 #ifdef CONFIG_PM 3407 /* if this triggers the device is still a sleep */ 3408 if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) { 3409 /* transmission will be done in resume */ 3410 usb_anchor_urb(urb, &dev->deferred); 3411 /* no use to process more packets */ 3412 netif_stop_queue(dev->net); 3413 usb_put_urb(urb); 3414 spin_unlock_irqrestore(&dev->txq.lock, flags); 3415 netdev_dbg(dev->net, "Delaying transmission for resumption\n"); 3416 return; 3417 } 3418 #endif 3419 3420 ret = usb_submit_urb(urb, GFP_ATOMIC); 3421 switch (ret) { 3422 case 0: 3423 netif_trans_update(dev->net); 3424 lan78xx_queue_skb(&dev->txq, skb, tx_start); 3425 if (skb_queue_len(&dev->txq) >= dev->tx_qlen) 3426 netif_stop_queue(dev->net); 3427 break; 3428 case -EPIPE: 3429 netif_stop_queue(dev->net); 3430 lan78xx_defer_kevent(dev, EVENT_TX_HALT); 3431 usb_autopm_put_interface_async(dev->intf); 3432 break; 3433 default: 3434 usb_autopm_put_interface_async(dev->intf); 3435 netif_dbg(dev, tx_err, dev->net, 3436 "tx: submit urb err %d\n", ret); 3437 break; 3438 } 3439 3440 spin_unlock_irqrestore(&dev->txq.lock, flags); 3441 3442 if (ret) { 3443 netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", ret); 3444 drop: 3445 dev->net->stats.tx_dropped++; 3446 if (skb) 3447 dev_kfree_skb_any(skb); 3448 usb_free_urb(urb); 3449 } else 3450 netif_dbg(dev, tx_queued, dev->net, 3451 "> tx, len %d, type 0x%x\n", length, skb->protocol); 3452 } 3453 3454 static void lan78xx_rx_bh(struct lan78xx_net *dev) 3455 { 3456 struct urb *urb; 3457 int i; 3458 3459 if (skb_queue_len(&dev->rxq) < dev->rx_qlen) { 3460 for (i = 0; i < 10; i++) { 3461 if (skb_queue_len(&dev->rxq) >= dev->rx_qlen) 3462 break; 3463 urb = usb_alloc_urb(0, GFP_ATOMIC); 3464 if (urb) 3465 if (rx_submit(dev, urb, GFP_ATOMIC) == -ENOLINK) 3466 return; 3467 } 3468 3469 if (skb_queue_len(&dev->rxq) < dev->rx_qlen) 3470 tasklet_schedule(&dev->bh); 3471 } 3472 if (skb_queue_len(&dev->txq) < dev->tx_qlen) 3473 netif_wake_queue(dev->net); 3474 } 3475 3476 static void lan78xx_bh(unsigned long param) 3477 { 3478 struct lan78xx_net *dev = (struct lan78xx_net *)param; 3479 struct sk_buff *skb; 3480 struct skb_data *entry; 3481 3482 while ((skb = skb_dequeue(&dev->done))) { 3483 entry = (struct skb_data *)(skb->cb); 3484 switch (entry->state) { 3485 case rx_done: 3486 entry->state = rx_cleanup; 3487 rx_process(dev, skb); 3488 continue; 3489 case tx_done: 3490 usb_free_urb(entry->urb); 3491 dev_kfree_skb(skb); 3492 continue; 3493 case rx_cleanup: 3494 usb_free_urb(entry->urb); 3495 dev_kfree_skb(skb); 3496 continue; 3497 default: 3498 netdev_dbg(dev->net, "skb state %d\n", entry->state); 3499 return; 3500 } 3501 } 3502 3503 if (netif_device_present(dev->net) && netif_running(dev->net)) { 3504 /* reset update timer delta */ 3505 if (timer_pending(&dev->stat_monitor) && (dev->delta != 1)) { 3506 dev->delta = 1; 3507 mod_timer(&dev->stat_monitor, 3508 jiffies + STAT_UPDATE_TIMER); 3509 } 3510 3511 if (!skb_queue_empty(&dev->txq_pend)) 3512 lan78xx_tx_bh(dev); 3513 3514 if (!timer_pending(&dev->delay) && 3515 !test_bit(EVENT_RX_HALT, &dev->flags)) 3516 lan78xx_rx_bh(dev); 3517 } 3518 } 3519 3520 static void lan78xx_delayedwork(struct work_struct *work) 3521 { 3522 int status; 3523 struct lan78xx_net *dev; 3524 3525 dev = container_of(work, struct lan78xx_net, wq.work); 3526 3527 if (test_bit(EVENT_TX_HALT, &dev->flags)) { 3528 unlink_urbs(dev, &dev->txq); 3529 status = usb_autopm_get_interface(dev->intf); 3530 if (status < 0) 3531 goto fail_pipe; 3532 status = usb_clear_halt(dev->udev, dev->pipe_out); 3533 usb_autopm_put_interface(dev->intf); 3534 if (status < 0 && 3535 status != -EPIPE && 3536 status != -ESHUTDOWN) { 3537 if (netif_msg_tx_err(dev)) 3538 fail_pipe: 3539 netdev_err(dev->net, 3540 "can't clear tx halt, status %d\n", 3541 status); 3542 } else { 3543 clear_bit(EVENT_TX_HALT, &dev->flags); 3544 if (status != -ESHUTDOWN) 3545 netif_wake_queue(dev->net); 3546 } 3547 } 3548 if (test_bit(EVENT_RX_HALT, &dev->flags)) { 3549 unlink_urbs(dev, &dev->rxq); 3550 status = usb_autopm_get_interface(dev->intf); 3551 if (status < 0) 3552 goto fail_halt; 3553 status = usb_clear_halt(dev->udev, dev->pipe_in); 3554 usb_autopm_put_interface(dev->intf); 3555 if (status < 0 && 3556 status != -EPIPE && 3557 status != -ESHUTDOWN) { 3558 if (netif_msg_rx_err(dev)) 3559 fail_halt: 3560 netdev_err(dev->net, 3561 "can't clear rx halt, status %d\n", 3562 status); 3563 } else { 3564 clear_bit(EVENT_RX_HALT, &dev->flags); 3565 tasklet_schedule(&dev->bh); 3566 } 3567 } 3568 3569 if (test_bit(EVENT_LINK_RESET, &dev->flags)) { 3570 int ret = 0; 3571 3572 clear_bit(EVENT_LINK_RESET, &dev->flags); 3573 status = usb_autopm_get_interface(dev->intf); 3574 if (status < 0) 3575 goto skip_reset; 3576 if (lan78xx_link_reset(dev) < 0) { 3577 usb_autopm_put_interface(dev->intf); 3578 skip_reset: 3579 netdev_info(dev->net, "link reset failed (%d)\n", 3580 ret); 3581 } else { 3582 usb_autopm_put_interface(dev->intf); 3583 } 3584 } 3585 3586 if (test_bit(EVENT_STAT_UPDATE, &dev->flags)) { 3587 lan78xx_update_stats(dev); 3588 3589 clear_bit(EVENT_STAT_UPDATE, &dev->flags); 3590 3591 mod_timer(&dev->stat_monitor, 3592 jiffies + (STAT_UPDATE_TIMER * dev->delta)); 3593 3594 dev->delta = min((dev->delta * 2), 50); 3595 } 3596 } 3597 3598 static void intr_complete(struct urb *urb) 3599 { 3600 struct lan78xx_net *dev = urb->context; 3601 int status = urb->status; 3602 3603 switch (status) { 3604 /* success */ 3605 case 0: 3606 lan78xx_status(dev, urb); 3607 break; 3608 3609 /* software-driven interface shutdown */ 3610 case -ENOENT: /* urb killed */ 3611 case -ESHUTDOWN: /* hardware gone */ 3612 netif_dbg(dev, ifdown, dev->net, 3613 "intr shutdown, code %d\n", status); 3614 return; 3615 3616 /* NOTE: not throttling like RX/TX, since this endpoint 3617 * already polls infrequently 3618 */ 3619 default: 3620 netdev_dbg(dev->net, "intr status %d\n", status); 3621 break; 3622 } 3623 3624 if (!netif_running(dev->net)) 3625 return; 3626 3627 memset(urb->transfer_buffer, 0, urb->transfer_buffer_length); 3628 status = usb_submit_urb(urb, GFP_ATOMIC); 3629 if (status != 0) 3630 netif_err(dev, timer, dev->net, 3631 "intr resubmit --> %d\n", status); 3632 } 3633 3634 static void lan78xx_disconnect(struct usb_interface *intf) 3635 { 3636 struct lan78xx_net *dev; 3637 struct usb_device *udev; 3638 struct net_device *net; 3639 struct phy_device *phydev; 3640 3641 dev = usb_get_intfdata(intf); 3642 usb_set_intfdata(intf, NULL); 3643 if (!dev) 3644 return; 3645 3646 udev = interface_to_usbdev(intf); 3647 net = dev->net; 3648 phydev = net->phydev; 3649 3650 phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0); 3651 phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0); 3652 3653 phy_disconnect(net->phydev); 3654 3655 if (phy_is_pseudo_fixed_link(phydev)) 3656 fixed_phy_unregister(phydev); 3657 3658 unregister_netdev(net); 3659 3660 cancel_delayed_work_sync(&dev->wq); 3661 3662 usb_scuttle_anchored_urbs(&dev->deferred); 3663 3664 lan78xx_unbind(dev, intf); 3665 3666 usb_kill_urb(dev->urb_intr); 3667 usb_free_urb(dev->urb_intr); 3668 3669 free_netdev(net); 3670 usb_put_dev(udev); 3671 } 3672 3673 static void lan78xx_tx_timeout(struct net_device *net) 3674 { 3675 struct lan78xx_net *dev = netdev_priv(net); 3676 3677 unlink_urbs(dev, &dev->txq); 3678 tasklet_schedule(&dev->bh); 3679 } 3680 3681 static const struct net_device_ops lan78xx_netdev_ops = { 3682 .ndo_open = lan78xx_open, 3683 .ndo_stop = lan78xx_stop, 3684 .ndo_start_xmit = lan78xx_start_xmit, 3685 .ndo_tx_timeout = lan78xx_tx_timeout, 3686 .ndo_change_mtu = lan78xx_change_mtu, 3687 .ndo_set_mac_address = lan78xx_set_mac_addr, 3688 .ndo_validate_addr = eth_validate_addr, 3689 .ndo_do_ioctl = lan78xx_ioctl, 3690 .ndo_set_rx_mode = lan78xx_set_multicast, 3691 .ndo_set_features = lan78xx_set_features, 3692 .ndo_vlan_rx_add_vid = lan78xx_vlan_rx_add_vid, 3693 .ndo_vlan_rx_kill_vid = lan78xx_vlan_rx_kill_vid, 3694 }; 3695 3696 static void lan78xx_stat_monitor(struct timer_list *t) 3697 { 3698 struct lan78xx_net *dev = from_timer(dev, t, stat_monitor); 3699 3700 lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE); 3701 } 3702 3703 static int lan78xx_probe(struct usb_interface *intf, 3704 const struct usb_device_id *id) 3705 { 3706 struct lan78xx_net *dev; 3707 struct net_device *netdev; 3708 struct usb_device *udev; 3709 int ret; 3710 unsigned maxp; 3711 unsigned period; 3712 u8 *buf = NULL; 3713 3714 udev = interface_to_usbdev(intf); 3715 udev = usb_get_dev(udev); 3716 3717 netdev = alloc_etherdev(sizeof(struct lan78xx_net)); 3718 if (!netdev) { 3719 dev_err(&intf->dev, "Error: OOM\n"); 3720 ret = -ENOMEM; 3721 goto out1; 3722 } 3723 3724 /* netdev_printk() needs this */ 3725 SET_NETDEV_DEV(netdev, &intf->dev); 3726 3727 dev = netdev_priv(netdev); 3728 dev->udev = udev; 3729 dev->intf = intf; 3730 dev->net = netdev; 3731 dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV 3732 | NETIF_MSG_PROBE | NETIF_MSG_LINK); 3733 3734 skb_queue_head_init(&dev->rxq); 3735 skb_queue_head_init(&dev->txq); 3736 skb_queue_head_init(&dev->done); 3737 skb_queue_head_init(&dev->rxq_pause); 3738 skb_queue_head_init(&dev->txq_pend); 3739 mutex_init(&dev->phy_mutex); 3740 3741 tasklet_init(&dev->bh, lan78xx_bh, (unsigned long)dev); 3742 INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork); 3743 init_usb_anchor(&dev->deferred); 3744 3745 netdev->netdev_ops = &lan78xx_netdev_ops; 3746 netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES; 3747 netdev->ethtool_ops = &lan78xx_ethtool_ops; 3748 3749 dev->delta = 1; 3750 timer_setup(&dev->stat_monitor, lan78xx_stat_monitor, 0); 3751 3752 mutex_init(&dev->stats.access_lock); 3753 3754 ret = lan78xx_bind(dev, intf); 3755 if (ret < 0) 3756 goto out2; 3757 3758 if (netdev->mtu > (dev->hard_mtu - netdev->hard_header_len)) 3759 netdev->mtu = dev->hard_mtu - netdev->hard_header_len; 3760 3761 /* MTU range: 68 - 9000 */ 3762 netdev->max_mtu = MAX_SINGLE_PACKET_SIZE; 3763 3764 dev->ep_blkin = (intf->cur_altsetting)->endpoint + 0; 3765 dev->ep_blkout = (intf->cur_altsetting)->endpoint + 1; 3766 dev->ep_intr = (intf->cur_altsetting)->endpoint + 2; 3767 3768 dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE); 3769 dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE); 3770 3771 dev->pipe_intr = usb_rcvintpipe(dev->udev, 3772 dev->ep_intr->desc.bEndpointAddress & 3773 USB_ENDPOINT_NUMBER_MASK); 3774 period = dev->ep_intr->desc.bInterval; 3775 3776 maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0); 3777 buf = kmalloc(maxp, GFP_KERNEL); 3778 if (buf) { 3779 dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL); 3780 if (!dev->urb_intr) { 3781 ret = -ENOMEM; 3782 kfree(buf); 3783 goto out3; 3784 } else { 3785 usb_fill_int_urb(dev->urb_intr, dev->udev, 3786 dev->pipe_intr, buf, maxp, 3787 intr_complete, dev, period); 3788 } 3789 } 3790 3791 dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1); 3792 3793 /* driver requires remote-wakeup capability during autosuspend. */ 3794 intf->needs_remote_wakeup = 1; 3795 3796 ret = register_netdev(netdev); 3797 if (ret != 0) { 3798 netif_err(dev, probe, netdev, "couldn't register the device\n"); 3799 goto out3; 3800 } 3801 3802 usb_set_intfdata(intf, dev); 3803 3804 ret = device_set_wakeup_enable(&udev->dev, true); 3805 3806 /* Default delay of 2sec has more overhead than advantage. 3807 * Set to 10sec as default. 3808 */ 3809 pm_runtime_set_autosuspend_delay(&udev->dev, 3810 DEFAULT_AUTOSUSPEND_DELAY); 3811 3812 ret = lan78xx_phy_init(dev); 3813 if (ret < 0) 3814 goto out4; 3815 3816 return 0; 3817 3818 out4: 3819 unregister_netdev(netdev); 3820 out3: 3821 lan78xx_unbind(dev, intf); 3822 out2: 3823 free_netdev(netdev); 3824 out1: 3825 usb_put_dev(udev); 3826 3827 return ret; 3828 } 3829 3830 static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len) 3831 { 3832 const u16 crc16poly = 0x8005; 3833 int i; 3834 u16 bit, crc, msb; 3835 u8 data; 3836 3837 crc = 0xFFFF; 3838 for (i = 0; i < len; i++) { 3839 data = *buf++; 3840 for (bit = 0; bit < 8; bit++) { 3841 msb = crc >> 15; 3842 crc <<= 1; 3843 3844 if (msb ^ (u16)(data & 1)) { 3845 crc ^= crc16poly; 3846 crc |= (u16)0x0001U; 3847 } 3848 data >>= 1; 3849 } 3850 } 3851 3852 return crc; 3853 } 3854 3855 static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol) 3856 { 3857 u32 buf; 3858 int ret; 3859 int mask_index; 3860 u16 crc; 3861 u32 temp_wucsr; 3862 u32 temp_pmt_ctl; 3863 const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E }; 3864 const u8 ipv6_multicast[3] = { 0x33, 0x33 }; 3865 const u8 arp_type[2] = { 0x08, 0x06 }; 3866 3867 ret = lan78xx_read_reg(dev, MAC_TX, &buf); 3868 buf &= ~MAC_TX_TXEN_; 3869 ret = lan78xx_write_reg(dev, MAC_TX, buf); 3870 ret = lan78xx_read_reg(dev, MAC_RX, &buf); 3871 buf &= ~MAC_RX_RXEN_; 3872 ret = lan78xx_write_reg(dev, MAC_RX, buf); 3873 3874 ret = lan78xx_write_reg(dev, WUCSR, 0); 3875 ret = lan78xx_write_reg(dev, WUCSR2, 0); 3876 ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL); 3877 3878 temp_wucsr = 0; 3879 3880 temp_pmt_ctl = 0; 3881 ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl); 3882 temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_; 3883 temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_; 3884 3885 for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++) 3886 ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0); 3887 3888 mask_index = 0; 3889 if (wol & WAKE_PHY) { 3890 temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_; 3891 3892 temp_pmt_ctl |= PMT_CTL_WOL_EN_; 3893 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_; 3894 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_; 3895 } 3896 if (wol & WAKE_MAGIC) { 3897 temp_wucsr |= WUCSR_MPEN_; 3898 3899 temp_pmt_ctl |= PMT_CTL_WOL_EN_; 3900 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_; 3901 temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_; 3902 } 3903 if (wol & WAKE_BCAST) { 3904 temp_wucsr |= WUCSR_BCST_EN_; 3905 3906 temp_pmt_ctl |= PMT_CTL_WOL_EN_; 3907 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_; 3908 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_; 3909 } 3910 if (wol & WAKE_MCAST) { 3911 temp_wucsr |= WUCSR_WAKE_EN_; 3912 3913 /* set WUF_CFG & WUF_MASK for IPv4 Multicast */ 3914 crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3); 3915 ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 3916 WUF_CFGX_EN_ | 3917 WUF_CFGX_TYPE_MCAST_ | 3918 (0 << WUF_CFGX_OFFSET_SHIFT_) | 3919 (crc & WUF_CFGX_CRC16_MASK_)); 3920 3921 ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7); 3922 ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0); 3923 ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0); 3924 ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0); 3925 mask_index++; 3926 3927 /* for IPv6 Multicast */ 3928 crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2); 3929 ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 3930 WUF_CFGX_EN_ | 3931 WUF_CFGX_TYPE_MCAST_ | 3932 (0 << WUF_CFGX_OFFSET_SHIFT_) | 3933 (crc & WUF_CFGX_CRC16_MASK_)); 3934 3935 ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3); 3936 ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0); 3937 ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0); 3938 ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0); 3939 mask_index++; 3940 3941 temp_pmt_ctl |= PMT_CTL_WOL_EN_; 3942 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_; 3943 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_; 3944 } 3945 if (wol & WAKE_UCAST) { 3946 temp_wucsr |= WUCSR_PFDA_EN_; 3947 3948 temp_pmt_ctl |= PMT_CTL_WOL_EN_; 3949 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_; 3950 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_; 3951 } 3952 if (wol & WAKE_ARP) { 3953 temp_wucsr |= WUCSR_WAKE_EN_; 3954 3955 /* set WUF_CFG & WUF_MASK 3956 * for packettype (offset 12,13) = ARP (0x0806) 3957 */ 3958 crc = lan78xx_wakeframe_crc16(arp_type, 2); 3959 ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 3960 WUF_CFGX_EN_ | 3961 WUF_CFGX_TYPE_ALL_ | 3962 (0 << WUF_CFGX_OFFSET_SHIFT_) | 3963 (crc & WUF_CFGX_CRC16_MASK_)); 3964 3965 ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000); 3966 ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0); 3967 ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0); 3968 ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0); 3969 mask_index++; 3970 3971 temp_pmt_ctl |= PMT_CTL_WOL_EN_; 3972 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_; 3973 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_; 3974 } 3975 3976 ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr); 3977 3978 /* when multiple WOL bits are set */ 3979 if (hweight_long((unsigned long)wol) > 1) { 3980 temp_pmt_ctl |= PMT_CTL_WOL_EN_; 3981 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_; 3982 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_; 3983 } 3984 ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl); 3985 3986 /* clear WUPS */ 3987 ret = lan78xx_read_reg(dev, PMT_CTL, &buf); 3988 buf |= PMT_CTL_WUPS_MASK_; 3989 ret = lan78xx_write_reg(dev, PMT_CTL, buf); 3990 3991 ret = lan78xx_read_reg(dev, MAC_RX, &buf); 3992 buf |= MAC_RX_RXEN_; 3993 ret = lan78xx_write_reg(dev, MAC_RX, buf); 3994 3995 return 0; 3996 } 3997 3998 static int lan78xx_suspend(struct usb_interface *intf, pm_message_t message) 3999 { 4000 struct lan78xx_net *dev = usb_get_intfdata(intf); 4001 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]); 4002 u32 buf; 4003 int ret; 4004 int event; 4005 4006 event = message.event; 4007 4008 if (!dev->suspend_count++) { 4009 spin_lock_irq(&dev->txq.lock); 4010 /* don't autosuspend while transmitting */ 4011 if ((skb_queue_len(&dev->txq) || 4012 skb_queue_len(&dev->txq_pend)) && 4013 PMSG_IS_AUTO(message)) { 4014 spin_unlock_irq(&dev->txq.lock); 4015 ret = -EBUSY; 4016 goto out; 4017 } else { 4018 set_bit(EVENT_DEV_ASLEEP, &dev->flags); 4019 spin_unlock_irq(&dev->txq.lock); 4020 } 4021 4022 /* stop TX & RX */ 4023 ret = lan78xx_read_reg(dev, MAC_TX, &buf); 4024 buf &= ~MAC_TX_TXEN_; 4025 ret = lan78xx_write_reg(dev, MAC_TX, buf); 4026 ret = lan78xx_read_reg(dev, MAC_RX, &buf); 4027 buf &= ~MAC_RX_RXEN_; 4028 ret = lan78xx_write_reg(dev, MAC_RX, buf); 4029 4030 /* empty out the rx and queues */ 4031 netif_device_detach(dev->net); 4032 lan78xx_terminate_urbs(dev); 4033 usb_kill_urb(dev->urb_intr); 4034 4035 /* reattach */ 4036 netif_device_attach(dev->net); 4037 } 4038 4039 if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) { 4040 del_timer(&dev->stat_monitor); 4041 4042 if (PMSG_IS_AUTO(message)) { 4043 /* auto suspend (selective suspend) */ 4044 ret = lan78xx_read_reg(dev, MAC_TX, &buf); 4045 buf &= ~MAC_TX_TXEN_; 4046 ret = lan78xx_write_reg(dev, MAC_TX, buf); 4047 ret = lan78xx_read_reg(dev, MAC_RX, &buf); 4048 buf &= ~MAC_RX_RXEN_; 4049 ret = lan78xx_write_reg(dev, MAC_RX, buf); 4050 4051 ret = lan78xx_write_reg(dev, WUCSR, 0); 4052 ret = lan78xx_write_reg(dev, WUCSR2, 0); 4053 ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL); 4054 4055 /* set goodframe wakeup */ 4056 ret = lan78xx_read_reg(dev, WUCSR, &buf); 4057 4058 buf |= WUCSR_RFE_WAKE_EN_; 4059 buf |= WUCSR_STORE_WAKE_; 4060 4061 ret = lan78xx_write_reg(dev, WUCSR, buf); 4062 4063 ret = lan78xx_read_reg(dev, PMT_CTL, &buf); 4064 4065 buf &= ~PMT_CTL_RES_CLR_WKP_EN_; 4066 buf |= PMT_CTL_RES_CLR_WKP_STS_; 4067 4068 buf |= PMT_CTL_PHY_WAKE_EN_; 4069 buf |= PMT_CTL_WOL_EN_; 4070 buf &= ~PMT_CTL_SUS_MODE_MASK_; 4071 buf |= PMT_CTL_SUS_MODE_3_; 4072 4073 ret = lan78xx_write_reg(dev, PMT_CTL, buf); 4074 4075 ret = lan78xx_read_reg(dev, PMT_CTL, &buf); 4076 4077 buf |= PMT_CTL_WUPS_MASK_; 4078 4079 ret = lan78xx_write_reg(dev, PMT_CTL, buf); 4080 4081 ret = lan78xx_read_reg(dev, MAC_RX, &buf); 4082 buf |= MAC_RX_RXEN_; 4083 ret = lan78xx_write_reg(dev, MAC_RX, buf); 4084 } else { 4085 lan78xx_set_suspend(dev, pdata->wol); 4086 } 4087 } 4088 4089 ret = 0; 4090 out: 4091 return ret; 4092 } 4093 4094 static int lan78xx_resume(struct usb_interface *intf) 4095 { 4096 struct lan78xx_net *dev = usb_get_intfdata(intf); 4097 struct sk_buff *skb; 4098 struct urb *res; 4099 int ret; 4100 u32 buf; 4101 4102 if (!timer_pending(&dev->stat_monitor)) { 4103 dev->delta = 1; 4104 mod_timer(&dev->stat_monitor, 4105 jiffies + STAT_UPDATE_TIMER); 4106 } 4107 4108 if (!--dev->suspend_count) { 4109 /* resume interrupt URBs */ 4110 if (dev->urb_intr && test_bit(EVENT_DEV_OPEN, &dev->flags)) 4111 usb_submit_urb(dev->urb_intr, GFP_NOIO); 4112 4113 spin_lock_irq(&dev->txq.lock); 4114 while ((res = usb_get_from_anchor(&dev->deferred))) { 4115 skb = (struct sk_buff *)res->context; 4116 ret = usb_submit_urb(res, GFP_ATOMIC); 4117 if (ret < 0) { 4118 dev_kfree_skb_any(skb); 4119 usb_free_urb(res); 4120 usb_autopm_put_interface_async(dev->intf); 4121 } else { 4122 netif_trans_update(dev->net); 4123 lan78xx_queue_skb(&dev->txq, skb, tx_start); 4124 } 4125 } 4126 4127 clear_bit(EVENT_DEV_ASLEEP, &dev->flags); 4128 spin_unlock_irq(&dev->txq.lock); 4129 4130 if (test_bit(EVENT_DEV_OPEN, &dev->flags)) { 4131 if (!(skb_queue_len(&dev->txq) >= dev->tx_qlen)) 4132 netif_start_queue(dev->net); 4133 tasklet_schedule(&dev->bh); 4134 } 4135 } 4136 4137 ret = lan78xx_write_reg(dev, WUCSR2, 0); 4138 ret = lan78xx_write_reg(dev, WUCSR, 0); 4139 ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL); 4140 4141 ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ | 4142 WUCSR2_ARP_RCD_ | 4143 WUCSR2_IPV6_TCPSYN_RCD_ | 4144 WUCSR2_IPV4_TCPSYN_RCD_); 4145 4146 ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ | 4147 WUCSR_EEE_RX_WAKE_ | 4148 WUCSR_PFDA_FR_ | 4149 WUCSR_RFE_WAKE_FR_ | 4150 WUCSR_WUFR_ | 4151 WUCSR_MPR_ | 4152 WUCSR_BCST_FR_); 4153 4154 ret = lan78xx_read_reg(dev, MAC_TX, &buf); 4155 buf |= MAC_TX_TXEN_; 4156 ret = lan78xx_write_reg(dev, MAC_TX, buf); 4157 4158 return 0; 4159 } 4160 4161 static int lan78xx_reset_resume(struct usb_interface *intf) 4162 { 4163 struct lan78xx_net *dev = usb_get_intfdata(intf); 4164 4165 lan78xx_reset(dev); 4166 4167 phy_start(dev->net->phydev); 4168 4169 return lan78xx_resume(intf); 4170 } 4171 4172 static const struct usb_device_id products[] = { 4173 { 4174 /* LAN7800 USB Gigabit Ethernet Device */ 4175 USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID), 4176 }, 4177 { 4178 /* LAN7850 USB Gigabit Ethernet Device */ 4179 USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID), 4180 }, 4181 { 4182 /* LAN7801 USB Gigabit Ethernet Device */ 4183 USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7801_USB_PRODUCT_ID), 4184 }, 4185 {}, 4186 }; 4187 MODULE_DEVICE_TABLE(usb, products); 4188 4189 static struct usb_driver lan78xx_driver = { 4190 .name = DRIVER_NAME, 4191 .id_table = products, 4192 .probe = lan78xx_probe, 4193 .disconnect = lan78xx_disconnect, 4194 .suspend = lan78xx_suspend, 4195 .resume = lan78xx_resume, 4196 .reset_resume = lan78xx_reset_resume, 4197 .supports_autosuspend = 1, 4198 .disable_hub_initiated_lpm = 1, 4199 }; 4200 4201 module_usb_driver(lan78xx_driver); 4202 4203 MODULE_AUTHOR(DRIVER_AUTHOR); 4204 MODULE_DESCRIPTION(DRIVER_DESC); 4205 MODULE_LICENSE("GPL"); 4206