Lines matching "netdev" (full-text search) in the Actions Semi Owl Ethernet MAC driver (drivers/net/ethernet/actions/owl-emac.c):
63 return priv->netdev->dev.parent; in owl_emac_get_dev()
161 static struct sk_buff *owl_emac_alloc_skb(struct net_device *netdev) in owl_emac_alloc_skb() argument
166 skb = netdev_alloc_skb(netdev, OWL_EMAC_RX_FRAME_MAX_LEN + in owl_emac_alloc_skb()
183 struct net_device *netdev = priv->netdev; in owl_emac_ring_prepare_rx() local
190 skb = owl_emac_alloc_skb(netdev); in owl_emac_ring_prepare_rx()
342 static void owl_emac_set_hw_mac_addr(struct net_device *netdev) in owl_emac_set_hw_mac_addr() argument
344 struct owl_emac_priv *priv = netdev_priv(netdev); in owl_emac_set_hw_mac_addr()
345 const u8 *mac_addr = netdev->dev_addr; in owl_emac_set_hw_mac_addr()
394 static void owl_emac_adjust_link(struct net_device *netdev) in owl_emac_adjust_link() argument
396 struct owl_emac_priv *priv = netdev_priv(netdev); in owl_emac_adjust_link()
397 struct phy_device *phydev = netdev->phydev; in owl_emac_adjust_link()
438 struct net_device *netdev = data; in owl_emac_handle_irq() local
439 struct owl_emac_priv *priv = netdev_priv(netdev); in owl_emac_handle_irq()
441 if (netif_running(netdev)) { in owl_emac_handle_irq()
465 const u8 *mac_addr = priv->netdev->dev_addr; in owl_emac_setup_frame_prepare()
493 struct net_device *netdev = priv->netdev; in owl_emac_setup_frame_xmit() local
501 skb = owl_emac_alloc_skb(netdev); in owl_emac_setup_frame_xmit()
554 netif_stop_queue(netdev); in owl_emac_setup_frame_xmit()
566 struct net_device *netdev) in owl_emac_ndo_start_xmit() argument
568 struct owl_emac_priv *priv = netdev_priv(netdev); in owl_emac_ndo_start_xmit()
578 dev_err_ratelimited(&netdev->dev, "TX DMA mapping failed\n"); in owl_emac_ndo_start_xmit()
580 netdev->stats.tx_dropped++; in owl_emac_ndo_start_xmit()
595 netif_stop_queue(netdev); in owl_emac_ndo_start_xmit()
598 dev_dbg_ratelimited(&netdev->dev, "TX buffer full, status=0x%08x\n", in owl_emac_ndo_start_xmit()
601 netdev->stats.tx_dropped++; in owl_emac_ndo_start_xmit()
624 netif_stop_queue(netdev); in owl_emac_ndo_start_xmit()
634 struct net_device *netdev = priv->netdev; in owl_emac_tx_complete_tail() local
651 dev_dbg_ratelimited(&netdev->dev, in owl_emac_tx_complete_tail()
655 netdev->stats.tx_errors++; in owl_emac_tx_complete_tail()
658 netdev->stats.tx_fifo_errors++; in owl_emac_tx_complete_tail()
661 netdev->stats.tx_aborted_errors++; in owl_emac_tx_complete_tail()
664 netdev->stats.tx_window_errors++; in owl_emac_tx_complete_tail()
667 netdev->stats.tx_heartbeat_errors++; in owl_emac_tx_complete_tail()
670 netdev->stats.tx_carrier_errors++; in owl_emac_tx_complete_tail()
672 netdev->stats.tx_packets++; in owl_emac_tx_complete_tail()
673 netdev->stats.tx_bytes += ring->skbs[tx_tail]->len; in owl_emac_tx_complete_tail()
678 netdev->stats.collisions++; in owl_emac_tx_complete_tail()
689 if (unlikely(netif_queue_stopped(netdev))) in owl_emac_tx_complete_tail()
690 netif_wake_queue(netdev); in owl_emac_tx_complete_tail()
698 struct net_device *netdev = priv->netdev; in owl_emac_tx_complete() local
731 netdev_dbg(netdev, "Found uncleared TX desc OWN bit\n"); in owl_emac_tx_complete()
750 struct net_device *netdev = priv->netdev; in owl_emac_rx_process() local
782 dev_dbg_ratelimited(&netdev->dev, in owl_emac_rx_process()
787 netdev->stats.rx_over_errors++; in owl_emac_rx_process()
790 netdev->stats.rx_frame_errors++; in owl_emac_rx_process()
793 netdev->stats.rx_length_errors++; in owl_emac_rx_process()
796 netdev->stats.collisions++; in owl_emac_rx_process()
799 netdev->stats.rx_crc_errors++; in owl_emac_rx_process()
802 netdev->stats.rx_fifo_errors++; in owl_emac_rx_process()
809 netdev->stats.rx_length_errors++; in owl_emac_rx_process()
810 netdev_err(netdev, "invalid RX frame len: %u\n", len); in owl_emac_rx_process()
815 new_skb = owl_emac_alloc_skb(netdev); in owl_emac_rx_process()
822 netdev_err(netdev, "RX DMA mapping failed\n"); in owl_emac_rx_process()
830 curr_skb->protocol = eth_type_trans(curr_skb, netdev); in owl_emac_rx_process()
831 curr_skb->dev = netdev; in owl_emac_rx_process()
835 netdev->stats.rx_packets++; in owl_emac_rx_process()
836 netdev->stats.rx_bytes += len; in owl_emac_rx_process()
841 netdev->stats.rx_dropped++; in owl_emac_rx_process()
842 netdev->stats.rx_errors++; in owl_emac_rx_process()
930 netdev_dbg(priv->netdev, "%s error status: 0x%08x\n", in owl_emac_poll()
1032 static int owl_emac_enable(struct net_device *netdev, bool start_phy) in owl_emac_enable() argument
1034 struct owl_emac_priv *priv = netdev_priv(netdev); in owl_emac_enable()
1048 netdev_err(netdev, "failed to soft reset MAC core: %d\n", ret); in owl_emac_enable()
1052 owl_emac_set_hw_mac_addr(netdev); in owl_emac_enable()
1055 netdev_reset_queue(netdev); in owl_emac_enable()
1062 phy_start(netdev->phydev); in owl_emac_enable()
1064 netif_start_queue(netdev); in owl_emac_enable()
1075 static void owl_emac_disable(struct net_device *netdev, bool stop_phy) in owl_emac_disable() argument
1077 struct owl_emac_priv *priv = netdev_priv(netdev); in owl_emac_disable()
1082 netif_stop_queue(netdev); in owl_emac_disable()
1086 phy_stop(netdev->phydev); in owl_emac_disable()
1092 static int owl_emac_ndo_open(struct net_device *netdev) in owl_emac_ndo_open() argument
1094 return owl_emac_enable(netdev, true); in owl_emac_ndo_open()
1097 static int owl_emac_ndo_stop(struct net_device *netdev) in owl_emac_ndo_stop() argument
1099 owl_emac_disable(netdev, true); in owl_emac_ndo_stop()
1104 static void owl_emac_set_multicast(struct net_device *netdev, int count) in owl_emac_set_multicast() argument
1106 struct owl_emac_priv *priv = netdev_priv(netdev); in owl_emac_set_multicast()
1115 netdev_for_each_mc_addr(ha, netdev) { in owl_emac_set_multicast()
1128 static void owl_emac_ndo_set_rx_mode(struct net_device *netdev) in owl_emac_ndo_set_rx_mode() argument
1130 struct owl_emac_priv *priv = netdev_priv(netdev); in owl_emac_ndo_set_rx_mode()
1134 if (netdev->flags & IFF_PROMISC) { in owl_emac_ndo_set_rx_mode()
1136 } else if (netdev->flags & IFF_ALLMULTI) { in owl_emac_ndo_set_rx_mode()
1138 } else if (netdev->flags & IFF_MULTICAST) { in owl_emac_ndo_set_rx_mode()
1139 mcast_count = netdev_mc_count(netdev); in owl_emac_ndo_set_rx_mode()
1163 owl_emac_set_multicast(netdev, mcast_count); in owl_emac_ndo_set_rx_mode()
1166 static int owl_emac_ndo_set_mac_addr(struct net_device *netdev, void *addr) in owl_emac_ndo_set_mac_addr() argument
1173 if (netif_running(netdev)) in owl_emac_ndo_set_mac_addr()
1176 eth_hw_addr_set(netdev, skaddr->sa_data); in owl_emac_ndo_set_mac_addr()
1177 owl_emac_set_hw_mac_addr(netdev); in owl_emac_ndo_set_mac_addr()
1179 return owl_emac_setup_frame_xmit(netdev_priv(netdev)); in owl_emac_ndo_set_mac_addr()
1182 static int owl_emac_ndo_eth_ioctl(struct net_device *netdev, in owl_emac_ndo_eth_ioctl() argument
1185 if (!netif_running(netdev)) in owl_emac_ndo_eth_ioctl()
1188 return phy_mii_ioctl(netdev->phydev, req, cmd); in owl_emac_ndo_eth_ioctl()
1191 static void owl_emac_ndo_tx_timeout(struct net_device *netdev, in owl_emac_ndo_tx_timeout() argument
1194 struct owl_emac_priv *priv = netdev_priv(netdev); in owl_emac_ndo_tx_timeout()
1205 netdev_dbg(priv->netdev, "resetting MAC\n"); in owl_emac_reset_task()
1206 owl_emac_disable(priv->netdev, false); in owl_emac_reset_task()
1207 owl_emac_enable(priv->netdev, false); in owl_emac_reset_task()
1211 owl_emac_ndo_get_stats(struct net_device *netdev) in owl_emac_ndo_get_stats() argument
1217 return &netdev->stats; in owl_emac_ndo_get_stats()
1238 static u32 owl_emac_ethtool_get_msglevel(struct net_device *netdev) in owl_emac_ethtool_get_msglevel() argument
1240 struct owl_emac_priv *priv = netdev_priv(netdev); in owl_emac_ethtool_get_msglevel()
1321 static int owl_emac_mdio_init(struct net_device *netdev) in owl_emac_mdio_init() argument
1323 struct owl_emac_priv *priv = netdev_priv(netdev); in owl_emac_mdio_init()
1358 static int owl_emac_phy_init(struct net_device *netdev) in owl_emac_phy_init() argument
1360 struct owl_emac_priv *priv = netdev_priv(netdev); in owl_emac_phy_init()
1364 phy = of_phy_get_and_connect(netdev, dev->of_node, in owl_emac_phy_init()
1377 static void owl_emac_get_mac_addr(struct net_device *netdev) in owl_emac_get_mac_addr() argument
1379 struct device *dev = netdev->dev.parent; in owl_emac_get_mac_addr()
1382 ret = platform_get_ethdev_address(dev, netdev); in owl_emac_get_mac_addr()
1383 if (!ret && is_valid_ether_addr(netdev->dev_addr)) in owl_emac_get_mac_addr()
1386 eth_hw_addr_random(netdev); in owl_emac_get_mac_addr()
1387 dev_warn(dev, "using random MAC address %pM\n", netdev->dev_addr); in owl_emac_get_mac_addr()
1392 struct net_device *netdev = dev_get_drvdata(dev); in owl_emac_suspend() local
1393 struct owl_emac_priv *priv = netdev_priv(netdev); in owl_emac_suspend()
1395 disable_irq(netdev->irq); in owl_emac_suspend()
1397 if (netif_running(netdev)) { in owl_emac_suspend()
1398 owl_emac_disable(netdev, true); in owl_emac_suspend()
1399 netif_device_detach(netdev); in owl_emac_suspend()
1409 struct net_device *netdev = dev_get_drvdata(dev); in owl_emac_resume() local
1410 struct owl_emac_priv *priv = netdev_priv(netdev); in owl_emac_resume()
1417 if (netif_running(netdev)) { in owl_emac_resume()
1421 ret = owl_emac_enable(netdev, true); in owl_emac_resume()
1427 netif_device_attach(netdev); in owl_emac_resume()
1430 enable_irq(netdev->irq); in owl_emac_resume()
1473 struct net_device *netdev; in owl_emac_probe() local
1477 netdev = devm_alloc_etherdev(dev, sizeof(*priv)); in owl_emac_probe()
1478 if (!netdev) in owl_emac_probe()
1481 platform_set_drvdata(pdev, netdev); in owl_emac_probe()
1482 SET_NETDEV_DEV(netdev, dev); in owl_emac_probe()
1484 priv = netdev_priv(netdev); in owl_emac_probe()
1485 priv->netdev = netdev; in owl_emac_probe()
1514 netdev->irq = platform_get_irq(pdev, 0); in owl_emac_probe()
1515 if (netdev->irq < 0) in owl_emac_probe()
1516 return netdev->irq; in owl_emac_probe()
1518 ret = devm_request_irq(dev, netdev->irq, owl_emac_handle_irq, in owl_emac_probe()
1519 IRQF_SHARED, netdev->name, netdev); in owl_emac_probe()
1521 dev_err(dev, "failed to request irq: %d\n", netdev->irq); in owl_emac_probe()
1549 owl_emac_get_mac_addr(netdev); in owl_emac_probe()
1554 ret = owl_emac_mdio_init(netdev); in owl_emac_probe()
1560 ret = owl_emac_phy_init(netdev); in owl_emac_probe()
1568 netdev->min_mtu = OWL_EMAC_MTU_MIN; in owl_emac_probe()
1569 netdev->max_mtu = OWL_EMAC_MTU_MAX; in owl_emac_probe()
1570 netdev->watchdog_timeo = OWL_EMAC_TX_TIMEOUT; in owl_emac_probe()
1571 netdev->netdev_ops = &owl_emac_netdev_ops; in owl_emac_probe()
1572 netdev->ethtool_ops = &owl_emac_ethtool_ops; in owl_emac_probe()
1573 netif_napi_add(netdev, &priv->napi, owl_emac_poll); in owl_emac_probe()
1575 ret = devm_register_netdev(dev, netdev); in owl_emac_probe()
1578 phy_disconnect(netdev->phydev); in owl_emac_probe()
1590 phy_disconnect(priv->netdev->phydev); in owl_emac_remove()
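
Taken together, the hits above trace the standard net_device lifecycle in this driver: a devm-managed etherdev allocation in probe, private state reached through netdev_priv(), ndo_* callbacks operating on that state, and queue/stats handling on the hot paths. The following is a minimal, hypothetical sketch of that pattern only (the example_* names are invented for illustration and are not the owl-emac code):

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>

/* Hypothetical private state; owl-emac keeps a back-pointer the same way
 * (priv->netdev in the matches above). */
struct example_priv {
	struct net_device *netdev;
};

static int example_ndo_open(struct net_device *netdev)
{
	/* A real driver starts DMA and the PHY here before enabling the queue. */
	netif_start_queue(netdev);
	return 0;
}

static int example_ndo_stop(struct net_device *netdev)
{
	netif_stop_queue(netdev);
	return 0;
}

static const struct net_device_ops example_netdev_ops = {
	.ndo_open = example_ndo_open,
	.ndo_stop = example_ndo_stop,
};

static int example_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct net_device *netdev;
	struct example_priv *priv;

	/* Allocate the net_device with room for the private area behind it;
	 * netdev_priv() returns that area. */
	netdev = devm_alloc_etherdev(dev, sizeof(*priv));
	if (!netdev)
		return -ENOMEM;

	platform_set_drvdata(pdev, netdev);
	SET_NETDEV_DEV(netdev, dev);

	priv = netdev_priv(netdev);
	priv->netdev = netdev;

	netdev->netdev_ops = &example_netdev_ops;
	eth_hw_addr_random(netdev);	/* placeholder MAC for the sketch */

	/* Managed registration; unregistration happens automatically on remove. */
	return devm_register_netdev(dev, netdev);
}

Because allocation, IRQ request, and registration are all devm-managed in the hits above (devm_alloc_etherdev, devm_request_irq, devm_register_netdev), the real probe path only needs explicit cleanup for resources outside that scheme, which is why owl_emac_probe falls back to phy_disconnect() when registration fails.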