/*
 * Excerpts from the Xilinx AXI Ethernet driver (xilinx_axienet_main.c),
 * collected from a source cross-reference search for "axistream-connected".
 * Elided regions are marked with "...".
 */

// SPDX-License-Identifier: GPL-2.0-only
/*
 * ...
 * Copyright (c) 2005-2008 DLA Systems, David H. Lynch Jr. <dhlii@dlasys.net>
 * Copyright (c) 2008-2009 Secret Lab Technologies Ltd.
 * Copyright (c) 2010 - 2011 Michal Simek <monstr@monstr.eu>
 * Copyright (c) 2010 - 2011 PetaLogix
 * Copyright (c) 2019 - 2022 Calian Advanced Technologies
 * Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved.
 * ...
 * TODO:
 *  - Add Axi Fifo support.
 *  - Factor out Axi DMA code into separate driver.
 *  - Test and fix basic multicast filtering.
 *  - Add support for extended multicast filtering.
 *  - Test basic VLAN support.
 *  - Add support for extended VLAN support.
 */
static const struct of_device_id axienet_of_match[] = {
	{ .compatible = "xlnx,axi-ethernet-1.00.a", },
	{ .compatible = "xlnx,axi-ethernet-1.01.a", },
	{ .compatible = "xlnx,axi-ethernet-2.01.a", },
	{},
};
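/* A hedged note, not part of the matched lines: in the full driver such an
 * of_device_id table is normally exported for module autoloading, roughly:
 *
 *	MODULE_DEVICE_TABLE(of, axienet_of_match);
 */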
/**
 * axienet_dma_in32 - Memory mapped Axi DMA register read
 * ...
 */
static u32 axienet_dma_in32(struct axienet_local *lp, off_t reg)
{
	return ioread32(lp->dma_regs + reg);
}

static void desc_set_phys_addr(struct axienet_local *lp, dma_addr_t addr,
			       struct axidma_bd *desc)
{
	desc->phys = lower_32_bits(addr);
	if (lp->features & XAE_FEATURE_DMA_64BIT)
		desc->phys_msb = upper_32_bits(addr);
}

static dma_addr_t desc_get_phys_addr(struct axienet_local *lp,
				     struct axidma_bd *desc)
{
	dma_addr_t ret = desc->phys;

	if (lp->features & XAE_FEATURE_DMA_64BIT)
		ret |= ((dma_addr_t)desc->phys_msb << 16) << 16;

	return ret;
}
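/* A hedged aside on the double shift above: dma_addr_t may be a 32-bit type
 * on 32-bit configurations, where a single "<< 32" would be undefined
 * behaviour (shift count equal to the type width). Two 16-bit shifts are
 * well defined either way and simply discard the upper half when the type
 * cannot represent it:
 *
 *	dma_addr_t msb = desc->phys_msb;
 *	dma_addr_t addr = desc->phys | ((msb << 16) << 16);
 */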
/**
 * axienet_dma_bd_release - Release buffer descriptor rings
 * ...
 */
static void axienet_dma_bd_release(struct net_device *ndev)
{
	...
	dma_free_coherent(lp->dev,
			  sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
			  lp->tx_bd_v,
			  lp->tx_bd_p);

	if (!lp->rx_bd_v)
		return;

	for (i = 0; i < lp->rx_bd_num; i++) {
		dma_addr_t phys;
		...
		if (!lp->rx_bd_v[i].skb)
			break;

		dev_kfree_skb(lp->rx_bd_v[i].skb);

		/* For each descriptor, we programmed cntrl with the (non-zero)
		 * descriptor size, after it had been successfully allocated.
		 * So a non-zero value in there means we need to unmap it.
		 */
		if (lp->rx_bd_v[i].cntrl) {
			phys = desc_get_phys_addr(lp, &lp->rx_bd_v[i]);
			dma_unmap_single(lp->dev, phys,
					 lp->max_frm_size, DMA_FROM_DEVICE);
		}
	}

	dma_free_coherent(lp->dev,
			  sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
			  lp->rx_bd_v,
			  lp->rx_bd_p);
}
/**
 * axienet_usec_to_timer - Calculate IRQ delay timer value
 * ...
 */
static u32 axienet_usec_to_timer(struct axienet_local *lp, u32 coalesce_usec)
{
	...
	if (lp->axi_clk)
		clk_rate = clk_get_rate(lp->axi_clk);
	...
}
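/* The conversion itself is elided above. A minimal sketch of the idea,
 * assuming (as the mainline driver does) that one hardware timeout interval
 * is 125 SG clock periods and that the timer field is 8 bits wide:
 *
 *	u32 timer = DIV64_U64_ROUND_CLOSEST((u64)coalesce_usec * clk_rate,
 *					    (u64)125000000);
 *	return min(timer, 255U);	// clamp to the timer field width
 */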
/**
 * axienet_dma_start - Set up DMA registers and start DMA operation
 * ...
 */
static void axienet_dma_start(struct axienet_local *lp)
{
	...
	lp->rx_dma_cr = (lp->coalesce_count_rx << XAXIDMA_COALESCE_SHIFT) |
			...
	if (lp->coalesce_count_rx > 1)
		lp->rx_dma_cr |= (axienet_usec_to_timer(lp, lp->coalesce_usec_rx)
				  ...
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
	...
	lp->tx_dma_cr = (lp->coalesce_count_tx << XAXIDMA_COALESCE_SHIFT) |
			...
	if (lp->coalesce_count_tx > 1)
		lp->tx_dma_cr |= (axienet_usec_to_timer(lp, lp->coalesce_usec_tx)
				  ...
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
	...
	axienet_dma_out_addr(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
	lp->rx_dma_cr |= XAXIDMA_CR_RUNSTOP_MASK;
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
	axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
			     (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));

	/* Write to the RS (Run-stop) bit in the Tx channel control register.
	 * ...
	 */
	axienet_dma_out_addr(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
	lp->tx_dma_cr |= XAXIDMA_CR_RUNSTOP_MASK;
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
}
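/* A hedged note on the ordering above: for each channel the driver programs
 * the current-descriptor pointer first, then sets RUNSTOP to bring the
 * engine out of its halted state. The RX tail pointer is also written here,
 * which is what arms the engine to start fetching descriptors; on the TX
 * side the tail pointer is written per packet in the transmit path, so TX
 * stays idle until the first frame is queued. */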
/**
 * axienet_dma_bd_init - Setup buffer descriptor rings for Axi DMA
 * ...
 * Return: 0, on success -ENOMEM, on failure
 * ...
 */
static int axienet_dma_bd_init(struct net_device *ndev)
{
	...
	lp->tx_bd_ci = 0;
	lp->tx_bd_tail = 0;
	lp->rx_bd_ci = 0;
	...
	lp->tx_bd_v = dma_alloc_coherent(lp->dev,
					 sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
					 &lp->tx_bd_p, GFP_KERNEL);
	if (!lp->tx_bd_v)
		return -ENOMEM;

	lp->rx_bd_v = dma_alloc_coherent(lp->dev,
					 sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
					 &lp->rx_bd_p, GFP_KERNEL);
	if (!lp->rx_bd_v)
		goto out;

	for (i = 0; i < lp->tx_bd_num; i++) {
		dma_addr_t addr = lp->tx_bd_p +
				  sizeof(*lp->tx_bd_v) *
				  ((i + 1) % lp->tx_bd_num);

		lp->tx_bd_v[i].next = lower_32_bits(addr);
		if (lp->features & XAE_FEATURE_DMA_64BIT)
			lp->tx_bd_v[i].next_msb = upper_32_bits(addr);
	}

	for (i = 0; i < lp->rx_bd_num; i++) {
		...
		addr = lp->rx_bd_p + sizeof(*lp->rx_bd_v) *
			((i + 1) % lp->rx_bd_num);
		lp->rx_bd_v[i].next = lower_32_bits(addr);
		if (lp->features & XAE_FEATURE_DMA_64BIT)
			lp->rx_bd_v[i].next_msb = upper_32_bits(addr);

		skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
		if (!skb)
			goto out;

		lp->rx_bd_v[i].skb = skb;
		addr = dma_map_single(lp->dev, skb->data,
				      lp->max_frm_size, DMA_FROM_DEVICE);
		if (dma_mapping_error(lp->dev, addr)) {
			...
			goto out;
		}
		desc_set_phys_addr(lp, addr, &lp->rx_bd_v[i]);

		lp->rx_bd_v[i].cntrl = lp->max_frm_size;
	}
	...
out:
	axienet_dma_bd_release(ndev);
	return -ENOMEM;
}
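/* Grounded in the two loops above: each descriptor's "next" pointer holds
 * the DMA address of the following descriptor, and the (i + 1) % bd_num
 * arithmetic makes the last entry point back at the first, so the hardware
 * walks each ring as a circular linked list. Only the RX ring is populated
 * with skbs up front; TX buffers are attached per packet in the transmit
 * path. */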
/**
 * axienet_set_mac_address - Write the MAC address
 * ...
 */
static void axienet_set_mac_address(struct net_device *ndev,
				    const void *address)
{
	...
	if (!is_valid_ether_addr(ndev->dev_addr))
		...
	axienet_iow(lp, XAE_UAW0_OFFSET,
		    (ndev->dev_addr[0]) |
		    (ndev->dev_addr[1] << 8) |
		    (ndev->dev_addr[2] << 16) |
		    (ndev->dev_addr[3] << 24));
	axienet_iow(lp, XAE_UAW1_OFFSET,
		    ...
		     (ndev->dev_addr[4] |
		      (ndev->dev_addr[5] << 8))));
}
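/* Hedged sketch of the register layout implied above (UAW0/UAW1 being the
 * unicast address word registers): the first four MAC bytes are packed
 * little-endian into UAW0 and the last two into the low half of UAW1.
 * For 00:0a:35:01:02:03:
 *
 *	UAW0          = 0x01350a00;	// dev_addr[3..0]
 *	UAW1 & 0xffff = 0x0302;		// dev_addr[5], dev_addr[4]
 */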
/**
 * netdev_set_mac_address - Write the MAC address (from outside the driver)
 * ...
 */
static int netdev_set_mac_address(struct net_device *ndev, void *p)
{
	struct sockaddr *addr = p;

	axienet_set_mac_address(ndev, addr->sa_data);
	return 0;
}
/**
 * axienet_set_multicast_list - Prepare the multicast table
 * ...
 * initialization. The Axi Ethernet basic multicast support has a four-entry
 * ...
 */
static void axienet_set_multicast_list(struct net_device *ndev)
{
	...
	if (ndev->flags & (IFF_ALLMULTI | IFF_PROMISC) ||
	    ...
		ndev->flags |= IFF_PROMISC;
		...
		dev_info(&ndev->dev, "Promiscuous mode enabled.\n");
	...
		af0reg = (ha->addr[0]);
		af0reg |= (ha->addr[1] << 8);
		af0reg |= (ha->addr[2] << 16);
		af0reg |= (ha->addr[3] << 24);

		af1reg = (ha->addr[4]);
		af1reg |= (ha->addr[5] << 8);
	...
		dev_info(&ndev->dev, "Promiscuous mode disabled.\n");
	...
}
/**
 * axienet_setoptions - Set an Axi Ethernet option
 * ...
 */
static void axienet_setoptions(struct net_device *ndev, u32 options)
{
	...
	while (tp->opt) {
		reg = ((axienet_ior(lp, tp->reg)) & ~(tp->m_or));
		if (options & tp->opt)
			reg |= tp->m_or;
		axienet_iow(lp, tp->reg, reg);
		tp++;
	}

	lp->options |= options;
}
/* __axienet_device_reset(), excerpt: */
	...
	dev_err(lp->dev, "%s: DMA reset timeout!\n", __func__);
	...
	dev_err(lp->dev, "%s: timeout waiting for PhyRstCmplt\n", __func__);
	...
/**
 * axienet_dma_stop - Stop DMA operation
 * ...
 */
static void axienet_dma_stop(struct axienet_local *lp)
{
	...
	synchronize_irq(lp->rx_irq);
	...
	synchronize_irq(lp->tx_irq);
	...
}
/**
 * axienet_device_reset - Reset and initialize the Axi Ethernet hardware.
 * ...
 * are connected to Axi Ethernet reset lines, this in turn resets the Axi
 * ...
 */
static int axienet_device_reset(struct net_device *ndev)
{
	...
	lp->max_frm_size = XAE_MAX_VLAN_FRAME_SIZE;
	lp->options |= XAE_OPTION_VLAN;
	lp->options &= (~XAE_OPTION_JUMBO);

	if ((ndev->mtu > XAE_MTU) &&
	    (ndev->mtu <= XAE_JUMBO_MTU)) {
		lp->max_frm_size = ndev->mtu + VLAN_ETH_HLEN +
				   XAE_TRL_SIZE;

		if (lp->max_frm_size <= lp->rxmem)
			lp->options |= XAE_OPTION_JUMBO;
	}
	...
	axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ?
		    XAE_INT_RECV_ERROR_MASK : 0);
	...
	axienet_setoptions(ndev, lp->options &
			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
	...
	axienet_setoptions(ndev, lp->options);
	...
}
/**
 * axienet_free_tx_chain - Clean up a series of linked TX descriptors.
 * ...
 * in all cleaned-up descriptors. Ignored if NULL.
 * ...
 */
static int axienet_free_tx_chain(struct axienet_local *lp, u32 first_bd,
				 int nr_bds, bool force, u32 *sizep, int budget)
{
	...
		cur_p = &lp->tx_bd_v[(first_bd + i) % lp->tx_bd_num];
		status = cur_p->status;
		...
		dma_unmap_single(lp->dev, phys,
				 (cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
				 DMA_TO_DEVICE);

		if (cur_p->skb && (status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
			napi_consume_skb(cur_p->skb, budget);
			...
		}

		cur_p->app0 = 0;
		cur_p->app1 = 0;
		cur_p->app2 = 0;
		cur_p->app4 = 0;
		cur_p->skb = NULL;
		...
		cur_p->cntrl = 0;
		cur_p->status = 0;
	...
	lp->tx_bd_ci += i;
	if (lp->tx_bd_ci >= lp->tx_bd_num)
		lp->tx_bd_ci %= lp->tx_bd_num;
	...
}
/**
 * axienet_check_tx_bd_space - Checks if a BD/group of BDs are currently busy
 * ...
 */
static inline int axienet_check_tx_bd_space(struct axienet_local *lp,
					    int num_frag)
{
	...
	cur_p = &lp->tx_bd_v[(READ_ONCE(lp->tx_bd_tail) + num_frag) %
			     lp->tx_bd_num];
	if (cur_p->cntrl)
		return NETDEV_TX_BUSY;
	return 0;
}
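/* Hedged usage sketch (mirroring, not quoting, the transmit path below):
 * the caller probes the descriptor that would terminate a num_frag chain
 * and backs off instead of overwriting an in-flight packet:
 *
 *	if (axienet_check_tx_bd_space(lp, num_frag + 1)) {
 *		netif_stop_queue(ndev);		// ring full; tx_poll will wake us
 *		return NETDEV_TX_BUSY;
 *	}
 */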
/**
 * axienet_tx_poll - Invoked once a transmit is completed by the
 * ...
 */
static int axienet_tx_poll(struct napi_struct *napi, int budget)
{
	...
	struct net_device *ndev = lp->ndev;
	...
	packets = axienet_free_tx_chain(lp, lp->tx_bd_ci, lp->tx_bd_num, false,
					&size, budget);
	...
		u64_stats_update_begin(&lp->tx_stat_sync);
		u64_stats_add(&lp->tx_packets, packets);
		u64_stats_add(&lp->tx_bytes, size);
		u64_stats_update_end(&lp->tx_stat_sync);
	...
		/* Re-enable TX completion interrupts. This should
		 * ...
		 */
		axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
	...
}
/**
 * axienet_start_xmit - Starts the transmission.
 * ...
 */
static netdev_tx_t
axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	...
	orig_tail_ptr = lp->tx_bd_tail;
	new_tail_ptr = orig_tail_ptr;

	num_frag = skb_shinfo(skb)->nr_frags;
	cur_p = &lp->tx_bd_v[orig_tail_ptr];
	...
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (lp->features & XAE_FEATURE_FULL_TX_CSUM) {
			/* Tx Full Checksum Offload Enabled */
			cur_p->app0 |= 2;
		} else if (lp->features & XAE_FEATURE_PARTIAL_TX_CSUM) {
			csum_start_off = skb_transport_offset(skb);
			csum_index_off = csum_start_off + skb->csum_offset;
			/* Tx Partial Checksum Offload Enabled */
			cur_p->app0 |= 1;
			cur_p->app1 = (csum_start_off << 16) | csum_index_off;
		}
	} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		cur_p->app0 |= 2; /* Tx Full Checksum Offload Enabled */
	}

	phys = dma_map_single(lp->dev, skb->data,
			      skb_headlen(skb), DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(lp->dev, phys))) {
		...
		ndev->stats.tx_dropped++;
		...
	}
	...
	cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;

	for (ii = 0; ii < num_frag; ii++) {
		if (++new_tail_ptr >= lp->tx_bd_num)
			new_tail_ptr = 0;
		cur_p = &lp->tx_bd_v[new_tail_ptr];
		frag = &skb_shinfo(skb)->frags[ii];
		phys = dma_map_single(lp->dev,
				      skb_frag_address(frag),
				      skb_frag_size(frag),
				      DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(lp->dev, phys))) {
			...
			ndev->stats.tx_dropped++;
			...
		}
		...
		cur_p->cntrl = skb_frag_size(frag);
	}

	cur_p->cntrl |= XAXIDMA_BD_CTRL_TXEOF_MASK;
	cur_p->skb = skb;

	tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * new_tail_ptr;
	if (++new_tail_ptr >= lp->tx_bd_num)
		new_tail_ptr = 0;
	WRITE_ONCE(lp->tx_bd_tail, new_tail_ptr);

	axienet_dma_out_addr(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p);
	...
	/* Space might have just been freed - check again */
	...
}
/**
 * axienet_rx_poll - Triggered by RX ISR to complete the BD processing.
 * ...
 */
static int axienet_rx_poll(struct napi_struct *napi, int budget)
{
	...
	cur_p = &lp->rx_bd_v[lp->rx_bd_ci];

	while (packets < budget && (cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
		...
		skb = cur_p->skb;
		cur_p->skb = NULL;
		...
		length = cur_p->app4 & 0x0000FFFF;
		...
		dma_unmap_single(lp->dev, phys, lp->max_frm_size,
				 DMA_FROM_DEVICE);
		...
		skb->protocol = eth_type_trans(skb, lp->ndev);
		...
		skb->ip_summed = CHECKSUM_NONE;
		...
		if (lp->features & XAE_FEATURE_FULL_RX_CSUM) {
			csumstatus = (cur_p->app2 &
				      ...
				skb->ip_summed = CHECKSUM_UNNECESSARY;
			...
		} else if ((lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) != 0 &&
			   skb->protocol == htons(ETH_P_IP) &&
			   skb->len > 64) {
			skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF);
			skb->ip_summed = CHECKSUM_COMPLETE;
		}
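		/* Hedged note on the two RX offload modes above: with full
		 * offload the MAC validates TCP/UDP checksums itself, so the
		 * driver only reports CHECKSUM_UNNECESSARY; with partial
		 * offload the hardware returns a raw 16-bit sum over the
		 * payload in app3, which is handed to the stack as
		 * CHECKSUM_COMPLETE so the core networking code finishes the
		 * validation.
		 */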
		...
		new_skb = napi_alloc_skb(napi, lp->max_frm_size);
		if (!new_skb)
			break;

		phys = dma_map_single(lp->dev, new_skb->data,
				      lp->max_frm_size,
				      DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(lp->dev, phys))) {
			...
			netdev_err(lp->ndev, "RX DMA mapping error\n");
			...
		}
		...
		cur_p->cntrl = lp->max_frm_size;
		cur_p->status = 0;
		cur_p->skb = new_skb;
		...
		tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;

		if (++lp->rx_bd_ci >= lp->rx_bd_num)
			lp->rx_bd_ci = 0;
		cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
	}

	u64_stats_update_begin(&lp->rx_stat_sync);
	u64_stats_add(&lp->rx_packets, packets);
	u64_stats_add(&lp->rx_bytes, size);
	u64_stats_update_end(&lp->rx_stat_sync);
	...
		/* Re-enable RX completion interrupts. This should
		 * ...
		 */
		axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
	...
}
/**
 * axienet_tx_irq - Tx Done Isr.
 * ...
 */
static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
{
	...
		netdev_err(ndev, "Current BD is at: 0x%x%08x\n",
			   (lp->tx_bd_v[lp->tx_bd_ci]).phys_msb,
			   (lp->tx_bd_v[lp->tx_bd_ci]).phys);
		schedule_work(&lp->dma_err_task);
	...
		u32 cr = lp->tx_dma_cr;
		...
		if (napi_schedule_prep(&lp->napi_tx)) {
			...
			__napi_schedule(&lp->napi_tx);
		}
	...
}
/**
 * axienet_rx_irq - Rx Isr.
 * ...
 */
static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
{
	...
		netdev_err(ndev, "Current BD is at: 0x%x%08x\n",
			   (lp->rx_bd_v[lp->rx_bd_ci]).phys_msb,
			   (lp->rx_bd_v[lp->rx_bd_ci]).phys);
		schedule_work(&lp->dma_err_task);
	...
		u32 cr = lp->rx_dma_cr;
		...
		if (napi_schedule_prep(&lp->napi_rx)) {
			...
			__napi_schedule(&lp->napi_rx);
		}
	...
}
/**
 * axienet_eth_irq - Ethernet core Isr.
 * ...
 */
static irqreturn_t axienet_eth_irq(int irq, void *_ndev)
{
	...
		ndev->stats.rx_missed_errors++;
	...
		ndev->stats.rx_frame_errors++;
	...
}
/**
 * axienet_open - Driver open routine.
 * ...
 * non-zero error value on failure
 * ...
 */
static int axienet_open(struct net_device *ndev)
{
	...
	dev_dbg(&ndev->dev, "axienet_open()\n");
	...
	ret = phylink_of_phy_connect(lp->phylink, lp->dev->of_node, 0);
	if (ret) {
		dev_err(lp->dev, "phylink_of_phy_connect() failed: %d\n", ret);
		return ret;
	}

	phylink_start(lp->phylink);
	...
	lp->stopping = false;
	INIT_WORK(&lp->dma_err_task, axienet_dma_err_handler);

	napi_enable(&lp->napi_rx);
	napi_enable(&lp->napi_tx);

	ret = request_irq(lp->tx_irq, axienet_tx_irq, IRQF_SHARED,
			  ndev->name, ndev);
	if (ret)
		goto err_tx_irq;

	ret = request_irq(lp->rx_irq, axienet_rx_irq, IRQF_SHARED,
			  ndev->name, ndev);
	if (ret)
		goto err_rx_irq;

	if (lp->eth_irq > 0) {
		ret = request_irq(lp->eth_irq, axienet_eth_irq, IRQF_SHARED,
				  ndev->name, ndev);
		if (ret)
			goto err_eth_irq;
	}

	return 0;

err_eth_irq:
	free_irq(lp->rx_irq, ndev);
err_rx_irq:
	free_irq(lp->tx_irq, ndev);
err_tx_irq:
	napi_disable(&lp->napi_tx);
	napi_disable(&lp->napi_rx);
	phylink_stop(lp->phylink);
	phylink_disconnect_phy(lp->phylink);
	cancel_work_sync(&lp->dma_err_task);
	dev_err(lp->dev, "request_irq() failed\n");
	return ret;
}
/**
 * axienet_stop - Driver stop routine.
 * ...
 */
static int axienet_stop(struct net_device *ndev)
{
	...
	dev_dbg(&ndev->dev, "axienet_close()\n");

	WRITE_ONCE(lp->stopping, true);
	flush_work(&lp->dma_err_task);

	napi_disable(&lp->napi_tx);
	napi_disable(&lp->napi_rx);

	phylink_stop(lp->phylink);
	phylink_disconnect_phy(lp->phylink);

	axienet_setoptions(ndev, lp->options &
			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
	...
	cancel_work_sync(&lp->dma_err_task);

	if (lp->eth_irq > 0)
		free_irq(lp->eth_irq, ndev);
	free_irq(lp->tx_irq, ndev);
	free_irq(lp->rx_irq, ndev);
	...
}
/**
 * axienet_change_mtu - Driver change mtu routine.
 * ...
 */
static int axienet_change_mtu(struct net_device *ndev, int new_mtu)
{
	...
	if (netif_running(ndev))
		return -EBUSY;

	if ((new_mtu + VLAN_ETH_HLEN +
	     XAE_TRL_SIZE) > lp->rxmem)
		return -EINVAL;

	ndev->mtu = new_mtu;

	return 0;
}
/**
 * axienet_poll_controller - Axi Ethernet poll mechanism.
 * ...
 */
static void axienet_poll_controller(struct net_device *ndev)
{
	struct axienet_local *lp = netdev_priv(ndev);

	disable_irq(lp->tx_irq);
	disable_irq(lp->rx_irq);
	axienet_rx_irq(lp->tx_irq, ndev);
	axienet_tx_irq(lp->rx_irq, ndev);
	enable_irq(lp->tx_irq);
	enable_irq(lp->rx_irq);
}
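/* Hedged observation: the irq numbers handed to axienet_rx_irq() and
 * axienet_tx_irq() above look swapped relative to their names, but both
 * handlers take the irq argument only to satisfy the irq_handler_t
 * signature and act purely on the net_device, so the calls behave the same
 * either way. */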
static int axienet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	...
	if (!netif_running(dev))
		return -EINVAL;

	return phylink_mii_ioctl(lp->phylink, rq, cmd);
}

static void
axienet_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	...
	netdev_stats_to_stats64(stats, &dev->stats);

	do {
		start = u64_stats_fetch_begin(&lp->rx_stat_sync);
		stats->rx_packets = u64_stats_read(&lp->rx_packets);
		stats->rx_bytes = u64_stats_read(&lp->rx_bytes);
	} while (u64_stats_fetch_retry(&lp->rx_stat_sync, start));

	do {
		start = u64_stats_fetch_begin(&lp->tx_stat_sync);
		stats->tx_packets = u64_stats_read(&lp->tx_packets);
		stats->tx_bytes = u64_stats_read(&lp->tx_bytes);
	} while (u64_stats_fetch_retry(&lp->tx_stat_sync, start));
}
/**
 * axienet_ethtools_get_drvinfo - Get various Axi Ethernet driver information.
 * ...
 * Issue "ethtool -i ethX" under linux prompt to execute this function.
 */
...
	strscpy(ed->driver, DRIVER_NAME, sizeof(ed->driver));
	strscpy(ed->version, DRIVER_VERSION, sizeof(ed->version));

/**
 * axienet_ethtools_get_regs_len - Get the total regs length present in the
 * ...
 */
...

/**
 * axienet_ethtools_get_regs - Dump the contents of all registers present
 * ...
 * Issue "ethtool -d ethX" to execute this function.
 */
...
	regs->version = 0;
	regs->len = len;
	...
/* axienet_ethtools_get_ringparam(), excerpt: */
	ering->rx_max_pending = RX_BD_NUM_MAX;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = 0;
	ering->tx_max_pending = TX_BD_NUM_MAX;
	ering->rx_pending = lp->rx_bd_num;
	ering->rx_mini_pending = 0;
	ering->rx_jumbo_pending = 0;
	ering->tx_pending = lp->tx_bd_num;

/* axienet_ethtools_set_ringparam(), excerpt: */
	if (ering->rx_pending > RX_BD_NUM_MAX ||
	    ering->rx_mini_pending ||
	    ering->rx_jumbo_pending ||
	    ering->tx_pending < TX_BD_NUM_MIN ||
	    ering->tx_pending > TX_BD_NUM_MAX)
		return -EINVAL;

	if (netif_running(ndev))
		return -EBUSY;

	lp->rx_bd_num = ering->rx_pending;
	lp->tx_bd_num = ering->tx_pending;
	return 0;
/**
 * axienet_ethtools_get_pauseparam - Get the pause parameter setting for
 * ...
 * setting. Issue "ethtool -a ethX" to execute this function.
 * ...
 */
...
	phylink_ethtool_get_pauseparam(lp->phylink, epauseparm);

/**
 * axienet_ethtools_set_pauseparam - Set device pause parameter(flow control)
 * ...
 * paths. Issue "ethtool -A ethX tx on|off" under linux prompt to execute this
 * function.
 *
 * Return: 0 on success, -EFAULT if device is running
 */
...
	return phylink_ethtool_set_pauseparam(lp->phylink, epauseparm);
/**
 * axienet_ethtools_get_coalesce - Get DMA interrupt coalescing count.
 * ...
 * count on Tx and Rx paths. Issue "ethtool -c ethX" under linux prompt to
 * execute this function.
 * ...
 */
...
	ecoalesce->rx_max_coalesced_frames = lp->coalesce_count_rx;
	ecoalesce->rx_coalesce_usecs = lp->coalesce_usec_rx;
	ecoalesce->tx_max_coalesced_frames = lp->coalesce_count_tx;
	ecoalesce->tx_coalesce_usecs = lp->coalesce_usec_tx;
	return 0;

/**
 * axienet_ethtools_set_coalesce - Set DMA interrupt coalescing count.
 * ...
 * count on Tx and Rx paths. Issue "ethtool -C ethX rx-frames 5" under linux
 * prompt to execute this function.
 *
 * Return: 0, on success, Non-zero error value on failure.
 */
...
		return -EFAULT;
	...
	if (ecoalesce->rx_max_coalesced_frames)
		lp->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames;
	if (ecoalesce->rx_coalesce_usecs)
		lp->coalesce_usec_rx = ecoalesce->rx_coalesce_usecs;
	if (ecoalesce->tx_max_coalesced_frames)
		lp->coalesce_count_tx = ecoalesce->tx_max_coalesced_frames;
	if (ecoalesce->tx_coalesce_usecs)
		lp->coalesce_usec_tx = ecoalesce->tx_coalesce_usecs;

	return 0;
/* axienet_ethtools_get_link_ksettings(), excerpt: */
	return phylink_ethtool_ksettings_get(lp->phylink, cmd);

/* axienet_ethtools_set_link_ksettings(), excerpt: */
	return phylink_ethtool_ksettings_set(lp->phylink, cmd);

/* axienet_ethtools_nway_reset(), excerpt: */
	return phylink_ethtool_nway_reset(lp->phylink);
/* axienet_pcs_get_state(), excerpt: */
	struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;
	...

/* axienet_pcs_an_restart(), excerpt: */
	struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;
	...

/* axienet_pcs_config(), excerpt: */
	struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;
	struct net_device *ndev = pcs_to_axienet_local(pcs)->ndev;
	...
	if (lp->switch_x_sgmii) {
		...

/* axienet_mac_select_pcs(), excerpt: */
	struct net_device *ndev = to_net_dev(config->dev);
	...
		return &lp->pcs;

/* axienet_mac_link_up(), excerpt: */
	struct net_device *ndev = to_net_dev(config->dev);
	...
	dev_err(&ndev->dev,
		...
/**
 * axienet_dma_err_handler - Work queue task for Axi DMA Error
 * ...
 */
static void axienet_dma_err_handler(struct work_struct *work)
{
	...
	struct net_device *ndev = lp->ndev;

	if (READ_ONCE(lp->stopping))
		return;

	napi_disable(&lp->napi_tx);
	napi_disable(&lp->napi_rx);

	axienet_setoptions(ndev, lp->options &
			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
	...
	for (i = 0; i < lp->tx_bd_num; i++) {
		cur_p = &lp->tx_bd_v[i];
		if (cur_p->cntrl) {
			dma_addr_t addr = desc_get_phys_addr(lp, cur_p);

			dma_unmap_single(lp->dev, addr,
					 (cur_p->cntrl &
					  XAXIDMA_BD_CTRL_LENGTH_MASK),
					 DMA_TO_DEVICE);
		}
		if (cur_p->skb)
			dev_kfree_skb_irq(cur_p->skb);
		cur_p->phys = 0;
		cur_p->phys_msb = 0;
		cur_p->cntrl = 0;
		cur_p->status = 0;
		cur_p->app0 = 0;
		cur_p->app1 = 0;
		cur_p->app2 = 0;
		cur_p->app3 = 0;
		cur_p->app4 = 0;
		cur_p->skb = NULL;
	}

	for (i = 0; i < lp->rx_bd_num; i++) {
		cur_p = &lp->rx_bd_v[i];
		cur_p->status = 0;
		cur_p->app0 = 0;
		cur_p->app1 = 0;
		cur_p->app2 = 0;
		cur_p->app3 = 0;
		cur_p->app4 = 0;
	}

	lp->tx_bd_ci = 0;
	lp->tx_bd_tail = 0;
	lp->rx_bd_ci = 0;
	...
	axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ?
		    XAE_INT_RECV_ERROR_MASK : 0);
	...
	axienet_setoptions(ndev, lp->options &
			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
	...
	napi_enable(&lp->napi_rx);
	napi_enable(&lp->napi_tx);
	axienet_setoptions(ndev, lp->options);
}
/**
 * axienet_probe - Axi Ethernet probe function.
 * ...
 * Return: 0, on success
 *	    Non-zero error value on failure.
 * ...
 */
static int axienet_probe(struct platform_device *pdev)
{
	...
	ndev = alloc_etherdev(sizeof(*lp));
	if (!ndev)
		return -ENOMEM;
	...
	SET_NETDEV_DEV(ndev, &pdev->dev);
	ndev->flags &= ~IFF_MULTICAST;  /* clear multicast */
	ndev->features = NETIF_F_SG;
	ndev->netdev_ops = &axienet_netdev_ops;
	ndev->ethtool_ops = &axienet_ethtool_ops;

	/* MTU range: 64 - 9000 */
	ndev->min_mtu = 64;
	ndev->max_mtu = XAE_JUMBO_MTU;

	lp = netdev_priv(ndev);
	lp->ndev = ndev;
	lp->dev = &pdev->dev;
	lp->options = XAE_OPTION_DEFAULTS;
	lp->rx_bd_num = RX_BD_NUM_DEFAULT;
	lp->tx_bd_num = TX_BD_NUM_DEFAULT;

	u64_stats_init(&lp->rx_stat_sync);
	u64_stats_init(&lp->tx_stat_sync);

	netif_napi_add(ndev, &lp->napi_rx, axienet_rx_poll);
	netif_napi_add(ndev, &lp->napi_tx, axienet_tx_poll);

	lp->axi_clk = devm_clk_get_optional(&pdev->dev, "s_axi_lite_clk");
	if (!lp->axi_clk) {
		...
		lp->axi_clk = devm_clk_get_optional(&pdev->dev, NULL);
	}
	if (IS_ERR(lp->axi_clk)) {
		ret = PTR_ERR(lp->axi_clk);
		goto free_netdev;
	}
	ret = clk_prepare_enable(lp->axi_clk);
	if (ret) {
		dev_err(&pdev->dev, "Unable to enable AXI clock: %d\n", ret);
		goto free_netdev;
	}

	lp->misc_clks[0].id = "axis_clk";
	lp->misc_clks[1].id = "ref_clk";
	lp->misc_clks[2].id = "mgt_clk";

	ret = devm_clk_bulk_get_optional(&pdev->dev, XAE_NUM_MISC_CLOCKS, lp->misc_clks);
	if (ret)
		goto cleanup_clk;

	ret = clk_bulk_prepare_enable(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
	if (ret)
		goto cleanup_clk;
	lp->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &ethres);
	if (IS_ERR(lp->regs)) {
		ret = PTR_ERR(lp->regs);
		goto cleanup_clk;
	}
	lp->regs_start = ethres->start;
	...
	lp->features = 0;

	ret = of_property_read_u32(pdev->dev.of_node, "xlnx,txcsum", &value);
	if (!ret) {
		switch (value) {
		case 1:
			lp->csum_offload_on_tx_path =
				XAE_FEATURE_PARTIAL_TX_CSUM;
			lp->features |= XAE_FEATURE_PARTIAL_TX_CSUM;
			ndev->features |= NETIF_F_IP_CSUM;
			break;
		case 2:
			lp->csum_offload_on_tx_path =
				XAE_FEATURE_FULL_TX_CSUM;
			lp->features |= XAE_FEATURE_FULL_TX_CSUM;
			ndev->features |= NETIF_F_IP_CSUM;
			break;
		default:
			lp->csum_offload_on_tx_path = XAE_NO_CSUM_OFFLOAD;
		}
	}
	ret = of_property_read_u32(pdev->dev.of_node, "xlnx,rxcsum", &value);
	if (!ret) {
		switch (value) {
		case 1:
			lp->csum_offload_on_rx_path =
				XAE_FEATURE_PARTIAL_RX_CSUM;
			lp->features |= XAE_FEATURE_PARTIAL_RX_CSUM;
			break;
		case 2:
			lp->csum_offload_on_rx_path =
				XAE_FEATURE_FULL_RX_CSUM;
			lp->features |= XAE_FEATURE_FULL_RX_CSUM;
			break;
		default:
			lp->csum_offload_on_rx_path = XAE_NO_CSUM_OFFLOAD;
		}
	}
	/* ...
	 * the device-tree and accordingly set flags.
	 */
	of_property_read_u32(pdev->dev.of_node, "xlnx,rxmem", &lp->rxmem);

	lp->switch_x_sgmii = of_property_read_bool(pdev->dev.of_node,
						   "xlnx,switch-x-sgmii");

	ret = of_property_read_u32(pdev->dev.of_node, "xlnx,phy-type", &value);
	if (!ret) {
		netdev_warn(ndev, "Please upgrade your device tree binary blob to use phy-mode");
		switch (value) {
		...
			lp->phy_mode = PHY_INTERFACE_MODE_MII;
		...
			lp->phy_mode = PHY_INTERFACE_MODE_GMII;
		...
			lp->phy_mode = PHY_INTERFACE_MODE_RGMII_ID;
		...
			lp->phy_mode = PHY_INTERFACE_MODE_SGMII;
		...
			lp->phy_mode = PHY_INTERFACE_MODE_1000BASEX;
		...
		default:
			ret = -EINVAL;
			goto cleanup_clk;
		}
	} else {
		ret = of_get_phy_mode(pdev->dev.of_node, &lp->phy_mode);
		if (ret)
			goto cleanup_clk;
	}
	if (lp->switch_x_sgmii && lp->phy_mode != PHY_INTERFACE_MODE_SGMII &&
	    lp->phy_mode != PHY_INTERFACE_MODE_1000BASEX) {
		dev_err(&pdev->dev, "xlnx,switch-x-sgmii only supported with SGMII or 1000BaseX\n");
		ret = -EINVAL;
		goto cleanup_clk;
	}
	np = of_parse_phandle(pdev->dev.of_node, "axistream-connected", 0);
	if (np) {
		...
			dev_err(&pdev->dev,
				...
		lp->dma_regs = devm_ioremap_resource(&pdev->dev,
						     ...);
		lp->rx_irq = irq_of_parse_and_map(np, 1);
		lp->tx_irq = irq_of_parse_and_map(np, 0);
		...
		lp->eth_irq = platform_get_irq_optional(pdev, 0);
	} else {
		...
		lp->dma_regs = devm_platform_get_and_ioremap_resource(pdev, 1, NULL);
		lp->rx_irq = platform_get_irq(pdev, 1);
		lp->tx_irq = platform_get_irq(pdev, 0);
		lp->eth_irq = platform_get_irq_optional(pdev, 2);
	}
	if (IS_ERR(lp->dma_regs)) {
		dev_err(&pdev->dev, "could not map DMA regs\n");
		ret = PTR_ERR(lp->dma_regs);
		goto cleanup_clk;
	}
	if ((lp->rx_irq <= 0) || (lp->tx_irq <= 0)) {
		dev_err(&pdev->dev, "could not determine irqs\n");
		ret = -ENOMEM;
		goto cleanup_clk;
	}
	/* Autodetect the need for 64-bit DMA pointers.
	 * ...
	 */
	...
		void __iomem *desc = lp->dma_regs + XAXIDMA_TX_CDESC_OFFSET + 4;
		...
				lp->features |= XAE_FEATURE_DMA_64BIT;
				addr_width = 64;
				dev_info(&pdev->dev,
					 "autodetected 64-bit DMA range\n");
		...
	if (!IS_ENABLED(CONFIG_64BIT) && lp->features & XAE_FEATURE_DMA_64BIT) {
		dev_err(&pdev->dev, "64-bit addressable DMA is not compatible with 32-bit architecture\n");
		ret = -EINVAL;
		goto cleanup_clk;
	}

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_width));
	if (ret) {
		dev_err(&pdev->dev, "No suitable DMA available\n");
		goto cleanup_clk;
	}
	if (lp->eth_irq <= 0)
		dev_info(&pdev->dev, "Ethernet core IRQ not defined\n");

	ret = of_get_mac_address(pdev->dev.of_node, mac_addr);
	if (!ret) {
		axienet_set_mac_address(ndev, mac_addr);
	} else {
		dev_warn(&pdev->dev, "could not find MAC address property: %d\n",
			 ret);
		eth_hw_addr_random(ndev);
	}

	lp->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
	lp->coalesce_usec_rx = XAXIDMA_DFT_RX_USEC;
	lp->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD;
	lp->coalesce_usec_tx = XAXIDMA_DFT_TX_USEC;
	...
		dev_warn(&pdev->dev,
			 ...

	if (lp->phy_mode == PHY_INTERFACE_MODE_SGMII ||
	    lp->phy_mode == PHY_INTERFACE_MODE_1000BASEX) {
		np = of_parse_phandle(pdev->dev.of_node, "pcs-handle", 0);
		if (!np) {
			/* Deprecated: Always use "pcs-handle" for pcs_phy.
			 * Falling back to "phy-handle" here is only for
			 * backward compatibility with old device trees.
			 */
			np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
		}
		if (!np) {
			dev_err(&pdev->dev, "pcs-handle (preferred) or phy-handle required for 1000BaseX/SGMII\n");
			ret = -EINVAL;
			goto cleanup_mdio;
		}
		lp->pcs_phy = of_mdio_find_device(np);
		if (!lp->pcs_phy) {
			ret = -EPROBE_DEFER;
			of_node_put(np);
			goto cleanup_mdio;
		}
		of_node_put(np);
		lp->pcs.ops = &axienet_pcs_ops;
		lp->pcs.neg_mode = true;
		lp->pcs.poll = true;
	}
	lp->phylink_config.dev = &ndev->dev;
	lp->phylink_config.type = PHYLINK_NETDEV;
	lp->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
		...
	__set_bit(lp->phy_mode, lp->phylink_config.supported_interfaces);
	if (lp->switch_x_sgmii) {
		__set_bit(PHY_INTERFACE_MODE_SGMII,
			  lp->phylink_config.supported_interfaces);
		__set_bit(PHY_INTERFACE_MODE_1000BASEX,
			  lp->phylink_config.supported_interfaces);
	}

	lp->phylink = phylink_create(&lp->phylink_config, pdev->dev.fwnode,
				     lp->phy_mode,
				     ...);
	if (IS_ERR(lp->phylink)) {
		ret = PTR_ERR(lp->phylink);
		dev_err(&pdev->dev, "phylink_create error (%i)\n", ret);
		goto cleanup_mdio;
	}

	ret = register_netdev(lp->ndev);
	if (ret) {
		dev_err(lp->dev, "register_netdev() error (%i)\n", ret);
		goto cleanup_phylink;
	}

	return 0;
cleanup_phylink:
	phylink_destroy(lp->phylink);

cleanup_mdio:
	if (lp->pcs_phy)
		put_device(&lp->pcs_phy->dev);
	if (lp->mii_bus)
		...
cleanup_clk:
	clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
	clk_disable_unprepare(lp->axi_clk);

free_netdev:
	free_netdev(ndev);

	return ret;
}
/* axienet_remove(), excerpt: */
	...
	if (lp->phylink)
		phylink_destroy(lp->phylink);

	if (lp->pcs_phy)
		put_device(&lp->pcs_phy->dev);
	...
	clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
	clk_disable_unprepare(lp->axi_clk);
	...