// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/* Copyright 2014-2016 Freescale Semiconductor Inc.
 * Copyright 2016-2022 NXP
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/etherdevice.h>
#include <linux/of_net.h>
#include <linux/interrupt.h>
#include <linux/kthread.h>
#include <linux/iommu.h>
#include <linux/fsl/mc.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/fsl/ptp_qoriq.h>
#include <linux/ptp_classify.h>
#include <net/pkt_cls.h>
#include <net/sock.h>
#include <net/tso.h>
#include <net/xdp_sock_drv.h>

#include "dpaa2-eth.h"

/* CREATE_TRACE_POINTS only needs to be defined once. Other dpa files
 * using trace events only need to #include <trace/events/sched.h>
 */
#define CREATE_TRACE_POINTS
#include "dpaa2-eth-trace.h"

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Freescale DPAA2 Ethernet Driver");

/* Shared handle to the QorIQ 1588 timer block; exported so that the
 * dpaa2-ptp driver can publish it for use by this driver's timestamping
 * paths.
 */
struct ptp_qoriq *dpaa2_ptp;
EXPORT_SYMBOL(dpaa2_ptp);

/* Probe optional firmware (DPNI) capabilities and record them in
 * priv->features. Currently only detects whether the one-step PTP
 * correction register can be programmed directly (MMIO) instead of
 * through MC commands.
 */
static void dpaa2_eth_detect_features(struct dpaa2_eth_priv *priv)
{
	priv->features = 0;

	if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_PTP_ONESTEP_VER_MAJOR,
				   DPNI_PTP_ONESTEP_VER_MINOR) >= 0)
		priv->features |= DPAA2_ETH_FEATURE_ONESTEP_CFG_DIRECT;
}

/* Configure one-step PTP timestamping via an MC firmware command
 * (the slow, indirect path). @offset is the correction field offset in
 * the packet; @udp is nonzero for UDP-encapsulated PTP (checksum update
 * needed).
 */
static void dpaa2_update_ptp_onestep_indirect(struct dpaa2_eth_priv *priv,
					      u32 offset, u8 udp)
{
	struct dpni_single_step_cfg cfg;

	cfg.en = 1;
	cfg.ch_update = udp;
	cfg.offset = offset;
	cfg.peer_delay = 0;

	if (dpni_set_single_step_cfg(priv->mc_io, 0, priv->mc_token, &cfg))
		WARN_ONCE(1, "Failed to set single step register");
}

/* Configure one-step PTP timestamping by writing the hardware register
 * directly through the ioremapped base obtained at probe time (fast path).
 */
static void dpaa2_update_ptp_onestep_direct(struct dpaa2_eth_priv *priv,
					    u32 offset, u8 udp)
{
	u32 val = 0;

	val = DPAA2_PTP_SINGLE_STEP_ENABLE |
	      DPAA2_PTP_SINGLE_CORRECTION_OFF(offset);

	if (udp)
		val |= DPAA2_PTP_SINGLE_STEP_CH;

	if (priv->onestep_reg_base)
		writel(val, priv->onestep_reg_base);
}

/* Choose how one-step PTP parameters will be programmed: default to the
 * indirect (MC command) method, then upgrade to direct register writes
 * if firmware supports it and the register can be retrieved and mapped.
 * Any failure falls back silently to the indirect callback already set.
 */
static void dpaa2_ptp_onestep_reg_update_method(struct dpaa2_eth_priv *priv)
{
	struct device *dev = priv->net_dev->dev.parent;
	struct dpni_single_step_cfg ptp_cfg;

	priv->dpaa2_set_onestep_params_cb = dpaa2_update_ptp_onestep_indirect;

	if (!(priv->features & DPAA2_ETH_FEATURE_ONESTEP_CFG_DIRECT))
		return;

	if (dpni_get_single_step_cfg(priv->mc_io, 0,
				     priv->mc_token, &ptp_cfg)) {
		dev_err(dev, "dpni_get_single_step_cfg cannot retrieve onestep reg, falling back to indirect update\n");
		return;
	}

	if (!ptp_cfg.ptp_onestep_reg_base) {
		dev_err(dev, "1588 onestep reg not available, falling back to indirect update\n");
		return;
	}

	priv->onestep_reg_base = ioremap(ptp_cfg.ptp_onestep_reg_base,
					 sizeof(u32));
	if (!priv->onestep_reg_base) {
		dev_err(dev, "1588 onestep reg cannot be mapped, falling back to indirect update\n");
		return;
	}

	priv->dpaa2_set_onestep_params_cb = dpaa2_update_ptp_onestep_direct;
}

/* Translate an IOVA coming from hardware frame descriptors into a CPU
 * virtual address. When no IOMMU domain is attached, the IOVA is the
 * physical address itself.
 */
void *dpaa2_iova_to_virt(struct iommu_domain *domain,
			 dma_addr_t iova_addr)
{
	phys_addr_t phys_addr;

	phys_addr = domain ? iommu_iova_to_phys(domain, iova_addr) : iova_addr;

	return phys_to_virt(phys_addr);
}

/* Propagate hardware checksum validation results (FAS status bits in the
 * frame annotation) into the skb, so the stack can skip L3/L4 csum work.
 */
static void dpaa2_eth_validate_rx_csum(struct dpaa2_eth_priv *priv,
				       u32 fd_status,
				       struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);

	/* HW checksum validation is disabled, nothing to do here */
	if (!(priv->net_dev->features & NETIF_F_RXCSUM))
		return;

	/* Read checksum validation bits */
	if (!((fd_status & DPAA2_FAS_L3CV) &&
	      (fd_status & DPAA2_FAS_L4CV)))
		return;

	/* Inform the stack there's no need to compute L3/L4 csum anymore */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
}

/* Free a received FD.
 * Not to be used for Tx conf FDs or on any other paths.
 */
static void dpaa2_eth_free_rx_fd(struct dpaa2_eth_priv *priv,
				 const struct dpaa2_fd *fd,
				 void *vaddr)
{
	struct device *dev = priv->net_dev->dev.parent;
	dma_addr_t addr = dpaa2_fd_get_addr(fd);
	u8 fd_format = dpaa2_fd_get_format(fd);
	struct dpaa2_sg_entry *sgt;
	void *sg_vaddr;
	int i;

	/* If single buffer frame, just free the data buffer */
	if (fd_format == dpaa2_fd_single)
		goto free_buf;
	else if (fd_format != dpaa2_fd_sg)
		/* We don't support any other format */
		return;

	/* For S/G frames, we first need to free all SG entries
	 * except the first one, which was taken care of already
	 */
	sgt = vaddr + dpaa2_fd_get_offset(fd);
	for (i = 1; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
		addr = dpaa2_sg_get_addr(&sgt[i]);
		sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
		dma_unmap_page(dev, addr, priv->rx_buf_size,
			       DMA_BIDIRECTIONAL);

		free_pages((unsigned long)sg_vaddr, 0);
		if (dpaa2_sg_is_final(&sgt[i]))
			break;
	}

free_buf:
	free_pages((unsigned long)vaddr, 0);
}

/* Build a linear skb based on a single-buffer frame descriptor */
static struct sk_buff *dpaa2_eth_build_linear_skb(struct dpaa2_eth_channel *ch,
						  const struct dpaa2_fd *fd,
						  void *fd_vaddr)
{
	struct sk_buff *skb = NULL;
	u16 fd_offset = dpaa2_fd_get_offset(fd);
	u32 fd_length = dpaa2_fd_get_len(fd);

	/* The buffer leaves the pool for good; account for it */
	ch->buf_count--;

	skb = build_skb(fd_vaddr, DPAA2_ETH_RX_BUF_RAW_SIZE);
	if (unlikely(!skb))
		return NULL;

	skb_reserve(skb, fd_offset);
	skb_put(skb, fd_length);

	return skb;
}

/* Build a non linear (fragmented) skb based on a S/G table */
static struct sk_buff *dpaa2_eth_build_frag_skb(struct dpaa2_eth_priv *priv,
						struct dpaa2_eth_channel *ch,
						struct dpaa2_sg_entry *sgt)
{
	struct sk_buff *skb = NULL;
	struct device *dev = priv->net_dev->dev.parent;
	void *sg_vaddr;
	dma_addr_t sg_addr;
	u16 sg_offset;
	u32 sg_length;
	struct page *page, *head_page;
	int page_offset;
	int i;

	for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
		struct dpaa2_sg_entry *sge = &sgt[i];

		/* NOTE: We only support SG entries in dpaa2_sg_single format,
		 * but this is the only format we may receive from HW anyway
		 */

		/* Get the address and length from the S/G entry */
		sg_addr = dpaa2_sg_get_addr(sge);
		sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, sg_addr);
		dma_unmap_page(dev, sg_addr, priv->rx_buf_size,
			       DMA_BIDIRECTIONAL);

		sg_length = dpaa2_sg_get_len(sge);

		if (i == 0) {
			/* We build the skb around the first data buffer */
			skb = build_skb(sg_vaddr, DPAA2_ETH_RX_BUF_RAW_SIZE);
			if (unlikely(!skb)) {
				/* Free the first SG entry now, since we already
				 * unmapped it and obtained the virtual address
				 */
				free_pages((unsigned long)sg_vaddr, 0);

				/* We still need to subtract the buffers used
				 * by this FD from our software counter
				 */
				while (!dpaa2_sg_is_final(&sgt[i]) &&
				       i < DPAA2_ETH_MAX_SG_ENTRIES)
					i++;
				break;
			}

			sg_offset = dpaa2_sg_get_offset(sge);
			skb_reserve(skb, sg_offset);
			skb_put(skb, sg_length);
		} else {
			/* Rest of the data buffers are stored as skb frags */
			page = virt_to_page(sg_vaddr);
			head_page = virt_to_head_page(sg_vaddr);

			/* Offset in page (which may be compound).
			 * Data in subsequent SG entries is stored from the
			 * beginning of the buffer, so we don't need to add the
			 * sg_offset.
			 */
			page_offset = ((unsigned long)sg_vaddr &
				(PAGE_SIZE - 1)) +
				(page_address(page) - page_address(head_page));

			skb_add_rx_frag(skb, i - 1, head_page, page_offset,
					sg_length, priv->rx_buf_size);
		}

		if (dpaa2_sg_is_final(sge))
			break;
	}

	WARN_ONCE(i == DPAA2_ETH_MAX_SG_ENTRIES, "Final bit not set in SGT");

	/* Count all data buffers + SG table buffer */
	ch->buf_count -= i + 2;

	return skb;
}

/* Free buffers acquired from the buffer pool or which were meant to
 * be released in the pool
 */
static void dpaa2_eth_free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array,
				int count, bool xsk_zc)
{
	struct device *dev = priv->net_dev->dev.parent;
	struct dpaa2_eth_swa *swa;
	struct xdp_buff *xdp_buff;
	void *vaddr;
	int i;

	for (i = 0; i < count; i++) {
		vaddr = dpaa2_iova_to_virt(priv->iommu_domain, buf_array[i]);

		if (!xsk_zc) {
			/* Regular buffers: driver-owned pages that we mapped */
			dma_unmap_page(dev, buf_array[i], priv->rx_buf_size,
				       DMA_BIDIRECTIONAL);
			free_pages((unsigned long)vaddr, 0);
		} else {
			/* AF_XDP zero-copy buffers: return them to the UMEM
			 * pool via the xdp_buff stashed in the software
			 * annotation area
			 */
			swa = (struct dpaa2_eth_swa *)
				(vaddr + DPAA2_ETH_RX_HWA_SIZE);
			xdp_buff = swa->xsk.xdp_buff;
			xsk_buff_free(xdp_buff);
		}
	}
}

/* Queue one buffer for release back to the hardware buffer pool; once a
 * full batch (DPAA2_ETH_BUFS_PER_CMD) accumulates, release it through the
 * DPIO service, retrying on portal busy. If the release ultimately fails,
 * free the buffers to the kernel instead and fix up the accounting.
 */
void dpaa2_eth_recycle_buf(struct dpaa2_eth_priv *priv,
			   struct dpaa2_eth_channel *ch,
			   dma_addr_t addr)
{
	int retries = 0;
	int err;

	ch->recycled_bufs[ch->recycled_bufs_cnt++] = addr;
	if (ch->recycled_bufs_cnt < DPAA2_ETH_BUFS_PER_CMD)
		return;

	while ((err = dpaa2_io_service_release(ch->dpio, ch->bp->bpid,
					       ch->recycled_bufs,
					       ch->recycled_bufs_cnt)) == -EBUSY) {
		if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES)
			break;
		cpu_relax();
	}

	if (err) {
		dpaa2_eth_free_bufs(priv, ch->recycled_bufs,
				    ch->recycled_bufs_cnt, ch->xsk_zc);
		ch->buf_count -= ch->recycled_bufs_cnt;
	}

	ch->recycled_bufs_cnt = 0;
}

/* Enqueue a batch of XDP frame descriptors on a Tx queue, retrying on
 * portal busy up to a per-FD retry budget. Returns how many FDs were
 * actually enqueued; the caller must deal with the rest.
 */
static int dpaa2_eth_xdp_flush(struct dpaa2_eth_priv *priv,
			       struct dpaa2_eth_fq *fq,
			       struct dpaa2_eth_xdp_fds *xdp_fds)
{
	int total_enqueued = 0, retries = 0, enqueued;
	struct dpaa2_eth_drv_stats *percpu_extras;
	int num_fds, err, max_retries;
	struct dpaa2_fd *fds;

	percpu_extras = this_cpu_ptr(priv->percpu_extras);

	/* try to enqueue all the FDs until the max number of retries is hit */
	fds = xdp_fds->fds;
	num_fds = xdp_fds->num;
	max_retries = num_fds * DPAA2_ETH_ENQUEUE_RETRIES;
	while (total_enqueued < num_fds && retries < max_retries) {
		err = priv->enqueue(priv, fq, &fds[total_enqueued],
				    0, num_fds - total_enqueued, &enqueued);
		if (err == -EBUSY) {
			percpu_extras->tx_portal_busy += ++retries;
			continue;
		}
		total_enqueued += enqueued;
	}
	xdp_fds->num = 0;

	return total_enqueued;
}

/* Flush the pending XDP_TX frames of a queue to hardware and update the
 * Tx statistics; frames that could not be enqueued are recycled back to
 * the buffer pool and counted as Tx errors.
 */
static void dpaa2_eth_xdp_tx_flush(struct dpaa2_eth_priv *priv,
				   struct dpaa2_eth_channel *ch,
				   struct dpaa2_eth_fq *fq)
{
	struct rtnl_link_stats64 *percpu_stats;
	struct dpaa2_fd *fds;
	int enqueued, i;

	percpu_stats = this_cpu_ptr(priv->percpu_stats);

	// enqueue the array of XDP_TX frames
	enqueued = dpaa2_eth_xdp_flush(priv, fq, &fq->xdp_tx_fds);

	/* update statistics */
	percpu_stats->tx_packets += enqueued;
	fds = fq->xdp_tx_fds.fds;
	for (i = 0; i < enqueued; i++) {
		percpu_stats->tx_bytes += dpaa2_fd_get_len(&fds[i]);
		ch->stats.xdp_tx++;
	}
	for (i = enqueued; i < fq->xdp_tx_fds.num; i++) {
		dpaa2_eth_recycle_buf(priv, ch, dpaa2_fd_get_addr(&fds[i]));
		percpu_stats->tx_errors++;
		ch->stats.xdp_tx_err++;
	}
	fq->xdp_tx_fds.num = 0;
}

/* Prepare an FD for XDP_TX: mark the hardware annotation valid, tell
 * hardware to return the buffer straight to the pool after transmission
 * (no Tx confirmation), and stage the FD in the queue's bulk array,
 * flushing once DEV_MAP_BULK_SIZE frames are pending.
 */
void dpaa2_eth_xdp_enqueue(struct dpaa2_eth_priv *priv,
			   struct dpaa2_eth_channel *ch,
			   struct dpaa2_fd *fd,
			   void *buf_start, u16 queue_id)
{
	struct dpaa2_faead *faead;
	struct dpaa2_fd *dest_fd;
	struct dpaa2_eth_fq *fq;
	u32 ctrl, frc;

	/* Mark the egress frame hardware annotation area as valid */
	frc = dpaa2_fd_get_frc(fd);
	dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV);
	dpaa2_fd_set_ctrl(fd, DPAA2_FD_CTRL_ASAL);

	/* Instruct hardware to release the FD buffer directly into
	 * the buffer pool once transmission is completed, instead of
	 * sending a Tx confirmation frame to us
	 */
	ctrl = DPAA2_FAEAD_A4V | DPAA2_FAEAD_A2V | DPAA2_FAEAD_EBDDV;
	faead = dpaa2_get_faead(buf_start, false);
	faead->ctrl = cpu_to_le32(ctrl);
	faead->conf_fqid = 0;

	fq = &priv->fq[queue_id];
	dest_fd = &fq->xdp_tx_fds.fds[fq->xdp_tx_fds.num++];
	memcpy(dest_fd, fd, sizeof(*dest_fd));

	if (fq->xdp_tx_fds.num < DEV_MAP_BULK_SIZE)
		return;

	dpaa2_eth_xdp_tx_flush(priv, ch, fq);
}

/* Run the attached XDP program (if any) on a received frame and act on
 * its verdict. Returns the XDP action; XDP_PASS means the caller should
 * continue normal skb processing. On TX/REDIRECT/DROP the buffer is
 * consumed here.
 */
static u32 dpaa2_eth_run_xdp(struct dpaa2_eth_priv *priv,
			     struct dpaa2_eth_channel *ch,
			     struct dpaa2_eth_fq *rx_fq,
			     struct dpaa2_fd *fd, void *vaddr)
{
	dma_addr_t addr = dpaa2_fd_get_addr(fd);
	struct bpf_prog *xdp_prog;
	struct xdp_buff xdp;
	u32 xdp_act = XDP_PASS;
	int err, offset;

	xdp_prog = READ_ONCE(ch->xdp.prog);
	if (!xdp_prog)
		goto out;

	offset = dpaa2_fd_get_offset(fd) - XDP_PACKET_HEADROOM;
	xdp_init_buff(&xdp, DPAA2_ETH_RX_BUF_RAW_SIZE - offset, &ch->xdp_rxq);
	xdp_prepare_buff(&xdp, vaddr + offset, XDP_PACKET_HEADROOM,
			 dpaa2_fd_get_len(fd), false);

	xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp);

	/* xdp.data pointer may have changed */
	dpaa2_fd_set_offset(fd, xdp.data - vaddr);
	dpaa2_fd_set_len(fd, xdp.data_end - xdp.data);

	switch (xdp_act) {
	case XDP_PASS:
		break;
	case XDP_TX:
		dpaa2_eth_xdp_enqueue(priv, ch, fd, vaddr, rx_fq->flowid);
		break;
	default:
		bpf_warn_invalid_xdp_action(priv->net_dev, xdp_prog, xdp_act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(priv->net_dev, xdp_prog, xdp_act);
		fallthrough;
	case XDP_DROP:
		dpaa2_eth_recycle_buf(priv, ch, addr);
		ch->stats.xdp_drop++;
		break;
	case XDP_REDIRECT:
		dma_unmap_page(priv->net_dev->dev.parent, addr,
			       priv->rx_buf_size, DMA_BIDIRECTIONAL);
		ch->buf_count--;

		/* Allow redirect use of full headroom */
		xdp.data_hard_start = vaddr;
		xdp.frame_sz = DPAA2_ETH_RX_BUF_RAW_SIZE;

		err = xdp_do_redirect(priv->net_dev, &xdp, xdp_prog);
		if (unlikely(err)) {
			/* Redirect failed: try to re-map the buffer and give
			 * it back to the pool; if even that fails, free it
			 */
			addr = dma_map_page(priv->net_dev->dev.parent,
					    virt_to_page(vaddr), 0,
					    priv->rx_buf_size, DMA_BIDIRECTIONAL);
			if (unlikely(dma_mapping_error(priv->net_dev->dev.parent, addr))) {
				free_pages((unsigned long)vaddr, 0);
			} else {
				ch->buf_count++;
				dpaa2_eth_recycle_buf(priv, ch, addr);
			}
			ch->stats.xdp_drop++;
		} else {
			ch->stats.xdp_redirect++;
		}
		break;
	}

	ch->xdp.res |= xdp_act;
out:
	return xdp_act;
}

/* Allocate a fresh skb and copy the frame data into it (copybreak path),
 * then recycle the original hardware buffer back to the pool.
 */
struct sk_buff *dpaa2_eth_alloc_skb(struct dpaa2_eth_priv *priv,
				    struct dpaa2_eth_channel *ch,
				    const struct dpaa2_fd *fd, u32 fd_length,
				    void *fd_vaddr)
{
	u16 fd_offset = dpaa2_fd_get_offset(fd);
	struct sk_buff *skb = NULL;
	unsigned int skb_len;

	skb_len = fd_length + dpaa2_eth_needed_headroom(NULL);

	skb = napi_alloc_skb(&ch->napi, skb_len);
	if (!skb)
		return NULL;

	skb_reserve(skb, dpaa2_eth_needed_headroom(NULL));
	skb_put(skb, fd_length);

	memcpy(skb->data, fd_vaddr + fd_offset, fd_length);

	/* Data was copied out, so the HW buffer can go back to the pool */
	dpaa2_eth_recycle_buf(priv, ch, dpaa2_fd_get_addr(fd));

	return skb;
}

/* For small frames (up to rx_copybreak), copy into a freshly allocated
 * skb instead of building one around the hardware buffer; returns NULL
 * when the frame is too large and the zero-copy path should be used.
 */
static struct sk_buff *dpaa2_eth_copybreak(struct dpaa2_eth_channel *ch,
					   const struct dpaa2_fd *fd,
					   void *fd_vaddr)
{
	struct dpaa2_eth_priv *priv = ch->priv;
	u32 fd_length = dpaa2_fd_get_len(fd);

	if (fd_length > priv->rx_copybreak)
		return NULL;

	return dpaa2_eth_alloc_skb(priv, ch, fd, fd_length, fd_vaddr);
}

/* Final Rx stage: fill in skb metadata (hardware timestamp, checksum
 * offload result, protocol, rx queue), bump statistics and append the
 * skb to the channel's Rx list for delivery to the stack.
 */
void dpaa2_eth_receive_skb(struct dpaa2_eth_priv *priv,
			   struct dpaa2_eth_channel *ch,
			   const struct dpaa2_fd *fd, void *vaddr,
			   struct dpaa2_eth_fq *fq,
			   struct rtnl_link_stats64 *percpu_stats,
			   struct sk_buff *skb)
{
	struct dpaa2_fas *fas;
	u32 status = 0;

	fas = dpaa2_get_fas(vaddr, false);
	prefetch(fas);
	prefetch(skb->data);

	/* Get the timestamp value */
	if (priv->rx_tstamp) {
		struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
		__le64 *ts = dpaa2_get_ts(vaddr, false);
		u64 ns;

		memset(shhwtstamps, 0, sizeof(*shhwtstamps));

		ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts);
		shhwtstamps->hwtstamp = ns_to_ktime(ns);
	}

	/* Check if we need to validate the L4 csum */
	if (likely(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV)) {
		status = le32_to_cpu(fas->status);
		dpaa2_eth_validate_rx_csum(priv, status, skb);
	}

	skb->protocol = eth_type_trans(skb, priv->net_dev);
	skb_record_rx_queue(skb, fq->flowid);

	percpu_stats->rx_packets++;
	percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
	ch->stats.bytes_per_cdan += dpaa2_fd_get_len(fd);

	list_add_tail(&skb->list, ch->rx_list);
}

/* Main Rx frame processing routine */
void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
		  struct dpaa2_eth_channel *ch,
		  const struct dpaa2_fd *fd,
		  struct dpaa2_eth_fq *fq)
{
	dma_addr_t addr = dpaa2_fd_get_addr(fd);
	u8 fd_format = dpaa2_fd_get_format(fd);
	void *vaddr;
	struct sk_buff *skb;
	struct rtnl_link_stats64 *percpu_stats;
	struct dpaa2_eth_drv_stats *percpu_extras;
	struct device *dev = priv->net_dev->dev.parent;
	void *buf_data;
	u32 xdp_act;

	/* Tracing point */
	trace_dpaa2_rx_fd(priv->net_dev, fd);

	vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
	dma_sync_single_for_cpu(dev, addr, priv->rx_buf_size,
				DMA_BIDIRECTIONAL);

	buf_data = vaddr + dpaa2_fd_get_offset(fd);
	prefetch(buf_data);

	percpu_stats = this_cpu_ptr(priv->percpu_stats);
	percpu_extras = this_cpu_ptr(priv->percpu_extras);

	if (fd_format == dpaa2_fd_single) {
		/* XDP runs only on single-buffer frames; a non-PASS verdict
		 * means the frame was consumed by the XDP path
		 */
		xdp_act = dpaa2_eth_run_xdp(priv, ch, fq, (struct dpaa2_fd *)fd, vaddr);
		if (xdp_act != XDP_PASS) {
			percpu_stats->rx_packets++;
			percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
			return;
		}

		skb = dpaa2_eth_copybreak(ch, fd, vaddr);
		if (!skb) {
			dma_unmap_page(dev, addr, priv->rx_buf_size,
				       DMA_BIDIRECTIONAL);
			skb = dpaa2_eth_build_linear_skb(ch, fd, vaddr);
		}
	} else if (fd_format == dpaa2_fd_sg) {
		WARN_ON(priv->xdp_prog);

		dma_unmap_page(dev, addr, priv->rx_buf_size,
			       DMA_BIDIRECTIONAL);
		skb = dpaa2_eth_build_frag_skb(priv, ch, buf_data);
		free_pages((unsigned long)vaddr, 0);
		percpu_extras->rx_sg_frames++;
		percpu_extras->rx_sg_bytes += dpaa2_fd_get_len(fd);
	} else {
		/* We don't support any other format */
		goto err_frame_format;
	}

	if (unlikely(!skb))
		goto err_build_skb;

	dpaa2_eth_receive_skb(priv, ch, fd, vaddr, fq, percpu_stats, skb);
	return;

err_build_skb:
	dpaa2_eth_free_rx_fd(priv, fd, vaddr);
err_frame_format:
	percpu_stats->rx_dropped++;
}

/* Processing of Rx frames received on the error FQ
 * We check and print the error bits and then free the frame
 */
static void dpaa2_eth_rx_err(struct dpaa2_eth_priv *priv,
			     struct dpaa2_eth_channel *ch,
			     const struct dpaa2_fd *fd,
			     struct dpaa2_eth_fq *fq __always_unused)
{
	struct device *dev = priv->net_dev->dev.parent;
	dma_addr_t addr = dpaa2_fd_get_addr(fd);
	u8 fd_format = dpaa2_fd_get_format(fd);
	struct rtnl_link_stats64 *percpu_stats;
	struct dpaa2_eth_trap_item *trap_item;
	struct dpaa2_fapr *fapr;
	struct sk_buff *skb;
	void *buf_data;
	void *vaddr;

	vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
	dma_sync_single_for_cpu(dev, addr, priv->rx_buf_size,
				DMA_BIDIRECTIONAL);

	buf_data = vaddr + dpaa2_fd_get_offset(fd);

	if (fd_format == dpaa2_fd_single) {
		dma_unmap_page(dev, addr, priv->rx_buf_size,
			       DMA_BIDIRECTIONAL);
		skb = dpaa2_eth_build_linear_skb(ch, fd, vaddr);
	} else if (fd_format == dpaa2_fd_sg) {
		dma_unmap_page(dev, addr, priv->rx_buf_size,
			       DMA_BIDIRECTIONAL);
		skb = dpaa2_eth_build_frag_skb(priv, ch, buf_data);
		free_pages((unsigned long)vaddr, 0);
681061d631fSIoana Ciornei } else { 682061d631fSIoana Ciornei /* We don't support any other format */ 683061d631fSIoana Ciornei dpaa2_eth_free_rx_fd(priv, fd, vaddr); 684061d631fSIoana Ciornei goto err_frame_format; 685061d631fSIoana Ciornei } 686061d631fSIoana Ciornei 687061d631fSIoana Ciornei fapr = dpaa2_get_fapr(vaddr, false); 688061d631fSIoana Ciornei trap_item = dpaa2_eth_dl_get_trap(priv, fapr); 689061d631fSIoana Ciornei if (trap_item) 690061d631fSIoana Ciornei devlink_trap_report(priv->devlink, skb, trap_item->trap_ctx, 691061d631fSIoana Ciornei &priv->devlink_port, NULL); 692061d631fSIoana Ciornei consume_skb(skb); 693061d631fSIoana Ciornei 694061d631fSIoana Ciornei err_frame_format: 695061d631fSIoana Ciornei percpu_stats = this_cpu_ptr(priv->percpu_stats); 696061d631fSIoana Ciornei percpu_stats->rx_errors++; 697061d631fSIoana Ciornei ch->buf_count--; 698061d631fSIoana Ciornei } 699061d631fSIoana Ciornei 70034ff6846SIoana Radulescu /* Consume all frames pull-dequeued into the store. This is the simplest way to 70134ff6846SIoana Radulescu * make sure we don't accidentally issue another volatile dequeue which would 70234ff6846SIoana Radulescu * overwrite (leak) frames already in the store. 70334ff6846SIoana Radulescu * 70434ff6846SIoana Radulescu * Observance of NAPI budget is not our concern, leaving that to the caller. 
70534ff6846SIoana Radulescu */ 7065d8dccf8SIoana Ciornei static int dpaa2_eth_consume_frames(struct dpaa2_eth_channel *ch, 707569dac6aSIoana Ciocoi Radulescu struct dpaa2_eth_fq **src) 70834ff6846SIoana Radulescu { 70934ff6846SIoana Radulescu struct dpaa2_eth_priv *priv = ch->priv; 71068049a5fSIoana Ciocoi Radulescu struct dpaa2_eth_fq *fq = NULL; 71134ff6846SIoana Radulescu struct dpaa2_dq *dq; 71234ff6846SIoana Radulescu const struct dpaa2_fd *fd; 713ef17bd7cSIoana Radulescu int cleaned = 0, retries = 0; 71434ff6846SIoana Radulescu int is_last; 71534ff6846SIoana Radulescu 71634ff6846SIoana Radulescu do { 71734ff6846SIoana Radulescu dq = dpaa2_io_store_next(ch->store, &is_last); 71834ff6846SIoana Radulescu if (unlikely(!dq)) { 71934ff6846SIoana Radulescu /* If we're here, we *must* have placed a 72034ff6846SIoana Radulescu * volatile dequeue comnmand, so keep reading through 72134ff6846SIoana Radulescu * the store until we get some sort of valid response 72234ff6846SIoana Radulescu * token (either a valid frame or an "empty dequeue") 72334ff6846SIoana Radulescu */ 724ef17bd7cSIoana Radulescu if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES) { 725ef17bd7cSIoana Radulescu netdev_err_once(priv->net_dev, 726ef17bd7cSIoana Radulescu "Unable to read a valid dequeue response\n"); 727ef17bd7cSIoana Radulescu return -ETIMEDOUT; 728ef17bd7cSIoana Radulescu } 72934ff6846SIoana Radulescu continue; 73034ff6846SIoana Radulescu } 73134ff6846SIoana Radulescu 73234ff6846SIoana Radulescu fd = dpaa2_dq_fd(dq); 73334ff6846SIoana Radulescu fq = (struct dpaa2_eth_fq *)(uintptr_t)dpaa2_dq_fqd_ctx(dq); 73434ff6846SIoana Radulescu 735dbcdf728SIoana Ciocoi Radulescu fq->consume(priv, ch, fd, fq); 73634ff6846SIoana Radulescu cleaned++; 737ef17bd7cSIoana Radulescu retries = 0; 73834ff6846SIoana Radulescu } while (!is_last); 73934ff6846SIoana Radulescu 74068049a5fSIoana Ciocoi Radulescu if (!cleaned) 74168049a5fSIoana Ciocoi Radulescu return 0; 74268049a5fSIoana Ciocoi Radulescu 74368049a5fSIoana 
Ciocoi Radulescu fq->stats.frames += cleaned; 744460fd830SIoana Ciornei ch->stats.frames += cleaned; 745fc398becSIoana Ciornei ch->stats.frames_per_cdan += cleaned; 74668049a5fSIoana Ciocoi Radulescu 74768049a5fSIoana Ciocoi Radulescu /* A dequeue operation only pulls frames from a single queue 748569dac6aSIoana Ciocoi Radulescu * into the store. Return the frame queue as an out param. 74968049a5fSIoana Ciocoi Radulescu */ 750569dac6aSIoana Ciocoi Radulescu if (src) 751569dac6aSIoana Ciocoi Radulescu *src = fq; 75268049a5fSIoana Ciocoi Radulescu 75334ff6846SIoana Radulescu return cleaned; 75434ff6846SIoana Radulescu } 75534ff6846SIoana Radulescu 756c5521189SYangbo Lu static int dpaa2_eth_ptp_parse(struct sk_buff *skb, 757c5521189SYangbo Lu u8 *msgtype, u8 *twostep, u8 *udp, 758c5521189SYangbo Lu u16 *correction_offset, 759c5521189SYangbo Lu u16 *origintimestamp_offset) 76034ff6846SIoana Radulescu { 761c5521189SYangbo Lu unsigned int ptp_class; 762c5521189SYangbo Lu struct ptp_header *hdr; 763c5521189SYangbo Lu unsigned int type; 764c5521189SYangbo Lu u8 *base; 765c5521189SYangbo Lu 766c5521189SYangbo Lu ptp_class = ptp_classify_raw(skb); 767c5521189SYangbo Lu if (ptp_class == PTP_CLASS_NONE) 768c5521189SYangbo Lu return -EINVAL; 769c5521189SYangbo Lu 770c5521189SYangbo Lu hdr = ptp_parse_header(skb, ptp_class); 771c5521189SYangbo Lu if (!hdr) 772c5521189SYangbo Lu return -EINVAL; 773c5521189SYangbo Lu 774c5521189SYangbo Lu *msgtype = ptp_get_msgtype(hdr, ptp_class); 775c5521189SYangbo Lu *twostep = hdr->flag_field[0] & 0x2; 776c5521189SYangbo Lu 777c5521189SYangbo Lu type = ptp_class & PTP_CLASS_PMASK; 778c5521189SYangbo Lu if (type == PTP_CLASS_IPV4 || 779c5521189SYangbo Lu type == PTP_CLASS_IPV6) 780c5521189SYangbo Lu *udp = 1; 781c5521189SYangbo Lu else 782c5521189SYangbo Lu *udp = 0; 783c5521189SYangbo Lu 784c5521189SYangbo Lu base = skb_mac_header(skb); 785c5521189SYangbo Lu *correction_offset = (u8 *)&hdr->correction - base; 786c5521189SYangbo Lu 
*origintimestamp_offset = (u8 *)hdr + sizeof(struct ptp_header) - base; 787c5521189SYangbo Lu 788c5521189SYangbo Lu return 0; 789c5521189SYangbo Lu } 790c5521189SYangbo Lu 791c5521189SYangbo Lu /* Configure the egress frame annotation for timestamp update */ 792c5521189SYangbo Lu static void dpaa2_eth_enable_tx_tstamp(struct dpaa2_eth_priv *priv, 793c5521189SYangbo Lu struct dpaa2_fd *fd, 794c5521189SYangbo Lu void *buf_start, 795c5521189SYangbo Lu struct sk_buff *skb) 796c5521189SYangbo Lu { 797c5521189SYangbo Lu struct ptp_tstamp origin_timestamp; 798c5521189SYangbo Lu u8 msgtype, twostep, udp; 79934ff6846SIoana Radulescu struct dpaa2_faead *faead; 800c5521189SYangbo Lu struct dpaa2_fas *fas; 801c5521189SYangbo Lu struct timespec64 ts; 802c5521189SYangbo Lu u16 offset1, offset2; 80334ff6846SIoana Radulescu u32 ctrl, frc; 804c5521189SYangbo Lu __le64 *ns; 805c5521189SYangbo Lu u8 *data; 80634ff6846SIoana Radulescu 80734ff6846SIoana Radulescu /* Mark the egress frame annotation area as valid */ 80834ff6846SIoana Radulescu frc = dpaa2_fd_get_frc(fd); 80934ff6846SIoana Radulescu dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV); 81034ff6846SIoana Radulescu 81134ff6846SIoana Radulescu /* Set hardware annotation size */ 81234ff6846SIoana Radulescu ctrl = dpaa2_fd_get_ctrl(fd); 81334ff6846SIoana Radulescu dpaa2_fd_set_ctrl(fd, ctrl | DPAA2_FD_CTRL_ASAL); 81434ff6846SIoana Radulescu 81534ff6846SIoana Radulescu /* enable UPD (update prepanded data) bit in FAEAD field of 81634ff6846SIoana Radulescu * hardware frame annotation area 81734ff6846SIoana Radulescu */ 81834ff6846SIoana Radulescu ctrl = DPAA2_FAEAD_A2V | DPAA2_FAEAD_UPDV | DPAA2_FAEAD_UPD; 81934ff6846SIoana Radulescu faead = dpaa2_get_faead(buf_start, true); 82034ff6846SIoana Radulescu faead->ctrl = cpu_to_le32(ctrl); 821c5521189SYangbo Lu 822c5521189SYangbo Lu if (skb->cb[0] == TX_TSTAMP_ONESTEP_SYNC) { 823c5521189SYangbo Lu if (dpaa2_eth_ptp_parse(skb, &msgtype, &twostep, &udp, 824c5521189SYangbo Lu &offset1, 
&offset2) || 8256b6817c5SChristian Eggers msgtype != PTP_MSGTYPE_SYNC || twostep) { 826c5521189SYangbo Lu WARN_ONCE(1, "Bad packet for one-step timestamping\n"); 827c5521189SYangbo Lu return; 828c5521189SYangbo Lu } 829c5521189SYangbo Lu 830c5521189SYangbo Lu /* Mark the frame annotation status as valid */ 831c5521189SYangbo Lu frc = dpaa2_fd_get_frc(fd); 832c5521189SYangbo Lu dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FASV); 833c5521189SYangbo Lu 834c5521189SYangbo Lu /* Mark the PTP flag for one step timestamping */ 835c5521189SYangbo Lu fas = dpaa2_get_fas(buf_start, true); 836c5521189SYangbo Lu fas->status = cpu_to_le32(DPAA2_FAS_PTP); 837c5521189SYangbo Lu 838c5521189SYangbo Lu dpaa2_ptp->caps.gettime64(&dpaa2_ptp->caps, &ts); 839c5521189SYangbo Lu ns = dpaa2_get_ts(buf_start, true); 840c5521189SYangbo Lu *ns = cpu_to_le64(timespec64_to_ns(&ts) / 841c5521189SYangbo Lu DPAA2_PTP_CLK_PERIOD_NS); 842c5521189SYangbo Lu 843c5521189SYangbo Lu /* Update current time to PTP message originTimestamp field */ 844c5521189SYangbo Lu ns_to_ptp_tstamp(&origin_timestamp, le64_to_cpup(ns)); 845c5521189SYangbo Lu data = skb_mac_header(skb); 846c5521189SYangbo Lu *(__be16 *)(data + offset2) = htons(origin_timestamp.sec_msb); 847c5521189SYangbo Lu *(__be32 *)(data + offset2 + 2) = 848c5521189SYangbo Lu htonl(origin_timestamp.sec_lsb); 849c5521189SYangbo Lu *(__be32 *)(data + offset2 + 6) = htonl(origin_timestamp.nsec); 850c5521189SYangbo Lu 851c4680c97SRadu Bulie if (priv->ptp_correction_off == offset1) 852c4680c97SRadu Bulie return; 853c5521189SYangbo Lu 854c4680c97SRadu Bulie priv->dpaa2_set_onestep_params_cb(priv, offset1, udp); 855c4680c97SRadu Bulie priv->ptp_correction_off = offset1; 856c4680c97SRadu Bulie 857c5521189SYangbo Lu } 85834ff6846SIoana Radulescu } 85934ff6846SIoana Radulescu 8604a7f6c5aSRobert-Ionut Alexa void *dpaa2_eth_sgt_get(struct dpaa2_eth_priv *priv) 861ae3b0817SIoana Ciornei { 862ae3b0817SIoana Ciornei struct dpaa2_eth_sgt_cache *sgt_cache; 
863ae3b0817SIoana Ciornei void *sgt_buf = NULL; 864ae3b0817SIoana Ciornei int sgt_buf_size; 865ae3b0817SIoana Ciornei 866ae3b0817SIoana Ciornei sgt_cache = this_cpu_ptr(priv->sgt_cache); 867a4218aefSIoana Ciornei sgt_buf_size = priv->tx_data_offset + 868a4218aefSIoana Ciornei DPAA2_ETH_SG_ENTRIES_MAX * sizeof(struct dpaa2_sg_entry); 869ae3b0817SIoana Ciornei 870ae3b0817SIoana Ciornei if (sgt_cache->count == 0) 871ae3b0817SIoana Ciornei sgt_buf = napi_alloc_frag_align(sgt_buf_size, DPAA2_ETH_TX_BUF_ALIGN); 872ae3b0817SIoana Ciornei else 873ae3b0817SIoana Ciornei sgt_buf = sgt_cache->buf[--sgt_cache->count]; 874ae3b0817SIoana Ciornei if (!sgt_buf) 875ae3b0817SIoana Ciornei return NULL; 876ae3b0817SIoana Ciornei 877ae3b0817SIoana Ciornei memset(sgt_buf, 0, sgt_buf_size); 878ae3b0817SIoana Ciornei 879ae3b0817SIoana Ciornei return sgt_buf; 880ae3b0817SIoana Ciornei } 881ae3b0817SIoana Ciornei 8824a7f6c5aSRobert-Ionut Alexa void dpaa2_eth_sgt_recycle(struct dpaa2_eth_priv *priv, void *sgt_buf) 883ae3b0817SIoana Ciornei { 884ae3b0817SIoana Ciornei struct dpaa2_eth_sgt_cache *sgt_cache; 885ae3b0817SIoana Ciornei 886ae3b0817SIoana Ciornei sgt_cache = this_cpu_ptr(priv->sgt_cache); 887ae3b0817SIoana Ciornei if (sgt_cache->count >= DPAA2_ETH_SGT_CACHE_SIZE) 888ae3b0817SIoana Ciornei skb_free_frag(sgt_buf); 889ae3b0817SIoana Ciornei else 890ae3b0817SIoana Ciornei sgt_cache->buf[sgt_cache->count++] = sgt_buf; 891ae3b0817SIoana Ciornei } 892ae3b0817SIoana Ciornei 89334ff6846SIoana Radulescu /* Create a frame descriptor based on a fragmented skb */ 8945d8dccf8SIoana Ciornei static int dpaa2_eth_build_sg_fd(struct dpaa2_eth_priv *priv, 89534ff6846SIoana Radulescu struct sk_buff *skb, 89664a965deSYangbo Lu struct dpaa2_fd *fd, 89764a965deSYangbo Lu void **swa_addr) 89834ff6846SIoana Radulescu { 89934ff6846SIoana Radulescu struct device *dev = priv->net_dev->dev.parent; 90034ff6846SIoana Radulescu void *sgt_buf = NULL; 90134ff6846SIoana Radulescu dma_addr_t addr; 90234ff6846SIoana 
Radulescu int nr_frags = skb_shinfo(skb)->nr_frags; 90334ff6846SIoana Radulescu struct dpaa2_sg_entry *sgt; 90434ff6846SIoana Radulescu int i, err; 90534ff6846SIoana Radulescu int sgt_buf_size; 90634ff6846SIoana Radulescu struct scatterlist *scl, *crt_scl; 90734ff6846SIoana Radulescu int num_sg; 90834ff6846SIoana Radulescu int num_dma_bufs; 90934ff6846SIoana Radulescu struct dpaa2_eth_swa *swa; 91034ff6846SIoana Radulescu 91134ff6846SIoana Radulescu /* Create and map scatterlist. 91234ff6846SIoana Radulescu * We don't advertise NETIF_F_FRAGLIST, so skb_to_sgvec() will not have 91334ff6846SIoana Radulescu * to go beyond nr_frags+1. 91434ff6846SIoana Radulescu * Note: We don't support chained scatterlists 91534ff6846SIoana Radulescu */ 91634ff6846SIoana Radulescu if (unlikely(PAGE_SIZE / sizeof(struct scatterlist) < nr_frags + 1)) 91734ff6846SIoana Radulescu return -EINVAL; 91834ff6846SIoana Radulescu 919d4ceb8deSJulia Lawall scl = kmalloc_array(nr_frags + 1, sizeof(struct scatterlist), GFP_ATOMIC); 92034ff6846SIoana Radulescu if (unlikely(!scl)) 92134ff6846SIoana Radulescu return -ENOMEM; 92234ff6846SIoana Radulescu 92334ff6846SIoana Radulescu sg_init_table(scl, nr_frags + 1); 92434ff6846SIoana Radulescu num_sg = skb_to_sgvec(skb, scl, 0, skb->len); 92537fbbddaSIoana Ciornei if (unlikely(num_sg < 0)) { 92637fbbddaSIoana Ciornei err = -ENOMEM; 92737fbbddaSIoana Ciornei goto dma_map_sg_failed; 92837fbbddaSIoana Ciornei } 92934ff6846SIoana Radulescu num_dma_bufs = dma_map_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL); 93034ff6846SIoana Radulescu if (unlikely(!num_dma_bufs)) { 93134ff6846SIoana Radulescu err = -ENOMEM; 93234ff6846SIoana Radulescu goto dma_map_sg_failed; 93334ff6846SIoana Radulescu } 93434ff6846SIoana Radulescu 93534ff6846SIoana Radulescu /* Prepare the HW SGT structure */ 93634ff6846SIoana Radulescu sgt_buf_size = priv->tx_data_offset + 93734ff6846SIoana Radulescu sizeof(struct dpaa2_sg_entry) * num_dma_bufs; 938a4218aefSIoana Ciornei sgt_buf = 
dpaa2_eth_sgt_get(priv); 93934ff6846SIoana Radulescu if (unlikely(!sgt_buf)) { 94034ff6846SIoana Radulescu err = -ENOMEM; 94134ff6846SIoana Radulescu goto sgt_buf_alloc_failed; 94234ff6846SIoana Radulescu } 94334ff6846SIoana Radulescu 94434ff6846SIoana Radulescu sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset); 94534ff6846SIoana Radulescu 94634ff6846SIoana Radulescu /* Fill in the HW SGT structure. 94734ff6846SIoana Radulescu * 94834ff6846SIoana Radulescu * sgt_buf is zeroed out, so the following fields are implicit 94934ff6846SIoana Radulescu * in all sgt entries: 95034ff6846SIoana Radulescu * - offset is 0 95134ff6846SIoana Radulescu * - format is 'dpaa2_sg_single' 95234ff6846SIoana Radulescu */ 95334ff6846SIoana Radulescu for_each_sg(scl, crt_scl, num_dma_bufs, i) { 95434ff6846SIoana Radulescu dpaa2_sg_set_addr(&sgt[i], sg_dma_address(crt_scl)); 95534ff6846SIoana Radulescu dpaa2_sg_set_len(&sgt[i], sg_dma_len(crt_scl)); 95634ff6846SIoana Radulescu } 95734ff6846SIoana Radulescu dpaa2_sg_set_final(&sgt[i - 1], true); 95834ff6846SIoana Radulescu 95934ff6846SIoana Radulescu /* Store the skb backpointer in the SGT buffer. 96034ff6846SIoana Radulescu * Fit the scatterlist and the number of buffers alongside the 96134ff6846SIoana Radulescu * skb backpointer in the software annotation area. We'll need 96234ff6846SIoana Radulescu * all of them on Tx Conf. 
96334ff6846SIoana Radulescu */ 96464a965deSYangbo Lu *swa_addr = (void *)sgt_buf; 96534ff6846SIoana Radulescu swa = (struct dpaa2_eth_swa *)sgt_buf; 966e3fdf6baSIoana Radulescu swa->type = DPAA2_ETH_SWA_SG; 967e3fdf6baSIoana Radulescu swa->sg.skb = skb; 968e3fdf6baSIoana Radulescu swa->sg.scl = scl; 969e3fdf6baSIoana Radulescu swa->sg.num_sg = num_sg; 970e3fdf6baSIoana Radulescu swa->sg.sgt_size = sgt_buf_size; 97134ff6846SIoana Radulescu 97234ff6846SIoana Radulescu /* Separately map the SGT buffer */ 97334ff6846SIoana Radulescu addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL); 97434ff6846SIoana Radulescu if (unlikely(dma_mapping_error(dev, addr))) { 97534ff6846SIoana Radulescu err = -ENOMEM; 97634ff6846SIoana Radulescu goto dma_map_single_failed; 97734ff6846SIoana Radulescu } 978a4ca448eSIoana Ciornei memset(fd, 0, sizeof(struct dpaa2_fd)); 97934ff6846SIoana Radulescu dpaa2_fd_set_offset(fd, priv->tx_data_offset); 98034ff6846SIoana Radulescu dpaa2_fd_set_format(fd, dpaa2_fd_sg); 98134ff6846SIoana Radulescu dpaa2_fd_set_addr(fd, addr); 98234ff6846SIoana Radulescu dpaa2_fd_set_len(fd, skb->len); 983b948c8c6SIoana Radulescu dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA); 98434ff6846SIoana Radulescu 98534ff6846SIoana Radulescu return 0; 98634ff6846SIoana Radulescu 98734ff6846SIoana Radulescu dma_map_single_failed: 988a4218aefSIoana Ciornei dpaa2_eth_sgt_recycle(priv, sgt_buf); 98934ff6846SIoana Radulescu sgt_buf_alloc_failed: 99034ff6846SIoana Radulescu dma_unmap_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL); 99134ff6846SIoana Radulescu dma_map_sg_failed: 99234ff6846SIoana Radulescu kfree(scl); 99334ff6846SIoana Radulescu return err; 99434ff6846SIoana Radulescu } 99534ff6846SIoana Radulescu 996d70446eeSIoana Ciornei /* Create a SG frame descriptor based on a linear skb. 
997d70446eeSIoana Ciornei * 998d70446eeSIoana Ciornei * This function is used on the Tx path when the skb headroom is not large 999d70446eeSIoana Ciornei * enough for the HW requirements, thus instead of realloc-ing the skb we 1000d70446eeSIoana Ciornei * create a SG frame descriptor with only one entry. 1001d70446eeSIoana Ciornei */ 10025d8dccf8SIoana Ciornei static int dpaa2_eth_build_sg_fd_single_buf(struct dpaa2_eth_priv *priv, 1003d70446eeSIoana Ciornei struct sk_buff *skb, 100464a965deSYangbo Lu struct dpaa2_fd *fd, 100564a965deSYangbo Lu void **swa_addr) 1006d70446eeSIoana Ciornei { 1007d70446eeSIoana Ciornei struct device *dev = priv->net_dev->dev.parent; 1008d70446eeSIoana Ciornei struct dpaa2_sg_entry *sgt; 1009d70446eeSIoana Ciornei struct dpaa2_eth_swa *swa; 1010d70446eeSIoana Ciornei dma_addr_t addr, sgt_addr; 1011d70446eeSIoana Ciornei void *sgt_buf = NULL; 1012d70446eeSIoana Ciornei int sgt_buf_size; 1013d70446eeSIoana Ciornei int err; 1014d70446eeSIoana Ciornei 1015d70446eeSIoana Ciornei /* Prepare the HW SGT structure */ 1016d70446eeSIoana Ciornei sgt_buf_size = priv->tx_data_offset + sizeof(struct dpaa2_sg_entry); 1017ae3b0817SIoana Ciornei sgt_buf = dpaa2_eth_sgt_get(priv); 1018d70446eeSIoana Ciornei if (unlikely(!sgt_buf)) 1019d70446eeSIoana Ciornei return -ENOMEM; 1020d70446eeSIoana Ciornei sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset); 1021d70446eeSIoana Ciornei 1022d70446eeSIoana Ciornei addr = dma_map_single(dev, skb->data, skb->len, DMA_BIDIRECTIONAL); 1023d70446eeSIoana Ciornei if (unlikely(dma_mapping_error(dev, addr))) { 1024d70446eeSIoana Ciornei err = -ENOMEM; 1025d70446eeSIoana Ciornei goto data_map_failed; 1026d70446eeSIoana Ciornei } 1027d70446eeSIoana Ciornei 1028d70446eeSIoana Ciornei /* Fill in the HW SGT structure */ 1029d70446eeSIoana Ciornei dpaa2_sg_set_addr(sgt, addr); 1030d70446eeSIoana Ciornei dpaa2_sg_set_len(sgt, skb->len); 1031d70446eeSIoana Ciornei dpaa2_sg_set_final(sgt, true); 1032d70446eeSIoana 
Ciornei 1033d70446eeSIoana Ciornei /* Store the skb backpointer in the SGT buffer */ 103464a965deSYangbo Lu *swa_addr = (void *)sgt_buf; 1035d70446eeSIoana Ciornei swa = (struct dpaa2_eth_swa *)sgt_buf; 1036d70446eeSIoana Ciornei swa->type = DPAA2_ETH_SWA_SINGLE; 1037d70446eeSIoana Ciornei swa->single.skb = skb; 103854a57d1cSIoana Ciornei swa->single.sgt_size = sgt_buf_size; 1039d70446eeSIoana Ciornei 1040d70446eeSIoana Ciornei /* Separately map the SGT buffer */ 1041d70446eeSIoana Ciornei sgt_addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL); 1042d70446eeSIoana Ciornei if (unlikely(dma_mapping_error(dev, sgt_addr))) { 1043d70446eeSIoana Ciornei err = -ENOMEM; 1044d70446eeSIoana Ciornei goto sgt_map_failed; 1045d70446eeSIoana Ciornei } 1046d70446eeSIoana Ciornei 1047a4ca448eSIoana Ciornei memset(fd, 0, sizeof(struct dpaa2_fd)); 1048d70446eeSIoana Ciornei dpaa2_fd_set_offset(fd, priv->tx_data_offset); 1049d70446eeSIoana Ciornei dpaa2_fd_set_format(fd, dpaa2_fd_sg); 1050d70446eeSIoana Ciornei dpaa2_fd_set_addr(fd, sgt_addr); 1051d70446eeSIoana Ciornei dpaa2_fd_set_len(fd, skb->len); 1052d70446eeSIoana Ciornei dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA); 1053d70446eeSIoana Ciornei 1054d70446eeSIoana Ciornei return 0; 1055d70446eeSIoana Ciornei 1056d70446eeSIoana Ciornei sgt_map_failed: 1057d70446eeSIoana Ciornei dma_unmap_single(dev, addr, skb->len, DMA_BIDIRECTIONAL); 1058d70446eeSIoana Ciornei data_map_failed: 1059ae3b0817SIoana Ciornei dpaa2_eth_sgt_recycle(priv, sgt_buf); 1060d70446eeSIoana Ciornei 1061d70446eeSIoana Ciornei return err; 1062d70446eeSIoana Ciornei } 1063d70446eeSIoana Ciornei 106434ff6846SIoana Radulescu /* Create a frame descriptor based on a linear skb */ 10655d8dccf8SIoana Ciornei static int dpaa2_eth_build_single_fd(struct dpaa2_eth_priv *priv, 106634ff6846SIoana Radulescu struct sk_buff *skb, 106764a965deSYangbo Lu struct dpaa2_fd *fd, 106864a965deSYangbo Lu void **swa_addr) 106934ff6846SIoana Radulescu { 107034ff6846SIoana Radulescu 
struct device *dev = priv->net_dev->dev.parent; 107134ff6846SIoana Radulescu u8 *buffer_start, *aligned_start; 1072e3fdf6baSIoana Radulescu struct dpaa2_eth_swa *swa; 107334ff6846SIoana Radulescu dma_addr_t addr; 107434ff6846SIoana Radulescu 10751cf773bdSYangbo Lu buffer_start = skb->data - dpaa2_eth_needed_headroom(skb); 107634ff6846SIoana Radulescu 107734ff6846SIoana Radulescu /* If there's enough room to align the FD address, do it. 107834ff6846SIoana Radulescu * It will help hardware optimize accesses. 107934ff6846SIoana Radulescu */ 108034ff6846SIoana Radulescu aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN, 108134ff6846SIoana Radulescu DPAA2_ETH_TX_BUF_ALIGN); 108234ff6846SIoana Radulescu if (aligned_start >= skb->head) 108334ff6846SIoana Radulescu buffer_start = aligned_start; 108434ff6846SIoana Radulescu 108534ff6846SIoana Radulescu /* Store a backpointer to the skb at the beginning of the buffer 108634ff6846SIoana Radulescu * (in the private data area) such that we can release it 108734ff6846SIoana Radulescu * on Tx confirm 108834ff6846SIoana Radulescu */ 108964a965deSYangbo Lu *swa_addr = (void *)buffer_start; 1090e3fdf6baSIoana Radulescu swa = (struct dpaa2_eth_swa *)buffer_start; 1091e3fdf6baSIoana Radulescu swa->type = DPAA2_ETH_SWA_SINGLE; 1092e3fdf6baSIoana Radulescu swa->single.skb = skb; 109334ff6846SIoana Radulescu 109434ff6846SIoana Radulescu addr = dma_map_single(dev, buffer_start, 109534ff6846SIoana Radulescu skb_tail_pointer(skb) - buffer_start, 109634ff6846SIoana Radulescu DMA_BIDIRECTIONAL); 109734ff6846SIoana Radulescu if (unlikely(dma_mapping_error(dev, addr))) 109834ff6846SIoana Radulescu return -ENOMEM; 109934ff6846SIoana Radulescu 1100a4ca448eSIoana Ciornei memset(fd, 0, sizeof(struct dpaa2_fd)); 110134ff6846SIoana Radulescu dpaa2_fd_set_addr(fd, addr); 110234ff6846SIoana Radulescu dpaa2_fd_set_offset(fd, (u16)(skb->data - buffer_start)); 110334ff6846SIoana Radulescu dpaa2_fd_set_len(fd, skb->len); 110434ff6846SIoana 
Radulescu dpaa2_fd_set_format(fd, dpaa2_fd_single); 1105b948c8c6SIoana Radulescu dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA); 110634ff6846SIoana Radulescu 110734ff6846SIoana Radulescu return 0; 110834ff6846SIoana Radulescu } 110934ff6846SIoana Radulescu 111034ff6846SIoana Radulescu /* FD freeing routine on the Tx path 111134ff6846SIoana Radulescu * 111234ff6846SIoana Radulescu * DMA-unmap and free FD and possibly SGT buffer allocated on Tx. The skb 111334ff6846SIoana Radulescu * back-pointed to is also freed. 111434ff6846SIoana Radulescu * This can be called either from dpaa2_eth_tx_conf() or on the error path of 111534ff6846SIoana Radulescu * dpaa2_eth_tx(). 111634ff6846SIoana Radulescu */ 11174a7f6c5aSRobert-Ionut Alexa void dpaa2_eth_free_tx_fd(struct dpaa2_eth_priv *priv, 11184a7f6c5aSRobert-Ionut Alexa struct dpaa2_eth_channel *ch, 1119d678be1dSIoana Radulescu struct dpaa2_eth_fq *fq, 11200723a3aeSIoana Ciocoi Radulescu const struct dpaa2_fd *fd, bool in_napi) 112134ff6846SIoana Radulescu { 112234ff6846SIoana Radulescu struct device *dev = priv->net_dev->dev.parent; 1123d70446eeSIoana Ciornei dma_addr_t fd_addr, sg_addr; 1124d678be1dSIoana Radulescu struct sk_buff *skb = NULL; 112534ff6846SIoana Radulescu unsigned char *buffer_start; 112634ff6846SIoana Radulescu struct dpaa2_eth_swa *swa; 112734ff6846SIoana Radulescu u8 fd_format = dpaa2_fd_get_format(fd); 1128d678be1dSIoana Radulescu u32 fd_len = dpaa2_fd_get_len(fd); 1129d70446eeSIoana Ciornei struct dpaa2_sg_entry *sgt; 11303dc709e0SIoana Ciornei int should_free_skb = 1; 113106d12994SIoana Ciornei void *tso_hdr; 11323dc709e0SIoana Ciornei int i; 1133d70446eeSIoana Ciornei 113434ff6846SIoana Radulescu fd_addr = dpaa2_fd_get_addr(fd); 1135e3fdf6baSIoana Radulescu buffer_start = dpaa2_iova_to_virt(priv->iommu_domain, fd_addr); 1136e3fdf6baSIoana Radulescu swa = (struct dpaa2_eth_swa *)buffer_start; 113734ff6846SIoana Radulescu 113834ff6846SIoana Radulescu if (fd_format == dpaa2_fd_single) { 1139d678be1dSIoana Radulescu 
		if (swa->type == DPAA2_ETH_SWA_SINGLE) {
			skb = swa->single.skb;
			/* Accessing the skb buffer is safe before dma unmap,
			 * because we didn't map the actual skb shell.
			 */
			dma_unmap_single(dev, fd_addr,
					 skb_tail_pointer(skb) - buffer_start,
					 DMA_BIDIRECTIONAL);
		} else {
			WARN_ONCE(swa->type != DPAA2_ETH_SWA_XDP, "Wrong SWA type");
			dma_unmap_single(dev, fd_addr, swa->xdp.dma_size,
					 DMA_BIDIRECTIONAL);
		}
	} else if (fd_format == dpaa2_fd_sg) {
		if (swa->type == DPAA2_ETH_SWA_SG) {
			skb = swa->sg.skb;

			/* Unmap the scatterlist */
			dma_unmap_sg(dev, swa->sg.scl, swa->sg.num_sg,
				     DMA_BIDIRECTIONAL);
			kfree(swa->sg.scl);

			/* Unmap the SGT buffer */
			dma_unmap_single(dev, fd_addr, swa->sg.sgt_size,
					 DMA_BIDIRECTIONAL);
		} else if (swa->type == DPAA2_ETH_SWA_SW_TSO) {
			skb = swa->tso.skb;

			sgt = (struct dpaa2_sg_entry *)(buffer_start +
							priv->tx_data_offset);

			/* Unmap the SGT buffer */
			dma_unmap_single(dev, fd_addr, swa->tso.sgt_size,
					 DMA_BIDIRECTIONAL);

			/* Unmap and free the header (first SG entry of a
			 * software TSO frame, allocated in build_gso_fd)
			 */
			tso_hdr = dpaa2_iova_to_virt(priv->iommu_domain, dpaa2_sg_get_addr(sgt));
			dma_unmap_single(dev, dpaa2_sg_get_addr(sgt), TSO_HEADER_SIZE,
					 DMA_TO_DEVICE);
			kfree(tso_hdr);

			/* Unmap the other SG entries for the data */
			for (i = 1; i < swa->tso.num_sg; i++)
				dma_unmap_single(dev, dpaa2_sg_get_addr(&sgt[i]),
						 dpaa2_sg_get_len(&sgt[i]), DMA_TO_DEVICE);

			/* The TSO skb is freed only when its last FD is
			 * confirmed (see swa->tso.is_last_fd setup)
			 */
			if (!swa->tso.is_last_fd)
				should_free_skb = 0;
		} else if (swa->type == DPAA2_ETH_SWA_XSK) {
			/* Unmap the SGT Buffer */
			dma_unmap_single(dev, fd_addr, swa->xsk.sgt_size,
					 DMA_BIDIRECTIONAL);
		} else {
			skb = swa->single.skb;

			/* Unmap the SGT Buffer */
			dma_unmap_single(dev, fd_addr, swa->single.sgt_size,
					 DMA_BIDIRECTIONAL);

			sgt = (struct dpaa2_sg_entry *)(buffer_start +
							priv->tx_data_offset);
			sg_addr = dpaa2_sg_get_addr(sgt);
			dma_unmap_single(dev, sg_addr, skb->len, DMA_BIDIRECTIONAL);
		}
	} else {
		netdev_dbg(priv->net_dev, "Invalid FD format\n");
		return;
	}

	/* XSK Tx frames have no skb attached; just account the sent frame
	 * and recycle the SGT buffer
	 */
	if (swa->type == DPAA2_ETH_SWA_XSK) {
		ch->xsk_tx_pkts_sent++;
		dpaa2_eth_sgt_recycle(priv, buffer_start);
		return;
	}

	if (swa->type != DPAA2_ETH_SWA_XDP && in_napi) {
		fq->dq_frames++;
		fq->dq_bytes += fd_len;
	}

	if (swa->type == DPAA2_ETH_SWA_XDP) {
		xdp_return_frame(swa->xdp.xdpf);
		return;
	}

	/* Get the timestamp value */
	if (swa->type != DPAA2_ETH_SWA_SW_TSO) {
		if (skb->cb[0] == TX_TSTAMP) {
			struct skb_shared_hwtstamps shhwtstamps;
			__le64 *ts = dpaa2_get_ts(buffer_start, true);
			u64 ns;

			memset(&shhwtstamps, 0, sizeof(shhwtstamps));

			ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts);
			shhwtstamps.hwtstamp = ns_to_ktime(ns);
			skb_tstamp_tx(skb, &shhwtstamps);
		} else if (skb->cb[0] == TX_TSTAMP_ONESTEP_SYNC) {
			/* Taken in dpaa2_eth_tx_onestep_tstamp() before
			 * transmitting the one-step PTP Sync packet
			 */
			mutex_unlock(&priv->onestep_tstamp_lock);
		}
	}

	/* Free SGT buffer allocated on tx */
	if (fd_format != dpaa2_fd_single)
		dpaa2_eth_sgt_recycle(priv, buffer_start);

	/* Move on with skb release. If we are just confirming multiple FDs
	 * from the same TSO skb then only the last one will need to free the
	 * skb.
	 */
	if (should_free_skb)
		napi_consume_skb(skb, in_napi);
}

/* Build one SG-format frame descriptor per software-TSO segment of @skb.
 * Each FD gets its own SGT buffer, a freshly built (and DMA-mapped) packet
 * header and SG entries covering the segment payload. On success returns 0
 * and fills in *num_fds and *total_fds_len; on error, unwinds all mappings
 * and already-built FDs and returns a negative errno.
 */
static int dpaa2_eth_build_gso_fd(struct dpaa2_eth_priv *priv,
				  struct sk_buff *skb, struct dpaa2_fd *fd,
				  int *num_fds, u32 *total_fds_len)
{
	struct device *dev = priv->net_dev->dev.parent;
	int hdr_len, total_len, data_left, fd_len;
	int num_sge, err, i, sgt_buf_size;
	struct dpaa2_fd *fd_start = fd;
	struct dpaa2_sg_entry *sgt;
	struct dpaa2_eth_swa *swa;
	dma_addr_t sgt_addr, addr;
	dma_addr_t tso_hdr_dma;
	unsigned int index = 0;
	struct tso_t tso;
	char *tso_hdr;
	void *sgt_buf;

	/* Initialize the TSO handler, and prepare the first payload */
	hdr_len = tso_start(skb, &tso);
	*total_fds_len = 0;

	total_len = skb->len - hdr_len;
	while (total_len > 0) {
		/* Prepare the HW SGT structure for this frame */
		sgt_buf = dpaa2_eth_sgt_get(priv);
		if (unlikely(!sgt_buf)) {
			netdev_err(priv->net_dev, "dpaa2_eth_sgt_get() failed\n");
			err = -ENOMEM;
			goto err_sgt_get;
		}
		sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);

		/* Determine the data length of this frame */
		data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
		total_len -= data_left;
		fd_len = data_left + hdr_len;

		/* Prepare packet headers: MAC + IP + TCP */
		tso_hdr = kmalloc(TSO_HEADER_SIZE, GFP_ATOMIC);
		if (!tso_hdr) {
			err = -ENOMEM;
			goto err_alloc_tso_hdr;
		}

		tso_build_hdr(skb, tso_hdr, &tso, data_left, total_len == 0);
		tso_hdr_dma = dma_map_single(dev, tso_hdr, TSO_HEADER_SIZE, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, tso_hdr_dma)) {
			netdev_err(priv->net_dev, "dma_map_single(tso_hdr) failed\n");
			err = -ENOMEM;
			goto err_map_tso_hdr;
		}

		/* Setup the SG entry for the header */
		dpaa2_sg_set_addr(sgt, tso_hdr_dma);
		dpaa2_sg_set_len(sgt, hdr_len);
		dpaa2_sg_set_final(sgt, data_left <= 0);

		/* Compose the SG entries for each fragment of data */
		num_sge = 1;
		while (data_left > 0) {
			int size;

			/* Move to the next SG entry */
			sgt++;
			size = min_t(int, tso.size, data_left);

			addr = dma_map_single(dev, tso.data, size, DMA_TO_DEVICE);
			if (dma_mapping_error(dev, addr)) {
				netdev_err(priv->net_dev, "dma_map_single(tso.data) failed\n");
				err = -ENOMEM;
				goto err_map_data;
			}
			dpaa2_sg_set_addr(sgt, addr);
			dpaa2_sg_set_len(sgt, size);
			dpaa2_sg_set_final(sgt, size == data_left);

			num_sge++;

			/* Build the data for the __next__ fragment */
			data_left -= size;
			tso_build_data(skb, &tso, size);
		}

		/* Store the skb backpointer in the SGT buffer */
		sgt_buf_size = priv->tx_data_offset + num_sge * sizeof(struct dpaa2_sg_entry);
		swa = (struct dpaa2_eth_swa *)sgt_buf;
		swa->type = DPAA2_ETH_SWA_SW_TSO;
		swa->tso.skb = skb;
		swa->tso.num_sg = num_sge;
		swa->tso.sgt_size = sgt_buf_size;
		swa->tso.is_last_fd = total_len == 0 ? 1 : 0;

		/* Separately map the SGT buffer */
		sgt_addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL);
		if (unlikely(dma_mapping_error(dev, sgt_addr))) {
			netdev_err(priv->net_dev, "dma_map_single(sgt_buf) failed\n");
			err = -ENOMEM;
			goto err_map_sgt;
		}

		/* Setup the frame descriptor */
		memset(fd, 0, sizeof(struct dpaa2_fd));
		dpaa2_fd_set_offset(fd, priv->tx_data_offset);
		dpaa2_fd_set_format(fd, dpaa2_fd_sg);
		dpaa2_fd_set_addr(fd, sgt_addr);
		dpaa2_fd_set_len(fd, fd_len);
		dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);

		*total_fds_len += fd_len;
		/* Advance to the next frame descriptor */
		fd++;
		index++;
	}

	*num_fds = index;

	return 0;

err_map_sgt:
err_map_data:
	/* Unmap all the data S/G entries for the current FD */
	sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);
	for (i = 1; i < num_sge; i++)
		dma_unmap_single(dev, dpaa2_sg_get_addr(&sgt[i]),
				 dpaa2_sg_get_len(&sgt[i]), DMA_TO_DEVICE);

	/* Unmap the header entry */
	dma_unmap_single(dev, tso_hdr_dma, TSO_HEADER_SIZE, DMA_TO_DEVICE);
err_map_tso_hdr:
	kfree(tso_hdr);
err_alloc_tso_hdr:
	dpaa2_eth_sgt_recycle(priv, sgt_buf);
err_sgt_get:
	/* Free all the other FDs that were already fully created */
	for (i = 0; i < index; i++)
		dpaa2_eth_free_tx_fd(priv, NULL, NULL, &fd_start[i], false);

	return err;
}

/* Common Tx path: build one FD (or several, for software TSO) for the skb,
 * select the Tx FQ/priority, then enqueue with retry on portal busy.
 * Called both from the ndo_start_xmit handler and from the one-step
 * timestamping worker.
 */
static netdev_tx_t __dpaa2_eth_tx(struct sk_buff *skb,
				  struct net_device *net_dev)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	int total_enqueued = 0, retries = 0, enqueued;
	struct dpaa2_eth_drv_stats *percpu_extras;
	struct rtnl_link_stats64 *percpu_stats;
	unsigned int needed_headroom;
	int num_fds = 1, max_retries;
	struct dpaa2_eth_fq *fq;
	struct netdev_queue *nq;
	struct dpaa2_fd *fd;
	u16 queue_mapping;
	void *swa = NULL;
	u8 prio = 0;
	int err, i;
	u32 fd_len;

	percpu_stats = this_cpu_ptr(priv->percpu_stats);
	percpu_extras = this_cpu_ptr(priv->percpu_extras);
	/* Per-cpu scratch array of FDs, sized for the worst-case TSO split */
	fd = (this_cpu_ptr(priv->fd))->array;

	needed_headroom = dpaa2_eth_needed_headroom(skb);

	/* We'll be holding a back-reference to the skb until Tx Confirmation;
	 * we don't want that overwritten by a concurrent Tx with a cloned skb.
	 */
	skb = skb_unshare(skb, GFP_ATOMIC);
	if (unlikely(!skb)) {
		/* skb_unshare() has already freed the skb */
		percpu_stats->tx_dropped++;
		return NETDEV_TX_OK;
	}

	/* Setup the FD fields */

	if (skb_is_gso(skb)) {
		err = dpaa2_eth_build_gso_fd(priv, skb, fd, &num_fds, &fd_len);
		percpu_extras->tx_sg_frames += num_fds;
		percpu_extras->tx_sg_bytes += fd_len;
		percpu_extras->tx_tso_frames += num_fds;
		percpu_extras->tx_tso_bytes += fd_len;
	} else if (skb_is_nonlinear(skb)) {
		err = dpaa2_eth_build_sg_fd(priv, skb, fd, &swa);
		percpu_extras->tx_sg_frames++;
		percpu_extras->tx_sg_bytes += skb->len;
		fd_len = dpaa2_fd_get_len(fd);
	} else if (skb_headroom(skb) < needed_headroom) {
		/* Linear skb without enough headroom for our annotations:
		 * send it as a single-buffer SG frame instead of reallocating
		 */
		err = dpaa2_eth_build_sg_fd_single_buf(priv, skb, fd, &swa);
		percpu_extras->tx_sg_frames++;
		percpu_extras->tx_sg_bytes += skb->len;
		percpu_extras->tx_converted_sg_frames++;
		percpu_extras->tx_converted_sg_bytes += skb->len;
		fd_len = dpaa2_fd_get_len(fd);
	} else {
		err = dpaa2_eth_build_single_fd(priv, skb, fd, &swa);
		fd_len = dpaa2_fd_get_len(fd);
	}

	if (unlikely(err)) {
		percpu_stats->tx_dropped++;
		goto err_build_fd;
	}

	/* skb->cb[0] carries the Tx timestamping request (set in
	 * dpaa2_eth_tx); GSO frames (swa == NULL) are not timestamped
	 */
	if (swa && skb->cb[0])
		dpaa2_eth_enable_tx_tstamp(priv, fd, swa, skb);

	/* Tracing point */
	for (i = 0; i < num_fds; i++)
		trace_dpaa2_tx_fd(net_dev, &fd[i]);

	/* TxConf FQ selection relies on queue id from the stack.
	 * In case of a forwarded frame from another DPNI interface, we choose
	 * a queue affined to the same core that processed the Rx frame
	 */
	queue_mapping = skb_get_queue_mapping(skb);

	if (net_dev->num_tc) {
		prio = netdev_txq_to_tc(net_dev, queue_mapping);
		/* Hardware interprets priority level 0 as being the highest,
		 * so we need to do a reverse mapping to the netdev tc index
		 */
		prio = net_dev->num_tc - prio - 1;
		/* We have only one FQ array entry for all Tx hardware queues
		 * with the same flow id (but different priority levels)
		 */
		queue_mapping %= dpaa2_eth_queue_count(priv);
	}
	fq = &priv->fq[queue_mapping];
	nq = netdev_get_tx_queue(net_dev, queue_mapping);
	netdev_tx_sent_queue(nq, fd_len);

	/* Everything that happens after this enqueues might race with
	 * the Tx confirmation callback for this frame
	 */
	max_retries = num_fds * DPAA2_ETH_ENQUEUE_RETRIES;
	while (total_enqueued < num_fds && retries < max_retries) {
		err = priv->enqueue(priv, fq, &fd[total_enqueued],
				    prio, num_fds - total_enqueued, &enqueued);
		if (err == -EBUSY) {
			retries++;
			continue;
		}

		total_enqueued += enqueued;
	}
	percpu_extras->tx_portal_busy += retries;

	if (unlikely(err < 0)) {
		percpu_stats->tx_errors++;
		/* Clean up everything, including freeing the skb */
		dpaa2_eth_free_tx_fd(priv, NULL, fq, fd, false);
		netdev_tx_completed_queue(nq, 1, fd_len);
	} else {
		percpu_stats->tx_packets += total_enqueued;
		percpu_stats->tx_bytes += fd_len;
	}

	return NETDEV_TX_OK;

err_build_fd:
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}

/* Workqueue handler: drain the queue of pending one-step timestamping PTP
 * Sync packets, transmitting each one while holding onestep_tstamp_lock
 * (released on Tx confirmation in dpaa2_eth_free_tx_fd, so only one such
 * packet is in flight at a time).
 */
static void dpaa2_eth_tx_onestep_tstamp(struct work_struct *work)
{
	struct dpaa2_eth_priv *priv = container_of(work, struct dpaa2_eth_priv,
						   tx_onestep_tstamp);
	struct sk_buff *skb;

	while (true) {
		skb = skb_dequeue(&priv->tx_skbs);
		if (!skb)
			return;

		/* Lock just before TX one-step timestamping packet,
		 * and release the lock in dpaa2_eth_free_tx_fd when
		 * confirm the packet has been sent on hardware, or
		 * when clean up during transmit failure.
		 */
		mutex_lock(&priv->onestep_tstamp_lock);
		__dpaa2_eth_tx(skb, priv->net_dev);
	}
}

/* ndo_start_xmit callback: record the per-skb timestamping request in
 * skb->cb[0], divert one-step PTP Sync packets to the dedicated workqueue,
 * and send everything else inline via __dpaa2_eth_tx().
 */
static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	u8 msgtype, twostep, udp;
	u16 offset1, offset2;

	/* Utilize skb->cb[0] for timestamping request per skb */
	skb->cb[0] = 0;

	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && dpaa2_ptp) {
		if (priv->tx_tstamp_type == HWTSTAMP_TX_ON)
			skb->cb[0] = TX_TSTAMP;
		else if (priv->tx_tstamp_type == HWTSTAMP_TX_ONESTEP_SYNC)
			skb->cb[0] = TX_TSTAMP_ONESTEP_SYNC;
	}

	/* TX for one-step timestamping PTP Sync packet */
	if (skb->cb[0] == TX_TSTAMP_ONESTEP_SYNC) {
		if (!dpaa2_eth_ptp_parse(skb, &msgtype, &twostep, &udp,
					 &offset1, &offset2))
			if (msgtype == PTP_MSGTYPE_SYNC && twostep == 0) {
				skb_queue_tail(&priv->tx_skbs, skb);
				queue_work(priv->dpaa2_ptp_wq,
					   &priv->tx_onestep_tstamp);
				return NETDEV_TX_OK;
			}
		/* Use two-step timestamping if not one-step timestamping
		 * PTP Sync packet
		 */
		skb->cb[0] = TX_TSTAMP;
	}

	/* TX for other packets */
	return __dpaa2_eth_tx(skb, net_dev);
}

/* Tx confirmation frame processing routine */
static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv,
			      struct dpaa2_eth_channel *ch,
			      const struct dpaa2_fd *fd,
			      struct dpaa2_eth_fq *fq)
{
	struct rtnl_link_stats64 *percpu_stats;
	struct dpaa2_eth_drv_stats *percpu_extras;
	u32 fd_len = dpaa2_fd_get_len(fd);
	u32 fd_errors;

	/* Tracing point */
	trace_dpaa2_tx_conf_fd(priv->net_dev, fd);

	percpu_extras = this_cpu_ptr(priv->percpu_extras);
	percpu_extras->tx_conf_frames++;
	percpu_extras->tx_conf_bytes += fd_len;
	ch->stats.bytes_per_cdan += fd_len;

	/* Check frame errors in the FD field */
	fd_errors = dpaa2_fd_get_ctrl(fd) & DPAA2_FD_TX_ERR_MASK;
	dpaa2_eth_free_tx_fd(priv, ch, fq, fd, true);

	if (likely(!fd_errors))
		return;

	if (net_ratelimit())
		netdev_dbg(priv->net_dev, "TX frame FD error: 0x%08x\n",
			   fd_errors);

	percpu_stats = this_cpu_ptr(priv->percpu_stats);
	/* Tx-conf logically pertains to the egress path. */
	percpu_stats->tx_errors++;
}

/* Enable/disable Rx VLAN filtering on the DPNI object.
 * Returns 0 on success or the dpni_enable_vlan_filter() error code.
 */
static int dpaa2_eth_set_rx_vlan_filtering(struct dpaa2_eth_priv *priv,
					   bool enable)
{
	int err;

	err = dpni_enable_vlan_filter(priv->mc_io, 0, priv->mc_token, enable);

	if (err) {
		netdev_err(priv->net_dev,
			   "dpni_enable_vlan_filter failed\n");
		return err;
	}

	return 0;
}

/* Enable/disable Rx L3 and L4 checksum validation offload on the DPNI */
static int dpaa2_eth_set_rx_csum(struct dpaa2_eth_priv *priv, bool enable)
{
	int err;

	err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
			       DPNI_OFF_RX_L3_CSUM, enable);
	if (err) {
		netdev_err(priv->net_dev,
			   "dpni_set_offload(RX_L3_CSUM) failed\n");
		return err;
	}

	err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
			       DPNI_OFF_RX_L4_CSUM, enable);
	if (err) {
		netdev_err(priv->net_dev,
			   "dpni_set_offload(RX_L4_CSUM) failed\n");
		return err;
	}

	return 0;
}

/* Enable/disable Tx L3 and L4 checksum generation offload on the DPNI */
static int dpaa2_eth_set_tx_csum(struct dpaa2_eth_priv *priv, bool enable)
{
	int err;

	err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
			       DPNI_OFF_TX_L3_CSUM, enable);
	if (err) {
		netdev_err(priv->net_dev, "dpni_set_offload(TX_L3_CSUM) failed\n");
		return err;
	}

	err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
			       DPNI_OFF_TX_L4_CSUM, enable);
	if (err) {
		netdev_err(priv->net_dev, "dpni_set_offload(TX_L4_CSUM) failed\n");
		return err;
	}

	return 0;
}

/* Perform a single release command to add buffers
 * to the specified buffer pool
 */
static int dpaa2_eth_add_bufs(struct dpaa2_eth_priv *priv,
			      struct dpaa2_eth_channel *ch)
{
	struct xdp_buff *xdp_buffs[DPAA2_ETH_BUFS_PER_CMD];
	struct device *dev = priv->net_dev->dev.parent;
	u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
	struct dpaa2_eth_swa *swa;
	struct page *page;
	dma_addr_t addr;
	int retries = 0;
	int i = 0, err;
	u32 batch;

	/* Allocate buffers visible to WRIOP */
	if (!ch->xsk_zc) {
		for (i = 0; i < DPAA2_ETH_BUFS_PER_CMD; i++) {
			/* Also allocate skb shared info and alignment padding.
			 * There is one page for each Rx buffer. WRIOP sees
			 * the entire page except for a tailroom reserved for
			 * skb shared info
			 */
			page = dev_alloc_pages(0);
			if (!page)
				goto err_alloc;

			addr = dma_map_page(dev, page, 0, priv->rx_buf_size,
					    DMA_BIDIRECTIONAL);
			if (unlikely(dma_mapping_error(dev, addr)))
				goto err_map;

			buf_array[i] = addr;

			/* tracing point */
			trace_dpaa2_eth_buf_seed(priv->net_dev,
						 page_address(page),
						 DPAA2_ETH_RX_BUF_RAW_SIZE,
						 addr, priv->rx_buf_size,
						 ch->bp->bpid);
		}
	} else if (xsk_buff_can_alloc(ch->xsk_pool, DPAA2_ETH_BUFS_PER_CMD)) {
		/* Allocate XSK buffers for AF_XDP fast path in batches
		 * of DPAA2_ETH_BUFS_PER_CMD. Bail out if the UMEM cannot
		 * provide enough buffers at the moment
		 */
		batch = xsk_buff_alloc_batch(ch->xsk_pool, xdp_buffs,
					     DPAA2_ETH_BUFS_PER_CMD);
		if (!batch)
			goto err_alloc;

		for (i = 0; i < batch; i++) {
			/* Stash the xdp_buff pointer in the software
			 * annotation area, for use on the Rx path
			 */
			swa = (struct dpaa2_eth_swa *)(xdp_buffs[i]->data_hard_start +
						       DPAA2_ETH_RX_HWA_SIZE);
			swa->xsk.xdp_buff = xdp_buffs[i];

			addr = xsk_buff_xdp_get_frame_dma(xdp_buffs[i]);
			if (unlikely(dma_mapping_error(dev, addr)))
				goto err_map;

			buf_array[i] = addr;

			trace_dpaa2_xsk_buf_seed(priv->net_dev,
						 xdp_buffs[i]->data_hard_start,
						 DPAA2_ETH_RX_BUF_RAW_SIZE,
						 addr, priv->rx_buf_size,
						 ch->bp->bpid);
		}
	}

release_bufs:
	/* In case the portal is busy, retry until successful */
	while ((err = dpaa2_io_service_release(ch->dpio, ch->bp->bpid,
					       buf_array, i)) == -EBUSY) {
		if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES)
			break;
		cpu_relax();
	}

	/* If release command failed, clean up and bail out;
	 * not much else we can do about it
	 */
	if (err) {
		dpaa2_eth_free_bufs(priv, buf_array, i, ch->xsk_zc);
		return 0;
	}

	return i;

err_map:
	if (!ch->xsk_zc) {
		__free_pages(page, 0);
	} else {
		/* Free the remaining XSK buffers of the batch; the first i
		 * were successfully mapped and are released below
		 */
		for (; i < batch; i++)
			xsk_buff_free(xdp_buffs[i]);
	}
err_alloc:
	/* If we managed to allocate at least some buffers,
	 * release them to hardware
	 */
	if (i)
		goto release_bufs;

	return 0;
}

/* Seed one channel's buffer pool up to DPAA2_ETH_NUM_BUFS buffers.
 * Returns -ENOMEM as soon as a release command adds fewer buffers than
 * requested (i.e. allocation or release failed part-way).
 */
static int dpaa2_eth_seed_pool(struct dpaa2_eth_priv *priv,
			       struct dpaa2_eth_channel *ch)
{
	int i;
	int new_count;

	for (i = 0; i < DPAA2_ETH_NUM_BUFS; i += DPAA2_ETH_BUFS_PER_CMD) {
		new_count = dpaa2_eth_add_bufs(priv, ch);
		ch->buf_count += new_count;

		if (new_count < DPAA2_ETH_BUFS_PER_CMD)
			return -ENOMEM;
	}

	return 0;
}

/* Seed the buffer pools of all channels; failures are logged but not fatal */
static void dpaa2_eth_seed_pools(struct dpaa2_eth_priv *priv)
{
	struct net_device *net_dev = priv->net_dev;
	struct dpaa2_eth_channel *channel;
	int i, err = 0;

	for (i = 0; i < priv->num_channels; i++) {
		channel = priv->channel[i];

		err = dpaa2_eth_seed_pool(priv, channel);

		/* Not much to do; the buffer pool, though not filled up,
		 * may still contain some buffers which would enable us
		 * to limp on.
		 */
		if (err)
			netdev_err(net_dev, "Buffer seeding failed for DPBP %d (bpid=%d)\n",
				   channel->bp->dev->obj_desc.id,
				   channel->bp->bpid);
	}
}

/*
 * Drain the specified number of buffers from one of the DPNI's private buffer
 * pools.
 * @count must not exceed DPAA2_ETH_BUFS_PER_CMD
 */
static void dpaa2_eth_drain_bufs(struct dpaa2_eth_priv *priv, int bpid,
				 int count)
{
	u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
	bool xsk_zc = false;
	int retries = 0;
	int i, ret;

	/* Check whether the pool being drained backs an AF_XDP zero-copy
	 * channel, so the buffers are freed back to the right owner
	 */
	for (i = 0; i < priv->num_channels; i++)
		if (priv->channel[i]->bp->bpid == bpid)
			xsk_zc = priv->channel[i]->xsk_zc;

	do {
		ret = dpaa2_io_service_acquire(NULL, bpid, buf_array, count);
		if (ret < 0) {
			if (ret == -EBUSY &&
			    retries++ < DPAA2_ETH_SWP_BUSY_RETRIES)
				continue;
			netdev_err(priv->net_dev, "dpaa2_io_service_acquire() failed\n");
			return;
		}
		dpaa2_eth_free_bufs(priv, buf_array, ret, xsk_zc);
		retries = 0;
	} while (ret);
}

static void dpaa2_eth_drain_pool(struct dpaa2_eth_priv *priv, int bpid)
{
	int i;

	/* Drain the buffer pool */
	dpaa2_eth_drain_bufs(priv, bpid, DPAA2_ETH_BUFS_PER_CMD);
	dpaa2_eth_drain_bufs(priv, bpid, 1);

	/* Setup to zero the buffer
count of all channels which were 1857095174daSRobert-Ionut Alexa * using this buffer pool. 1858095174daSRobert-Ionut Alexa */ 185934ff6846SIoana Radulescu for (i = 0; i < priv->num_channels; i++) 1860095174daSRobert-Ionut Alexa if (priv->channel[i]->bp->bpid == bpid) 186134ff6846SIoana Radulescu priv->channel[i]->buf_count = 0; 186234ff6846SIoana Radulescu } 186334ff6846SIoana Radulescu 1864095174daSRobert-Ionut Alexa static void dpaa2_eth_drain_pools(struct dpaa2_eth_priv *priv) 1865095174daSRobert-Ionut Alexa { 1866095174daSRobert-Ionut Alexa int i; 1867095174daSRobert-Ionut Alexa 1868095174daSRobert-Ionut Alexa for (i = 0; i < priv->num_bps; i++) 1869095174daSRobert-Ionut Alexa dpaa2_eth_drain_pool(priv, priv->bp[i]->bpid); 1870095174daSRobert-Ionut Alexa } 1871095174daSRobert-Ionut Alexa 187234ff6846SIoana Radulescu /* Function is called from softirq context only, so we don't need to guard 187334ff6846SIoana Radulescu * the access to percpu count 187434ff6846SIoana Radulescu */ 18755d8dccf8SIoana Ciornei static int dpaa2_eth_refill_pool(struct dpaa2_eth_priv *priv, 1876095174daSRobert-Ionut Alexa struct dpaa2_eth_channel *ch) 187734ff6846SIoana Radulescu { 187834ff6846SIoana Radulescu int new_count; 187934ff6846SIoana Radulescu 188034ff6846SIoana Radulescu if (likely(ch->buf_count >= DPAA2_ETH_REFILL_THRESH)) 188134ff6846SIoana Radulescu return 0; 188234ff6846SIoana Radulescu 188334ff6846SIoana Radulescu do { 1884095174daSRobert-Ionut Alexa new_count = dpaa2_eth_add_bufs(priv, ch); 188534ff6846SIoana Radulescu if (unlikely(!new_count)) { 188634ff6846SIoana Radulescu /* Out of memory; abort for now, we'll try later on */ 188734ff6846SIoana Radulescu break; 188834ff6846SIoana Radulescu } 188934ff6846SIoana Radulescu ch->buf_count += new_count; 189034ff6846SIoana Radulescu } while (ch->buf_count < DPAA2_ETH_NUM_BUFS); 189134ff6846SIoana Radulescu 189234ff6846SIoana Radulescu if (unlikely(ch->buf_count < DPAA2_ETH_NUM_BUFS)) 189334ff6846SIoana Radulescu return 
-ENOMEM; 189434ff6846SIoana Radulescu 189534ff6846SIoana Radulescu return 0; 189634ff6846SIoana Radulescu } 189734ff6846SIoana Radulescu 1898d70446eeSIoana Ciornei static void dpaa2_eth_sgt_cache_drain(struct dpaa2_eth_priv *priv) 1899d70446eeSIoana Ciornei { 1900d70446eeSIoana Ciornei struct dpaa2_eth_sgt_cache *sgt_cache; 1901d70446eeSIoana Ciornei u16 count; 1902d70446eeSIoana Ciornei int k, i; 1903d70446eeSIoana Ciornei 19040fe665d4SIoana Ciornei for_each_possible_cpu(k) { 1905d70446eeSIoana Ciornei sgt_cache = per_cpu_ptr(priv->sgt_cache, k); 1906d70446eeSIoana Ciornei count = sgt_cache->count; 1907d70446eeSIoana Ciornei 1908d70446eeSIoana Ciornei for (i = 0; i < count; i++) 19098378a791SIoana Ciornei skb_free_frag(sgt_cache->buf[i]); 1910d70446eeSIoana Ciornei sgt_cache->count = 0; 1911d70446eeSIoana Ciornei } 1912d70446eeSIoana Ciornei } 1913d70446eeSIoana Ciornei 19145d8dccf8SIoana Ciornei static int dpaa2_eth_pull_channel(struct dpaa2_eth_channel *ch) 191534ff6846SIoana Radulescu { 191634ff6846SIoana Radulescu int err; 191734ff6846SIoana Radulescu int dequeues = -1; 191834ff6846SIoana Radulescu 191934ff6846SIoana Radulescu /* Retry while portal is busy */ 192034ff6846SIoana Radulescu do { 192134ff6846SIoana Radulescu err = dpaa2_io_service_pull_channel(ch->dpio, ch->ch_id, 192234ff6846SIoana Radulescu ch->store); 192334ff6846SIoana Radulescu dequeues++; 192434ff6846SIoana Radulescu cpu_relax(); 1925ef17bd7cSIoana Radulescu } while (err == -EBUSY && dequeues < DPAA2_ETH_SWP_BUSY_RETRIES); 192634ff6846SIoana Radulescu 192734ff6846SIoana Radulescu ch->stats.dequeue_portal_busy += dequeues; 192834ff6846SIoana Radulescu if (unlikely(err)) 192934ff6846SIoana Radulescu ch->stats.pull_err++; 193034ff6846SIoana Radulescu 193134ff6846SIoana Radulescu return err; 193234ff6846SIoana Radulescu } 193334ff6846SIoana Radulescu 193434ff6846SIoana Radulescu /* NAPI poll routine 193534ff6846SIoana Radulescu * 193634ff6846SIoana Radulescu * Frames are dequeued from the QMan 
channel associated with this NAPI context. 193734ff6846SIoana Radulescu * Rx, Tx confirmation and (if configured) Rx error frames all count 193834ff6846SIoana Radulescu * towards the NAPI budget. 193934ff6846SIoana Radulescu */ 194034ff6846SIoana Radulescu static int dpaa2_eth_poll(struct napi_struct *napi, int budget) 194134ff6846SIoana Radulescu { 194234ff6846SIoana Radulescu struct dpaa2_eth_channel *ch; 194334ff6846SIoana Radulescu struct dpaa2_eth_priv *priv; 194468049a5fSIoana Ciocoi Radulescu int rx_cleaned = 0, txconf_cleaned = 0; 1945569dac6aSIoana Ciocoi Radulescu struct dpaa2_eth_fq *fq, *txc_fq = NULL; 1946569dac6aSIoana Ciocoi Radulescu struct netdev_queue *nq; 1947569dac6aSIoana Ciocoi Radulescu int store_cleaned, work_done; 19484a7f6c5aSRobert-Ionut Alexa bool work_done_zc = false; 19490a25d92cSIoana Ciornei struct list_head rx_list; 1950ef17bd7cSIoana Radulescu int retries = 0; 195174a1c059SIoana Ciornei u16 flowid; 195234ff6846SIoana Radulescu int err; 195334ff6846SIoana Radulescu 195434ff6846SIoana Radulescu ch = container_of(napi, struct dpaa2_eth_channel, napi); 1955d678be1dSIoana Radulescu ch->xdp.res = 0; 195634ff6846SIoana Radulescu priv = ch->priv; 195734ff6846SIoana Radulescu 19580a25d92cSIoana Ciornei INIT_LIST_HEAD(&rx_list); 19590a25d92cSIoana Ciornei ch->rx_list = &rx_list; 19600a25d92cSIoana Ciornei 19614a7f6c5aSRobert-Ionut Alexa if (ch->xsk_zc) { 19624a7f6c5aSRobert-Ionut Alexa work_done_zc = dpaa2_xsk_tx(priv, ch); 19634a7f6c5aSRobert-Ionut Alexa /* If we reached the XSK Tx per NAPI threshold, we're done */ 19644a7f6c5aSRobert-Ionut Alexa if (work_done_zc) { 19654a7f6c5aSRobert-Ionut Alexa work_done = budget; 19664a7f6c5aSRobert-Ionut Alexa goto out; 19674a7f6c5aSRobert-Ionut Alexa } 19684a7f6c5aSRobert-Ionut Alexa } 19694a7f6c5aSRobert-Ionut Alexa 197068049a5fSIoana Ciocoi Radulescu do { 19715d8dccf8SIoana Ciornei err = dpaa2_eth_pull_channel(ch); 197234ff6846SIoana Radulescu if (unlikely(err)) 197334ff6846SIoana Radulescu break; 
197434ff6846SIoana Radulescu 197534ff6846SIoana Radulescu /* Refill pool if appropriate */ 1976095174daSRobert-Ionut Alexa dpaa2_eth_refill_pool(priv, ch); 197734ff6846SIoana Radulescu 19785d8dccf8SIoana Ciornei store_cleaned = dpaa2_eth_consume_frames(ch, &fq); 1979ef17bd7cSIoana Radulescu if (store_cleaned <= 0) 1980569dac6aSIoana Ciocoi Radulescu break; 1981569dac6aSIoana Ciocoi Radulescu if (fq->type == DPAA2_RX_FQ) { 198268049a5fSIoana Ciocoi Radulescu rx_cleaned += store_cleaned; 198374a1c059SIoana Ciornei flowid = fq->flowid; 1984569dac6aSIoana Ciocoi Radulescu } else { 198568049a5fSIoana Ciocoi Radulescu txconf_cleaned += store_cleaned; 1986569dac6aSIoana Ciocoi Radulescu /* We have a single Tx conf FQ on this channel */ 1987569dac6aSIoana Ciocoi Radulescu txc_fq = fq; 1988569dac6aSIoana Ciocoi Radulescu } 198934ff6846SIoana Radulescu 199068049a5fSIoana Ciocoi Radulescu /* If we either consumed the whole NAPI budget with Rx frames 199168049a5fSIoana Ciocoi Radulescu * or we reached the Tx confirmations threshold, we're done. 
199234ff6846SIoana Radulescu */ 199368049a5fSIoana Ciocoi Radulescu if (rx_cleaned >= budget || 1994569dac6aSIoana Ciocoi Radulescu txconf_cleaned >= DPAA2_ETH_TXCONF_PER_NAPI) { 1995569dac6aSIoana Ciocoi Radulescu work_done = budget; 1996569dac6aSIoana Ciocoi Radulescu goto out; 1997569dac6aSIoana Ciocoi Radulescu } 199868049a5fSIoana Ciocoi Radulescu } while (store_cleaned); 199934ff6846SIoana Radulescu 2000fc398becSIoana Ciornei /* Update NET DIM with the values for this CDAN */ 2001fc398becSIoana Ciornei dpaa2_io_update_net_dim(ch->dpio, ch->stats.frames_per_cdan, 2002fc398becSIoana Ciornei ch->stats.bytes_per_cdan); 2003fc398becSIoana Ciornei ch->stats.frames_per_cdan = 0; 2004fc398becSIoana Ciornei ch->stats.bytes_per_cdan = 0; 2005fc398becSIoana Ciornei 200668049a5fSIoana Ciocoi Radulescu /* We didn't consume the entire budget, so finish napi and 200768049a5fSIoana Ciocoi Radulescu * re-enable data availability notifications 200868049a5fSIoana Ciocoi Radulescu */ 200968049a5fSIoana Ciocoi Radulescu napi_complete_done(napi, rx_cleaned); 201034ff6846SIoana Radulescu do { 201134ff6846SIoana Radulescu err = dpaa2_io_service_rearm(ch->dpio, &ch->nctx); 201234ff6846SIoana Radulescu cpu_relax(); 2013ef17bd7cSIoana Radulescu } while (err == -EBUSY && retries++ < DPAA2_ETH_SWP_BUSY_RETRIES); 201434ff6846SIoana Radulescu WARN_ONCE(err, "CDAN notifications rearm failed on core %d", 201534ff6846SIoana Radulescu ch->nctx.desired_cpu); 201634ff6846SIoana Radulescu 2017569dac6aSIoana Ciocoi Radulescu work_done = max(rx_cleaned, 1); 2018569dac6aSIoana Ciocoi Radulescu 2019569dac6aSIoana Ciocoi Radulescu out: 20200a25d92cSIoana Ciornei netif_receive_skb_list(ch->rx_list); 20210a25d92cSIoana Ciornei 20224a7f6c5aSRobert-Ionut Alexa if (ch->xsk_tx_pkts_sent) { 20234a7f6c5aSRobert-Ionut Alexa xsk_tx_completed(ch->xsk_pool, ch->xsk_tx_pkts_sent); 20244a7f6c5aSRobert-Ionut Alexa ch->xsk_tx_pkts_sent = 0; 20254a7f6c5aSRobert-Ionut Alexa } 20264a7f6c5aSRobert-Ionut Alexa 
2027d678be1dSIoana Radulescu if (txc_fq && txc_fq->dq_frames) { 2028569dac6aSIoana Ciocoi Radulescu nq = netdev_get_tx_queue(priv->net_dev, txc_fq->flowid); 2029569dac6aSIoana Ciocoi Radulescu netdev_tx_completed_queue(nq, txc_fq->dq_frames, 2030569dac6aSIoana Ciocoi Radulescu txc_fq->dq_bytes); 2031569dac6aSIoana Ciocoi Radulescu txc_fq->dq_frames = 0; 2032569dac6aSIoana Ciocoi Radulescu txc_fq->dq_bytes = 0; 2033569dac6aSIoana Ciocoi Radulescu } 2034569dac6aSIoana Ciocoi Radulescu 2035d678be1dSIoana Radulescu if (ch->xdp.res & XDP_REDIRECT) 2036d678be1dSIoana Radulescu xdp_do_flush_map(); 203774a1c059SIoana Ciornei else if (rx_cleaned && ch->xdp.res & XDP_TX) 20385d8dccf8SIoana Ciornei dpaa2_eth_xdp_tx_flush(priv, ch, &priv->fq[flowid]); 2039d678be1dSIoana Radulescu 2040569dac6aSIoana Ciocoi Radulescu return work_done; 204134ff6846SIoana Radulescu } 204234ff6846SIoana Radulescu 20435d8dccf8SIoana Ciornei static void dpaa2_eth_enable_ch_napi(struct dpaa2_eth_priv *priv) 204434ff6846SIoana Radulescu { 204534ff6846SIoana Radulescu struct dpaa2_eth_channel *ch; 204634ff6846SIoana Radulescu int i; 204734ff6846SIoana Radulescu 204834ff6846SIoana Radulescu for (i = 0; i < priv->num_channels; i++) { 204934ff6846SIoana Radulescu ch = priv->channel[i]; 205034ff6846SIoana Radulescu napi_enable(&ch->napi); 205134ff6846SIoana Radulescu } 205234ff6846SIoana Radulescu } 205334ff6846SIoana Radulescu 20545d8dccf8SIoana Ciornei static void dpaa2_eth_disable_ch_napi(struct dpaa2_eth_priv *priv) 205534ff6846SIoana Radulescu { 205634ff6846SIoana Radulescu struct dpaa2_eth_channel *ch; 205734ff6846SIoana Radulescu int i; 205834ff6846SIoana Radulescu 205934ff6846SIoana Radulescu for (i = 0; i < priv->num_channels; i++) { 206034ff6846SIoana Radulescu ch = priv->channel[i]; 206134ff6846SIoana Radulescu napi_disable(&ch->napi); 206234ff6846SIoana Radulescu } 206334ff6846SIoana Radulescu } 206434ff6846SIoana Radulescu 206507beb165SIoana Ciornei void dpaa2_eth_set_rx_taildrop(struct 
dpaa2_eth_priv *priv, 206607beb165SIoana Ciornei bool tx_pause, bool pfc) 20678eb3cef8SIoana Radulescu { 20688eb3cef8SIoana Radulescu struct dpni_taildrop td = {0}; 2069685e39eaSIoana Radulescu struct dpaa2_eth_fq *fq; 20708eb3cef8SIoana Radulescu int i, err; 20718eb3cef8SIoana Radulescu 207207beb165SIoana Ciornei /* FQ taildrop: threshold is in bytes, per frame queue. Enabled if 207307beb165SIoana Ciornei * flow control is disabled (as it might interfere with either the 207407beb165SIoana Ciornei * buffer pool depletion trigger for pause frames or with the group 207507beb165SIoana Ciornei * congestion trigger for PFC frames) 207607beb165SIoana Ciornei */ 20772c8d1c8dSIoana Radulescu td.enable = !tx_pause; 207807beb165SIoana Ciornei if (priv->rx_fqtd_enabled == td.enable) 207907beb165SIoana Ciornei goto set_cgtd; 20808eb3cef8SIoana Radulescu 20812c8d1c8dSIoana Radulescu td.threshold = DPAA2_ETH_FQ_TAILDROP_THRESH; 20822c8d1c8dSIoana Radulescu td.units = DPNI_CONGESTION_UNIT_BYTES; 20838eb3cef8SIoana Radulescu 20848eb3cef8SIoana Radulescu for (i = 0; i < priv->num_fqs; i++) { 2085685e39eaSIoana Radulescu fq = &priv->fq[i]; 2086685e39eaSIoana Radulescu if (fq->type != DPAA2_RX_FQ) 20878eb3cef8SIoana Radulescu continue; 20888eb3cef8SIoana Radulescu err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token, 2089685e39eaSIoana Radulescu DPNI_CP_QUEUE, DPNI_QUEUE_RX, 2090685e39eaSIoana Radulescu fq->tc, fq->flowid, &td); 20918eb3cef8SIoana Radulescu if (err) { 20928eb3cef8SIoana Radulescu netdev_err(priv->net_dev, 20932c8d1c8dSIoana Radulescu "dpni_set_taildrop(FQ) failed\n"); 20942c8d1c8dSIoana Radulescu return; 20958eb3cef8SIoana Radulescu } 20968eb3cef8SIoana Radulescu } 20978eb3cef8SIoana Radulescu 209807beb165SIoana Ciornei priv->rx_fqtd_enabled = td.enable; 209907beb165SIoana Ciornei 210007beb165SIoana Ciornei set_cgtd: 21012c8d1c8dSIoana Radulescu /* Congestion group taildrop: threshold is in frames, per group 21022c8d1c8dSIoana Radulescu * of FQs belonging to the 
same traffic class 210307beb165SIoana Ciornei * Enabled if general Tx pause disabled or if PFCs are enabled 210407beb165SIoana Ciornei * (congestion group threhsold for PFC generation is lower than the 210507beb165SIoana Ciornei * CG taildrop threshold, so it won't interfere with it; we also 210607beb165SIoana Ciornei * want frames in non-PFC enabled traffic classes to be kept in check) 21072c8d1c8dSIoana Radulescu */ 2108b91b3a21SJiapeng Chong td.enable = !tx_pause || pfc; 210907beb165SIoana Ciornei if (priv->rx_cgtd_enabled == td.enable) 211007beb165SIoana Ciornei return; 211107beb165SIoana Ciornei 21122c8d1c8dSIoana Radulescu td.threshold = DPAA2_ETH_CG_TAILDROP_THRESH(priv); 21132c8d1c8dSIoana Radulescu td.units = DPNI_CONGESTION_UNIT_FRAMES; 21142c8d1c8dSIoana Radulescu for (i = 0; i < dpaa2_eth_tc_count(priv); i++) { 21152c8d1c8dSIoana Radulescu err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token, 21162c8d1c8dSIoana Radulescu DPNI_CP_GROUP, DPNI_QUEUE_RX, 21172c8d1c8dSIoana Radulescu i, 0, &td); 21182c8d1c8dSIoana Radulescu if (err) { 21192c8d1c8dSIoana Radulescu netdev_err(priv->net_dev, 21202c8d1c8dSIoana Radulescu "dpni_set_taildrop(CG) failed\n"); 21212c8d1c8dSIoana Radulescu return; 21222c8d1c8dSIoana Radulescu } 21232c8d1c8dSIoana Radulescu } 21242c8d1c8dSIoana Radulescu 212507beb165SIoana Ciornei priv->rx_cgtd_enabled = td.enable; 21268eb3cef8SIoana Radulescu } 21278eb3cef8SIoana Radulescu 21285d8dccf8SIoana Ciornei static int dpaa2_eth_link_state_update(struct dpaa2_eth_priv *priv) 212934ff6846SIoana Radulescu { 213085b7a342SIoana Ciornei struct dpni_link_state state = {0}; 21318eb3cef8SIoana Radulescu bool tx_pause; 213234ff6846SIoana Radulescu int err; 213334ff6846SIoana Radulescu 213434ff6846SIoana Radulescu err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state); 213534ff6846SIoana Radulescu if (unlikely(err)) { 213634ff6846SIoana Radulescu netdev_err(priv->net_dev, 213734ff6846SIoana Radulescu "dpni_get_link_state() failed\n"); 
213834ff6846SIoana Radulescu return err; 213934ff6846SIoana Radulescu } 214034ff6846SIoana Radulescu 21418eb3cef8SIoana Radulescu /* If Tx pause frame settings have changed, we need to update 21428eb3cef8SIoana Radulescu * Rx FQ taildrop configuration as well. We configure taildrop 21438eb3cef8SIoana Radulescu * only when pause frame generation is disabled. 21448eb3cef8SIoana Radulescu */ 2145ad054f26SIoana Radulescu tx_pause = dpaa2_eth_tx_pause_enabled(state.options); 214607beb165SIoana Ciornei dpaa2_eth_set_rx_taildrop(priv, tx_pause, priv->pfc_enabled); 21478eb3cef8SIoana Radulescu 214871947923SIoana Ciornei /* When we manage the MAC/PHY using phylink there is no need 214971947923SIoana Ciornei * to manually update the netif_carrier. 2150*2291982eSVladimir Oltean * We can avoid locking because we are called from the "link changed" 2151*2291982eSVladimir Oltean * IRQ handler, which is the same as the "endpoint changed" IRQ handler 2152*2291982eSVladimir Oltean * (the writer to priv->mac), so we cannot race with it. 215371947923SIoana Ciornei */ 2154*2291982eSVladimir Oltean if (dpaa2_mac_is_type_phy(priv->mac)) 215571947923SIoana Ciornei goto out; 215671947923SIoana Ciornei 215734ff6846SIoana Radulescu /* Chech link state; speed / duplex changes are not treated yet */ 215834ff6846SIoana Radulescu if (priv->link_state.up == state.up) 2159cce62943SIoana Radulescu goto out; 216034ff6846SIoana Radulescu 216134ff6846SIoana Radulescu if (state.up) { 216234ff6846SIoana Radulescu netif_carrier_on(priv->net_dev); 216334ff6846SIoana Radulescu netif_tx_start_all_queues(priv->net_dev); 216434ff6846SIoana Radulescu } else { 216534ff6846SIoana Radulescu netif_tx_stop_all_queues(priv->net_dev); 216634ff6846SIoana Radulescu netif_carrier_off(priv->net_dev); 216734ff6846SIoana Radulescu } 216834ff6846SIoana Radulescu 216934ff6846SIoana Radulescu netdev_info(priv->net_dev, "Link Event: state %s\n", 217034ff6846SIoana Radulescu state.up ? 
"up" : "down"); 217134ff6846SIoana Radulescu 2172cce62943SIoana Radulescu out: 2173cce62943SIoana Radulescu priv->link_state = state; 2174cce62943SIoana Radulescu 217534ff6846SIoana Radulescu return 0; 217634ff6846SIoana Radulescu } 217734ff6846SIoana Radulescu 217834ff6846SIoana Radulescu static int dpaa2_eth_open(struct net_device *net_dev) 217934ff6846SIoana Radulescu { 218034ff6846SIoana Radulescu struct dpaa2_eth_priv *priv = netdev_priv(net_dev); 218134ff6846SIoana Radulescu int err; 218234ff6846SIoana Radulescu 2183095174daSRobert-Ionut Alexa dpaa2_eth_seed_pools(priv); 218434ff6846SIoana Radulescu 2185*2291982eSVladimir Oltean mutex_lock(&priv->mac_lock); 2186*2291982eSVladimir Oltean 2187d87e6063SIoana Ciornei if (!dpaa2_eth_is_type_phy(priv)) { 218871947923SIoana Ciornei /* We'll only start the txqs when the link is actually ready; 218971947923SIoana Ciornei * make sure we don't race against the link up notification, 219071947923SIoana Ciornei * which may come immediately after dpni_enable(); 219134ff6846SIoana Radulescu */ 219234ff6846SIoana Radulescu netif_tx_stop_all_queues(net_dev); 219371947923SIoana Ciornei 219471947923SIoana Ciornei /* Also, explicitly set carrier off, otherwise 219571947923SIoana Ciornei * netif_carrier_ok() will return true and cause 'ip link show' 219671947923SIoana Ciornei * to report the LOWER_UP flag, even though the link 219771947923SIoana Ciornei * notification wasn't even received. 
219834ff6846SIoana Radulescu */ 219934ff6846SIoana Radulescu netif_carrier_off(net_dev); 220071947923SIoana Ciornei } 22015d8dccf8SIoana Ciornei dpaa2_eth_enable_ch_napi(priv); 220234ff6846SIoana Radulescu 220334ff6846SIoana Radulescu err = dpni_enable(priv->mc_io, 0, priv->mc_token); 220434ff6846SIoana Radulescu if (err < 0) { 2205*2291982eSVladimir Oltean mutex_unlock(&priv->mac_lock); 220634ff6846SIoana Radulescu netdev_err(net_dev, "dpni_enable() failed\n"); 220734ff6846SIoana Radulescu goto enable_err; 220834ff6846SIoana Radulescu } 220934ff6846SIoana Radulescu 221038533388SVladimir Oltean if (dpaa2_eth_is_type_phy(priv)) 2211f978fe85SIoana Ciornei dpaa2_mac_start(priv->mac); 221234ff6846SIoana Radulescu 2213*2291982eSVladimir Oltean mutex_unlock(&priv->mac_lock); 2214*2291982eSVladimir Oltean 221534ff6846SIoana Radulescu return 0; 221634ff6846SIoana Radulescu 221734ff6846SIoana Radulescu enable_err: 22185d8dccf8SIoana Ciornei dpaa2_eth_disable_ch_napi(priv); 2219095174daSRobert-Ionut Alexa dpaa2_eth_drain_pools(priv); 222034ff6846SIoana Radulescu return err; 222134ff6846SIoana Radulescu } 222234ff6846SIoana Radulescu 222368d74315SIoana Ciocoi Radulescu /* Total number of in-flight frames on ingress queues */ 22245d8dccf8SIoana Ciornei static u32 dpaa2_eth_ingress_fq_count(struct dpaa2_eth_priv *priv) 222534ff6846SIoana Radulescu { 222668d74315SIoana Ciocoi Radulescu struct dpaa2_eth_fq *fq; 222768d74315SIoana Ciocoi Radulescu u32 fcnt = 0, bcnt = 0, total = 0; 222868d74315SIoana Ciocoi Radulescu int i, err; 222934ff6846SIoana Radulescu 223068d74315SIoana Ciocoi Radulescu for (i = 0; i < priv->num_fqs; i++) { 223168d74315SIoana Ciocoi Radulescu fq = &priv->fq[i]; 223268d74315SIoana Ciocoi Radulescu err = dpaa2_io_query_fq_count(NULL, fq->fqid, &fcnt, &bcnt); 223368d74315SIoana Ciocoi Radulescu if (err) { 223468d74315SIoana Ciocoi Radulescu netdev_warn(priv->net_dev, "query_fq_count failed"); 223568d74315SIoana Ciocoi Radulescu break; 223668d74315SIoana Ciocoi 
Radulescu } 223768d74315SIoana Ciocoi Radulescu total += fcnt; 223868d74315SIoana Ciocoi Radulescu } 223934ff6846SIoana Radulescu 224034ff6846SIoana Radulescu return total; 224134ff6846SIoana Radulescu } 224234ff6846SIoana Radulescu 22435d8dccf8SIoana Ciornei static void dpaa2_eth_wait_for_ingress_fq_empty(struct dpaa2_eth_priv *priv) 224434ff6846SIoana Radulescu { 224568d74315SIoana Ciocoi Radulescu int retries = 10; 224668d74315SIoana Ciocoi Radulescu u32 pending; 224734ff6846SIoana Radulescu 224868d74315SIoana Ciocoi Radulescu do { 22495d8dccf8SIoana Ciornei pending = dpaa2_eth_ingress_fq_count(priv); 225068d74315SIoana Ciocoi Radulescu if (pending) 225168d74315SIoana Ciocoi Radulescu msleep(100); 225268d74315SIoana Ciocoi Radulescu } while (pending && --retries); 225334ff6846SIoana Radulescu } 225434ff6846SIoana Radulescu 225552b6a4ffSIoana Radulescu #define DPNI_TX_PENDING_VER_MAJOR 7 225652b6a4ffSIoana Radulescu #define DPNI_TX_PENDING_VER_MINOR 13 22575d8dccf8SIoana Ciornei static void dpaa2_eth_wait_for_egress_fq_empty(struct dpaa2_eth_priv *priv) 225852b6a4ffSIoana Radulescu { 225952b6a4ffSIoana Radulescu union dpni_statistics stats; 226052b6a4ffSIoana Radulescu int retries = 10; 226152b6a4ffSIoana Radulescu int err; 226252b6a4ffSIoana Radulescu 226352b6a4ffSIoana Radulescu if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_TX_PENDING_VER_MAJOR, 226452b6a4ffSIoana Radulescu DPNI_TX_PENDING_VER_MINOR) < 0) 226552b6a4ffSIoana Radulescu goto out; 226652b6a4ffSIoana Radulescu 226752b6a4ffSIoana Radulescu do { 226852b6a4ffSIoana Radulescu err = dpni_get_statistics(priv->mc_io, 0, priv->mc_token, 6, 226952b6a4ffSIoana Radulescu &stats); 227052b6a4ffSIoana Radulescu if (err) 227152b6a4ffSIoana Radulescu goto out; 227252b6a4ffSIoana Radulescu if (stats.page_6.tx_pending_frames == 0) 227352b6a4ffSIoana Radulescu return; 227452b6a4ffSIoana Radulescu } while (--retries); 227552b6a4ffSIoana Radulescu 227652b6a4ffSIoana Radulescu out: 227752b6a4ffSIoana Radulescu msleep(500); 
227852b6a4ffSIoana Radulescu } 227952b6a4ffSIoana Radulescu 228034ff6846SIoana Radulescu static int dpaa2_eth_stop(struct net_device *net_dev) 228134ff6846SIoana Radulescu { 228234ff6846SIoana Radulescu struct dpaa2_eth_priv *priv = netdev_priv(net_dev); 228385b7a342SIoana Ciornei int dpni_enabled = 0; 228434ff6846SIoana Radulescu int retries = 10; 228534ff6846SIoana Radulescu 2286*2291982eSVladimir Oltean mutex_lock(&priv->mac_lock); 2287*2291982eSVladimir Oltean 2288d87e6063SIoana Ciornei if (dpaa2_eth_is_type_phy(priv)) { 2289f978fe85SIoana Ciornei dpaa2_mac_stop(priv->mac); 2290d87e6063SIoana Ciornei } else { 229134ff6846SIoana Radulescu netif_tx_stop_all_queues(net_dev); 229234ff6846SIoana Radulescu netif_carrier_off(net_dev); 229371947923SIoana Ciornei } 229434ff6846SIoana Radulescu 2295*2291982eSVladimir Oltean mutex_unlock(&priv->mac_lock); 2296*2291982eSVladimir Oltean 229768d74315SIoana Ciocoi Radulescu /* On dpni_disable(), the MC firmware will: 229868d74315SIoana Ciocoi Radulescu * - stop MAC Rx and wait for all Rx frames to be enqueued to software 229968d74315SIoana Ciocoi Radulescu * - cut off WRIOP dequeues from egress FQs and wait until transmission 230068d74315SIoana Ciocoi Radulescu * of all in flight Tx frames is finished (and corresponding Tx conf 230168d74315SIoana Ciocoi Radulescu * frames are enqueued back to software) 230268d74315SIoana Ciocoi Radulescu * 230368d74315SIoana Ciocoi Radulescu * Before calling dpni_disable(), we wait for all Tx frames to arrive 230468d74315SIoana Ciocoi Radulescu * on WRIOP. After it finishes, wait until all remaining frames on Rx 230568d74315SIoana Ciocoi Radulescu * and Tx conf queues are consumed on NAPI poll. 
230634ff6846SIoana Radulescu */ 23075d8dccf8SIoana Ciornei dpaa2_eth_wait_for_egress_fq_empty(priv); 230868d74315SIoana Ciocoi Radulescu 230934ff6846SIoana Radulescu do { 231034ff6846SIoana Radulescu dpni_disable(priv->mc_io, 0, priv->mc_token); 231134ff6846SIoana Radulescu dpni_is_enabled(priv->mc_io, 0, priv->mc_token, &dpni_enabled); 231234ff6846SIoana Radulescu if (dpni_enabled) 231334ff6846SIoana Radulescu /* Allow the hardware some slack */ 231434ff6846SIoana Radulescu msleep(100); 231534ff6846SIoana Radulescu } while (dpni_enabled && --retries); 231634ff6846SIoana Radulescu if (!retries) { 231734ff6846SIoana Radulescu netdev_warn(net_dev, "Retry count exceeded disabling DPNI\n"); 231834ff6846SIoana Radulescu /* Must go on and disable NAPI nonetheless, so we don't crash at 231934ff6846SIoana Radulescu * the next "ifconfig up" 232034ff6846SIoana Radulescu */ 232134ff6846SIoana Radulescu } 232234ff6846SIoana Radulescu 23235d8dccf8SIoana Ciornei dpaa2_eth_wait_for_ingress_fq_empty(priv); 23245d8dccf8SIoana Ciornei dpaa2_eth_disable_ch_napi(priv); 232534ff6846SIoana Radulescu 232634ff6846SIoana Radulescu /* Empty the buffer pool */ 2327095174daSRobert-Ionut Alexa dpaa2_eth_drain_pools(priv); 232834ff6846SIoana Radulescu 2329d70446eeSIoana Ciornei /* Empty the Scatter-Gather Buffer cache */ 2330d70446eeSIoana Ciornei dpaa2_eth_sgt_cache_drain(priv); 2331d70446eeSIoana Ciornei 233234ff6846SIoana Radulescu return 0; 233334ff6846SIoana Radulescu } 233434ff6846SIoana Radulescu 233534ff6846SIoana Radulescu static int dpaa2_eth_set_addr(struct net_device *net_dev, void *addr) 233634ff6846SIoana Radulescu { 233734ff6846SIoana Radulescu struct dpaa2_eth_priv *priv = netdev_priv(net_dev); 233834ff6846SIoana Radulescu struct device *dev = net_dev->dev.parent; 233934ff6846SIoana Radulescu int err; 234034ff6846SIoana Radulescu 234134ff6846SIoana Radulescu err = eth_mac_addr(net_dev, addr); 234234ff6846SIoana Radulescu if (err < 0) { 234334ff6846SIoana Radulescu dev_err(dev, 
"eth_mac_addr() failed (%d)\n", err); 234434ff6846SIoana Radulescu return err; 234534ff6846SIoana Radulescu } 234634ff6846SIoana Radulescu 234734ff6846SIoana Radulescu err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token, 234834ff6846SIoana Radulescu net_dev->dev_addr); 234934ff6846SIoana Radulescu if (err) { 235034ff6846SIoana Radulescu dev_err(dev, "dpni_set_primary_mac_addr() failed (%d)\n", err); 235134ff6846SIoana Radulescu return err; 235234ff6846SIoana Radulescu } 235334ff6846SIoana Radulescu 235434ff6846SIoana Radulescu return 0; 235534ff6846SIoana Radulescu } 235634ff6846SIoana Radulescu 235734ff6846SIoana Radulescu /** Fill in counters maintained by the GPP driver. These may be different from 235834ff6846SIoana Radulescu * the hardware counters obtained by ethtool. 235934ff6846SIoana Radulescu */ 236034ff6846SIoana Radulescu static void dpaa2_eth_get_stats(struct net_device *net_dev, 236134ff6846SIoana Radulescu struct rtnl_link_stats64 *stats) 236234ff6846SIoana Radulescu { 236334ff6846SIoana Radulescu struct dpaa2_eth_priv *priv = netdev_priv(net_dev); 236434ff6846SIoana Radulescu struct rtnl_link_stats64 *percpu_stats; 236534ff6846SIoana Radulescu u64 *cpustats; 236634ff6846SIoana Radulescu u64 *netstats = (u64 *)stats; 236734ff6846SIoana Radulescu int i, j; 236834ff6846SIoana Radulescu int num = sizeof(struct rtnl_link_stats64) / sizeof(u64); 236934ff6846SIoana Radulescu 237034ff6846SIoana Radulescu for_each_possible_cpu(i) { 237134ff6846SIoana Radulescu percpu_stats = per_cpu_ptr(priv->percpu_stats, i); 237234ff6846SIoana Radulescu cpustats = (u64 *)percpu_stats; 237334ff6846SIoana Radulescu for (j = 0; j < num; j++) 237434ff6846SIoana Radulescu netstats[j] += cpustats[j]; 237534ff6846SIoana Radulescu } 237634ff6846SIoana Radulescu } 237734ff6846SIoana Radulescu 237834ff6846SIoana Radulescu /* Copy mac unicast addresses from @net_dev to @priv. 237934ff6846SIoana Radulescu * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable. 
238034ff6846SIoana Radulescu */ 23815d8dccf8SIoana Ciornei static void dpaa2_eth_add_uc_hw_addr(const struct net_device *net_dev, 238234ff6846SIoana Radulescu struct dpaa2_eth_priv *priv) 238334ff6846SIoana Radulescu { 238434ff6846SIoana Radulescu struct netdev_hw_addr *ha; 238534ff6846SIoana Radulescu int err; 238634ff6846SIoana Radulescu 238734ff6846SIoana Radulescu netdev_for_each_uc_addr(ha, net_dev) { 238834ff6846SIoana Radulescu err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token, 238934ff6846SIoana Radulescu ha->addr); 239034ff6846SIoana Radulescu if (err) 239134ff6846SIoana Radulescu netdev_warn(priv->net_dev, 239234ff6846SIoana Radulescu "Could not add ucast MAC %pM to the filtering table (err %d)\n", 239334ff6846SIoana Radulescu ha->addr, err); 239434ff6846SIoana Radulescu } 239534ff6846SIoana Radulescu } 239634ff6846SIoana Radulescu 239734ff6846SIoana Radulescu /* Copy mac multicast addresses from @net_dev to @priv 239834ff6846SIoana Radulescu * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable. 
239934ff6846SIoana Radulescu */ 24005d8dccf8SIoana Ciornei static void dpaa2_eth_add_mc_hw_addr(const struct net_device *net_dev, 240134ff6846SIoana Radulescu struct dpaa2_eth_priv *priv) 240234ff6846SIoana Radulescu { 240334ff6846SIoana Radulescu struct netdev_hw_addr *ha; 240434ff6846SIoana Radulescu int err; 240534ff6846SIoana Radulescu 240634ff6846SIoana Radulescu netdev_for_each_mc_addr(ha, net_dev) { 240734ff6846SIoana Radulescu err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token, 240834ff6846SIoana Radulescu ha->addr); 240934ff6846SIoana Radulescu if (err) 241034ff6846SIoana Radulescu netdev_warn(priv->net_dev, 241134ff6846SIoana Radulescu "Could not add mcast MAC %pM to the filtering table (err %d)\n", 241234ff6846SIoana Radulescu ha->addr, err); 241334ff6846SIoana Radulescu } 241434ff6846SIoana Radulescu } 241534ff6846SIoana Radulescu 241670b32d82SIonut-robert Aron static int dpaa2_eth_rx_add_vid(struct net_device *net_dev, 241770b32d82SIonut-robert Aron __be16 vlan_proto, u16 vid) 241870b32d82SIonut-robert Aron { 241970b32d82SIonut-robert Aron struct dpaa2_eth_priv *priv = netdev_priv(net_dev); 242070b32d82SIonut-robert Aron int err; 242170b32d82SIonut-robert Aron 242270b32d82SIonut-robert Aron err = dpni_add_vlan_id(priv->mc_io, 0, priv->mc_token, 242370b32d82SIonut-robert Aron vid, 0, 0, 0); 242470b32d82SIonut-robert Aron 242570b32d82SIonut-robert Aron if (err) { 242670b32d82SIonut-robert Aron netdev_warn(priv->net_dev, 242770b32d82SIonut-robert Aron "Could not add the vlan id %u\n", 242870b32d82SIonut-robert Aron vid); 242970b32d82SIonut-robert Aron return err; 243070b32d82SIonut-robert Aron } 243170b32d82SIonut-robert Aron 243270b32d82SIonut-robert Aron return 0; 243370b32d82SIonut-robert Aron } 243470b32d82SIonut-robert Aron 243570b32d82SIonut-robert Aron static int dpaa2_eth_rx_kill_vid(struct net_device *net_dev, 243670b32d82SIonut-robert Aron __be16 vlan_proto, u16 vid) 243770b32d82SIonut-robert Aron { 243870b32d82SIonut-robert Aron struct 
dpaa2_eth_priv *priv = netdev_priv(net_dev); 243970b32d82SIonut-robert Aron int err; 244070b32d82SIonut-robert Aron 244170b32d82SIonut-robert Aron err = dpni_remove_vlan_id(priv->mc_io, 0, priv->mc_token, vid); 244270b32d82SIonut-robert Aron 244370b32d82SIonut-robert Aron if (err) { 244470b32d82SIonut-robert Aron netdev_warn(priv->net_dev, 244570b32d82SIonut-robert Aron "Could not remove the vlan id %u\n", 244670b32d82SIonut-robert Aron vid); 244770b32d82SIonut-robert Aron return err; 244870b32d82SIonut-robert Aron } 244970b32d82SIonut-robert Aron 245070b32d82SIonut-robert Aron return 0; 245170b32d82SIonut-robert Aron } 245270b32d82SIonut-robert Aron 245334ff6846SIoana Radulescu static void dpaa2_eth_set_rx_mode(struct net_device *net_dev) 245434ff6846SIoana Radulescu { 245534ff6846SIoana Radulescu struct dpaa2_eth_priv *priv = netdev_priv(net_dev); 245634ff6846SIoana Radulescu int uc_count = netdev_uc_count(net_dev); 245734ff6846SIoana Radulescu int mc_count = netdev_mc_count(net_dev); 245834ff6846SIoana Radulescu u8 max_mac = priv->dpni_attrs.mac_filter_entries; 245934ff6846SIoana Radulescu u32 options = priv->dpni_attrs.options; 246034ff6846SIoana Radulescu u16 mc_token = priv->mc_token; 246134ff6846SIoana Radulescu struct fsl_mc_io *mc_io = priv->mc_io; 246234ff6846SIoana Radulescu int err; 246334ff6846SIoana Radulescu 246434ff6846SIoana Radulescu /* Basic sanity checks; these probably indicate a misconfiguration */ 246534ff6846SIoana Radulescu if (options & DPNI_OPT_NO_MAC_FILTER && max_mac != 0) 246634ff6846SIoana Radulescu netdev_info(net_dev, 246734ff6846SIoana Radulescu "mac_filter_entries=%d, DPNI_OPT_NO_MAC_FILTER option must be disabled\n", 246834ff6846SIoana Radulescu max_mac); 246934ff6846SIoana Radulescu 247034ff6846SIoana Radulescu /* Force promiscuous if the uc or mc counts exceed our capabilities. 
*/ 247134ff6846SIoana Radulescu if (uc_count > max_mac) { 247234ff6846SIoana Radulescu netdev_info(net_dev, 247334ff6846SIoana Radulescu "Unicast addr count reached %d, max allowed is %d; forcing promisc\n", 247434ff6846SIoana Radulescu uc_count, max_mac); 247534ff6846SIoana Radulescu goto force_promisc; 247634ff6846SIoana Radulescu } 247734ff6846SIoana Radulescu if (mc_count + uc_count > max_mac) { 247834ff6846SIoana Radulescu netdev_info(net_dev, 247934ff6846SIoana Radulescu "Unicast + multicast addr count reached %d, max allowed is %d; forcing promisc\n", 248034ff6846SIoana Radulescu uc_count + mc_count, max_mac); 248134ff6846SIoana Radulescu goto force_mc_promisc; 248234ff6846SIoana Radulescu } 248334ff6846SIoana Radulescu 248434ff6846SIoana Radulescu /* Adjust promisc settings due to flag combinations */ 248534ff6846SIoana Radulescu if (net_dev->flags & IFF_PROMISC) 248634ff6846SIoana Radulescu goto force_promisc; 248734ff6846SIoana Radulescu if (net_dev->flags & IFF_ALLMULTI) { 248834ff6846SIoana Radulescu /* First, rebuild unicast filtering table. This should be done 248934ff6846SIoana Radulescu * in promisc mode, in order to avoid frame loss while we 249034ff6846SIoana Radulescu * progressively add entries to the table. 249134ff6846SIoana Radulescu * We don't know whether we had been in promisc already, and 249234ff6846SIoana Radulescu * making an MC call to find out is expensive; so set uc promisc 249334ff6846SIoana Radulescu * nonetheless. 249434ff6846SIoana Radulescu */ 249534ff6846SIoana Radulescu err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1); 249634ff6846SIoana Radulescu if (err) 249734ff6846SIoana Radulescu netdev_warn(net_dev, "Can't set uc promisc\n"); 249834ff6846SIoana Radulescu 249934ff6846SIoana Radulescu /* Actual uc table reconstruction. 
*/ 250034ff6846SIoana Radulescu err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 0); 250134ff6846SIoana Radulescu if (err) 250234ff6846SIoana Radulescu netdev_warn(net_dev, "Can't clear uc filters\n"); 25035d8dccf8SIoana Ciornei dpaa2_eth_add_uc_hw_addr(net_dev, priv); 250434ff6846SIoana Radulescu 250534ff6846SIoana Radulescu /* Finally, clear uc promisc and set mc promisc as requested. */ 250634ff6846SIoana Radulescu err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0); 250734ff6846SIoana Radulescu if (err) 250834ff6846SIoana Radulescu netdev_warn(net_dev, "Can't clear uc promisc\n"); 250934ff6846SIoana Radulescu goto force_mc_promisc; 251034ff6846SIoana Radulescu } 251134ff6846SIoana Radulescu 251234ff6846SIoana Radulescu /* Neither unicast, nor multicast promisc will be on... eventually. 251334ff6846SIoana Radulescu * For now, rebuild mac filtering tables while forcing both of them on. 251434ff6846SIoana Radulescu */ 251534ff6846SIoana Radulescu err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1); 251634ff6846SIoana Radulescu if (err) 251734ff6846SIoana Radulescu netdev_warn(net_dev, "Can't set uc promisc (%d)\n", err); 251834ff6846SIoana Radulescu err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1); 251934ff6846SIoana Radulescu if (err) 252034ff6846SIoana Radulescu netdev_warn(net_dev, "Can't set mc promisc (%d)\n", err); 252134ff6846SIoana Radulescu 252234ff6846SIoana Radulescu /* Actual mac filtering tables reconstruction */ 252334ff6846SIoana Radulescu err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 1); 252434ff6846SIoana Radulescu if (err) 252534ff6846SIoana Radulescu netdev_warn(net_dev, "Can't clear mac filters\n"); 25265d8dccf8SIoana Ciornei dpaa2_eth_add_mc_hw_addr(net_dev, priv); 25275d8dccf8SIoana Ciornei dpaa2_eth_add_uc_hw_addr(net_dev, priv); 252834ff6846SIoana Radulescu 252934ff6846SIoana Radulescu /* Now we can clear both ucast and mcast promisc, without risking 253034ff6846SIoana Radulescu * to drop legitimate frames anymore. 
253134ff6846SIoana Radulescu */ 253234ff6846SIoana Radulescu err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0); 253334ff6846SIoana Radulescu if (err) 253434ff6846SIoana Radulescu netdev_warn(net_dev, "Can't clear ucast promisc\n"); 253534ff6846SIoana Radulescu err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 0); 253634ff6846SIoana Radulescu if (err) 253734ff6846SIoana Radulescu netdev_warn(net_dev, "Can't clear mcast promisc\n"); 253834ff6846SIoana Radulescu 253934ff6846SIoana Radulescu return; 254034ff6846SIoana Radulescu 254134ff6846SIoana Radulescu force_promisc: 254234ff6846SIoana Radulescu err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1); 254334ff6846SIoana Radulescu if (err) 254434ff6846SIoana Radulescu netdev_warn(net_dev, "Can't set ucast promisc\n"); 254534ff6846SIoana Radulescu force_mc_promisc: 254634ff6846SIoana Radulescu err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1); 254734ff6846SIoana Radulescu if (err) 254834ff6846SIoana Radulescu netdev_warn(net_dev, "Can't set mcast promisc\n"); 254934ff6846SIoana Radulescu } 255034ff6846SIoana Radulescu 255134ff6846SIoana Radulescu static int dpaa2_eth_set_features(struct net_device *net_dev, 255234ff6846SIoana Radulescu netdev_features_t features) 255334ff6846SIoana Radulescu { 255434ff6846SIoana Radulescu struct dpaa2_eth_priv *priv = netdev_priv(net_dev); 255534ff6846SIoana Radulescu netdev_features_t changed = features ^ net_dev->features; 255634ff6846SIoana Radulescu bool enable; 255734ff6846SIoana Radulescu int err; 255834ff6846SIoana Radulescu 255970b32d82SIonut-robert Aron if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) { 256070b32d82SIonut-robert Aron enable = !!(features & NETIF_F_HW_VLAN_CTAG_FILTER); 256170b32d82SIonut-robert Aron err = dpaa2_eth_set_rx_vlan_filtering(priv, enable); 256270b32d82SIonut-robert Aron if (err) 256370b32d82SIonut-robert Aron return err; 256470b32d82SIonut-robert Aron } 256570b32d82SIonut-robert Aron 256634ff6846SIoana Radulescu if (changed & NETIF_F_RXCSUM) 
{ 256734ff6846SIoana Radulescu enable = !!(features & NETIF_F_RXCSUM); 25685d8dccf8SIoana Ciornei err = dpaa2_eth_set_rx_csum(priv, enable); 256934ff6846SIoana Radulescu if (err) 257034ff6846SIoana Radulescu return err; 257134ff6846SIoana Radulescu } 257234ff6846SIoana Radulescu 257334ff6846SIoana Radulescu if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) { 257434ff6846SIoana Radulescu enable = !!(features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)); 25755d8dccf8SIoana Ciornei err = dpaa2_eth_set_tx_csum(priv, enable); 257634ff6846SIoana Radulescu if (err) 257734ff6846SIoana Radulescu return err; 257834ff6846SIoana Radulescu } 257934ff6846SIoana Radulescu 258034ff6846SIoana Radulescu return 0; 258134ff6846SIoana Radulescu } 258234ff6846SIoana Radulescu 258334ff6846SIoana Radulescu static int dpaa2_eth_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 258434ff6846SIoana Radulescu { 258534ff6846SIoana Radulescu struct dpaa2_eth_priv *priv = netdev_priv(dev); 258634ff6846SIoana Radulescu struct hwtstamp_config config; 258734ff6846SIoana Radulescu 2588c5521189SYangbo Lu if (!dpaa2_ptp) 2589c5521189SYangbo Lu return -EINVAL; 2590c5521189SYangbo Lu 259134ff6846SIoana Radulescu if (copy_from_user(&config, rq->ifr_data, sizeof(config))) 259234ff6846SIoana Radulescu return -EFAULT; 259334ff6846SIoana Radulescu 259434ff6846SIoana Radulescu switch (config.tx_type) { 259534ff6846SIoana Radulescu case HWTSTAMP_TX_OFF: 259634ff6846SIoana Radulescu case HWTSTAMP_TX_ON: 2597c5521189SYangbo Lu case HWTSTAMP_TX_ONESTEP_SYNC: 25981cf773bdSYangbo Lu priv->tx_tstamp_type = config.tx_type; 259934ff6846SIoana Radulescu break; 260034ff6846SIoana Radulescu default: 260134ff6846SIoana Radulescu return -ERANGE; 260234ff6846SIoana Radulescu } 260334ff6846SIoana Radulescu 260434ff6846SIoana Radulescu if (config.rx_filter == HWTSTAMP_FILTER_NONE) { 260534ff6846SIoana Radulescu priv->rx_tstamp = false; 260634ff6846SIoana Radulescu } else { 260734ff6846SIoana Radulescu priv->rx_tstamp 
= true; 260834ff6846SIoana Radulescu /* TS is set for all frame types, not only those requested */ 260934ff6846SIoana Radulescu config.rx_filter = HWTSTAMP_FILTER_ALL; 261034ff6846SIoana Radulescu } 261134ff6846SIoana Radulescu 2612c4680c97SRadu Bulie if (priv->tx_tstamp_type == HWTSTAMP_TX_ONESTEP_SYNC) 2613c4680c97SRadu Bulie dpaa2_ptp_onestep_reg_update_method(priv); 2614c4680c97SRadu Bulie 261534ff6846SIoana Radulescu return copy_to_user(rq->ifr_data, &config, sizeof(config)) ? 261634ff6846SIoana Radulescu -EFAULT : 0; 261734ff6846SIoana Radulescu } 261834ff6846SIoana Radulescu 261934ff6846SIoana Radulescu static int dpaa2_eth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 262034ff6846SIoana Radulescu { 26214a84182aSRussell King struct dpaa2_eth_priv *priv = netdev_priv(dev); 2622*2291982eSVladimir Oltean int err; 26234a84182aSRussell King 262434ff6846SIoana Radulescu if (cmd == SIOCSHWTSTAMP) 262534ff6846SIoana Radulescu return dpaa2_eth_ts_ioctl(dev, rq, cmd); 262634ff6846SIoana Radulescu 2627*2291982eSVladimir Oltean mutex_lock(&priv->mac_lock); 2628*2291982eSVladimir Oltean 2629*2291982eSVladimir Oltean if (dpaa2_eth_is_type_phy(priv)) { 2630*2291982eSVladimir Oltean err = phylink_mii_ioctl(priv->mac->phylink, rq, cmd); 2631*2291982eSVladimir Oltean mutex_unlock(&priv->mac_lock); 2632*2291982eSVladimir Oltean return err; 2633*2291982eSVladimir Oltean } 2634*2291982eSVladimir Oltean 2635*2291982eSVladimir Oltean mutex_unlock(&priv->mac_lock); 26364a84182aSRussell King 26374a84182aSRussell King return -EOPNOTSUPP; 263834ff6846SIoana Radulescu } 263934ff6846SIoana Radulescu 26407e273a8eSIoana Ciocoi Radulescu static bool xdp_mtu_valid(struct dpaa2_eth_priv *priv, int mtu) 26417e273a8eSIoana Ciocoi Radulescu { 26427e273a8eSIoana Ciocoi Radulescu int mfl, linear_mfl; 26437e273a8eSIoana Ciocoi Radulescu 26447e273a8eSIoana Ciocoi Radulescu mfl = DPAA2_ETH_L2_MAX_FRM(mtu); 2645efa6a7d0SIoana Ciornei linear_mfl = priv->rx_buf_size - DPAA2_ETH_RX_HWA_SIZE - 
26467b1eea1aSIoana Ciocoi Radulescu dpaa2_eth_rx_head_room(priv) - XDP_PACKET_HEADROOM; 26477e273a8eSIoana Ciocoi Radulescu 26487e273a8eSIoana Ciocoi Radulescu if (mfl > linear_mfl) { 26497e273a8eSIoana Ciocoi Radulescu netdev_warn(priv->net_dev, "Maximum MTU for XDP is %d\n", 26507e273a8eSIoana Ciocoi Radulescu linear_mfl - VLAN_ETH_HLEN); 26517e273a8eSIoana Ciocoi Radulescu return false; 26527e273a8eSIoana Ciocoi Radulescu } 26537e273a8eSIoana Ciocoi Radulescu 26547e273a8eSIoana Ciocoi Radulescu return true; 26557e273a8eSIoana Ciocoi Radulescu } 26567e273a8eSIoana Ciocoi Radulescu 26575d8dccf8SIoana Ciornei static int dpaa2_eth_set_rx_mfl(struct dpaa2_eth_priv *priv, int mtu, bool has_xdp) 26587e273a8eSIoana Ciocoi Radulescu { 26597e273a8eSIoana Ciocoi Radulescu int mfl, err; 26607e273a8eSIoana Ciocoi Radulescu 26617e273a8eSIoana Ciocoi Radulescu /* We enforce a maximum Rx frame length based on MTU only if we have 26627e273a8eSIoana Ciocoi Radulescu * an XDP program attached (in order to avoid Rx S/G frames). 
26637e273a8eSIoana Ciocoi Radulescu * Otherwise, we accept all incoming frames as long as they are not 26647e273a8eSIoana Ciocoi Radulescu * larger than maximum size supported in hardware 26657e273a8eSIoana Ciocoi Radulescu */ 26667e273a8eSIoana Ciocoi Radulescu if (has_xdp) 26677e273a8eSIoana Ciocoi Radulescu mfl = DPAA2_ETH_L2_MAX_FRM(mtu); 26687e273a8eSIoana Ciocoi Radulescu else 26697e273a8eSIoana Ciocoi Radulescu mfl = DPAA2_ETH_MFL; 26707e273a8eSIoana Ciocoi Radulescu 26717e273a8eSIoana Ciocoi Radulescu err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token, mfl); 26727e273a8eSIoana Ciocoi Radulescu if (err) { 26737e273a8eSIoana Ciocoi Radulescu netdev_err(priv->net_dev, "dpni_set_max_frame_length failed\n"); 26747e273a8eSIoana Ciocoi Radulescu return err; 26757e273a8eSIoana Ciocoi Radulescu } 26767e273a8eSIoana Ciocoi Radulescu 26777e273a8eSIoana Ciocoi Radulescu return 0; 26787e273a8eSIoana Ciocoi Radulescu } 26797e273a8eSIoana Ciocoi Radulescu 26807e273a8eSIoana Ciocoi Radulescu static int dpaa2_eth_change_mtu(struct net_device *dev, int new_mtu) 26817e273a8eSIoana Ciocoi Radulescu { 26827e273a8eSIoana Ciocoi Radulescu struct dpaa2_eth_priv *priv = netdev_priv(dev); 26837e273a8eSIoana Ciocoi Radulescu int err; 26847e273a8eSIoana Ciocoi Radulescu 26857e273a8eSIoana Ciocoi Radulescu if (!priv->xdp_prog) 26867e273a8eSIoana Ciocoi Radulescu goto out; 26877e273a8eSIoana Ciocoi Radulescu 26887e273a8eSIoana Ciocoi Radulescu if (!xdp_mtu_valid(priv, new_mtu)) 26897e273a8eSIoana Ciocoi Radulescu return -EINVAL; 26907e273a8eSIoana Ciocoi Radulescu 26915d8dccf8SIoana Ciornei err = dpaa2_eth_set_rx_mfl(priv, new_mtu, true); 26927e273a8eSIoana Ciocoi Radulescu if (err) 26937e273a8eSIoana Ciocoi Radulescu return err; 26947e273a8eSIoana Ciocoi Radulescu 26957e273a8eSIoana Ciocoi Radulescu out: 26967e273a8eSIoana Ciocoi Radulescu dev->mtu = new_mtu; 26977e273a8eSIoana Ciocoi Radulescu return 0; 26987e273a8eSIoana Ciocoi Radulescu } 26997e273a8eSIoana Ciocoi 
Radulescu 27005d8dccf8SIoana Ciornei static int dpaa2_eth_update_rx_buffer_headroom(struct dpaa2_eth_priv *priv, bool has_xdp) 27017b1eea1aSIoana Ciocoi Radulescu { 27027b1eea1aSIoana Ciocoi Radulescu struct dpni_buffer_layout buf_layout = {0}; 27037b1eea1aSIoana Ciocoi Radulescu int err; 27047b1eea1aSIoana Ciocoi Radulescu 27057b1eea1aSIoana Ciocoi Radulescu err = dpni_get_buffer_layout(priv->mc_io, 0, priv->mc_token, 27067b1eea1aSIoana Ciocoi Radulescu DPNI_QUEUE_RX, &buf_layout); 27077b1eea1aSIoana Ciocoi Radulescu if (err) { 27087b1eea1aSIoana Ciocoi Radulescu netdev_err(priv->net_dev, "dpni_get_buffer_layout failed\n"); 27097b1eea1aSIoana Ciocoi Radulescu return err; 27107b1eea1aSIoana Ciocoi Radulescu } 27117b1eea1aSIoana Ciocoi Radulescu 27127b1eea1aSIoana Ciocoi Radulescu /* Reserve extra headroom for XDP header size changes */ 27137b1eea1aSIoana Ciocoi Radulescu buf_layout.data_head_room = dpaa2_eth_rx_head_room(priv) + 27147b1eea1aSIoana Ciocoi Radulescu (has_xdp ? XDP_PACKET_HEADROOM : 0); 27157b1eea1aSIoana Ciocoi Radulescu buf_layout.options = DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM; 27167b1eea1aSIoana Ciocoi Radulescu err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token, 27177b1eea1aSIoana Ciocoi Radulescu DPNI_QUEUE_RX, &buf_layout); 27187b1eea1aSIoana Ciocoi Radulescu if (err) { 27197b1eea1aSIoana Ciocoi Radulescu netdev_err(priv->net_dev, "dpni_set_buffer_layout failed\n"); 27207b1eea1aSIoana Ciocoi Radulescu return err; 27217b1eea1aSIoana Ciocoi Radulescu } 27227b1eea1aSIoana Ciocoi Radulescu 27237b1eea1aSIoana Ciocoi Radulescu return 0; 27247b1eea1aSIoana Ciocoi Radulescu } 27257b1eea1aSIoana Ciocoi Radulescu 27265d8dccf8SIoana Ciornei static int dpaa2_eth_setup_xdp(struct net_device *dev, struct bpf_prog *prog) 27277e273a8eSIoana Ciocoi Radulescu { 27287e273a8eSIoana Ciocoi Radulescu struct dpaa2_eth_priv *priv = netdev_priv(dev); 27297e273a8eSIoana Ciocoi Radulescu struct dpaa2_eth_channel *ch; 27307e273a8eSIoana Ciocoi Radulescu struct 
bpf_prog *old; 27317e273a8eSIoana Ciocoi Radulescu bool up, need_update; 27327e273a8eSIoana Ciocoi Radulescu int i, err; 27337e273a8eSIoana Ciocoi Radulescu 27347e273a8eSIoana Ciocoi Radulescu if (prog && !xdp_mtu_valid(priv, dev->mtu)) 27357e273a8eSIoana Ciocoi Radulescu return -EINVAL; 27367e273a8eSIoana Ciocoi Radulescu 273785192dbfSAndrii Nakryiko if (prog) 273885192dbfSAndrii Nakryiko bpf_prog_add(prog, priv->num_channels); 27397e273a8eSIoana Ciocoi Radulescu 27407e273a8eSIoana Ciocoi Radulescu up = netif_running(dev); 27417e273a8eSIoana Ciocoi Radulescu need_update = (!!priv->xdp_prog != !!prog); 27427e273a8eSIoana Ciocoi Radulescu 27437e273a8eSIoana Ciocoi Radulescu if (up) 2744e3caeb2dSIoana Ciornei dev_close(dev); 27457e273a8eSIoana Ciocoi Radulescu 27467b1eea1aSIoana Ciocoi Radulescu /* While in xdp mode, enforce a maximum Rx frame size based on MTU. 27477b1eea1aSIoana Ciocoi Radulescu * Also, when switching between xdp/non-xdp modes we need to reconfigure 27487b1eea1aSIoana Ciocoi Radulescu * our Rx buffer layout. Buffer pool was drained on dpaa2_eth_stop, 27497b1eea1aSIoana Ciocoi Radulescu * so we are sure no old format buffers will be used from now on. 
27507b1eea1aSIoana Ciocoi Radulescu */ 27517e273a8eSIoana Ciocoi Radulescu if (need_update) { 27525d8dccf8SIoana Ciornei err = dpaa2_eth_set_rx_mfl(priv, dev->mtu, !!prog); 27537e273a8eSIoana Ciocoi Radulescu if (err) 27547e273a8eSIoana Ciocoi Radulescu goto out_err; 27555d8dccf8SIoana Ciornei err = dpaa2_eth_update_rx_buffer_headroom(priv, !!prog); 27567b1eea1aSIoana Ciocoi Radulescu if (err) 27577b1eea1aSIoana Ciocoi Radulescu goto out_err; 27587e273a8eSIoana Ciocoi Radulescu } 27597e273a8eSIoana Ciocoi Radulescu 27607e273a8eSIoana Ciocoi Radulescu old = xchg(&priv->xdp_prog, prog); 27617e273a8eSIoana Ciocoi Radulescu if (old) 27627e273a8eSIoana Ciocoi Radulescu bpf_prog_put(old); 27637e273a8eSIoana Ciocoi Radulescu 27647e273a8eSIoana Ciocoi Radulescu for (i = 0; i < priv->num_channels; i++) { 27657e273a8eSIoana Ciocoi Radulescu ch = priv->channel[i]; 27667e273a8eSIoana Ciocoi Radulescu old = xchg(&ch->xdp.prog, prog); 27677e273a8eSIoana Ciocoi Radulescu if (old) 27687e273a8eSIoana Ciocoi Radulescu bpf_prog_put(old); 27697e273a8eSIoana Ciocoi Radulescu } 27707e273a8eSIoana Ciocoi Radulescu 27717e273a8eSIoana Ciocoi Radulescu if (up) { 2772e3caeb2dSIoana Ciornei err = dev_open(dev, NULL); 27737e273a8eSIoana Ciocoi Radulescu if (err) 27747e273a8eSIoana Ciocoi Radulescu return err; 27757e273a8eSIoana Ciocoi Radulescu } 27767e273a8eSIoana Ciocoi Radulescu 27777e273a8eSIoana Ciocoi Radulescu return 0; 27787e273a8eSIoana Ciocoi Radulescu 27797e273a8eSIoana Ciocoi Radulescu out_err: 27807e273a8eSIoana Ciocoi Radulescu if (prog) 27817e273a8eSIoana Ciocoi Radulescu bpf_prog_sub(prog, priv->num_channels); 27827e273a8eSIoana Ciocoi Radulescu if (up) 2783e3caeb2dSIoana Ciornei dev_open(dev, NULL); 27847e273a8eSIoana Ciocoi Radulescu 27857e273a8eSIoana Ciocoi Radulescu return err; 27867e273a8eSIoana Ciocoi Radulescu } 27877e273a8eSIoana Ciocoi Radulescu 27887e273a8eSIoana Ciocoi Radulescu static int dpaa2_eth_xdp(struct net_device *dev, struct netdev_bpf *xdp) 
27897e273a8eSIoana Ciocoi Radulescu { 27907e273a8eSIoana Ciocoi Radulescu switch (xdp->command) { 27917e273a8eSIoana Ciocoi Radulescu case XDP_SETUP_PROG: 27925d8dccf8SIoana Ciornei return dpaa2_eth_setup_xdp(dev, xdp->prog); 279348276c08SRobert-Ionut Alexa case XDP_SETUP_XSK_POOL: 279448276c08SRobert-Ionut Alexa return dpaa2_xsk_setup_pool(dev, xdp->xsk.pool, xdp->xsk.queue_id); 27957e273a8eSIoana Ciocoi Radulescu default: 27967e273a8eSIoana Ciocoi Radulescu return -EINVAL; 27977e273a8eSIoana Ciocoi Radulescu } 27987e273a8eSIoana Ciocoi Radulescu 27997e273a8eSIoana Ciocoi Radulescu return 0; 28007e273a8eSIoana Ciocoi Radulescu } 28017e273a8eSIoana Ciocoi Radulescu 28026aa40b9eSIoana Ciornei static int dpaa2_eth_xdp_create_fd(struct net_device *net_dev, 28036aa40b9eSIoana Ciornei struct xdp_frame *xdpf, 28046aa40b9eSIoana Ciornei struct dpaa2_fd *fd) 2805d678be1dSIoana Radulescu { 2806d678be1dSIoana Radulescu struct device *dev = net_dev->dev.parent; 2807d678be1dSIoana Radulescu unsigned int needed_headroom; 2808d678be1dSIoana Radulescu struct dpaa2_eth_swa *swa; 2809d678be1dSIoana Radulescu void *buffer_start, *aligned_start; 2810d678be1dSIoana Radulescu dma_addr_t addr; 2811d678be1dSIoana Radulescu 2812d678be1dSIoana Radulescu /* We require a minimum headroom to be able to transmit the frame. 
2813d678be1dSIoana Radulescu * Otherwise return an error and let the original net_device handle it 2814d678be1dSIoana Radulescu */ 28151cf773bdSYangbo Lu needed_headroom = dpaa2_eth_needed_headroom(NULL); 2816d678be1dSIoana Radulescu if (xdpf->headroom < needed_headroom) 2817d678be1dSIoana Radulescu return -EINVAL; 2818d678be1dSIoana Radulescu 2819d678be1dSIoana Radulescu /* Setup the FD fields */ 28206aa40b9eSIoana Ciornei memset(fd, 0, sizeof(*fd)); 2821d678be1dSIoana Radulescu 2822d678be1dSIoana Radulescu /* Align FD address, if possible */ 2823d678be1dSIoana Radulescu buffer_start = xdpf->data - needed_headroom; 2824d678be1dSIoana Radulescu aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN, 2825d678be1dSIoana Radulescu DPAA2_ETH_TX_BUF_ALIGN); 2826d678be1dSIoana Radulescu if (aligned_start >= xdpf->data - xdpf->headroom) 2827d678be1dSIoana Radulescu buffer_start = aligned_start; 2828d678be1dSIoana Radulescu 2829d678be1dSIoana Radulescu swa = (struct dpaa2_eth_swa *)buffer_start; 2830d678be1dSIoana Radulescu /* fill in necessary fields here */ 2831d678be1dSIoana Radulescu swa->type = DPAA2_ETH_SWA_XDP; 2832d678be1dSIoana Radulescu swa->xdp.dma_size = xdpf->data + xdpf->len - buffer_start; 2833d678be1dSIoana Radulescu swa->xdp.xdpf = xdpf; 2834d678be1dSIoana Radulescu 2835d678be1dSIoana Radulescu addr = dma_map_single(dev, buffer_start, 2836d678be1dSIoana Radulescu swa->xdp.dma_size, 2837d678be1dSIoana Radulescu DMA_BIDIRECTIONAL); 28386aa40b9eSIoana Ciornei if (unlikely(dma_mapping_error(dev, addr))) 2839d678be1dSIoana Radulescu return -ENOMEM; 2840d678be1dSIoana Radulescu 28416aa40b9eSIoana Ciornei dpaa2_fd_set_addr(fd, addr); 28426aa40b9eSIoana Ciornei dpaa2_fd_set_offset(fd, xdpf->data - buffer_start); 28436aa40b9eSIoana Ciornei dpaa2_fd_set_len(fd, xdpf->len); 28446aa40b9eSIoana Ciornei dpaa2_fd_set_format(fd, dpaa2_fd_single); 28456aa40b9eSIoana Ciornei dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA); 2846d678be1dSIoana Radulescu 2847d678be1dSIoana 
Radulescu return 0; 2848d678be1dSIoana Radulescu } 2849d678be1dSIoana Radulescu 2850d678be1dSIoana Radulescu static int dpaa2_eth_xdp_xmit(struct net_device *net_dev, int n, 2851d678be1dSIoana Radulescu struct xdp_frame **frames, u32 flags) 2852d678be1dSIoana Radulescu { 28536aa40b9eSIoana Ciornei struct dpaa2_eth_priv *priv = netdev_priv(net_dev); 285438c440b2SIoana Ciornei struct dpaa2_eth_xdp_fds *xdp_redirect_fds; 28556aa40b9eSIoana Ciornei struct rtnl_link_stats64 *percpu_stats; 28566aa40b9eSIoana Ciornei struct dpaa2_eth_fq *fq; 28578665d978SIoana Ciornei struct dpaa2_fd *fds; 285838c440b2SIoana Ciornei int enqueued, i, err; 2859d678be1dSIoana Radulescu 2860d678be1dSIoana Radulescu if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) 2861d678be1dSIoana Radulescu return -EINVAL; 2862d678be1dSIoana Radulescu 2863d678be1dSIoana Radulescu if (!netif_running(net_dev)) 2864d678be1dSIoana Radulescu return -ENETDOWN; 2865d678be1dSIoana Radulescu 28668665d978SIoana Ciornei fq = &priv->fq[smp_processor_id()]; 286738c440b2SIoana Ciornei xdp_redirect_fds = &fq->xdp_redirect_fds; 286838c440b2SIoana Ciornei fds = xdp_redirect_fds->fds; 28698665d978SIoana Ciornei 28706aa40b9eSIoana Ciornei percpu_stats = this_cpu_ptr(priv->percpu_stats); 28716aa40b9eSIoana Ciornei 28728665d978SIoana Ciornei /* create a FD for each xdp_frame in the list received */ 2873d678be1dSIoana Radulescu for (i = 0; i < n; i++) { 28748665d978SIoana Ciornei err = dpaa2_eth_xdp_create_fd(net_dev, frames[i], &fds[i]); 28758665d978SIoana Ciornei if (err) 28766aa40b9eSIoana Ciornei break; 28776aa40b9eSIoana Ciornei } 287838c440b2SIoana Ciornei xdp_redirect_fds->num = i; 28796aa40b9eSIoana Ciornei 288038c440b2SIoana Ciornei /* enqueue all the frame descriptors */ 288138c440b2SIoana Ciornei enqueued = dpaa2_eth_xdp_flush(priv, fq, xdp_redirect_fds); 2882d678be1dSIoana Radulescu 28838665d978SIoana Ciornei /* update statistics */ 288438c440b2SIoana Ciornei percpu_stats->tx_packets += enqueued; 288538c440b2SIoana Ciornei 
for (i = 0; i < enqueued; i++) 28868665d978SIoana Ciornei percpu_stats->tx_bytes += dpaa2_fd_get_len(&fds[i]); 28878665d978SIoana Ciornei 288838c440b2SIoana Ciornei return enqueued; 2889d678be1dSIoana Radulescu } 2890d678be1dSIoana Radulescu 289106d5b179SIoana Radulescu static int update_xps(struct dpaa2_eth_priv *priv) 289206d5b179SIoana Radulescu { 289306d5b179SIoana Radulescu struct net_device *net_dev = priv->net_dev; 289406d5b179SIoana Radulescu struct cpumask xps_mask; 289506d5b179SIoana Radulescu struct dpaa2_eth_fq *fq; 2896ab1e6de2SIoana Radulescu int i, num_queues, netdev_queues; 289706d5b179SIoana Radulescu int err = 0; 289806d5b179SIoana Radulescu 289906d5b179SIoana Radulescu num_queues = dpaa2_eth_queue_count(priv); 2900ab1e6de2SIoana Radulescu netdev_queues = (net_dev->num_tc ? : 1) * num_queues; 290106d5b179SIoana Radulescu 290206d5b179SIoana Radulescu /* The first <num_queues> entries in priv->fq array are Tx/Tx conf 290306d5b179SIoana Radulescu * queues, so only process those 290406d5b179SIoana Radulescu */ 2905ab1e6de2SIoana Radulescu for (i = 0; i < netdev_queues; i++) { 2906ab1e6de2SIoana Radulescu fq = &priv->fq[i % num_queues]; 290706d5b179SIoana Radulescu 290806d5b179SIoana Radulescu cpumask_clear(&xps_mask); 290906d5b179SIoana Radulescu cpumask_set_cpu(fq->target_cpu, &xps_mask); 291006d5b179SIoana Radulescu 291106d5b179SIoana Radulescu err = netif_set_xps_queue(net_dev, &xps_mask, i); 291206d5b179SIoana Radulescu if (err) { 291306d5b179SIoana Radulescu netdev_warn_once(net_dev, "Error setting XPS queue\n"); 291406d5b179SIoana Radulescu break; 291506d5b179SIoana Radulescu } 291606d5b179SIoana Radulescu } 291706d5b179SIoana Radulescu 291806d5b179SIoana Radulescu return err; 291906d5b179SIoana Radulescu } 292006d5b179SIoana Radulescu 2921e3ec13beSIoana Ciornei static int dpaa2_eth_setup_mqprio(struct net_device *net_dev, 2922e3ec13beSIoana Ciornei struct tc_mqprio_qopt *mqprio) 2923ab1e6de2SIoana Radulescu { 2924ab1e6de2SIoana Radulescu struct 
dpaa2_eth_priv *priv = netdev_priv(net_dev); 2925ab1e6de2SIoana Radulescu u8 num_tc, num_queues; 2926ab1e6de2SIoana Radulescu int i; 2927ab1e6de2SIoana Radulescu 2928ab1e6de2SIoana Radulescu mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; 2929ab1e6de2SIoana Radulescu num_queues = dpaa2_eth_queue_count(priv); 2930ab1e6de2SIoana Radulescu num_tc = mqprio->num_tc; 2931ab1e6de2SIoana Radulescu 2932ab1e6de2SIoana Radulescu if (num_tc == net_dev->num_tc) 2933ab1e6de2SIoana Radulescu return 0; 2934ab1e6de2SIoana Radulescu 2935ab1e6de2SIoana Radulescu if (num_tc > dpaa2_eth_tc_count(priv)) { 2936ab1e6de2SIoana Radulescu netdev_err(net_dev, "Max %d traffic classes supported\n", 2937ab1e6de2SIoana Radulescu dpaa2_eth_tc_count(priv)); 2938b89c1e6bSJesper Dangaard Brouer return -EOPNOTSUPP; 2939ab1e6de2SIoana Radulescu } 2940ab1e6de2SIoana Radulescu 2941ab1e6de2SIoana Radulescu if (!num_tc) { 2942ab1e6de2SIoana Radulescu netdev_reset_tc(net_dev); 2943ab1e6de2SIoana Radulescu netif_set_real_num_tx_queues(net_dev, num_queues); 2944ab1e6de2SIoana Radulescu goto out; 2945ab1e6de2SIoana Radulescu } 2946ab1e6de2SIoana Radulescu 2947ab1e6de2SIoana Radulescu netdev_set_num_tc(net_dev, num_tc); 2948ab1e6de2SIoana Radulescu netif_set_real_num_tx_queues(net_dev, num_tc * num_queues); 2949ab1e6de2SIoana Radulescu 2950ab1e6de2SIoana Radulescu for (i = 0; i < num_tc; i++) 2951ab1e6de2SIoana Radulescu netdev_set_tc_queue(net_dev, i, num_queues, i * num_queues); 2952ab1e6de2SIoana Radulescu 2953ab1e6de2SIoana Radulescu out: 2954ab1e6de2SIoana Radulescu update_xps(priv); 2955ab1e6de2SIoana Radulescu 2956ab1e6de2SIoana Radulescu return 0; 2957ab1e6de2SIoana Radulescu } 2958ab1e6de2SIoana Radulescu 29593657cdafSIoana Ciornei #define bps_to_mbits(rate) (div_u64((rate), 1000000) * 8) 29603657cdafSIoana Ciornei 29613657cdafSIoana Ciornei static int dpaa2_eth_setup_tbf(struct net_device *net_dev, struct tc_tbf_qopt_offload *p) 29623657cdafSIoana Ciornei { 29633657cdafSIoana Ciornei struct 
tc_tbf_qopt_offload_replace_params *cfg = &p->replace_params; 29643657cdafSIoana Ciornei struct dpaa2_eth_priv *priv = netdev_priv(net_dev); 29653657cdafSIoana Ciornei struct dpni_tx_shaping_cfg tx_cr_shaper = { 0 }; 29663657cdafSIoana Ciornei struct dpni_tx_shaping_cfg tx_er_shaper = { 0 }; 29673657cdafSIoana Ciornei int err; 29683657cdafSIoana Ciornei 29693657cdafSIoana Ciornei if (p->command == TC_TBF_STATS) 29703657cdafSIoana Ciornei return -EOPNOTSUPP; 29713657cdafSIoana Ciornei 29723657cdafSIoana Ciornei /* Only per port Tx shaping */ 29733657cdafSIoana Ciornei if (p->parent != TC_H_ROOT) 29743657cdafSIoana Ciornei return -EOPNOTSUPP; 29753657cdafSIoana Ciornei 29763657cdafSIoana Ciornei if (p->command == TC_TBF_REPLACE) { 29773657cdafSIoana Ciornei if (cfg->max_size > DPAA2_ETH_MAX_BURST_SIZE) { 29783657cdafSIoana Ciornei netdev_err(net_dev, "burst size cannot be greater than %d\n", 29793657cdafSIoana Ciornei DPAA2_ETH_MAX_BURST_SIZE); 29803657cdafSIoana Ciornei return -EINVAL; 29813657cdafSIoana Ciornei } 29823657cdafSIoana Ciornei 29833657cdafSIoana Ciornei tx_cr_shaper.max_burst_size = cfg->max_size; 29843657cdafSIoana Ciornei /* The TBF interface is in bytes/s, whereas DPAA2 expects the 29853657cdafSIoana Ciornei * rate in Mbits/s 29863657cdafSIoana Ciornei */ 29873657cdafSIoana Ciornei tx_cr_shaper.rate_limit = bps_to_mbits(cfg->rate.rate_bytes_ps); 29883657cdafSIoana Ciornei } 29893657cdafSIoana Ciornei 29903657cdafSIoana Ciornei err = dpni_set_tx_shaping(priv->mc_io, 0, priv->mc_token, &tx_cr_shaper, 29913657cdafSIoana Ciornei &tx_er_shaper, 0); 29923657cdafSIoana Ciornei if (err) { 29933657cdafSIoana Ciornei netdev_err(net_dev, "dpni_set_tx_shaping() = %d\n", err); 29943657cdafSIoana Ciornei return err; 29953657cdafSIoana Ciornei } 29963657cdafSIoana Ciornei 29973657cdafSIoana Ciornei return 0; 29983657cdafSIoana Ciornei } 29993657cdafSIoana Ciornei 3000e3ec13beSIoana Ciornei static int dpaa2_eth_setup_tc(struct net_device *net_dev, 
3001e3ec13beSIoana Ciornei enum tc_setup_type type, void *type_data) 3002e3ec13beSIoana Ciornei { 3003e3ec13beSIoana Ciornei switch (type) { 3004e3ec13beSIoana Ciornei case TC_SETUP_QDISC_MQPRIO: 3005e3ec13beSIoana Ciornei return dpaa2_eth_setup_mqprio(net_dev, type_data); 30063657cdafSIoana Ciornei case TC_SETUP_QDISC_TBF: 30073657cdafSIoana Ciornei return dpaa2_eth_setup_tbf(net_dev, type_data); 3008e3ec13beSIoana Ciornei default: 3009e3ec13beSIoana Ciornei return -EOPNOTSUPP; 3010e3ec13beSIoana Ciornei } 3011e3ec13beSIoana Ciornei } 3012e3ec13beSIoana Ciornei 301334ff6846SIoana Radulescu static const struct net_device_ops dpaa2_eth_ops = { 301434ff6846SIoana Radulescu .ndo_open = dpaa2_eth_open, 301534ff6846SIoana Radulescu .ndo_start_xmit = dpaa2_eth_tx, 301634ff6846SIoana Radulescu .ndo_stop = dpaa2_eth_stop, 301734ff6846SIoana Radulescu .ndo_set_mac_address = dpaa2_eth_set_addr, 301834ff6846SIoana Radulescu .ndo_get_stats64 = dpaa2_eth_get_stats, 301934ff6846SIoana Radulescu .ndo_set_rx_mode = dpaa2_eth_set_rx_mode, 302034ff6846SIoana Radulescu .ndo_set_features = dpaa2_eth_set_features, 3021a7605370SArnd Bergmann .ndo_eth_ioctl = dpaa2_eth_ioctl, 30227e273a8eSIoana Ciocoi Radulescu .ndo_change_mtu = dpaa2_eth_change_mtu, 30237e273a8eSIoana Ciocoi Radulescu .ndo_bpf = dpaa2_eth_xdp, 3024d678be1dSIoana Radulescu .ndo_xdp_xmit = dpaa2_eth_xdp_xmit, 302548276c08SRobert-Ionut Alexa .ndo_xsk_wakeup = dpaa2_xsk_wakeup, 3026ab1e6de2SIoana Radulescu .ndo_setup_tc = dpaa2_eth_setup_tc, 302770b32d82SIonut-robert Aron .ndo_vlan_rx_add_vid = dpaa2_eth_rx_add_vid, 302870b32d82SIonut-robert Aron .ndo_vlan_rx_kill_vid = dpaa2_eth_rx_kill_vid 302934ff6846SIoana Radulescu }; 303034ff6846SIoana Radulescu 30315d8dccf8SIoana Ciornei static void dpaa2_eth_cdan_cb(struct dpaa2_io_notification_ctx *ctx) 303234ff6846SIoana Radulescu { 303334ff6846SIoana Radulescu struct dpaa2_eth_channel *ch; 303434ff6846SIoana Radulescu 303534ff6846SIoana Radulescu ch = container_of(ctx, struct 
dpaa2_eth_channel, nctx); 303634ff6846SIoana Radulescu 303734ff6846SIoana Radulescu /* Update NAPI statistics */ 303834ff6846SIoana Radulescu ch->stats.cdan++; 303934ff6846SIoana Radulescu 30404a7f6c5aSRobert-Ionut Alexa /* NAPI can also be scheduled from the AF_XDP Tx path. Mark a missed 30414a7f6c5aSRobert-Ionut Alexa * so that it can be rescheduled again. 30424a7f6c5aSRobert-Ionut Alexa */ 30434a7f6c5aSRobert-Ionut Alexa if (!napi_if_scheduled_mark_missed(&ch->napi)) 30446c33ae1aSJiafei Pan napi_schedule(&ch->napi); 304534ff6846SIoana Radulescu } 304634ff6846SIoana Radulescu 304734ff6846SIoana Radulescu /* Allocate and configure a DPCON object */ 30485d8dccf8SIoana Ciornei static struct fsl_mc_device *dpaa2_eth_setup_dpcon(struct dpaa2_eth_priv *priv) 304934ff6846SIoana Radulescu { 305034ff6846SIoana Radulescu struct fsl_mc_device *dpcon; 305134ff6846SIoana Radulescu struct device *dev = priv->net_dev->dev.parent; 305234ff6846SIoana Radulescu int err; 305334ff6846SIoana Radulescu 305434ff6846SIoana Radulescu err = fsl_mc_object_allocate(to_fsl_mc_device(dev), 305534ff6846SIoana Radulescu FSL_MC_POOL_DPCON, &dpcon); 305634ff6846SIoana Radulescu if (err) { 305737fe9b98SSean Anderson if (err == -ENXIO) { 305837fe9b98SSean Anderson dev_dbg(dev, "Waiting for DPCON\n"); 3059d7f5a9d8SIoana Ciornei err = -EPROBE_DEFER; 306037fe9b98SSean Anderson } else { 306134ff6846SIoana Radulescu dev_info(dev, "Not enough DPCONs, will go on as-is\n"); 306237fe9b98SSean Anderson } 3063d7f5a9d8SIoana Ciornei return ERR_PTR(err); 306434ff6846SIoana Radulescu } 306534ff6846SIoana Radulescu 306634ff6846SIoana Radulescu err = dpcon_open(priv->mc_io, 0, dpcon->obj_desc.id, &dpcon->mc_handle); 306734ff6846SIoana Radulescu if (err) { 306834ff6846SIoana Radulescu dev_err(dev, "dpcon_open() failed\n"); 306934ff6846SIoana Radulescu goto free; 307034ff6846SIoana Radulescu } 307134ff6846SIoana Radulescu 307234ff6846SIoana Radulescu err = dpcon_reset(priv->mc_io, 0, dpcon->mc_handle); 
307334ff6846SIoana Radulescu if (err) { 307434ff6846SIoana Radulescu dev_err(dev, "dpcon_reset() failed\n"); 307534ff6846SIoana Radulescu goto close; 307634ff6846SIoana Radulescu } 307734ff6846SIoana Radulescu 307834ff6846SIoana Radulescu err = dpcon_enable(priv->mc_io, 0, dpcon->mc_handle); 307934ff6846SIoana Radulescu if (err) { 308034ff6846SIoana Radulescu dev_err(dev, "dpcon_enable() failed\n"); 308134ff6846SIoana Radulescu goto close; 308234ff6846SIoana Radulescu } 308334ff6846SIoana Radulescu 308434ff6846SIoana Radulescu return dpcon; 308534ff6846SIoana Radulescu 308634ff6846SIoana Radulescu close: 308734ff6846SIoana Radulescu dpcon_close(priv->mc_io, 0, dpcon->mc_handle); 308834ff6846SIoana Radulescu free: 308934ff6846SIoana Radulescu fsl_mc_object_free(dpcon); 309034ff6846SIoana Radulescu 309102afa9c6SYueHaibing return ERR_PTR(err); 309234ff6846SIoana Radulescu } 309334ff6846SIoana Radulescu 30945d8dccf8SIoana Ciornei static void dpaa2_eth_free_dpcon(struct dpaa2_eth_priv *priv, 309534ff6846SIoana Radulescu struct fsl_mc_device *dpcon) 309634ff6846SIoana Radulescu { 309734ff6846SIoana Radulescu dpcon_disable(priv->mc_io, 0, dpcon->mc_handle); 309834ff6846SIoana Radulescu dpcon_close(priv->mc_io, 0, dpcon->mc_handle); 309934ff6846SIoana Radulescu fsl_mc_object_free(dpcon); 310034ff6846SIoana Radulescu } 310134ff6846SIoana Radulescu 31025d8dccf8SIoana Ciornei static struct dpaa2_eth_channel *dpaa2_eth_alloc_channel(struct dpaa2_eth_priv *priv) 310334ff6846SIoana Radulescu { 310434ff6846SIoana Radulescu struct dpaa2_eth_channel *channel; 310534ff6846SIoana Radulescu struct dpcon_attr attr; 310634ff6846SIoana Radulescu struct device *dev = priv->net_dev->dev.parent; 310734ff6846SIoana Radulescu int err; 310834ff6846SIoana Radulescu 310934ff6846SIoana Radulescu channel = kzalloc(sizeof(*channel), GFP_KERNEL); 311034ff6846SIoana Radulescu if (!channel) 311134ff6846SIoana Radulescu return NULL; 311234ff6846SIoana Radulescu 31135d8dccf8SIoana Ciornei channel->dpcon 
= dpaa2_eth_setup_dpcon(priv); 311402afa9c6SYueHaibing if (IS_ERR(channel->dpcon)) { 311502afa9c6SYueHaibing err = PTR_ERR(channel->dpcon); 311634ff6846SIoana Radulescu goto err_setup; 3117d7f5a9d8SIoana Ciornei } 311834ff6846SIoana Radulescu 311934ff6846SIoana Radulescu err = dpcon_get_attributes(priv->mc_io, 0, channel->dpcon->mc_handle, 312034ff6846SIoana Radulescu &attr); 312134ff6846SIoana Radulescu if (err) { 312234ff6846SIoana Radulescu dev_err(dev, "dpcon_get_attributes() failed\n"); 312334ff6846SIoana Radulescu goto err_get_attr; 312434ff6846SIoana Radulescu } 312534ff6846SIoana Radulescu 312634ff6846SIoana Radulescu channel->dpcon_id = attr.id; 312734ff6846SIoana Radulescu channel->ch_id = attr.qbman_ch_id; 312834ff6846SIoana Radulescu channel->priv = priv; 312934ff6846SIoana Radulescu 313034ff6846SIoana Radulescu return channel; 313134ff6846SIoana Radulescu 313234ff6846SIoana Radulescu err_get_attr: 31335d8dccf8SIoana Ciornei dpaa2_eth_free_dpcon(priv, channel->dpcon); 313434ff6846SIoana Radulescu err_setup: 313534ff6846SIoana Radulescu kfree(channel); 3136d7f5a9d8SIoana Ciornei return ERR_PTR(err); 313734ff6846SIoana Radulescu } 313834ff6846SIoana Radulescu 31395d8dccf8SIoana Ciornei static void dpaa2_eth_free_channel(struct dpaa2_eth_priv *priv, 314034ff6846SIoana Radulescu struct dpaa2_eth_channel *channel) 314134ff6846SIoana Radulescu { 31425d8dccf8SIoana Ciornei dpaa2_eth_free_dpcon(priv, channel->dpcon); 314334ff6846SIoana Radulescu kfree(channel); 314434ff6846SIoana Radulescu } 314534ff6846SIoana Radulescu 314634ff6846SIoana Radulescu /* DPIO setup: allocate and configure QBMan channels, setup core affinity 314734ff6846SIoana Radulescu * and register data availability notifications 314834ff6846SIoana Radulescu */ 31495d8dccf8SIoana Ciornei static int dpaa2_eth_setup_dpio(struct dpaa2_eth_priv *priv) 315034ff6846SIoana Radulescu { 315134ff6846SIoana Radulescu struct dpaa2_io_notification_ctx *nctx; 315234ff6846SIoana Radulescu struct 
dpaa2_eth_channel *channel; 315334ff6846SIoana Radulescu struct dpcon_notification_cfg dpcon_notif_cfg; 315434ff6846SIoana Radulescu struct device *dev = priv->net_dev->dev.parent; 315534ff6846SIoana Radulescu int i, err; 315634ff6846SIoana Radulescu 315734ff6846SIoana Radulescu /* We want the ability to spread ingress traffic (RX, TX conf) to as 315834ff6846SIoana Radulescu * many cores as possible, so we need one channel for each core 315934ff6846SIoana Radulescu * (unless there's fewer queues than cores, in which case the extra 316034ff6846SIoana Radulescu * channels would be wasted). 316134ff6846SIoana Radulescu * Allocate one channel per core and register it to the core's 316234ff6846SIoana Radulescu * affine DPIO. If not enough channels are available for all cores 316334ff6846SIoana Radulescu * or if some cores don't have an affine DPIO, there will be no 316434ff6846SIoana Radulescu * ingress frame processing on those cores. 316534ff6846SIoana Radulescu */ 316634ff6846SIoana Radulescu cpumask_clear(&priv->dpio_cpumask); 316734ff6846SIoana Radulescu for_each_online_cpu(i) { 316834ff6846SIoana Radulescu /* Try to allocate a channel */ 31695d8dccf8SIoana Ciornei channel = dpaa2_eth_alloc_channel(priv); 3170d7f5a9d8SIoana Ciornei if (IS_ERR_OR_NULL(channel)) { 3171bd8460faSIoana Radulescu err = PTR_ERR_OR_ZERO(channel); 317237fe9b98SSean Anderson if (err == -EPROBE_DEFER) 317337fe9b98SSean Anderson dev_dbg(dev, "waiting for affine channel\n"); 317437fe9b98SSean Anderson else 317534ff6846SIoana Radulescu dev_info(dev, 317634ff6846SIoana Radulescu "No affine channel for cpu %d and above\n", i); 317734ff6846SIoana Radulescu goto err_alloc_ch; 317834ff6846SIoana Radulescu } 317934ff6846SIoana Radulescu 318034ff6846SIoana Radulescu priv->channel[priv->num_channels] = channel; 318134ff6846SIoana Radulescu 318234ff6846SIoana Radulescu nctx = &channel->nctx; 318334ff6846SIoana Radulescu nctx->is_cdan = 1; 31845d8dccf8SIoana Ciornei nctx->cb = dpaa2_eth_cdan_cb; 
318534ff6846SIoana Radulescu nctx->id = channel->ch_id; 318634ff6846SIoana Radulescu nctx->desired_cpu = i; 318734ff6846SIoana Radulescu 318834ff6846SIoana Radulescu /* Register the new context */ 318934ff6846SIoana Radulescu channel->dpio = dpaa2_io_service_select(i); 319047441f7fSIoana Ciornei err = dpaa2_io_service_register(channel->dpio, nctx, dev); 319134ff6846SIoana Radulescu if (err) { 319234ff6846SIoana Radulescu dev_dbg(dev, "No affine DPIO for cpu %d\n", i); 319334ff6846SIoana Radulescu /* If no affine DPIO for this core, there's probably 319434ff6846SIoana Radulescu * none available for next cores either. Signal we want 319534ff6846SIoana Radulescu * to retry later, in case the DPIO devices weren't 319634ff6846SIoana Radulescu * probed yet. 319734ff6846SIoana Radulescu */ 319834ff6846SIoana Radulescu err = -EPROBE_DEFER; 319934ff6846SIoana Radulescu goto err_service_reg; 320034ff6846SIoana Radulescu } 320134ff6846SIoana Radulescu 320234ff6846SIoana Radulescu /* Register DPCON notification with MC */ 320334ff6846SIoana Radulescu dpcon_notif_cfg.dpio_id = nctx->dpio_id; 320434ff6846SIoana Radulescu dpcon_notif_cfg.priority = 0; 320534ff6846SIoana Radulescu dpcon_notif_cfg.user_ctx = nctx->qman64; 320634ff6846SIoana Radulescu err = dpcon_set_notification(priv->mc_io, 0, 320734ff6846SIoana Radulescu channel->dpcon->mc_handle, 320834ff6846SIoana Radulescu &dpcon_notif_cfg); 320934ff6846SIoana Radulescu if (err) { 321034ff6846SIoana Radulescu dev_err(dev, "dpcon_set_notification failed()\n"); 321134ff6846SIoana Radulescu goto err_set_cdan; 321234ff6846SIoana Radulescu } 321334ff6846SIoana Radulescu 321434ff6846SIoana Radulescu /* If we managed to allocate a channel and also found an affine 321534ff6846SIoana Radulescu * DPIO for this core, add it to the final mask 321634ff6846SIoana Radulescu */ 321734ff6846SIoana Radulescu cpumask_set_cpu(i, &priv->dpio_cpumask); 321834ff6846SIoana Radulescu priv->num_channels++; 321934ff6846SIoana Radulescu 
322034ff6846SIoana Radulescu /* Stop if we already have enough channels to accommodate all 322134ff6846SIoana Radulescu * RX and TX conf queues 322234ff6846SIoana Radulescu */ 3223b0e4f37bSIoana Ciocoi Radulescu if (priv->num_channels == priv->dpni_attrs.num_queues) 322434ff6846SIoana Radulescu break; 322534ff6846SIoana Radulescu } 322634ff6846SIoana Radulescu 322734ff6846SIoana Radulescu return 0; 322834ff6846SIoana Radulescu 322934ff6846SIoana Radulescu err_set_cdan: 323047441f7fSIoana Ciornei dpaa2_io_service_deregister(channel->dpio, nctx, dev); 323134ff6846SIoana Radulescu err_service_reg: 32325d8dccf8SIoana Ciornei dpaa2_eth_free_channel(priv, channel); 323334ff6846SIoana Radulescu err_alloc_ch: 32345aa4277dSIoana Ciornei if (err == -EPROBE_DEFER) { 32355aa4277dSIoana Ciornei for (i = 0; i < priv->num_channels; i++) { 32365aa4277dSIoana Ciornei channel = priv->channel[i]; 32375aa4277dSIoana Ciornei nctx = &channel->nctx; 32385aa4277dSIoana Ciornei dpaa2_io_service_deregister(channel->dpio, nctx, dev); 32395d8dccf8SIoana Ciornei dpaa2_eth_free_channel(priv, channel); 32405aa4277dSIoana Ciornei } 32415aa4277dSIoana Ciornei priv->num_channels = 0; 3242d7f5a9d8SIoana Ciornei return err; 32435aa4277dSIoana Ciornei } 3244d7f5a9d8SIoana Ciornei 324534ff6846SIoana Radulescu if (cpumask_empty(&priv->dpio_cpumask)) { 324634ff6846SIoana Radulescu dev_err(dev, "No cpu with an affine DPIO/DPCON\n"); 3247d7f5a9d8SIoana Ciornei return -ENODEV; 324834ff6846SIoana Radulescu } 324934ff6846SIoana Radulescu 325034ff6846SIoana Radulescu dev_info(dev, "Cores %*pbl available for processing ingress traffic\n", 325134ff6846SIoana Radulescu cpumask_pr_args(&priv->dpio_cpumask)); 325234ff6846SIoana Radulescu 325334ff6846SIoana Radulescu return 0; 325434ff6846SIoana Radulescu } 325534ff6846SIoana Radulescu 32565d8dccf8SIoana Ciornei static void dpaa2_eth_free_dpio(struct dpaa2_eth_priv *priv) 325734ff6846SIoana Radulescu { 325847441f7fSIoana Ciornei struct device *dev = 
priv->net_dev->dev.parent; 325934ff6846SIoana Radulescu struct dpaa2_eth_channel *ch; 326047441f7fSIoana Ciornei int i; 326134ff6846SIoana Radulescu 326234ff6846SIoana Radulescu /* deregister CDAN notifications and free channels */ 326334ff6846SIoana Radulescu for (i = 0; i < priv->num_channels; i++) { 326434ff6846SIoana Radulescu ch = priv->channel[i]; 326547441f7fSIoana Ciornei dpaa2_io_service_deregister(ch->dpio, &ch->nctx, dev); 32665d8dccf8SIoana Ciornei dpaa2_eth_free_channel(priv, ch); 326734ff6846SIoana Radulescu } 326834ff6846SIoana Radulescu } 326934ff6846SIoana Radulescu 32705d8dccf8SIoana Ciornei static struct dpaa2_eth_channel *dpaa2_eth_get_affine_channel(struct dpaa2_eth_priv *priv, 327134ff6846SIoana Radulescu int cpu) 327234ff6846SIoana Radulescu { 327334ff6846SIoana Radulescu struct device *dev = priv->net_dev->dev.parent; 327434ff6846SIoana Radulescu int i; 327534ff6846SIoana Radulescu 327634ff6846SIoana Radulescu for (i = 0; i < priv->num_channels; i++) 327734ff6846SIoana Radulescu if (priv->channel[i]->nctx.desired_cpu == cpu) 327834ff6846SIoana Radulescu return priv->channel[i]; 327934ff6846SIoana Radulescu 328034ff6846SIoana Radulescu /* We should never get here. 
Issue a warning and return 328134ff6846SIoana Radulescu * the first channel, because it's still better than nothing 328234ff6846SIoana Radulescu */ 328334ff6846SIoana Radulescu dev_warn(dev, "No affine channel found for cpu %d\n", cpu); 328434ff6846SIoana Radulescu 328534ff6846SIoana Radulescu return priv->channel[0]; 328634ff6846SIoana Radulescu } 328734ff6846SIoana Radulescu 32885d8dccf8SIoana Ciornei static void dpaa2_eth_set_fq_affinity(struct dpaa2_eth_priv *priv) 328934ff6846SIoana Radulescu { 329034ff6846SIoana Radulescu struct device *dev = priv->net_dev->dev.parent; 329134ff6846SIoana Radulescu struct dpaa2_eth_fq *fq; 329234ff6846SIoana Radulescu int rx_cpu, txc_cpu; 329306d5b179SIoana Radulescu int i; 329434ff6846SIoana Radulescu 329534ff6846SIoana Radulescu /* For each FQ, pick one channel/CPU to deliver frames to. 329634ff6846SIoana Radulescu * This may well change at runtime, either through irqbalance or 329734ff6846SIoana Radulescu * through direct user intervention. 329834ff6846SIoana Radulescu */ 329934ff6846SIoana Radulescu rx_cpu = txc_cpu = cpumask_first(&priv->dpio_cpumask); 330034ff6846SIoana Radulescu 330134ff6846SIoana Radulescu for (i = 0; i < priv->num_fqs; i++) { 330234ff6846SIoana Radulescu fq = &priv->fq[i]; 330334ff6846SIoana Radulescu switch (fq->type) { 330434ff6846SIoana Radulescu case DPAA2_RX_FQ: 3305061d631fSIoana Ciornei case DPAA2_RX_ERR_FQ: 330634ff6846SIoana Radulescu fq->target_cpu = rx_cpu; 330734ff6846SIoana Radulescu rx_cpu = cpumask_next(rx_cpu, &priv->dpio_cpumask); 330834ff6846SIoana Radulescu if (rx_cpu >= nr_cpu_ids) 330934ff6846SIoana Radulescu rx_cpu = cpumask_first(&priv->dpio_cpumask); 331034ff6846SIoana Radulescu break; 331134ff6846SIoana Radulescu case DPAA2_TX_CONF_FQ: 331234ff6846SIoana Radulescu fq->target_cpu = txc_cpu; 331334ff6846SIoana Radulescu txc_cpu = cpumask_next(txc_cpu, &priv->dpio_cpumask); 331434ff6846SIoana Radulescu if (txc_cpu >= nr_cpu_ids) 331534ff6846SIoana Radulescu txc_cpu = 
cpumask_first(&priv->dpio_cpumask); 331634ff6846SIoana Radulescu break; 331734ff6846SIoana Radulescu default: 331834ff6846SIoana Radulescu dev_err(dev, "Unknown FQ type: %d\n", fq->type); 331934ff6846SIoana Radulescu } 33205d8dccf8SIoana Ciornei fq->channel = dpaa2_eth_get_affine_channel(priv, fq->target_cpu); 332134ff6846SIoana Radulescu } 332206d5b179SIoana Radulescu 332306d5b179SIoana Radulescu update_xps(priv); 332434ff6846SIoana Radulescu } 332534ff6846SIoana Radulescu 33265d8dccf8SIoana Ciornei static void dpaa2_eth_setup_fqs(struct dpaa2_eth_priv *priv) 332734ff6846SIoana Radulescu { 3328685e39eaSIoana Radulescu int i, j; 332934ff6846SIoana Radulescu 333034ff6846SIoana Radulescu /* We have one TxConf FQ per Tx flow. 333134ff6846SIoana Radulescu * The number of Tx and Rx queues is the same. 333234ff6846SIoana Radulescu * Tx queues come first in the fq array. 333334ff6846SIoana Radulescu */ 333434ff6846SIoana Radulescu for (i = 0; i < dpaa2_eth_queue_count(priv); i++) { 333534ff6846SIoana Radulescu priv->fq[priv->num_fqs].type = DPAA2_TX_CONF_FQ; 333634ff6846SIoana Radulescu priv->fq[priv->num_fqs].consume = dpaa2_eth_tx_conf; 333734ff6846SIoana Radulescu priv->fq[priv->num_fqs++].flowid = (u16)i; 333834ff6846SIoana Radulescu } 333934ff6846SIoana Radulescu 3340685e39eaSIoana Radulescu for (j = 0; j < dpaa2_eth_tc_count(priv); j++) { 334134ff6846SIoana Radulescu for (i = 0; i < dpaa2_eth_queue_count(priv); i++) { 334234ff6846SIoana Radulescu priv->fq[priv->num_fqs].type = DPAA2_RX_FQ; 334334ff6846SIoana Radulescu priv->fq[priv->num_fqs].consume = dpaa2_eth_rx; 3344685e39eaSIoana Radulescu priv->fq[priv->num_fqs].tc = (u8)j; 334534ff6846SIoana Radulescu priv->fq[priv->num_fqs++].flowid = (u16)i; 334634ff6846SIoana Radulescu } 3347685e39eaSIoana Radulescu } 334834ff6846SIoana Radulescu 3349061d631fSIoana Ciornei /* We have exactly one Rx error queue per DPNI */ 3350061d631fSIoana Ciornei priv->fq[priv->num_fqs].type = DPAA2_RX_ERR_FQ; 3351061d631fSIoana Ciornei 
priv->fq[priv->num_fqs++].consume = dpaa2_eth_rx_err; 3352061d631fSIoana Ciornei 335334ff6846SIoana Radulescu /* For each FQ, decide on which core to process incoming frames */ 33545d8dccf8SIoana Ciornei dpaa2_eth_set_fq_affinity(priv); 335534ff6846SIoana Radulescu } 335634ff6846SIoana Radulescu 3357095174daSRobert-Ionut Alexa /* Allocate and configure a buffer pool */ 3358095174daSRobert-Ionut Alexa struct dpaa2_eth_bp *dpaa2_eth_allocate_dpbp(struct dpaa2_eth_priv *priv) 335934ff6846SIoana Radulescu { 336034ff6846SIoana Radulescu struct device *dev = priv->net_dev->dev.parent; 3361095174daSRobert-Ionut Alexa struct fsl_mc_device *dpbp_dev; 336234ff6846SIoana Radulescu struct dpbp_attr dpbp_attrs; 3363095174daSRobert-Ionut Alexa struct dpaa2_eth_bp *bp; 3364095174daSRobert-Ionut Alexa int err; 336534ff6846SIoana Radulescu 336634ff6846SIoana Radulescu err = fsl_mc_object_allocate(to_fsl_mc_device(dev), FSL_MC_POOL_DPBP, 336734ff6846SIoana Radulescu &dpbp_dev); 336834ff6846SIoana Radulescu if (err) { 3369d7f5a9d8SIoana Ciornei if (err == -ENXIO) 3370d7f5a9d8SIoana Ciornei err = -EPROBE_DEFER; 3371d7f5a9d8SIoana Ciornei else 337234ff6846SIoana Radulescu dev_err(dev, "DPBP device allocation failed\n"); 3373095174daSRobert-Ionut Alexa return ERR_PTR(err); 337434ff6846SIoana Radulescu } 337534ff6846SIoana Radulescu 3376095174daSRobert-Ionut Alexa bp = kzalloc(sizeof(*bp), GFP_KERNEL); 3377095174daSRobert-Ionut Alexa if (!bp) { 3378095174daSRobert-Ionut Alexa err = -ENOMEM; 3379095174daSRobert-Ionut Alexa goto err_alloc; 3380095174daSRobert-Ionut Alexa } 338134ff6846SIoana Radulescu 3382095174daSRobert-Ionut Alexa err = dpbp_open(priv->mc_io, 0, dpbp_dev->obj_desc.id, 338334ff6846SIoana Radulescu &dpbp_dev->mc_handle); 338434ff6846SIoana Radulescu if (err) { 338534ff6846SIoana Radulescu dev_err(dev, "dpbp_open() failed\n"); 338634ff6846SIoana Radulescu goto err_open; 338734ff6846SIoana Radulescu } 338834ff6846SIoana Radulescu 338934ff6846SIoana Radulescu err = 
dpbp_reset(priv->mc_io, 0, dpbp_dev->mc_handle); 339034ff6846SIoana Radulescu if (err) { 339134ff6846SIoana Radulescu dev_err(dev, "dpbp_reset() failed\n"); 339234ff6846SIoana Radulescu goto err_reset; 339334ff6846SIoana Radulescu } 339434ff6846SIoana Radulescu 339534ff6846SIoana Radulescu err = dpbp_enable(priv->mc_io, 0, dpbp_dev->mc_handle); 339634ff6846SIoana Radulescu if (err) { 339734ff6846SIoana Radulescu dev_err(dev, "dpbp_enable() failed\n"); 339834ff6846SIoana Radulescu goto err_enable; 339934ff6846SIoana Radulescu } 340034ff6846SIoana Radulescu 340134ff6846SIoana Radulescu err = dpbp_get_attributes(priv->mc_io, 0, dpbp_dev->mc_handle, 340234ff6846SIoana Radulescu &dpbp_attrs); 340334ff6846SIoana Radulescu if (err) { 340434ff6846SIoana Radulescu dev_err(dev, "dpbp_get_attributes() failed\n"); 340534ff6846SIoana Radulescu goto err_get_attr; 340634ff6846SIoana Radulescu } 340734ff6846SIoana Radulescu 3408095174daSRobert-Ionut Alexa bp->dev = dpbp_dev; 3409095174daSRobert-Ionut Alexa bp->bpid = dpbp_attrs.bpid; 3410095174daSRobert-Ionut Alexa 3411095174daSRobert-Ionut Alexa return bp; 341234ff6846SIoana Radulescu 341334ff6846SIoana Radulescu err_get_attr: 341434ff6846SIoana Radulescu dpbp_disable(priv->mc_io, 0, dpbp_dev->mc_handle); 341534ff6846SIoana Radulescu err_enable: 341634ff6846SIoana Radulescu err_reset: 341734ff6846SIoana Radulescu dpbp_close(priv->mc_io, 0, dpbp_dev->mc_handle); 341834ff6846SIoana Radulescu err_open: 3419095174daSRobert-Ionut Alexa kfree(bp); 3420095174daSRobert-Ionut Alexa err_alloc: 342134ff6846SIoana Radulescu fsl_mc_object_free(dpbp_dev); 342234ff6846SIoana Radulescu 3423095174daSRobert-Ionut Alexa return ERR_PTR(err); 342434ff6846SIoana Radulescu } 342534ff6846SIoana Radulescu 3426095174daSRobert-Ionut Alexa static int dpaa2_eth_setup_default_dpbp(struct dpaa2_eth_priv *priv) 342734ff6846SIoana Radulescu { 3428095174daSRobert-Ionut Alexa struct dpaa2_eth_bp *bp; 3429095174daSRobert-Ionut Alexa int i; 3430095174daSRobert-Ionut 
Alexa 3431095174daSRobert-Ionut Alexa bp = dpaa2_eth_allocate_dpbp(priv); 3432095174daSRobert-Ionut Alexa if (IS_ERR(bp)) 3433095174daSRobert-Ionut Alexa return PTR_ERR(bp); 3434095174daSRobert-Ionut Alexa 3435095174daSRobert-Ionut Alexa priv->bp[DPAA2_ETH_DEFAULT_BP_IDX] = bp; 3436095174daSRobert-Ionut Alexa priv->num_bps++; 3437095174daSRobert-Ionut Alexa 3438095174daSRobert-Ionut Alexa for (i = 0; i < priv->num_channels; i++) 3439095174daSRobert-Ionut Alexa priv->channel[i]->bp = bp; 3440095174daSRobert-Ionut Alexa 3441095174daSRobert-Ionut Alexa return 0; 3442095174daSRobert-Ionut Alexa } 3443095174daSRobert-Ionut Alexa 3444095174daSRobert-Ionut Alexa void dpaa2_eth_free_dpbp(struct dpaa2_eth_priv *priv, struct dpaa2_eth_bp *bp) 3445095174daSRobert-Ionut Alexa { 3446095174daSRobert-Ionut Alexa int idx_bp; 3447095174daSRobert-Ionut Alexa 3448095174daSRobert-Ionut Alexa /* Find the index at which this BP is stored */ 3449095174daSRobert-Ionut Alexa for (idx_bp = 0; idx_bp < priv->num_bps; idx_bp++) 3450095174daSRobert-Ionut Alexa if (priv->bp[idx_bp] == bp) 3451095174daSRobert-Ionut Alexa break; 3452095174daSRobert-Ionut Alexa 3453095174daSRobert-Ionut Alexa /* Drain the pool and disable the associated MC object */ 3454095174daSRobert-Ionut Alexa dpaa2_eth_drain_pool(priv, bp->bpid); 3455095174daSRobert-Ionut Alexa dpbp_disable(priv->mc_io, 0, bp->dev->mc_handle); 3456095174daSRobert-Ionut Alexa dpbp_close(priv->mc_io, 0, bp->dev->mc_handle); 3457095174daSRobert-Ionut Alexa fsl_mc_object_free(bp->dev); 3458095174daSRobert-Ionut Alexa kfree(bp); 3459095174daSRobert-Ionut Alexa 3460095174daSRobert-Ionut Alexa /* Move the last in use DPBP over in this position */ 3461095174daSRobert-Ionut Alexa priv->bp[idx_bp] = priv->bp[priv->num_bps - 1]; 3462095174daSRobert-Ionut Alexa priv->num_bps--; 3463095174daSRobert-Ionut Alexa } 3464095174daSRobert-Ionut Alexa 3465095174daSRobert-Ionut Alexa static void dpaa2_eth_free_dpbps(struct dpaa2_eth_priv *priv) 
3466095174daSRobert-Ionut Alexa { 3467095174daSRobert-Ionut Alexa int i; 3468095174daSRobert-Ionut Alexa 3469095174daSRobert-Ionut Alexa for (i = 0; i < priv->num_bps; i++) 3470095174daSRobert-Ionut Alexa dpaa2_eth_free_dpbp(priv, priv->bp[i]); 347134ff6846SIoana Radulescu } 347234ff6846SIoana Radulescu 34735d8dccf8SIoana Ciornei static int dpaa2_eth_set_buffer_layout(struct dpaa2_eth_priv *priv) 347434ff6846SIoana Radulescu { 347534ff6846SIoana Radulescu struct device *dev = priv->net_dev->dev.parent; 347634ff6846SIoana Radulescu struct dpni_buffer_layout buf_layout = {0}; 347727c87486SIoana Ciocoi Radulescu u16 rx_buf_align; 347834ff6846SIoana Radulescu int err; 347934ff6846SIoana Radulescu 348034ff6846SIoana Radulescu /* We need to check for WRIOP version 1.0.0, but depending on the MC 348134ff6846SIoana Radulescu * version, this number is not always provided correctly on rev1. 348234ff6846SIoana Radulescu * We need to check for both alternatives in this situation. 348334ff6846SIoana Radulescu */ 348434ff6846SIoana Radulescu if (priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(0, 0, 0) || 348534ff6846SIoana Radulescu priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(1, 0, 0)) 348627c87486SIoana Ciocoi Radulescu rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN_REV1; 348734ff6846SIoana Radulescu else 348827c87486SIoana Ciocoi Radulescu rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN; 348934ff6846SIoana Radulescu 3490efa6a7d0SIoana Ciornei /* We need to ensure that the buffer size seen by WRIOP is a multiple 3491efa6a7d0SIoana Ciornei * of 64 or 256 bytes depending on the WRIOP version. 
3492efa6a7d0SIoana Ciornei */ 3493efa6a7d0SIoana Ciornei priv->rx_buf_size = ALIGN_DOWN(DPAA2_ETH_RX_BUF_SIZE, rx_buf_align); 3494efa6a7d0SIoana Ciornei 349534ff6846SIoana Radulescu /* tx buffer */ 349634ff6846SIoana Radulescu buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE; 349734ff6846SIoana Radulescu buf_layout.pass_timestamp = true; 3498c5521189SYangbo Lu buf_layout.pass_frame_status = true; 349934ff6846SIoana Radulescu buf_layout.options = DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE | 3500c5521189SYangbo Lu DPNI_BUF_LAYOUT_OPT_TIMESTAMP | 3501c5521189SYangbo Lu DPNI_BUF_LAYOUT_OPT_FRAME_STATUS; 350234ff6846SIoana Radulescu err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token, 350334ff6846SIoana Radulescu DPNI_QUEUE_TX, &buf_layout); 350434ff6846SIoana Radulescu if (err) { 350534ff6846SIoana Radulescu dev_err(dev, "dpni_set_buffer_layout(TX) failed\n"); 350634ff6846SIoana Radulescu return err; 350734ff6846SIoana Radulescu } 350834ff6846SIoana Radulescu 350934ff6846SIoana Radulescu /* tx-confirm buffer */ 3510c5521189SYangbo Lu buf_layout.options = DPNI_BUF_LAYOUT_OPT_TIMESTAMP | 3511c5521189SYangbo Lu DPNI_BUF_LAYOUT_OPT_FRAME_STATUS; 351234ff6846SIoana Radulescu err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token, 351334ff6846SIoana Radulescu DPNI_QUEUE_TX_CONFIRM, &buf_layout); 351434ff6846SIoana Radulescu if (err) { 351534ff6846SIoana Radulescu dev_err(dev, "dpni_set_buffer_layout(TX_CONF) failed\n"); 351634ff6846SIoana Radulescu return err; 351734ff6846SIoana Radulescu } 351834ff6846SIoana Radulescu 351934ff6846SIoana Radulescu /* Now that we've set our tx buffer layout, retrieve the minimum 352034ff6846SIoana Radulescu * required tx data offset. 
352134ff6846SIoana Radulescu */ 352234ff6846SIoana Radulescu err = dpni_get_tx_data_offset(priv->mc_io, 0, priv->mc_token, 352334ff6846SIoana Radulescu &priv->tx_data_offset); 352434ff6846SIoana Radulescu if (err) { 352534ff6846SIoana Radulescu dev_err(dev, "dpni_get_tx_data_offset() failed\n"); 352634ff6846SIoana Radulescu return err; 352734ff6846SIoana Radulescu } 352834ff6846SIoana Radulescu 352934ff6846SIoana Radulescu if ((priv->tx_data_offset % 64) != 0) 353034ff6846SIoana Radulescu dev_warn(dev, "Tx data offset (%d) not a multiple of 64B\n", 353134ff6846SIoana Radulescu priv->tx_data_offset); 353234ff6846SIoana Radulescu 353334ff6846SIoana Radulescu /* rx buffer */ 353434ff6846SIoana Radulescu buf_layout.pass_frame_status = true; 353534ff6846SIoana Radulescu buf_layout.pass_parser_result = true; 353627c87486SIoana Ciocoi Radulescu buf_layout.data_align = rx_buf_align; 353734ff6846SIoana Radulescu buf_layout.data_head_room = dpaa2_eth_rx_head_room(priv); 353834ff6846SIoana Radulescu buf_layout.private_data_size = 0; 353934ff6846SIoana Radulescu buf_layout.options = DPNI_BUF_LAYOUT_OPT_PARSER_RESULT | 354034ff6846SIoana Radulescu DPNI_BUF_LAYOUT_OPT_FRAME_STATUS | 354134ff6846SIoana Radulescu DPNI_BUF_LAYOUT_OPT_DATA_ALIGN | 354234ff6846SIoana Radulescu DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM | 354334ff6846SIoana Radulescu DPNI_BUF_LAYOUT_OPT_TIMESTAMP; 354434ff6846SIoana Radulescu err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token, 354534ff6846SIoana Radulescu DPNI_QUEUE_RX, &buf_layout); 354634ff6846SIoana Radulescu if (err) { 354734ff6846SIoana Radulescu dev_err(dev, "dpni_set_buffer_layout(RX) failed\n"); 354834ff6846SIoana Radulescu return err; 354934ff6846SIoana Radulescu } 355034ff6846SIoana Radulescu 355134ff6846SIoana Radulescu return 0; 355234ff6846SIoana Radulescu } 355334ff6846SIoana Radulescu 35541fa0f68cSIoana Ciocoi Radulescu #define DPNI_ENQUEUE_FQID_VER_MAJOR 7 35551fa0f68cSIoana Ciocoi Radulescu #define DPNI_ENQUEUE_FQID_VER_MINOR 9 
35561fa0f68cSIoana Ciocoi Radulescu 35571fa0f68cSIoana Ciocoi Radulescu static inline int dpaa2_eth_enqueue_qd(struct dpaa2_eth_priv *priv, 35581fa0f68cSIoana Ciocoi Radulescu struct dpaa2_eth_fq *fq, 355948c0481eSIoana Ciornei struct dpaa2_fd *fd, u8 prio, 35606ff80447SIoana Ciornei u32 num_frames __always_unused, 356148c0481eSIoana Ciornei int *frames_enqueued) 35621fa0f68cSIoana Ciocoi Radulescu { 356348c0481eSIoana Ciornei int err; 356448c0481eSIoana Ciornei 356548c0481eSIoana Ciornei err = dpaa2_io_service_enqueue_qd(fq->channel->dpio, 35661fa0f68cSIoana Ciocoi Radulescu priv->tx_qdid, prio, 35671fa0f68cSIoana Ciocoi Radulescu fq->tx_qdbin, fd); 356848c0481eSIoana Ciornei if (!err && frames_enqueued) 356948c0481eSIoana Ciornei *frames_enqueued = 1; 357048c0481eSIoana Ciornei return err; 35711fa0f68cSIoana Ciocoi Radulescu } 35721fa0f68cSIoana Ciocoi Radulescu 35736ff80447SIoana Ciornei static inline int dpaa2_eth_enqueue_fq_multiple(struct dpaa2_eth_priv *priv, 35741fa0f68cSIoana Ciocoi Radulescu struct dpaa2_eth_fq *fq, 35756ff80447SIoana Ciornei struct dpaa2_fd *fd, 35766ff80447SIoana Ciornei u8 prio, u32 num_frames, 357748c0481eSIoana Ciornei int *frames_enqueued) 35781fa0f68cSIoana Ciocoi Radulescu { 357948c0481eSIoana Ciornei int err; 358048c0481eSIoana Ciornei 35816ff80447SIoana Ciornei err = dpaa2_io_service_enqueue_multiple_fq(fq->channel->dpio, 35826ff80447SIoana Ciornei fq->tx_fqid[prio], 35836ff80447SIoana Ciornei fd, num_frames); 35846ff80447SIoana Ciornei 35856ff80447SIoana Ciornei if (err == 0) 35866ff80447SIoana Ciornei return -EBUSY; 35876ff80447SIoana Ciornei 35886ff80447SIoana Ciornei if (frames_enqueued) 35896ff80447SIoana Ciornei *frames_enqueued = err; 35906ff80447SIoana Ciornei return 0; 35911fa0f68cSIoana Ciocoi Radulescu } 35921fa0f68cSIoana Ciocoi Radulescu 35935d8dccf8SIoana Ciornei static void dpaa2_eth_set_enqueue_mode(struct dpaa2_eth_priv *priv) 35941fa0f68cSIoana Ciocoi Radulescu { 35951fa0f68cSIoana Ciocoi Radulescu if 
(dpaa2_eth_cmp_dpni_ver(priv, DPNI_ENQUEUE_FQID_VER_MAJOR, 35961fa0f68cSIoana Ciocoi Radulescu DPNI_ENQUEUE_FQID_VER_MINOR) < 0) 35971fa0f68cSIoana Ciocoi Radulescu priv->enqueue = dpaa2_eth_enqueue_qd; 35981fa0f68cSIoana Ciocoi Radulescu else 35996ff80447SIoana Ciornei priv->enqueue = dpaa2_eth_enqueue_fq_multiple; 36001fa0f68cSIoana Ciocoi Radulescu } 36011fa0f68cSIoana Ciocoi Radulescu 36025d8dccf8SIoana Ciornei static int dpaa2_eth_set_pause(struct dpaa2_eth_priv *priv) 36038eb3cef8SIoana Radulescu { 36048eb3cef8SIoana Radulescu struct device *dev = priv->net_dev->dev.parent; 36058eb3cef8SIoana Radulescu struct dpni_link_cfg link_cfg = {0}; 36068eb3cef8SIoana Radulescu int err; 36078eb3cef8SIoana Radulescu 36088eb3cef8SIoana Radulescu /* Get the default link options so we don't override other flags */ 36098eb3cef8SIoana Radulescu err = dpni_get_link_cfg(priv->mc_io, 0, priv->mc_token, &link_cfg); 36108eb3cef8SIoana Radulescu if (err) { 36118eb3cef8SIoana Radulescu dev_err(dev, "dpni_get_link_cfg() failed\n"); 36128eb3cef8SIoana Radulescu return err; 36138eb3cef8SIoana Radulescu } 36148eb3cef8SIoana Radulescu 36158eb3cef8SIoana Radulescu /* By default, enable both Rx and Tx pause frames */ 36168eb3cef8SIoana Radulescu link_cfg.options |= DPNI_LINK_OPT_PAUSE; 36178eb3cef8SIoana Radulescu link_cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE; 36188eb3cef8SIoana Radulescu err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &link_cfg); 36198eb3cef8SIoana Radulescu if (err) { 36208eb3cef8SIoana Radulescu dev_err(dev, "dpni_set_link_cfg() failed\n"); 36218eb3cef8SIoana Radulescu return err; 36228eb3cef8SIoana Radulescu } 36238eb3cef8SIoana Radulescu 36248eb3cef8SIoana Radulescu priv->link_state.options = link_cfg.options; 36258eb3cef8SIoana Radulescu 36268eb3cef8SIoana Radulescu return 0; 36278eb3cef8SIoana Radulescu } 36288eb3cef8SIoana Radulescu 36295d8dccf8SIoana Ciornei static void dpaa2_eth_update_tx_fqids(struct dpaa2_eth_priv *priv) 3630a690af4fSIoana Radulescu { 
3631a690af4fSIoana Radulescu struct dpni_queue_id qid = {0}; 3632a690af4fSIoana Radulescu struct dpaa2_eth_fq *fq; 3633a690af4fSIoana Radulescu struct dpni_queue queue; 3634a690af4fSIoana Radulescu int i, j, err; 3635a690af4fSIoana Radulescu 3636a690af4fSIoana Radulescu /* We only use Tx FQIDs for FQID-based enqueue, so check 3637a690af4fSIoana Radulescu * if DPNI version supports it before updating FQIDs 3638a690af4fSIoana Radulescu */ 3639a690af4fSIoana Radulescu if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_ENQUEUE_FQID_VER_MAJOR, 3640a690af4fSIoana Radulescu DPNI_ENQUEUE_FQID_VER_MINOR) < 0) 3641a690af4fSIoana Radulescu return; 3642a690af4fSIoana Radulescu 3643a690af4fSIoana Radulescu for (i = 0; i < priv->num_fqs; i++) { 3644a690af4fSIoana Radulescu fq = &priv->fq[i]; 3645a690af4fSIoana Radulescu if (fq->type != DPAA2_TX_CONF_FQ) 3646a690af4fSIoana Radulescu continue; 3647a690af4fSIoana Radulescu for (j = 0; j < dpaa2_eth_tc_count(priv); j++) { 3648a690af4fSIoana Radulescu err = dpni_get_queue(priv->mc_io, 0, priv->mc_token, 3649a690af4fSIoana Radulescu DPNI_QUEUE_TX, j, fq->flowid, 3650a690af4fSIoana Radulescu &queue, &qid); 3651a690af4fSIoana Radulescu if (err) 3652a690af4fSIoana Radulescu goto out_err; 3653a690af4fSIoana Radulescu 3654a690af4fSIoana Radulescu fq->tx_fqid[j] = qid.fqid; 3655a690af4fSIoana Radulescu if (fq->tx_fqid[j] == 0) 3656a690af4fSIoana Radulescu goto out_err; 3657a690af4fSIoana Radulescu } 3658a690af4fSIoana Radulescu } 3659a690af4fSIoana Radulescu 36606ff80447SIoana Ciornei priv->enqueue = dpaa2_eth_enqueue_fq_multiple; 3661a690af4fSIoana Radulescu 3662a690af4fSIoana Radulescu return; 3663a690af4fSIoana Radulescu 3664a690af4fSIoana Radulescu out_err: 3665a690af4fSIoana Radulescu netdev_info(priv->net_dev, 3666a690af4fSIoana Radulescu "Error reading Tx FQID, fallback to QDID-based enqueue\n"); 3667a690af4fSIoana Radulescu priv->enqueue = dpaa2_eth_enqueue_qd; 3668a690af4fSIoana Radulescu } 3669a690af4fSIoana Radulescu 36706aa90fe2SIoana 
Radulescu /* Configure ingress classification based on VLAN PCP */ 36715d8dccf8SIoana Ciornei static int dpaa2_eth_set_vlan_qos(struct dpaa2_eth_priv *priv) 36726aa90fe2SIoana Radulescu { 36736aa90fe2SIoana Radulescu struct device *dev = priv->net_dev->dev.parent; 36746aa90fe2SIoana Radulescu struct dpkg_profile_cfg kg_cfg = {0}; 36756aa90fe2SIoana Radulescu struct dpni_qos_tbl_cfg qos_cfg = {0}; 36766aa90fe2SIoana Radulescu struct dpni_rule_cfg key_params; 36776aa90fe2SIoana Radulescu void *dma_mem, *key, *mask; 36786aa90fe2SIoana Radulescu u8 key_size = 2; /* VLAN TCI field */ 36796aa90fe2SIoana Radulescu int i, pcp, err; 36806aa90fe2SIoana Radulescu 36816aa90fe2SIoana Radulescu /* VLAN-based classification only makes sense if we have multiple 36826aa90fe2SIoana Radulescu * traffic classes. 36836aa90fe2SIoana Radulescu * Also, we need to extract just the 3-bit PCP field from the VLAN 36846aa90fe2SIoana Radulescu * header and we can only do that by using a mask 36856aa90fe2SIoana Radulescu */ 36866aa90fe2SIoana Radulescu if (dpaa2_eth_tc_count(priv) == 1 || !dpaa2_eth_fs_mask_enabled(priv)) { 36876aa90fe2SIoana Radulescu dev_dbg(dev, "VLAN-based QoS classification not supported\n"); 36886aa90fe2SIoana Radulescu return -EOPNOTSUPP; 36896aa90fe2SIoana Radulescu } 36906aa90fe2SIoana Radulescu 36916aa90fe2SIoana Radulescu dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL); 36926aa90fe2SIoana Radulescu if (!dma_mem) 36936aa90fe2SIoana Radulescu return -ENOMEM; 36946aa90fe2SIoana Radulescu 36956aa90fe2SIoana Radulescu kg_cfg.num_extracts = 1; 36966aa90fe2SIoana Radulescu kg_cfg.extracts[0].type = DPKG_EXTRACT_FROM_HDR; 36976aa90fe2SIoana Radulescu kg_cfg.extracts[0].extract.from_hdr.prot = NET_PROT_VLAN; 36986aa90fe2SIoana Radulescu kg_cfg.extracts[0].extract.from_hdr.type = DPKG_FULL_FIELD; 36996aa90fe2SIoana Radulescu kg_cfg.extracts[0].extract.from_hdr.field = NH_FLD_VLAN_TCI; 37006aa90fe2SIoana Radulescu 37016aa90fe2SIoana Radulescu err = 
dpni_prepare_key_cfg(&kg_cfg, dma_mem); 37026aa90fe2SIoana Radulescu if (err) { 37036aa90fe2SIoana Radulescu dev_err(dev, "dpni_prepare_key_cfg failed\n"); 37046aa90fe2SIoana Radulescu goto out_free_tbl; 37056aa90fe2SIoana Radulescu } 37066aa90fe2SIoana Radulescu 37076aa90fe2SIoana Radulescu /* set QoS table */ 37086aa90fe2SIoana Radulescu qos_cfg.default_tc = 0; 37096aa90fe2SIoana Radulescu qos_cfg.discard_on_miss = 0; 37106aa90fe2SIoana Radulescu qos_cfg.key_cfg_iova = dma_map_single(dev, dma_mem, 37116aa90fe2SIoana Radulescu DPAA2_CLASSIFIER_DMA_SIZE, 37126aa90fe2SIoana Radulescu DMA_TO_DEVICE); 37136aa90fe2SIoana Radulescu if (dma_mapping_error(dev, qos_cfg.key_cfg_iova)) { 37146aa90fe2SIoana Radulescu dev_err(dev, "QoS table DMA mapping failed\n"); 37156aa90fe2SIoana Radulescu err = -ENOMEM; 37166aa90fe2SIoana Radulescu goto out_free_tbl; 37176aa90fe2SIoana Radulescu } 37186aa90fe2SIoana Radulescu 37196aa90fe2SIoana Radulescu err = dpni_set_qos_table(priv->mc_io, 0, priv->mc_token, &qos_cfg); 37206aa90fe2SIoana Radulescu if (err) { 37216aa90fe2SIoana Radulescu dev_err(dev, "dpni_set_qos_table failed\n"); 37226aa90fe2SIoana Radulescu goto out_unmap_tbl; 37236aa90fe2SIoana Radulescu } 37246aa90fe2SIoana Radulescu 37256aa90fe2SIoana Radulescu /* Add QoS table entries */ 37266aa90fe2SIoana Radulescu key = kzalloc(key_size * 2, GFP_KERNEL); 37276aa90fe2SIoana Radulescu if (!key) { 37286aa90fe2SIoana Radulescu err = -ENOMEM; 37296aa90fe2SIoana Radulescu goto out_unmap_tbl; 37306aa90fe2SIoana Radulescu } 37316aa90fe2SIoana Radulescu mask = key + key_size; 37326aa90fe2SIoana Radulescu *(__be16 *)mask = cpu_to_be16(VLAN_PRIO_MASK); 37336aa90fe2SIoana Radulescu 37346aa90fe2SIoana Radulescu key_params.key_iova = dma_map_single(dev, key, key_size * 2, 37356aa90fe2SIoana Radulescu DMA_TO_DEVICE); 37366aa90fe2SIoana Radulescu if (dma_mapping_error(dev, key_params.key_iova)) { 37376aa90fe2SIoana Radulescu dev_err(dev, "Qos table entry DMA mapping failed\n"); 
37386aa90fe2SIoana Radulescu err = -ENOMEM; 37396aa90fe2SIoana Radulescu goto out_free_key; 37406aa90fe2SIoana Radulescu } 37416aa90fe2SIoana Radulescu 37426aa90fe2SIoana Radulescu key_params.mask_iova = key_params.key_iova + key_size; 37436aa90fe2SIoana Radulescu key_params.key_size = key_size; 37446aa90fe2SIoana Radulescu 37456aa90fe2SIoana Radulescu /* We add rules for PCP-based distribution starting with highest 37466aa90fe2SIoana Radulescu * priority (VLAN PCP = 7). If this DPNI doesn't have enough traffic 37476aa90fe2SIoana Radulescu * classes to accommodate all priority levels, the lowest ones end up 37486aa90fe2SIoana Radulescu * on TC 0 which was configured as default 37496aa90fe2SIoana Radulescu */ 37506aa90fe2SIoana Radulescu for (i = dpaa2_eth_tc_count(priv) - 1, pcp = 7; i >= 0; i--, pcp--) { 37516aa90fe2SIoana Radulescu *(__be16 *)key = cpu_to_be16(pcp << VLAN_PRIO_SHIFT); 37526aa90fe2SIoana Radulescu dma_sync_single_for_device(dev, key_params.key_iova, 37536aa90fe2SIoana Radulescu key_size * 2, DMA_TO_DEVICE); 37546aa90fe2SIoana Radulescu 37556aa90fe2SIoana Radulescu err = dpni_add_qos_entry(priv->mc_io, 0, priv->mc_token, 37566aa90fe2SIoana Radulescu &key_params, i, i); 37576aa90fe2SIoana Radulescu if (err) { 37586aa90fe2SIoana Radulescu dev_err(dev, "dpni_add_qos_entry failed\n"); 37596aa90fe2SIoana Radulescu dpni_clear_qos_table(priv->mc_io, 0, priv->mc_token); 37606aa90fe2SIoana Radulescu goto out_unmap_key; 37616aa90fe2SIoana Radulescu } 37626aa90fe2SIoana Radulescu } 37636aa90fe2SIoana Radulescu 37646aa90fe2SIoana Radulescu priv->vlan_cls_enabled = true; 37656aa90fe2SIoana Radulescu 37666aa90fe2SIoana Radulescu /* Table and key memory is not persistent, clean everything up after 37676aa90fe2SIoana Radulescu * configuration is finished 37686aa90fe2SIoana Radulescu */ 37696aa90fe2SIoana Radulescu out_unmap_key: 37706aa90fe2SIoana Radulescu dma_unmap_single(dev, key_params.key_iova, key_size * 2, DMA_TO_DEVICE); 37716aa90fe2SIoana Radulescu 
out_free_key: 37726aa90fe2SIoana Radulescu kfree(key); 37736aa90fe2SIoana Radulescu out_unmap_tbl: 37746aa90fe2SIoana Radulescu dma_unmap_single(dev, qos_cfg.key_cfg_iova, DPAA2_CLASSIFIER_DMA_SIZE, 37756aa90fe2SIoana Radulescu DMA_TO_DEVICE); 37766aa90fe2SIoana Radulescu out_free_tbl: 37776aa90fe2SIoana Radulescu kfree(dma_mem); 37786aa90fe2SIoana Radulescu 37796aa90fe2SIoana Radulescu return err; 37806aa90fe2SIoana Radulescu } 37816aa90fe2SIoana Radulescu 378234ff6846SIoana Radulescu /* Configure the DPNI object this interface is associated with */ 37835d8dccf8SIoana Ciornei static int dpaa2_eth_setup_dpni(struct fsl_mc_device *ls_dev) 378434ff6846SIoana Radulescu { 378534ff6846SIoana Radulescu struct device *dev = &ls_dev->dev; 378634ff6846SIoana Radulescu struct dpaa2_eth_priv *priv; 378734ff6846SIoana Radulescu struct net_device *net_dev; 378834ff6846SIoana Radulescu int err; 378934ff6846SIoana Radulescu 379034ff6846SIoana Radulescu net_dev = dev_get_drvdata(dev); 379134ff6846SIoana Radulescu priv = netdev_priv(net_dev); 379234ff6846SIoana Radulescu 379334ff6846SIoana Radulescu /* get a handle for the DPNI object */ 379434ff6846SIoana Radulescu err = dpni_open(priv->mc_io, 0, ls_dev->obj_desc.id, &priv->mc_token); 379534ff6846SIoana Radulescu if (err) { 379634ff6846SIoana Radulescu dev_err(dev, "dpni_open() failed\n"); 379734ff6846SIoana Radulescu return err; 379834ff6846SIoana Radulescu } 379934ff6846SIoana Radulescu 380034ff6846SIoana Radulescu /* Check if we can work with this DPNI object */ 380134ff6846SIoana Radulescu err = dpni_get_api_version(priv->mc_io, 0, &priv->dpni_ver_major, 380234ff6846SIoana Radulescu &priv->dpni_ver_minor); 380334ff6846SIoana Radulescu if (err) { 380434ff6846SIoana Radulescu dev_err(dev, "dpni_get_api_version() failed\n"); 380534ff6846SIoana Radulescu goto close; 380634ff6846SIoana Radulescu } 380734ff6846SIoana Radulescu if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_VER_MAJOR, DPNI_VER_MINOR) < 0) { 380834ff6846SIoana Radulescu 
dev_err(dev, "DPNI version %u.%u not supported, need >= %u.%u\n", 380934ff6846SIoana Radulescu priv->dpni_ver_major, priv->dpni_ver_minor, 381034ff6846SIoana Radulescu DPNI_VER_MAJOR, DPNI_VER_MINOR); 381191c71bf1SVladimir Oltean err = -EOPNOTSUPP; 381234ff6846SIoana Radulescu goto close; 381334ff6846SIoana Radulescu } 381434ff6846SIoana Radulescu 381534ff6846SIoana Radulescu ls_dev->mc_io = priv->mc_io; 381634ff6846SIoana Radulescu ls_dev->mc_handle = priv->mc_token; 381734ff6846SIoana Radulescu 381834ff6846SIoana Radulescu err = dpni_reset(priv->mc_io, 0, priv->mc_token); 381934ff6846SIoana Radulescu if (err) { 382034ff6846SIoana Radulescu dev_err(dev, "dpni_reset() failed\n"); 382134ff6846SIoana Radulescu goto close; 382234ff6846SIoana Radulescu } 382334ff6846SIoana Radulescu 382434ff6846SIoana Radulescu err = dpni_get_attributes(priv->mc_io, 0, priv->mc_token, 382534ff6846SIoana Radulescu &priv->dpni_attrs); 382634ff6846SIoana Radulescu if (err) { 382734ff6846SIoana Radulescu dev_err(dev, "dpni_get_attributes() failed (err=%d)\n", err); 382834ff6846SIoana Radulescu goto close; 382934ff6846SIoana Radulescu } 383034ff6846SIoana Radulescu 38315d8dccf8SIoana Ciornei err = dpaa2_eth_set_buffer_layout(priv); 383234ff6846SIoana Radulescu if (err) 383334ff6846SIoana Radulescu goto close; 383434ff6846SIoana Radulescu 38355d8dccf8SIoana Ciornei dpaa2_eth_set_enqueue_mode(priv); 38361fa0f68cSIoana Ciocoi Radulescu 38378eb3cef8SIoana Radulescu /* Enable pause frame support */ 38388eb3cef8SIoana Radulescu if (dpaa2_eth_has_pause_support(priv)) { 38395d8dccf8SIoana Ciornei err = dpaa2_eth_set_pause(priv); 38408eb3cef8SIoana Radulescu if (err) 38418eb3cef8SIoana Radulescu goto close; 38428eb3cef8SIoana Radulescu } 38438eb3cef8SIoana Radulescu 38445d8dccf8SIoana Ciornei err = dpaa2_eth_set_vlan_qos(priv); 38456aa90fe2SIoana Radulescu if (err && err != -EOPNOTSUPP) 38466aa90fe2SIoana Radulescu goto close; 38476aa90fe2SIoana Radulescu 38489334d5baSXu Wang priv->cls_rules = 
devm_kcalloc(dev, dpaa2_eth_fs_count(priv), 38499334d5baSXu Wang sizeof(struct dpaa2_eth_cls_rule), 38509334d5baSXu Wang GFP_KERNEL); 385197fff7c8SWei Yongjun if (!priv->cls_rules) { 385297fff7c8SWei Yongjun err = -ENOMEM; 3853afb90dbbSIoana Radulescu goto close; 385497fff7c8SWei Yongjun } 3855afb90dbbSIoana Radulescu 385634ff6846SIoana Radulescu return 0; 385734ff6846SIoana Radulescu 385834ff6846SIoana Radulescu close: 385934ff6846SIoana Radulescu dpni_close(priv->mc_io, 0, priv->mc_token); 386034ff6846SIoana Radulescu 386134ff6846SIoana Radulescu return err; 386234ff6846SIoana Radulescu } 386334ff6846SIoana Radulescu 38645d8dccf8SIoana Ciornei static void dpaa2_eth_free_dpni(struct dpaa2_eth_priv *priv) 386534ff6846SIoana Radulescu { 386634ff6846SIoana Radulescu int err; 386734ff6846SIoana Radulescu 386834ff6846SIoana Radulescu err = dpni_reset(priv->mc_io, 0, priv->mc_token); 386934ff6846SIoana Radulescu if (err) 387034ff6846SIoana Radulescu netdev_warn(priv->net_dev, "dpni_reset() failed (err %d)\n", 387134ff6846SIoana Radulescu err); 387234ff6846SIoana Radulescu 387334ff6846SIoana Radulescu dpni_close(priv->mc_io, 0, priv->mc_token); 387434ff6846SIoana Radulescu } 387534ff6846SIoana Radulescu 38765d8dccf8SIoana Ciornei static int dpaa2_eth_setup_rx_flow(struct dpaa2_eth_priv *priv, 387734ff6846SIoana Radulescu struct dpaa2_eth_fq *fq) 387834ff6846SIoana Radulescu { 387934ff6846SIoana Radulescu struct device *dev = priv->net_dev->dev.parent; 388034ff6846SIoana Radulescu struct dpni_queue queue; 388134ff6846SIoana Radulescu struct dpni_queue_id qid; 388234ff6846SIoana Radulescu int err; 388334ff6846SIoana Radulescu 388434ff6846SIoana Radulescu err = dpni_get_queue(priv->mc_io, 0, priv->mc_token, 3885685e39eaSIoana Radulescu DPNI_QUEUE_RX, fq->tc, fq->flowid, &queue, &qid); 388634ff6846SIoana Radulescu if (err) { 388734ff6846SIoana Radulescu dev_err(dev, "dpni_get_queue(RX) failed\n"); 388834ff6846SIoana Radulescu return err; 388934ff6846SIoana Radulescu } 
389034ff6846SIoana Radulescu 389134ff6846SIoana Radulescu fq->fqid = qid.fqid; 389234ff6846SIoana Radulescu 389334ff6846SIoana Radulescu queue.destination.id = fq->channel->dpcon_id; 389434ff6846SIoana Radulescu queue.destination.type = DPNI_DEST_DPCON; 389534ff6846SIoana Radulescu queue.destination.priority = 1; 389634ff6846SIoana Radulescu queue.user_context = (u64)(uintptr_t)fq; 389734ff6846SIoana Radulescu err = dpni_set_queue(priv->mc_io, 0, priv->mc_token, 3898685e39eaSIoana Radulescu DPNI_QUEUE_RX, fq->tc, fq->flowid, 389916fa1cf1SIoana Radulescu DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST, 390034ff6846SIoana Radulescu &queue); 390134ff6846SIoana Radulescu if (err) { 390234ff6846SIoana Radulescu dev_err(dev, "dpni_set_queue(RX) failed\n"); 390334ff6846SIoana Radulescu return err; 390434ff6846SIoana Radulescu } 390534ff6846SIoana Radulescu 3906d678be1dSIoana Radulescu /* xdp_rxq setup */ 3907685e39eaSIoana Radulescu /* only once for each channel */ 3908685e39eaSIoana Radulescu if (fq->tc > 0) 3909685e39eaSIoana Radulescu return 0; 3910685e39eaSIoana Radulescu 3911d678be1dSIoana Radulescu err = xdp_rxq_info_reg(&fq->channel->xdp_rxq, priv->net_dev, 3912b02e5a0eSBjörn Töpel fq->flowid, 0); 3913d678be1dSIoana Radulescu if (err) { 3914d678be1dSIoana Radulescu dev_err(dev, "xdp_rxq_info_reg failed\n"); 3915d678be1dSIoana Radulescu return err; 3916d678be1dSIoana Radulescu } 3917d678be1dSIoana Radulescu 3918d678be1dSIoana Radulescu err = xdp_rxq_info_reg_mem_model(&fq->channel->xdp_rxq, 3919d678be1dSIoana Radulescu MEM_TYPE_PAGE_ORDER0, NULL); 3920d678be1dSIoana Radulescu if (err) { 3921d678be1dSIoana Radulescu dev_err(dev, "xdp_rxq_info_reg_mem_model failed\n"); 3922d678be1dSIoana Radulescu return err; 3923d678be1dSIoana Radulescu } 3924d678be1dSIoana Radulescu 392534ff6846SIoana Radulescu return 0; 392634ff6846SIoana Radulescu } 392734ff6846SIoana Radulescu 39285d8dccf8SIoana Ciornei static int dpaa2_eth_setup_tx_flow(struct dpaa2_eth_priv *priv, 
392934ff6846SIoana Radulescu struct dpaa2_eth_fq *fq) 393034ff6846SIoana Radulescu { 393134ff6846SIoana Radulescu struct device *dev = priv->net_dev->dev.parent; 393234ff6846SIoana Radulescu struct dpni_queue queue; 393334ff6846SIoana Radulescu struct dpni_queue_id qid; 393415c87f6bSIoana Radulescu int i, err; 393534ff6846SIoana Radulescu 393615c87f6bSIoana Radulescu for (i = 0; i < dpaa2_eth_tc_count(priv); i++) { 393734ff6846SIoana Radulescu err = dpni_get_queue(priv->mc_io, 0, priv->mc_token, 393815c87f6bSIoana Radulescu DPNI_QUEUE_TX, i, fq->flowid, 393915c87f6bSIoana Radulescu &queue, &qid); 394034ff6846SIoana Radulescu if (err) { 394134ff6846SIoana Radulescu dev_err(dev, "dpni_get_queue(TX) failed\n"); 394234ff6846SIoana Radulescu return err; 394334ff6846SIoana Radulescu } 394415c87f6bSIoana Radulescu fq->tx_fqid[i] = qid.fqid; 394515c87f6bSIoana Radulescu } 394634ff6846SIoana Radulescu 394715c87f6bSIoana Radulescu /* All Tx queues belonging to the same flowid have the same qdbin */ 394834ff6846SIoana Radulescu fq->tx_qdbin = qid.qdbin; 394934ff6846SIoana Radulescu 395034ff6846SIoana Radulescu err = dpni_get_queue(priv->mc_io, 0, priv->mc_token, 395134ff6846SIoana Radulescu DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid, 395234ff6846SIoana Radulescu &queue, &qid); 395334ff6846SIoana Radulescu if (err) { 395434ff6846SIoana Radulescu dev_err(dev, "dpni_get_queue(TX_CONF) failed\n"); 395534ff6846SIoana Radulescu return err; 395634ff6846SIoana Radulescu } 395734ff6846SIoana Radulescu 395834ff6846SIoana Radulescu fq->fqid = qid.fqid; 395934ff6846SIoana Radulescu 396034ff6846SIoana Radulescu queue.destination.id = fq->channel->dpcon_id; 396134ff6846SIoana Radulescu queue.destination.type = DPNI_DEST_DPCON; 396234ff6846SIoana Radulescu queue.destination.priority = 0; 396334ff6846SIoana Radulescu queue.user_context = (u64)(uintptr_t)fq; 396434ff6846SIoana Radulescu err = dpni_set_queue(priv->mc_io, 0, priv->mc_token, 396534ff6846SIoana Radulescu DPNI_QUEUE_TX_CONFIRM, 0, 
fq->flowid, 396634ff6846SIoana Radulescu DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST, 396734ff6846SIoana Radulescu &queue); 396834ff6846SIoana Radulescu if (err) { 396934ff6846SIoana Radulescu dev_err(dev, "dpni_set_queue(TX_CONF) failed\n"); 397034ff6846SIoana Radulescu return err; 397134ff6846SIoana Radulescu } 397234ff6846SIoana Radulescu 397334ff6846SIoana Radulescu return 0; 397434ff6846SIoana Radulescu } 397534ff6846SIoana Radulescu 3976061d631fSIoana Ciornei static int setup_rx_err_flow(struct dpaa2_eth_priv *priv, 3977061d631fSIoana Ciornei struct dpaa2_eth_fq *fq) 3978061d631fSIoana Ciornei { 3979061d631fSIoana Ciornei struct device *dev = priv->net_dev->dev.parent; 3980061d631fSIoana Ciornei struct dpni_queue q = { { 0 } }; 3981061d631fSIoana Ciornei struct dpni_queue_id qid; 3982061d631fSIoana Ciornei u8 q_opt = DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST; 3983061d631fSIoana Ciornei int err; 3984061d631fSIoana Ciornei 3985061d631fSIoana Ciornei err = dpni_get_queue(priv->mc_io, 0, priv->mc_token, 3986061d631fSIoana Ciornei DPNI_QUEUE_RX_ERR, 0, 0, &q, &qid); 3987061d631fSIoana Ciornei if (err) { 3988061d631fSIoana Ciornei dev_err(dev, "dpni_get_queue() failed (%d)\n", err); 3989061d631fSIoana Ciornei return err; 3990061d631fSIoana Ciornei } 3991061d631fSIoana Ciornei 3992061d631fSIoana Ciornei fq->fqid = qid.fqid; 3993061d631fSIoana Ciornei 3994061d631fSIoana Ciornei q.destination.id = fq->channel->dpcon_id; 3995061d631fSIoana Ciornei q.destination.type = DPNI_DEST_DPCON; 3996061d631fSIoana Ciornei q.destination.priority = 1; 3997061d631fSIoana Ciornei q.user_context = (u64)(uintptr_t)fq; 3998061d631fSIoana Ciornei err = dpni_set_queue(priv->mc_io, 0, priv->mc_token, 3999061d631fSIoana Ciornei DPNI_QUEUE_RX_ERR, 0, 0, q_opt, &q); 4000061d631fSIoana Ciornei if (err) { 4001061d631fSIoana Ciornei dev_err(dev, "dpni_set_queue() failed (%d)\n", err); 4002061d631fSIoana Ciornei return err; 4003061d631fSIoana Ciornei } 4004061d631fSIoana Ciornei 
4005061d631fSIoana Ciornei return 0; 4006061d631fSIoana Ciornei } 4007061d631fSIoana Ciornei 4008edad8d26SIoana Ciocoi Radulescu /* Supported header fields for Rx hash distribution key */ 4009f76c483aSIoana Radulescu static const struct dpaa2_eth_dist_fields dist_fields[] = { 401034ff6846SIoana Radulescu { 4011edad8d26SIoana Ciocoi Radulescu /* L2 header */ 4012edad8d26SIoana Ciocoi Radulescu .rxnfc_field = RXH_L2DA, 4013edad8d26SIoana Ciocoi Radulescu .cls_prot = NET_PROT_ETH, 4014edad8d26SIoana Ciocoi Radulescu .cls_field = NH_FLD_ETH_DA, 40153a1e6b84SIoana Ciocoi Radulescu .id = DPAA2_ETH_DIST_ETHDST, 4016edad8d26SIoana Ciocoi Radulescu .size = 6, 4017edad8d26SIoana Ciocoi Radulescu }, { 4018afb90dbbSIoana Radulescu .cls_prot = NET_PROT_ETH, 4019afb90dbbSIoana Radulescu .cls_field = NH_FLD_ETH_SA, 40203a1e6b84SIoana Ciocoi Radulescu .id = DPAA2_ETH_DIST_ETHSRC, 4021afb90dbbSIoana Radulescu .size = 6, 4022afb90dbbSIoana Radulescu }, { 4023afb90dbbSIoana Radulescu /* This is the last ethertype field parsed: 4024afb90dbbSIoana Radulescu * depending on frame format, it can be the MAC ethertype 4025afb90dbbSIoana Radulescu * or the VLAN etype. 
4026afb90dbbSIoana Radulescu */ 4027afb90dbbSIoana Radulescu .cls_prot = NET_PROT_ETH, 4028afb90dbbSIoana Radulescu .cls_field = NH_FLD_ETH_TYPE, 40293a1e6b84SIoana Ciocoi Radulescu .id = DPAA2_ETH_DIST_ETHTYPE, 4030afb90dbbSIoana Radulescu .size = 2, 4031afb90dbbSIoana Radulescu }, { 4032edad8d26SIoana Ciocoi Radulescu /* VLAN header */ 4033edad8d26SIoana Ciocoi Radulescu .rxnfc_field = RXH_VLAN, 4034edad8d26SIoana Ciocoi Radulescu .cls_prot = NET_PROT_VLAN, 4035edad8d26SIoana Ciocoi Radulescu .cls_field = NH_FLD_VLAN_TCI, 40363a1e6b84SIoana Ciocoi Radulescu .id = DPAA2_ETH_DIST_VLAN, 4037edad8d26SIoana Ciocoi Radulescu .size = 2, 4038edad8d26SIoana Ciocoi Radulescu }, { 403934ff6846SIoana Radulescu /* IP header */ 404034ff6846SIoana Radulescu .rxnfc_field = RXH_IP_SRC, 404134ff6846SIoana Radulescu .cls_prot = NET_PROT_IP, 404234ff6846SIoana Radulescu .cls_field = NH_FLD_IP_SRC, 40433a1e6b84SIoana Ciocoi Radulescu .id = DPAA2_ETH_DIST_IPSRC, 404434ff6846SIoana Radulescu .size = 4, 404534ff6846SIoana Radulescu }, { 404634ff6846SIoana Radulescu .rxnfc_field = RXH_IP_DST, 404734ff6846SIoana Radulescu .cls_prot = NET_PROT_IP, 404834ff6846SIoana Radulescu .cls_field = NH_FLD_IP_DST, 40493a1e6b84SIoana Ciocoi Radulescu .id = DPAA2_ETH_DIST_IPDST, 405034ff6846SIoana Radulescu .size = 4, 405134ff6846SIoana Radulescu }, { 405234ff6846SIoana Radulescu .rxnfc_field = RXH_L3_PROTO, 405334ff6846SIoana Radulescu .cls_prot = NET_PROT_IP, 405434ff6846SIoana Radulescu .cls_field = NH_FLD_IP_PROTO, 40553a1e6b84SIoana Ciocoi Radulescu .id = DPAA2_ETH_DIST_IPPROTO, 405634ff6846SIoana Radulescu .size = 1, 405734ff6846SIoana Radulescu }, { 405834ff6846SIoana Radulescu /* Using UDP ports, this is functionally equivalent to raw 405934ff6846SIoana Radulescu * byte pairs from L4 header. 
406034ff6846SIoana Radulescu */ 406134ff6846SIoana Radulescu .rxnfc_field = RXH_L4_B_0_1, 406234ff6846SIoana Radulescu .cls_prot = NET_PROT_UDP, 406334ff6846SIoana Radulescu .cls_field = NH_FLD_UDP_PORT_SRC, 40643a1e6b84SIoana Ciocoi Radulescu .id = DPAA2_ETH_DIST_L4SRC, 406534ff6846SIoana Radulescu .size = 2, 406634ff6846SIoana Radulescu }, { 406734ff6846SIoana Radulescu .rxnfc_field = RXH_L4_B_2_3, 406834ff6846SIoana Radulescu .cls_prot = NET_PROT_UDP, 406934ff6846SIoana Radulescu .cls_field = NH_FLD_UDP_PORT_DST, 40703a1e6b84SIoana Ciocoi Radulescu .id = DPAA2_ETH_DIST_L4DST, 407134ff6846SIoana Radulescu .size = 2, 407234ff6846SIoana Radulescu }, 407334ff6846SIoana Radulescu }; 407434ff6846SIoana Radulescu 4075df85aeb9SIoana Radulescu /* Configure the Rx hash key using the legacy API */ 40765d8dccf8SIoana Ciornei static int dpaa2_eth_config_legacy_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key) 4077df85aeb9SIoana Radulescu { 4078df85aeb9SIoana Radulescu struct device *dev = priv->net_dev->dev.parent; 4079df85aeb9SIoana Radulescu struct dpni_rx_tc_dist_cfg dist_cfg; 4080685e39eaSIoana Radulescu int i, err = 0; 4081df85aeb9SIoana Radulescu 4082df85aeb9SIoana Radulescu memset(&dist_cfg, 0, sizeof(dist_cfg)); 4083df85aeb9SIoana Radulescu 4084df85aeb9SIoana Radulescu dist_cfg.key_cfg_iova = key; 4085df85aeb9SIoana Radulescu dist_cfg.dist_size = dpaa2_eth_queue_count(priv); 4086df85aeb9SIoana Radulescu dist_cfg.dist_mode = DPNI_DIST_MODE_HASH; 4087df85aeb9SIoana Radulescu 4088685e39eaSIoana Radulescu for (i = 0; i < dpaa2_eth_tc_count(priv); i++) { 4089685e39eaSIoana Radulescu err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token, 4090685e39eaSIoana Radulescu i, &dist_cfg); 4091685e39eaSIoana Radulescu if (err) { 4092df85aeb9SIoana Radulescu dev_err(dev, "dpni_set_rx_tc_dist failed\n"); 4093685e39eaSIoana Radulescu break; 4094685e39eaSIoana Radulescu } 4095685e39eaSIoana Radulescu } 4096df85aeb9SIoana Radulescu 4097df85aeb9SIoana Radulescu return err; 
4098df85aeb9SIoana Radulescu } 4099df85aeb9SIoana Radulescu 4100df85aeb9SIoana Radulescu /* Configure the Rx hash key using the new API */ 41015d8dccf8SIoana Ciornei static int dpaa2_eth_config_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key) 4102df85aeb9SIoana Radulescu { 4103df85aeb9SIoana Radulescu struct device *dev = priv->net_dev->dev.parent; 4104df85aeb9SIoana Radulescu struct dpni_rx_dist_cfg dist_cfg; 4105685e39eaSIoana Radulescu int i, err = 0; 4106df85aeb9SIoana Radulescu 4107df85aeb9SIoana Radulescu memset(&dist_cfg, 0, sizeof(dist_cfg)); 4108df85aeb9SIoana Radulescu 4109df85aeb9SIoana Radulescu dist_cfg.key_cfg_iova = key; 4110df85aeb9SIoana Radulescu dist_cfg.dist_size = dpaa2_eth_queue_count(priv); 4111df85aeb9SIoana Radulescu dist_cfg.enable = 1; 4112df85aeb9SIoana Radulescu 4113685e39eaSIoana Radulescu for (i = 0; i < dpaa2_eth_tc_count(priv); i++) { 4114685e39eaSIoana Radulescu dist_cfg.tc = i; 4115685e39eaSIoana Radulescu err = dpni_set_rx_hash_dist(priv->mc_io, 0, priv->mc_token, 4116685e39eaSIoana Radulescu &dist_cfg); 4117685e39eaSIoana Radulescu if (err) { 4118df85aeb9SIoana Radulescu dev_err(dev, "dpni_set_rx_hash_dist failed\n"); 4119685e39eaSIoana Radulescu break; 4120685e39eaSIoana Radulescu } 41215e29c16fSIonut-robert Aron 41225e29c16fSIonut-robert Aron /* If the flow steering / hashing key is shared between all 41235e29c16fSIonut-robert Aron * traffic classes, install it just once 41245e29c16fSIonut-robert Aron */ 41255e29c16fSIonut-robert Aron if (priv->dpni_attrs.options & DPNI_OPT_SHARED_FS) 41265e29c16fSIonut-robert Aron break; 4127685e39eaSIoana Radulescu } 4128df85aeb9SIoana Radulescu 4129df85aeb9SIoana Radulescu return err; 4130df85aeb9SIoana Radulescu } 4131df85aeb9SIoana Radulescu 41324aaaf9b9SIoana Radulescu /* Configure the Rx flow classification key */ 41335d8dccf8SIoana Ciornei static int dpaa2_eth_config_cls_key(struct dpaa2_eth_priv *priv, dma_addr_t key) 41344aaaf9b9SIoana Radulescu { 41354aaaf9b9SIoana Radulescu 
struct device *dev = priv->net_dev->dev.parent; 41364aaaf9b9SIoana Radulescu struct dpni_rx_dist_cfg dist_cfg; 4137685e39eaSIoana Radulescu int i, err = 0; 41384aaaf9b9SIoana Radulescu 41394aaaf9b9SIoana Radulescu memset(&dist_cfg, 0, sizeof(dist_cfg)); 41404aaaf9b9SIoana Radulescu 41414aaaf9b9SIoana Radulescu dist_cfg.key_cfg_iova = key; 41424aaaf9b9SIoana Radulescu dist_cfg.dist_size = dpaa2_eth_queue_count(priv); 41434aaaf9b9SIoana Radulescu dist_cfg.enable = 1; 41444aaaf9b9SIoana Radulescu 4145685e39eaSIoana Radulescu for (i = 0; i < dpaa2_eth_tc_count(priv); i++) { 4146685e39eaSIoana Radulescu dist_cfg.tc = i; 4147685e39eaSIoana Radulescu err = dpni_set_rx_fs_dist(priv->mc_io, 0, priv->mc_token, 4148685e39eaSIoana Radulescu &dist_cfg); 4149685e39eaSIoana Radulescu if (err) { 41504aaaf9b9SIoana Radulescu dev_err(dev, "dpni_set_rx_fs_dist failed\n"); 4151685e39eaSIoana Radulescu break; 4152685e39eaSIoana Radulescu } 41535e29c16fSIonut-robert Aron 41545e29c16fSIonut-robert Aron /* If the flow steering / hashing key is shared between all 41555e29c16fSIonut-robert Aron * traffic classes, install it just once 41565e29c16fSIonut-robert Aron */ 41575e29c16fSIonut-robert Aron if (priv->dpni_attrs.options & DPNI_OPT_SHARED_FS) 41585e29c16fSIonut-robert Aron break; 4159685e39eaSIoana Radulescu } 41604aaaf9b9SIoana Radulescu 41614aaaf9b9SIoana Radulescu return err; 41624aaaf9b9SIoana Radulescu } 41634aaaf9b9SIoana Radulescu 4164afb90dbbSIoana Radulescu /* Size of the Rx flow classification key */ 41652d680237SIoana Ciocoi Radulescu int dpaa2_eth_cls_key_size(u64 fields) 4166afb90dbbSIoana Radulescu { 4167afb90dbbSIoana Radulescu int i, size = 0; 4168afb90dbbSIoana Radulescu 41692d680237SIoana Ciocoi Radulescu for (i = 0; i < ARRAY_SIZE(dist_fields); i++) { 41702d680237SIoana Ciocoi Radulescu if (!(fields & dist_fields[i].id)) 41712d680237SIoana Ciocoi Radulescu continue; 4172afb90dbbSIoana Radulescu size += dist_fields[i].size; 41732d680237SIoana Ciocoi Radulescu } 
4174afb90dbbSIoana Radulescu 4175afb90dbbSIoana Radulescu return size; 4176afb90dbbSIoana Radulescu } 4177afb90dbbSIoana Radulescu 4178afb90dbbSIoana Radulescu /* Offset of header field in Rx classification key */ 4179afb90dbbSIoana Radulescu int dpaa2_eth_cls_fld_off(int prot, int field) 4180afb90dbbSIoana Radulescu { 4181afb90dbbSIoana Radulescu int i, off = 0; 4182afb90dbbSIoana Radulescu 4183afb90dbbSIoana Radulescu for (i = 0; i < ARRAY_SIZE(dist_fields); i++) { 4184afb90dbbSIoana Radulescu if (dist_fields[i].cls_prot == prot && 4185afb90dbbSIoana Radulescu dist_fields[i].cls_field == field) 4186afb90dbbSIoana Radulescu return off; 4187afb90dbbSIoana Radulescu off += dist_fields[i].size; 4188afb90dbbSIoana Radulescu } 4189afb90dbbSIoana Radulescu 4190afb90dbbSIoana Radulescu WARN_ONCE(1, "Unsupported header field used for Rx flow cls\n"); 4191afb90dbbSIoana Radulescu return 0; 4192afb90dbbSIoana Radulescu } 4193afb90dbbSIoana Radulescu 41942d680237SIoana Ciocoi Radulescu /* Prune unused fields from the classification rule. 
 * Used when masking is not supported
 */
void dpaa2_eth_cls_trim_rule(void *key_mem, u64 fields)
{
	int off = 0, new_off = 0;
	int i, size;

	/* Walk the full extract layout; copy down (compact) only the fields
	 * selected in @fields, dropping the gaps left by unselected ones.
	 */
	for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
		size = dist_fields[i].size;
		if (dist_fields[i].id & fields) {
			memcpy(key_mem + new_off, key_mem + off, size);
			new_off += size;
		}
		off += size;
	}
}

/* Set Rx distribution (hash or flow classification) key
 * flags is a combination of RXH_ bits
 */
static int dpaa2_eth_set_dist_key(struct net_device *net_dev,
				  enum dpaa2_eth_rx_dist type, u64 flags)
{
	struct device *dev = net_dev->dev.parent;
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	struct dpkg_profile_cfg cls_cfg;
	u32 rx_hash_fields = 0;
	dma_addr_t key_iova;
	u8 *dma_mem;
	int i;
	int err = 0;

	memset(&cls_cfg, 0, sizeof(cls_cfg));

	/* Build one DPKG header extract per selected field */
	for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
		struct dpkg_extract *key =
			&cls_cfg.extracts[cls_cfg.num_extracts];

		/* For both Rx hashing and classification keys
		 * we set only the selected fields.
		 */
		if (!(flags & dist_fields[i].id))
			continue;
		if (type == DPAA2_ETH_RX_DIST_HASH)
			rx_hash_fields |= dist_fields[i].rxnfc_field;

		if (cls_cfg.num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) {
			dev_err(dev, "error adding key extraction rule, too many rules?\n");
			return -E2BIG;
		}

		key->type = DPKG_EXTRACT_FROM_HDR;
		key->extract.from_hdr.prot = dist_fields[i].cls_prot;
		key->extract.from_hdr.type = DPKG_FULL_FIELD;
		key->extract.from_hdr.field = dist_fields[i].cls_field;
		cls_cfg.num_extracts++;
	}

	/* The key config is serialized into a DMA-able buffer for the MC */
	dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL);
	if (!dma_mem)
		return -ENOMEM;

	err = dpni_prepare_key_cfg(&cls_cfg, dma_mem);
	if (err) {
		dev_err(dev, "dpni_prepare_key_cfg error %d\n", err);
		goto free_key;
	}

	/* Prepare for setting the rx dist */
	key_iova = dma_map_single(dev, dma_mem, DPAA2_CLASSIFIER_DMA_SIZE,
				  DMA_TO_DEVICE);
	if (dma_mapping_error(dev, key_iova)) {
		dev_err(dev, "DMA mapping failed\n");
		err = -ENOMEM;
		goto free_key;
	}

	if (type == DPAA2_ETH_RX_DIST_HASH) {
		/* Older MC firmware uses a different (legacy) command */
		if (dpaa2_eth_has_legacy_dist(priv))
			err = dpaa2_eth_config_legacy_hash_key(priv, key_iova);
		else
			err = dpaa2_eth_config_hash_key(priv, key_iova);
	} else {
		err = dpaa2_eth_config_cls_key(priv, key_iova);
	}

	dma_unmap_single(dev, key_iova, DPAA2_CLASSIFIER_DMA_SIZE,
			 DMA_TO_DEVICE);
	/* Only cache the RXH_ view of the key once the MC accepted it */
	if (!err && type == DPAA2_ETH_RX_DIST_HASH)
		priv->rx_hash_fields = rx_hash_fields;

free_key:
	kfree(dma_mem);
	return err;
}

/* Translate RXH_ ethtool flags into our internal field ids and program
 * the Rx hash key accordingly. Returns -EOPNOTSUPP when the DPNI cannot
 * do hash distribution at all.
 */
int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	u64 key = 0;
	int i;

	if (!dpaa2_eth_hash_enabled(priv))
		return -EOPNOTSUPP;

	for (i = 0; i < ARRAY_SIZE(dist_fields); i++)
		if (dist_fields[i].rxnfc_field & flags)
			key |= dist_fields[i].id;

	return dpaa2_eth_set_dist_key(net_dev, DPAA2_ETH_RX_DIST_HASH, key);
}

/* Program the Rx flow classification key; @flags is already expressed in
 * internal DPAA2_ETH_DIST_* field ids (no RXH_ translation needed).
 */
int dpaa2_eth_set_cls(struct net_device *net_dev, u64 flags)
{
	return dpaa2_eth_set_dist_key(net_dev, DPAA2_ETH_RX_DIST_CLS, flags);
}

/* Install the default (all supported fields) Rx classification key, if the
 * MC version and DPNI options allow flow steering at all.
 */
static int dpaa2_eth_set_default_cls(struct dpaa2_eth_priv *priv)
{
	struct device *dev = priv->net_dev->dev.parent;
	int err;

	/* Check if we actually support Rx flow classification */
	if (dpaa2_eth_has_legacy_dist(priv)) {
		dev_dbg(dev, "Rx cls not supported by current MC version\n");
		return -EOPNOTSUPP;
	}

	if (!dpaa2_eth_fs_enabled(priv)) {
		dev_dbg(dev, "Rx cls disabled in DPNI options\n");
		return -EOPNOTSUPP;
	}

	if (!dpaa2_eth_hash_enabled(priv)) {
		dev_dbg(dev, "Rx cls disabled for single queue DPNIs\n");
		return -EOPNOTSUPP;
	}

	/* If there is no support for masking in the classification table,
	 * we don't set a default key, as it will depend on the rules
	 * added by the user at runtime.
	 */
	if (!dpaa2_eth_fs_mask_enabled(priv))
		goto out;

	err = dpaa2_eth_set_cls(priv->net_dev, DPAA2_ETH_DIST_ALL);
	if (err)
		return err;

out:
	priv->rx_cls_enabled = 1;

	return 0;
}

/* Bind the DPNI to its needed objects and resources: buffer pool, DPIOs,
 * frame queues and channels
 */
static int dpaa2_eth_bind_dpni(struct dpaa2_eth_priv *priv)
{
	struct dpaa2_eth_bp *bp = priv->bp[DPAA2_ETH_DEFAULT_BP_IDX];
	struct net_device *net_dev = priv->net_dev;
	struct dpni_pools_cfg pools_params = { 0 };
	struct device *dev = net_dev->dev.parent;
	struct dpni_error_cfg err_cfg;
	int err = 0;
	int i;

	/* Attach the default buffer pool to the DPNI */
	pools_params.num_dpbp = 1;
	pools_params.pools[0].dpbp_id = bp->dev->obj_desc.id;
	pools_params.pools[0].backup_pool = 0;
	pools_params.pools[0].buffer_size = priv->rx_buf_size;
	err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params);
	if (err) {
		dev_err(dev, "dpni_set_pools() failed\n");
		return err;
	}

	/* have the interface implicitly distribute traffic based on
	 * the default hash key
	 */
	err = dpaa2_eth_set_hash(net_dev, DPAA2_RXH_DEFAULT);
	/* Hash/cls support is optional; only a real failure is reported */
	if (err && err != -EOPNOTSUPP)
		dev_err(dev, "Failed to configure hashing\n");

	/* Configure the flow classification key; it includes all
	 * supported header fields and cannot be modified at runtime
	 */
	err = dpaa2_eth_set_default_cls(priv);
	if (err && err != -EOPNOTSUPP)
		dev_err(dev, "Failed to configure Rx classification key\n");

	/* Configure handling of error frames */
	err_cfg.errors = DPAA2_FAS_RX_ERR_MASK;
	err_cfg.set_frame_annotation = 1;
	err_cfg.error_action = DPNI_ERROR_ACTION_DISCARD;
	err = dpni_set_errors_behavior(priv->mc_io, 0, priv->mc_token,
				       &err_cfg);
	if (err) {
		dev_err(dev, "dpni_set_errors_behavior failed\n");
		return err;
	}

	/* Configure Rx and Tx conf queues to generate CDANs */
	for (i = 0; i < priv->num_fqs; i++) {
		switch (priv->fq[i].type) {
		case DPAA2_RX_FQ:
			err = dpaa2_eth_setup_rx_flow(priv, &priv->fq[i]);
			break;
		case DPAA2_TX_CONF_FQ:
			err = dpaa2_eth_setup_tx_flow(priv, &priv->fq[i]);
			break;
		case DPAA2_RX_ERR_FQ:
			err = setup_rx_err_flow(priv, &priv->fq[i]);
			break;
		default:
			dev_err(dev, "Invalid FQ type %d\n", priv->fq[i].type);
			return -EINVAL;
		}
		if (err)
			return err;
	}

	/* Cache the Tx queuing destination id used on the enqueue path */
	err = dpni_get_qdid(priv->mc_io, 0, priv->mc_token,
			    DPNI_QUEUE_TX, &priv->tx_qdid);
	if (err) {
		dev_err(dev, "dpni_get_qdid() failed\n");
		return err;
	}

	return 0;
}

/* Allocate rings for storing incoming frame descriptors */
static int dpaa2_eth_alloc_rings(struct dpaa2_eth_priv *priv)
{
	struct net_device *net_dev = priv->net_dev;
	struct device *dev = net_dev->dev.parent;
	int i;

	for (i = 0; i < priv->num_channels; i++) {
		priv->channel[i]->store =
			dpaa2_io_store_create(DPAA2_ETH_STORE_SIZE, dev);
		if (!priv->channel[i]->store) {
			netdev_err(net_dev, "dpaa2_io_store_create() failed\n");
			goto err_ring;
		}
	}

	return 0;

err_ring:
	/* Unwind only the stores created so far; a NULL store marks the
	 * first channel that was not yet populated.
	 */
	for (i = 0; i < priv->num_channels; i++) {
		if (!priv->channel[i]->store)
			break;
		dpaa2_io_store_destroy(priv->channel[i]->store);
	}

	return -ENOMEM;
}

/* Free the per-channel frame descriptor stores; counterpart of
 * dpaa2_eth_alloc_rings().
 */
static void dpaa2_eth_free_rings(struct dpaa2_eth_priv *priv)
{
	int i;

	for (i = 0; i < priv->num_channels; i++)
		dpaa2_io_store_destroy(priv->channel[i]->store);
}

/* Reconcile the MAC address between firmware (DPMAC), the DPNI attributes
 * and net_dev->dev_addr, generating a random address only when neither
 * source provides one.
 */
static int dpaa2_eth_set_mac_addr(struct dpaa2_eth_priv *priv)
{
	struct net_device *net_dev = priv->net_dev;
	struct device *dev = net_dev->dev.parent;
	u8 mac_addr[ETH_ALEN], dpni_mac_addr[ETH_ALEN];
	int err;

	/* Get firmware address, if any */
	err = dpni_get_port_mac_addr(priv->mc_io, 0, priv->mc_token, mac_addr);
	if (err) {
		dev_err(dev, "dpni_get_port_mac_addr() failed\n");
		return err;
	}

	/* Get DPNI attributes address, if any */
	err = dpni_get_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
					dpni_mac_addr);
	if (err) {
		dev_err(dev, "dpni_get_primary_mac_addr() failed\n");
		return err;
	}

	/* First check if firmware has any address configured by bootloader */
	if (!is_zero_ether_addr(mac_addr)) {
		/* If the DPMAC addr != DPNI addr, update it */
		if (!ether_addr_equal(mac_addr, dpni_mac_addr)) {
			err = dpni_set_primary_mac_addr(priv->mc_io, 0,
							priv->mc_token,
							mac_addr);
			if (err) {
				dev_err(dev, "dpni_set_primary_mac_addr() failed\n");
				return err;
			}
		}
		eth_hw_addr_set(net_dev, mac_addr);
	} else if (is_zero_ether_addr(dpni_mac_addr)) {
		/* No MAC address configured, fill in net_dev->dev_addr
		 * with a random one
		 */
		eth_hw_addr_random(net_dev);
		dev_dbg_once(dev, "device(s) have all-zero hwaddr, replaced with random\n");

		err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
						net_dev->dev_addr);
		if (err) {
			dev_err(dev, "dpni_set_primary_mac_addr() failed\n");
			return err;
		}

		/* Override NET_ADDR_RANDOM set by eth_hw_addr_random(); for all
		 * practical purposes, this will be our "permanent" mac address,
		 * at least until the next reboot. This move will also permit
		 * register_netdevice() to properly fill up net_dev->perm_addr.
		 */
		net_dev->addr_assign_type = NET_ADDR_PERM;
	} else {
		/* NET_ADDR_PERM is default, all we have to do is
		 * fill in the device addr.
		 */
		eth_hw_addr_set(net_dev, dpni_mac_addr);
	}

	return 0;
}

/* One-time net_device setup: ops, MAC address, MTU limit, queue counts,
 * priv_flags capabilities and offload feature bits.
 */
static int dpaa2_eth_netdev_init(struct net_device *net_dev)
{
	struct device *dev = net_dev->dev.parent;
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	u32 options = priv->dpni_attrs.options;
	u64 supported = 0, not_supported = 0;
	u8 bcast_addr[ETH_ALEN];
	u8 num_queues;
	int err;

	net_dev->netdev_ops = &dpaa2_eth_ops;
	net_dev->ethtool_ops = &dpaa2_ethtool_ops;

	err = dpaa2_eth_set_mac_addr(priv);
	if (err)
		return err;

	/* Explicitly add the broadcast address to the MAC filtering table */
	eth_broadcast_addr(bcast_addr);
	err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token, bcast_addr);
	if (err) {
		dev_err(dev, "dpni_add_mac_addr() failed\n");
		return err;
	}

	/* Set MTU upper limit; lower limit is 68B (default value) */
	net_dev->max_mtu = DPAA2_ETH_MAX_MTU;
	err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token,
					DPAA2_ETH_MFL);
	if (err) {
		dev_err(dev, "dpni_set_max_frame_length() failed\n");
		return err;
	}

	/* Set actual number of queues in the net device */
	num_queues = dpaa2_eth_queue_count(priv);
	err = netif_set_real_num_tx_queues(net_dev, num_queues);
	if (err) {
		dev_err(dev, "netif_set_real_num_tx_queues() failed\n");
		return err;
	}
	err = netif_set_real_num_rx_queues(net_dev, num_queues);
	if (err) {
		dev_err(dev, "netif_set_real_num_rx_queues() failed\n");
		return err;
	}

	/* Probe MC firmware version dependent features (one-step PTP etc.) */
	dpaa2_eth_detect_features(priv);

	/* Capabilities listing */
	supported |= IFF_LIVE_ADDR_CHANGE;

	if (options & DPNI_OPT_NO_MAC_FILTER)
		not_supported |= IFF_UNICAST_FLT;
	else
		supported |= IFF_UNICAST_FLT;

	net_dev->priv_flags |= supported;
	net_dev->priv_flags &= ~not_supported;

	/* Features */
	net_dev->features = NETIF_F_RXCSUM |
			    NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			    NETIF_F_SG | NETIF_F_HIGHDMA |
			    NETIF_F_LLTX | NETIF_F_HW_TC | NETIF_F_TSO;
	/* TSO frames are limited by the max number of FDs in one enqueue */
	net_dev->gso_max_segs = DPAA2_ETH_ENQUEUE_MAX_FDS;
	net_dev->hw_features = net_dev->features;

	if (priv->dpni_attrs.vlan_filter_entries)
		net_dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;

	return 0;
}

/* Kthread body used as a fallback when link IRQs could not be set up:
 * periodically refresh the link state until asked to stop.
 */
static int dpaa2_eth_poll_link_state(void *arg)
{
	struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)arg;
	int err;

	while (!kthread_should_stop()) {
		err = dpaa2_eth_link_state_update(priv);
		if (unlikely(err))
			return err;

		msleep(DPAA2_ETH_LINK_STATE_REFRESH);
	}

	return 0;
}

/* Look up the DPMAC connected to our DPNI endpoint and, when one exists,
 * open it and (for PHY-backed MACs) connect to it; publishes the result
 * in priv->mac under priv->mac_lock.
 */
static int dpaa2_eth_connect_mac(struct dpaa2_eth_priv *priv)
{
	struct fsl_mc_device *dpni_dev, *dpmac_dev;
	struct dpaa2_mac *mac;
	int err;

	dpni_dev = to_fsl_mc_device(priv->net_dev->dev.parent);
	dpmac_dev = fsl_mc_get_endpoint(dpni_dev, 0);

	/* The MAC may not have probed yet; propagate the deferral */
	if (PTR_ERR(dpmac_dev) == -EPROBE_DEFER) {
		netdev_dbg(priv->net_dev, "waiting for mac\n");
		return PTR_ERR(dpmac_dev);
	}

	/* No DPMAC endpoint (or not a dpmac object) is not an error */
	if (IS_ERR(dpmac_dev) || dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type)
		return 0;

	mac = kzalloc(sizeof(struct dpaa2_mac), GFP_KERNEL);
	if (!mac)
		return -ENOMEM;

	mac->mc_dev = dpmac_dev;
	mac->mc_io = priv->mc_io;
	mac->net_dev = priv->net_dev;

	err = dpaa2_mac_open(mac);
	if (err)
		goto err_free_mac;

	if (dpaa2_mac_is_type_phy(mac)) {
		err = dpaa2_mac_connect(mac);
		if (err) {
			if (err == -EPROBE_DEFER)
				netdev_dbg(priv->net_dev,
					   "could not connect to MAC\n");
			else
				netdev_err(priv->net_dev,
					   "Error connecting to the MAC endpoint: %pe",
					   ERR_PTR(err));
			goto err_close_mac;
		}
	}

	/* Publish the fully set up mac; readers take mac_lock */
	mutex_lock(&priv->mac_lock);
	priv->mac = mac;
	mutex_unlock(&priv->mac_lock);

	return 0;

err_close_mac:
	dpaa2_mac_close(mac);
err_free_mac:
	kfree(mac);
	return err;
}

/* Unpublish priv->mac (under mac_lock) and tear down the MAC connection;
 * counterpart of dpaa2_eth_connect_mac(). Safe to call when no MAC is
 * attached.
 */
static void dpaa2_eth_disconnect_mac(struct dpaa2_eth_priv *priv)
{
	struct dpaa2_mac *mac;

	/* Unpublish first so concurrent readers stop seeing the mac before
	 * it is torn down.
	 */
	mutex_lock(&priv->mac_lock);
	mac = priv->mac;
	priv->mac = NULL;
	mutex_unlock(&priv->mac_lock);

	if (!mac)
		return;

	if (dpaa2_mac_is_type_phy(mac))
		dpaa2_mac_disconnect(mac);

	dpaa2_mac_close(mac);
	kfree(mac);
}

/* Threaded DPNI IRQ handler: reacts to link state changes and to the
 * endpoint (DPMAC) being connected or disconnected at runtime.
 */
static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg)
{
	u32 status = ~0;
	struct device *dev = (struct device *)arg;
	struct fsl_mc_device *dpni_dev = to_fsl_mc_device(dev);
	struct net_device *net_dev = dev_get_drvdata(dev);
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	bool had_mac;
	int err;

	err = dpni_get_irq_status(dpni_dev->mc_io, 0, dpni_dev->mc_handle,
				  DPNI_IRQ_INDEX, &status);
	if (unlikely(err)) {
		netdev_err(net_dev, "Can't get irq status (err %d)\n", err);
		return IRQ_HANDLED;
	}

	if (status & DPNI_IRQ_EVENT_LINK_CHANGED)
		dpaa2_eth_link_state_update(netdev_priv(net_dev));

	if (status & DPNI_IRQ_EVENT_ENDPOINT_CHANGED) {
		dpaa2_eth_set_mac_addr(netdev_priv(net_dev));
		dpaa2_eth_update_tx_fqids(priv);

		rtnl_lock();
		/* We can avoid locking because the "endpoint changed" IRQ
		 * handler is the only one who changes priv->mac at runtime,
		 * so we are not racing with anyone.
		 */
		had_mac = !!priv->mac;
		if (had_mac)
			dpaa2_eth_disconnect_mac(priv);
		else
			dpaa2_eth_connect_mac(priv);
		rtnl_unlock();
	}

	return IRQ_HANDLED;
}

/* Allocate the MC IRQ, install the threaded handler and unmask/enable the
 * link-changed and endpoint-changed events. On failure everything acquired
 * so far is released.
 */
static int dpaa2_eth_setup_irqs(struct fsl_mc_device *ls_dev)
{
	int err = 0;
	struct fsl_mc_device_irq *irq;

	err = fsl_mc_allocate_irqs(ls_dev);
	if (err) {
		dev_err(&ls_dev->dev, "MC irqs allocation failed\n");
		return err;
	}

	irq = ls_dev->irqs[0];
	err = devm_request_threaded_irq(&ls_dev->dev, irq->virq,
					NULL, dpni_irq0_handler_thread,
					IRQF_NO_SUSPEND | IRQF_ONESHOT,
					dev_name(&ls_dev->dev), &ls_dev->dev);
	if (err < 0) {
		dev_err(&ls_dev->dev, "devm_request_threaded_irq(): %d\n", err);
		goto free_mc_irq;
	}

	err = dpni_set_irq_mask(ls_dev->mc_io, 0, ls_dev->mc_handle,
				DPNI_IRQ_INDEX, DPNI_IRQ_EVENT_LINK_CHANGED |
				DPNI_IRQ_EVENT_ENDPOINT_CHANGED);
	if (err < 0) {
		dev_err(&ls_dev->dev, "dpni_set_irq_mask(): %d\n", err);
		goto free_irq;
	}

	err = dpni_set_irq_enable(ls_dev->mc_io, 0, ls_dev->mc_handle,
				  DPNI_IRQ_INDEX, 1);
	if (err < 0) {
		dev_err(&ls_dev->dev, "dpni_set_irq_enable(): %d\n", err);
		goto free_irq;
	}

	return 0;

free_irq:
	devm_free_irq(&ls_dev->dev, irq->virq, &ls_dev->dev);
free_mc_irq:
	fsl_mc_free_irqs(ls_dev);

	return err;
}

/* Register a NAPI context for each channel */
static void dpaa2_eth_add_ch_napi(struct dpaa2_eth_priv *priv)
{
	int i;
	struct dpaa2_eth_channel *ch;

	for (i = 0; i < priv->num_channels; i++) {
		ch = priv->channel[i];
		/* NAPI weight *MUST* be a multiple of DPAA2_ETH_STORE_SIZE */
Kicinski netif_napi_add(priv->net_dev, &ch->napi, dpaa2_eth_poll); 478934ff6846SIoana Radulescu } 479034ff6846SIoana Radulescu } 479134ff6846SIoana Radulescu 47925d8dccf8SIoana Ciornei static void dpaa2_eth_del_ch_napi(struct dpaa2_eth_priv *priv) 479334ff6846SIoana Radulescu { 479434ff6846SIoana Radulescu int i; 479534ff6846SIoana Radulescu struct dpaa2_eth_channel *ch; 479634ff6846SIoana Radulescu 479734ff6846SIoana Radulescu for (i = 0; i < priv->num_channels; i++) { 479834ff6846SIoana Radulescu ch = priv->channel[i]; 479934ff6846SIoana Radulescu netif_napi_del(&ch->napi); 480034ff6846SIoana Radulescu } 480134ff6846SIoana Radulescu } 480234ff6846SIoana Radulescu 480334ff6846SIoana Radulescu static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev) 480434ff6846SIoana Radulescu { 480534ff6846SIoana Radulescu struct device *dev; 480634ff6846SIoana Radulescu struct net_device *net_dev = NULL; 480734ff6846SIoana Radulescu struct dpaa2_eth_priv *priv = NULL; 480834ff6846SIoana Radulescu int err = 0; 480934ff6846SIoana Radulescu 481034ff6846SIoana Radulescu dev = &dpni_dev->dev; 481134ff6846SIoana Radulescu 481234ff6846SIoana Radulescu /* Net device */ 4813ab1e6de2SIoana Radulescu net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA2_ETH_MAX_NETDEV_QUEUES); 481434ff6846SIoana Radulescu if (!net_dev) { 481534ff6846SIoana Radulescu dev_err(dev, "alloc_etherdev_mq() failed\n"); 481634ff6846SIoana Radulescu return -ENOMEM; 481734ff6846SIoana Radulescu } 481834ff6846SIoana Radulescu 481934ff6846SIoana Radulescu SET_NETDEV_DEV(net_dev, dev); 482034ff6846SIoana Radulescu dev_set_drvdata(dev, net_dev); 482134ff6846SIoana Radulescu 482234ff6846SIoana Radulescu priv = netdev_priv(net_dev); 482334ff6846SIoana Radulescu priv->net_dev = net_dev; 4824ac73d4bfSJiri Pirko SET_NETDEV_DEVLINK_PORT(net_dev, &priv->devlink_port); 482534ff6846SIoana Radulescu 4826*2291982eSVladimir Oltean mutex_init(&priv->mac_lock); 4827*2291982eSVladimir Oltean 482834ff6846SIoana Radulescu priv->iommu_domain 
= iommu_get_domain_for_dev(dev); 482934ff6846SIoana Radulescu 48301cf773bdSYangbo Lu priv->tx_tstamp_type = HWTSTAMP_TX_OFF; 48311cf773bdSYangbo Lu priv->rx_tstamp = false; 48321cf773bdSYangbo Lu 4833c5521189SYangbo Lu priv->dpaa2_ptp_wq = alloc_workqueue("dpaa2_ptp_wq", 0, 0); 4834c5521189SYangbo Lu if (!priv->dpaa2_ptp_wq) { 4835c5521189SYangbo Lu err = -ENOMEM; 4836c5521189SYangbo Lu goto err_wq_alloc; 4837c5521189SYangbo Lu } 4838c5521189SYangbo Lu 4839c5521189SYangbo Lu INIT_WORK(&priv->tx_onestep_tstamp, dpaa2_eth_tx_onestep_tstamp); 484007dd4485SRadu Bulie mutex_init(&priv->onestep_tstamp_lock); 4841c5521189SYangbo Lu skb_queue_head_init(&priv->tx_skbs); 4842c5521189SYangbo Lu 48438ed3cefcSIoana Ciornei priv->rx_copybreak = DPAA2_ETH_DEFAULT_COPYBREAK; 48448ed3cefcSIoana Ciornei 484534ff6846SIoana Radulescu /* Obtain a MC portal */ 484634ff6846SIoana Radulescu err = fsl_mc_portal_allocate(dpni_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL, 484734ff6846SIoana Radulescu &priv->mc_io); 484834ff6846SIoana Radulescu if (err) { 484937fe9b98SSean Anderson if (err == -ENXIO) { 485037fe9b98SSean Anderson dev_dbg(dev, "waiting for MC portal\n"); 485134ff6846SIoana Radulescu err = -EPROBE_DEFER; 485237fe9b98SSean Anderson } else { 485334ff6846SIoana Radulescu dev_err(dev, "MC portal allocation failed\n"); 485437fe9b98SSean Anderson } 485534ff6846SIoana Radulescu goto err_portal_alloc; 485634ff6846SIoana Radulescu } 485734ff6846SIoana Radulescu 485834ff6846SIoana Radulescu /* MC objects initialization and configuration */ 48595d8dccf8SIoana Ciornei err = dpaa2_eth_setup_dpni(dpni_dev); 486034ff6846SIoana Radulescu if (err) 486134ff6846SIoana Radulescu goto err_dpni_setup; 486234ff6846SIoana Radulescu 48635d8dccf8SIoana Ciornei err = dpaa2_eth_setup_dpio(priv); 486434ff6846SIoana Radulescu if (err) 486534ff6846SIoana Radulescu goto err_dpio_setup; 486634ff6846SIoana Radulescu 48675d8dccf8SIoana Ciornei dpaa2_eth_setup_fqs(priv); 486834ff6846SIoana Radulescu 
4869095174daSRobert-Ionut Alexa err = dpaa2_eth_setup_default_dpbp(priv); 487034ff6846SIoana Radulescu if (err) 487134ff6846SIoana Radulescu goto err_dpbp_setup; 487234ff6846SIoana Radulescu 48735d8dccf8SIoana Ciornei err = dpaa2_eth_bind_dpni(priv); 487434ff6846SIoana Radulescu if (err) 487534ff6846SIoana Radulescu goto err_bind; 487634ff6846SIoana Radulescu 487734ff6846SIoana Radulescu /* Add a NAPI context for each channel */ 48785d8dccf8SIoana Ciornei dpaa2_eth_add_ch_napi(priv); 487934ff6846SIoana Radulescu 488034ff6846SIoana Radulescu /* Percpu statistics */ 488134ff6846SIoana Radulescu priv->percpu_stats = alloc_percpu(*priv->percpu_stats); 488234ff6846SIoana Radulescu if (!priv->percpu_stats) { 488334ff6846SIoana Radulescu dev_err(dev, "alloc_percpu(percpu_stats) failed\n"); 488434ff6846SIoana Radulescu err = -ENOMEM; 488534ff6846SIoana Radulescu goto err_alloc_percpu_stats; 488634ff6846SIoana Radulescu } 488734ff6846SIoana Radulescu priv->percpu_extras = alloc_percpu(*priv->percpu_extras); 488834ff6846SIoana Radulescu if (!priv->percpu_extras) { 488934ff6846SIoana Radulescu dev_err(dev, "alloc_percpu(percpu_extras) failed\n"); 489034ff6846SIoana Radulescu err = -ENOMEM; 489134ff6846SIoana Radulescu goto err_alloc_percpu_extras; 489234ff6846SIoana Radulescu } 489334ff6846SIoana Radulescu 4894d70446eeSIoana Ciornei priv->sgt_cache = alloc_percpu(*priv->sgt_cache); 4895d70446eeSIoana Ciornei if (!priv->sgt_cache) { 4896d70446eeSIoana Ciornei dev_err(dev, "alloc_percpu(sgt_cache) failed\n"); 4897d70446eeSIoana Ciornei err = -ENOMEM; 4898d70446eeSIoana Ciornei goto err_alloc_sgt_cache; 4899d70446eeSIoana Ciornei } 4900d70446eeSIoana Ciornei 4901a4ca448eSIoana Ciornei priv->fd = alloc_percpu(*priv->fd); 4902a4ca448eSIoana Ciornei if (!priv->fd) { 4903a4ca448eSIoana Ciornei dev_err(dev, "alloc_percpu(fds) failed\n"); 4904a4ca448eSIoana Ciornei err = -ENOMEM; 4905a4ca448eSIoana Ciornei goto err_alloc_fds; 4906a4ca448eSIoana Ciornei } 4907a4ca448eSIoana Ciornei 
49085d8dccf8SIoana Ciornei err = dpaa2_eth_netdev_init(net_dev); 490934ff6846SIoana Radulescu if (err) 491034ff6846SIoana Radulescu goto err_netdev_init; 491134ff6846SIoana Radulescu 491234ff6846SIoana Radulescu /* Configure checksum offload based on current interface flags */ 49135d8dccf8SIoana Ciornei err = dpaa2_eth_set_rx_csum(priv, !!(net_dev->features & NETIF_F_RXCSUM)); 491434ff6846SIoana Radulescu if (err) 491534ff6846SIoana Radulescu goto err_csum; 491634ff6846SIoana Radulescu 49175d8dccf8SIoana Ciornei err = dpaa2_eth_set_tx_csum(priv, 49185d8dccf8SIoana Ciornei !!(net_dev->features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))); 491934ff6846SIoana Radulescu if (err) 492034ff6846SIoana Radulescu goto err_csum; 492134ff6846SIoana Radulescu 49225d8dccf8SIoana Ciornei err = dpaa2_eth_alloc_rings(priv); 492334ff6846SIoana Radulescu if (err) 492434ff6846SIoana Radulescu goto err_alloc_rings; 492534ff6846SIoana Radulescu 4926f395b69fSIoana Ciornei #ifdef CONFIG_FSL_DPAA2_ETH_DCB 4927f395b69fSIoana Ciornei if (dpaa2_eth_has_pause_support(priv) && priv->vlan_cls_enabled) { 4928f395b69fSIoana Ciornei priv->dcbx_mode = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE; 4929f395b69fSIoana Ciornei net_dev->dcbnl_ops = &dpaa2_eth_dcbnl_ops; 4930f395b69fSIoana Ciornei } else { 4931f395b69fSIoana Ciornei dev_dbg(dev, "PFC not supported\n"); 4932f395b69fSIoana Ciornei } 4933f395b69fSIoana Ciornei #endif 4934f395b69fSIoana Ciornei 493555f90a4dSVladimir Oltean err = dpaa2_eth_connect_mac(priv); 493655f90a4dSVladimir Oltean if (err) 493755f90a4dSVladimir Oltean goto err_connect_mac; 493855f90a4dSVladimir Oltean 49395d8dccf8SIoana Ciornei err = dpaa2_eth_setup_irqs(dpni_dev); 494034ff6846SIoana Radulescu if (err) { 494134ff6846SIoana Radulescu netdev_warn(net_dev, "Failed to set link interrupt, fall back to polling\n"); 49425d8dccf8SIoana Ciornei priv->poll_thread = kthread_run(dpaa2_eth_poll_link_state, priv, 494334ff6846SIoana Radulescu "%s_poll_link", net_dev->name); 
494434ff6846SIoana Radulescu if (IS_ERR(priv->poll_thread)) { 494534ff6846SIoana Radulescu dev_err(dev, "Error starting polling thread\n"); 494634ff6846SIoana Radulescu goto err_poll_thread; 494734ff6846SIoana Radulescu } 494834ff6846SIoana Radulescu priv->do_link_poll = true; 494934ff6846SIoana Radulescu } 495034ff6846SIoana Radulescu 4951bbb9ae25SLeon Romanovsky err = dpaa2_eth_dl_alloc(priv); 4952ceeb03adSIoana Ciornei if (err) 4953ceeb03adSIoana Ciornei goto err_dl_register; 4954ceeb03adSIoana Ciornei 4955061d631fSIoana Ciornei err = dpaa2_eth_dl_traps_register(priv); 4956061d631fSIoana Ciornei if (err) 4957061d631fSIoana Ciornei goto err_dl_trap_register; 4958061d631fSIoana Ciornei 4959ceeb03adSIoana Ciornei err = dpaa2_eth_dl_port_add(priv); 4960ceeb03adSIoana Ciornei if (err) 4961ceeb03adSIoana Ciornei goto err_dl_port_add; 4962ceeb03adSIoana Ciornei 496334ff6846SIoana Radulescu err = register_netdev(net_dev); 496434ff6846SIoana Radulescu if (err < 0) { 496534ff6846SIoana Radulescu dev_err(dev, "register_netdev() failed\n"); 496634ff6846SIoana Radulescu goto err_netdev_reg; 496734ff6846SIoana Radulescu } 496834ff6846SIoana Radulescu 4969091a19eaSIoana Radulescu #ifdef CONFIG_DEBUG_FS 4970091a19eaSIoana Radulescu dpaa2_dbg_add(priv); 4971091a19eaSIoana Radulescu #endif 4972091a19eaSIoana Radulescu 4973bbb9ae25SLeon Romanovsky dpaa2_eth_dl_register(priv); 497434ff6846SIoana Radulescu dev_info(dev, "Probed interface %s\n", net_dev->name); 497534ff6846SIoana Radulescu return 0; 497634ff6846SIoana Radulescu 497734ff6846SIoana Radulescu err_netdev_reg: 4978ceeb03adSIoana Ciornei dpaa2_eth_dl_port_del(priv); 4979ceeb03adSIoana Ciornei err_dl_port_add: 4980061d631fSIoana Ciornei dpaa2_eth_dl_traps_unregister(priv); 4981061d631fSIoana Ciornei err_dl_trap_register: 4982bbb9ae25SLeon Romanovsky dpaa2_eth_dl_free(priv); 4983ceeb03adSIoana Ciornei err_dl_register: 498434ff6846SIoana Radulescu if (priv->do_link_poll) 498534ff6846SIoana Radulescu 
kthread_stop(priv->poll_thread);
498634ff6846SIoana Radulescu else
498734ff6846SIoana Radulescu fsl_mc_free_irqs(dpni_dev);
/* Probe error unwind: classic goto-cleanup chain, labels release resources
 * in reverse order of acquisition.
 * NOTE(review): unlike dpaa2_eth_remove(), this path calls
 * dpaa2_eth_disconnect_mac() without holding RTNL, and never iounmap()s
 * priv->onestep_reg_base — confirm where that mapping is created and
 * whether it can exist by the time these labels run.
 */
498834ff6846SIoana Radulescu err_poll_thread:
498955f90a4dSVladimir Oltean dpaa2_eth_disconnect_mac(priv);
499055f90a4dSVladimir Oltean err_connect_mac:
49915d8dccf8SIoana Ciornei dpaa2_eth_free_rings(priv);
499234ff6846SIoana Radulescu err_alloc_rings:
499334ff6846SIoana Radulescu err_csum:
499434ff6846SIoana Radulescu err_netdev_init:
4995a4ca448eSIoana Ciornei free_percpu(priv->fd);
4996a4ca448eSIoana Ciornei err_alloc_fds:
4997d70446eeSIoana Ciornei free_percpu(priv->sgt_cache);
4998d70446eeSIoana Ciornei err_alloc_sgt_cache:
499934ff6846SIoana Radulescu free_percpu(priv->percpu_extras);
500034ff6846SIoana Radulescu err_alloc_percpu_extras:
500134ff6846SIoana Radulescu free_percpu(priv->percpu_stats);
500234ff6846SIoana Radulescu err_alloc_percpu_stats:
50035d8dccf8SIoana Ciornei dpaa2_eth_del_ch_napi(priv);
500434ff6846SIoana Radulescu err_bind:
5005095174daSRobert-Ionut Alexa dpaa2_eth_free_dpbps(priv);
500634ff6846SIoana Radulescu err_dpbp_setup:
50075d8dccf8SIoana Ciornei dpaa2_eth_free_dpio(priv);
500834ff6846SIoana Radulescu err_dpio_setup:
50095d8dccf8SIoana Ciornei dpaa2_eth_free_dpni(priv);
501034ff6846SIoana Radulescu err_dpni_setup:
501134ff6846SIoana Radulescu fsl_mc_portal_free(priv->mc_io);
501234ff6846SIoana Radulescu err_portal_alloc:
5013c5521189SYangbo Lu destroy_workqueue(priv->dpaa2_ptp_wq);
5014c5521189SYangbo Lu err_wq_alloc:
501534ff6846SIoana Radulescu dev_set_drvdata(dev, NULL);
501634ff6846SIoana Radulescu free_netdev(net_dev);
501734ff6846SIoana Radulescu
501834ff6846SIoana Radulescu return err;
501934ff6846SIoana Radulescu }
502034ff6846SIoana Radulescu
/* Unbind callback for the fsl-mc bus: undoes dpaa2_eth_probe() for one DPNI
 * device, tearing everything down in (roughly) reverse order of setup.
 */
502134ff6846SIoana Radulescu static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
502234ff6846SIoana Radulescu {
502334ff6846SIoana Radulescu struct device *dev;
502434ff6846SIoana Radulescu struct net_device *net_dev;
502534ff6846SIoana Radulescu struct
dpaa2_eth_priv *priv;
502634ff6846SIoana Radulescu
/* net_dev was stashed in drvdata at probe time; priv is embedded in it. */
502734ff6846SIoana Radulescu dev = &ls_dev->dev;
502834ff6846SIoana Radulescu net_dev = dev_get_drvdata(dev);
502934ff6846SIoana Radulescu priv = netdev_priv(net_dev);
503034ff6846SIoana Radulescu
/* Devlink and netdev de-registration first, so no new user-space access
 * races with the resource teardown below.
 */
5031bbb9ae25SLeon Romanovsky dpaa2_eth_dl_unregister(priv);
5032bbb9ae25SLeon Romanovsky
5033091a19eaSIoana Radulescu #ifdef CONFIG_DEBUG_FS
5034091a19eaSIoana Radulescu dpaa2_dbg_remove(priv);
5035091a19eaSIoana Radulescu #endif
50369ccc6e0cSRobert-Ionut Alexa
50379ccc6e0cSRobert-Ionut Alexa unregister_netdev(net_dev);
503871947923SIoana Ciornei
5039ceeb03adSIoana Ciornei dpaa2_eth_dl_port_del(priv);
5040061d631fSIoana Ciornei dpaa2_eth_dl_traps_unregister(priv);
5041bbb9ae25SLeon Romanovsky dpaa2_eth_dl_free(priv);
5042ceeb03adSIoana Ciornei
/* Stop link-state monitoring: either the fallback polling kthread or the
 * DPNI IRQs, whichever probe ended up using (see priv->do_link_poll).
 */
504334ff6846SIoana Radulescu if (priv->do_link_poll)
504434ff6846SIoana Radulescu kthread_stop(priv->poll_thread);
504534ff6846SIoana Radulescu else
504634ff6846SIoana Radulescu fsl_mc_free_irqs(ls_dev);
504734ff6846SIoana Radulescu
/* NOTE(review): MAC disconnect is done under RTNL here, but the probe
 * error path calls dpaa2_eth_disconnect_mac() without it — confirm the
 * locking contract of dpaa2_eth_disconnect_mac().
 */
504855f90a4dSVladimir Oltean rtnl_lock();
504955f90a4dSVladimir Oltean dpaa2_eth_disconnect_mac(priv);
505055f90a4dSVladimir Oltean rtnl_unlock();
50515d8dccf8SIoana Ciornei dpaa2_eth_free_rings(priv);
5052a4ca448eSIoana Ciornei free_percpu(priv->fd);
5053d70446eeSIoana Ciornei free_percpu(priv->sgt_cache);
505434ff6846SIoana Radulescu free_percpu(priv->percpu_stats);
505534ff6846SIoana Radulescu free_percpu(priv->percpu_extras);
505634ff6846SIoana Radulescu
/* Release MC objects in reverse order of probe setup:
 * NAPI/channels, DPBPs, DPIOs, then the DPNI itself.
 */
50575d8dccf8SIoana Ciornei dpaa2_eth_del_ch_napi(priv);
5058095174daSRobert-Ionut Alexa dpaa2_eth_free_dpbps(priv);
50595d8dccf8SIoana Ciornei dpaa2_eth_free_dpio(priv);
50605d8dccf8SIoana Ciornei dpaa2_eth_free_dpni(priv);
/* onestep_reg_base is presumably mapped only when the DPNI supports
 * direct one-step PTP config (DPAA2_ETH_FEATURE_ONESTEP_CFG_DIRECT) —
 * hence the NULL guard. TODO confirm against the mapping site.
 */
5061c4680c97SRadu Bulie if (priv->onestep_reg_base)
5062c4680c97SRadu Bulie iounmap(priv->onestep_reg_base);
506334ff6846SIoana Radulescu
506434ff6846SIoana Radulescu fsl_mc_portal_free(priv->mc_io);
506534ff6846SIoana Radulescu
5066f4a8adbfSDongliang Mu
destroy_workqueue(priv->dpaa2_ptp_wq);
5067f4a8adbfSDongliang Mu
506834ff6846SIoana Radulescu dev_dbg(net_dev->dev.parent, "Removed interface %s\n", net_dev->name);
506934ff6846SIoana Radulescu
/* free_netdev() last: priv lives inside net_dev, so nothing may touch
 * priv after this point.
 */
50709b5a3332SPavel Skripkin free_netdev(net_dev);
50719b5a3332SPavel Skripkin
507234ff6846SIoana Radulescu return 0;
507334ff6846SIoana Radulescu }
507434ff6846SIoana Radulescu
/* fsl-mc bus match table: bind this driver to "dpni" (network interface)
 * objects exposed by the Freescale/NXP Management Complex.
 */
507534ff6846SIoana Radulescu static const struct fsl_mc_device_id dpaa2_eth_match_id_table[] = {
507634ff6846SIoana Radulescu {
507734ff6846SIoana Radulescu .vendor = FSL_MC_VENDOR_FREESCALE,
507834ff6846SIoana Radulescu .obj_type = "dpni",
507934ff6846SIoana Radulescu },
508034ff6846SIoana Radulescu { .vendor = 0x0 } /* sentinel terminator */
508134ff6846SIoana Radulescu };
508234ff6846SIoana Radulescu MODULE_DEVICE_TABLE(fslmc, dpaa2_eth_match_id_table);
508334ff6846SIoana Radulescu
/* Driver glue: wires probe/remove to the fsl-mc bus core. */
508434ff6846SIoana Radulescu static struct fsl_mc_driver dpaa2_eth_driver = {
508534ff6846SIoana Radulescu .driver = {
508634ff6846SIoana Radulescu .name = KBUILD_MODNAME,
508734ff6846SIoana Radulescu .owner = THIS_MODULE,
508834ff6846SIoana Radulescu },
508934ff6846SIoana Radulescu .probe = dpaa2_eth_probe,
509034ff6846SIoana Radulescu .remove = dpaa2_eth_remove,
509134ff6846SIoana Radulescu .match_id_table = dpaa2_eth_match_id_table
509234ff6846SIoana Radulescu };
509334ff6846SIoana Radulescu
/* Module entry point: create the debugfs root before registering the
 * driver (probe adds per-device entries via dpaa2_dbg_add()), and tear it
 * down again if registration fails.
 */
5094091a19eaSIoana Radulescu static int __init dpaa2_eth_driver_init(void)
5095091a19eaSIoana Radulescu {
5096091a19eaSIoana Radulescu int err;
5097091a19eaSIoana Radulescu
5098091a19eaSIoana Radulescu dpaa2_eth_dbg_init();
5099091a19eaSIoana Radulescu err = fsl_mc_driver_register(&dpaa2_eth_driver);
5100091a19eaSIoana Radulescu if (err) {
5101091a19eaSIoana Radulescu dpaa2_eth_dbg_exit();
5102091a19eaSIoana Radulescu return err;
5103091a19eaSIoana Radulescu }
5104091a19eaSIoana Radulescu
5105091a19eaSIoana Radulescu return 0;
5106091a19eaSIoana Radulescu }
5107091a19eaSIoana Radulescu
5108091a19eaSIoana Radulescu static void __exit
dpaa2_eth_driver_exit(void)
5109091a19eaSIoana Radulescu {
/* NOTE(review): the debugfs root is removed *before* the driver is
 * unregistered — same order as init, not the reverse. Per-device debugfs
 * entries are removed in dpaa2_eth_remove(); confirm this ordering is
 * benign while devices are still bound.
 */
5110091a19eaSIoana Radulescu dpaa2_eth_dbg_exit();
5111091a19eaSIoana Radulescu fsl_mc_driver_unregister(&dpaa2_eth_driver);
5112091a19eaSIoana Radulescu }
5113091a19eaSIoana Radulescu
5114091a19eaSIoana Radulescu module_init(dpaa2_eth_driver_init);
5115091a19eaSIoana Radulescu module_exit(dpaa2_eth_driver_exit);
5116