134ff6846SIoana Radulescu // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) 234ff6846SIoana Radulescu /* Copyright 2014-2016 Freescale Semiconductor Inc. 348c0481eSIoana Ciornei * Copyright 2016-2020 NXP 434ff6846SIoana Radulescu */ 534ff6846SIoana Radulescu #include <linux/init.h> 634ff6846SIoana Radulescu #include <linux/module.h> 734ff6846SIoana Radulescu #include <linux/platform_device.h> 834ff6846SIoana Radulescu #include <linux/etherdevice.h> 934ff6846SIoana Radulescu #include <linux/of_net.h> 1034ff6846SIoana Radulescu #include <linux/interrupt.h> 1134ff6846SIoana Radulescu #include <linux/msi.h> 1234ff6846SIoana Radulescu #include <linux/kthread.h> 1334ff6846SIoana Radulescu #include <linux/iommu.h> 1434ff6846SIoana Radulescu #include <linux/fsl/mc.h> 157e273a8eSIoana Ciocoi Radulescu #include <linux/bpf.h> 167e273a8eSIoana Ciocoi Radulescu #include <linux/bpf_trace.h> 17d21c784cSYangbo Lu #include <linux/fsl/ptp_qoriq.h> 18c5521189SYangbo Lu #include <linux/ptp_classify.h> 193657cdafSIoana Ciornei #include <net/pkt_cls.h> 2034ff6846SIoana Radulescu #include <net/sock.h> 2134ff6846SIoana Radulescu 2234ff6846SIoana Radulescu #include "dpaa2-eth.h" 2334ff6846SIoana Radulescu 2434ff6846SIoana Radulescu /* CREATE_TRACE_POINTS only needs to be defined once. 
Other dpa files 2534ff6846SIoana Radulescu * using trace events only need to #include <trace/events/sched.h> 2634ff6846SIoana Radulescu */ 2734ff6846SIoana Radulescu #define CREATE_TRACE_POINTS 2834ff6846SIoana Radulescu #include "dpaa2-eth-trace.h" 2934ff6846SIoana Radulescu 3034ff6846SIoana Radulescu MODULE_LICENSE("Dual BSD/GPL"); 3134ff6846SIoana Radulescu MODULE_AUTHOR("Freescale Semiconductor, Inc"); 3234ff6846SIoana Radulescu MODULE_DESCRIPTION("Freescale DPAA2 Ethernet Driver"); 3334ff6846SIoana Radulescu 34d21c784cSYangbo Lu struct ptp_qoriq *dpaa2_ptp; 35d21c784cSYangbo Lu EXPORT_SYMBOL(dpaa2_ptp); 36d21c784cSYangbo Lu 3734ff6846SIoana Radulescu static void *dpaa2_iova_to_virt(struct iommu_domain *domain, 3834ff6846SIoana Radulescu dma_addr_t iova_addr) 3934ff6846SIoana Radulescu { 4034ff6846SIoana Radulescu phys_addr_t phys_addr; 4134ff6846SIoana Radulescu 4234ff6846SIoana Radulescu phys_addr = domain ? iommu_iova_to_phys(domain, iova_addr) : iova_addr; 4334ff6846SIoana Radulescu 4434ff6846SIoana Radulescu return phys_to_virt(phys_addr); 4534ff6846SIoana Radulescu } 4634ff6846SIoana Radulescu 475d8dccf8SIoana Ciornei static void dpaa2_eth_validate_rx_csum(struct dpaa2_eth_priv *priv, 4834ff6846SIoana Radulescu u32 fd_status, 4934ff6846SIoana Radulescu struct sk_buff *skb) 5034ff6846SIoana Radulescu { 5134ff6846SIoana Radulescu skb_checksum_none_assert(skb); 5234ff6846SIoana Radulescu 5334ff6846SIoana Radulescu /* HW checksum validation is disabled, nothing to do here */ 5434ff6846SIoana Radulescu if (!(priv->net_dev->features & NETIF_F_RXCSUM)) 5534ff6846SIoana Radulescu return; 5634ff6846SIoana Radulescu 5734ff6846SIoana Radulescu /* Read checksum validation bits */ 5834ff6846SIoana Radulescu if (!((fd_status & DPAA2_FAS_L3CV) && 5934ff6846SIoana Radulescu (fd_status & DPAA2_FAS_L4CV))) 6034ff6846SIoana Radulescu return; 6134ff6846SIoana Radulescu 6234ff6846SIoana Radulescu /* Inform the stack there's no need to compute L3/L4 csum anymore */ 
6334ff6846SIoana Radulescu skb->ip_summed = CHECKSUM_UNNECESSARY; 6434ff6846SIoana Radulescu } 6534ff6846SIoana Radulescu 6634ff6846SIoana Radulescu /* Free a received FD. 6734ff6846SIoana Radulescu * Not to be used for Tx conf FDs or on any other paths. 6834ff6846SIoana Radulescu */ 695d8dccf8SIoana Ciornei static void dpaa2_eth_free_rx_fd(struct dpaa2_eth_priv *priv, 7034ff6846SIoana Radulescu const struct dpaa2_fd *fd, 7134ff6846SIoana Radulescu void *vaddr) 7234ff6846SIoana Radulescu { 7334ff6846SIoana Radulescu struct device *dev = priv->net_dev->dev.parent; 7434ff6846SIoana Radulescu dma_addr_t addr = dpaa2_fd_get_addr(fd); 7534ff6846SIoana Radulescu u8 fd_format = dpaa2_fd_get_format(fd); 7634ff6846SIoana Radulescu struct dpaa2_sg_entry *sgt; 7734ff6846SIoana Radulescu void *sg_vaddr; 7834ff6846SIoana Radulescu int i; 7934ff6846SIoana Radulescu 8034ff6846SIoana Radulescu /* If single buffer frame, just free the data buffer */ 8134ff6846SIoana Radulescu if (fd_format == dpaa2_fd_single) 8234ff6846SIoana Radulescu goto free_buf; 8334ff6846SIoana Radulescu else if (fd_format != dpaa2_fd_sg) 8434ff6846SIoana Radulescu /* We don't support any other format */ 8534ff6846SIoana Radulescu return; 8634ff6846SIoana Radulescu 8734ff6846SIoana Radulescu /* For S/G frames, we first need to free all SG entries 8834ff6846SIoana Radulescu * except the first one, which was taken care of already 8934ff6846SIoana Radulescu */ 9034ff6846SIoana Radulescu sgt = vaddr + dpaa2_fd_get_offset(fd); 9134ff6846SIoana Radulescu for (i = 1; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) { 9234ff6846SIoana Radulescu addr = dpaa2_sg_get_addr(&sgt[i]); 9334ff6846SIoana Radulescu sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr); 94efa6a7d0SIoana Ciornei dma_unmap_page(dev, addr, priv->rx_buf_size, 9518c2e770SIoana Ciocoi Radulescu DMA_BIDIRECTIONAL); 9634ff6846SIoana Radulescu 9727c87486SIoana Ciocoi Radulescu free_pages((unsigned long)sg_vaddr, 0); 9834ff6846SIoana Radulescu if 
(dpaa2_sg_is_final(&sgt[i])) 9934ff6846SIoana Radulescu break; 10034ff6846SIoana Radulescu } 10134ff6846SIoana Radulescu 10234ff6846SIoana Radulescu free_buf: 10327c87486SIoana Ciocoi Radulescu free_pages((unsigned long)vaddr, 0); 10434ff6846SIoana Radulescu } 10534ff6846SIoana Radulescu 10634ff6846SIoana Radulescu /* Build a linear skb based on a single-buffer frame descriptor */ 1075d8dccf8SIoana Ciornei static struct sk_buff *dpaa2_eth_build_linear_skb(struct dpaa2_eth_channel *ch, 10834ff6846SIoana Radulescu const struct dpaa2_fd *fd, 10934ff6846SIoana Radulescu void *fd_vaddr) 11034ff6846SIoana Radulescu { 11134ff6846SIoana Radulescu struct sk_buff *skb = NULL; 11234ff6846SIoana Radulescu u16 fd_offset = dpaa2_fd_get_offset(fd); 11334ff6846SIoana Radulescu u32 fd_length = dpaa2_fd_get_len(fd); 11434ff6846SIoana Radulescu 11534ff6846SIoana Radulescu ch->buf_count--; 11634ff6846SIoana Radulescu 11727c87486SIoana Ciocoi Radulescu skb = build_skb(fd_vaddr, DPAA2_ETH_RX_BUF_RAW_SIZE); 11834ff6846SIoana Radulescu if (unlikely(!skb)) 11934ff6846SIoana Radulescu return NULL; 12034ff6846SIoana Radulescu 12134ff6846SIoana Radulescu skb_reserve(skb, fd_offset); 12234ff6846SIoana Radulescu skb_put(skb, fd_length); 12334ff6846SIoana Radulescu 12434ff6846SIoana Radulescu return skb; 12534ff6846SIoana Radulescu } 12634ff6846SIoana Radulescu 12734ff6846SIoana Radulescu /* Build a non linear (fragmented) skb based on a S/G table */ 1285d8dccf8SIoana Ciornei static struct sk_buff *dpaa2_eth_build_frag_skb(struct dpaa2_eth_priv *priv, 12934ff6846SIoana Radulescu struct dpaa2_eth_channel *ch, 13034ff6846SIoana Radulescu struct dpaa2_sg_entry *sgt) 13134ff6846SIoana Radulescu { 13234ff6846SIoana Radulescu struct sk_buff *skb = NULL; 13334ff6846SIoana Radulescu struct device *dev = priv->net_dev->dev.parent; 13434ff6846SIoana Radulescu void *sg_vaddr; 13534ff6846SIoana Radulescu dma_addr_t sg_addr; 13634ff6846SIoana Radulescu u16 sg_offset; 13734ff6846SIoana Radulescu u32 
sg_length; 13834ff6846SIoana Radulescu struct page *page, *head_page; 13934ff6846SIoana Radulescu int page_offset; 14034ff6846SIoana Radulescu int i; 14134ff6846SIoana Radulescu 14234ff6846SIoana Radulescu for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) { 14334ff6846SIoana Radulescu struct dpaa2_sg_entry *sge = &sgt[i]; 14434ff6846SIoana Radulescu 14534ff6846SIoana Radulescu /* NOTE: We only support SG entries in dpaa2_sg_single format, 14634ff6846SIoana Radulescu * but this is the only format we may receive from HW anyway 14734ff6846SIoana Radulescu */ 14834ff6846SIoana Radulescu 14934ff6846SIoana Radulescu /* Get the address and length from the S/G entry */ 15034ff6846SIoana Radulescu sg_addr = dpaa2_sg_get_addr(sge); 15134ff6846SIoana Radulescu sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, sg_addr); 152efa6a7d0SIoana Ciornei dma_unmap_page(dev, sg_addr, priv->rx_buf_size, 15318c2e770SIoana Ciocoi Radulescu DMA_BIDIRECTIONAL); 15434ff6846SIoana Radulescu 15534ff6846SIoana Radulescu sg_length = dpaa2_sg_get_len(sge); 15634ff6846SIoana Radulescu 15734ff6846SIoana Radulescu if (i == 0) { 15834ff6846SIoana Radulescu /* We build the skb around the first data buffer */ 15927c87486SIoana Ciocoi Radulescu skb = build_skb(sg_vaddr, DPAA2_ETH_RX_BUF_RAW_SIZE); 16034ff6846SIoana Radulescu if (unlikely(!skb)) { 16134ff6846SIoana Radulescu /* Free the first SG entry now, since we already 16234ff6846SIoana Radulescu * unmapped it and obtained the virtual address 16334ff6846SIoana Radulescu */ 16427c87486SIoana Ciocoi Radulescu free_pages((unsigned long)sg_vaddr, 0); 16534ff6846SIoana Radulescu 16634ff6846SIoana Radulescu /* We still need to subtract the buffers used 16734ff6846SIoana Radulescu * by this FD from our software counter 16834ff6846SIoana Radulescu */ 16934ff6846SIoana Radulescu while (!dpaa2_sg_is_final(&sgt[i]) && 17034ff6846SIoana Radulescu i < DPAA2_ETH_MAX_SG_ENTRIES) 17134ff6846SIoana Radulescu i++; 17234ff6846SIoana Radulescu break; 17334ff6846SIoana 
Radulescu } 17434ff6846SIoana Radulescu 17534ff6846SIoana Radulescu sg_offset = dpaa2_sg_get_offset(sge); 17634ff6846SIoana Radulescu skb_reserve(skb, sg_offset); 17734ff6846SIoana Radulescu skb_put(skb, sg_length); 17834ff6846SIoana Radulescu } else { 17934ff6846SIoana Radulescu /* Rest of the data buffers are stored as skb frags */ 18034ff6846SIoana Radulescu page = virt_to_page(sg_vaddr); 18134ff6846SIoana Radulescu head_page = virt_to_head_page(sg_vaddr); 18234ff6846SIoana Radulescu 18334ff6846SIoana Radulescu /* Offset in page (which may be compound). 18434ff6846SIoana Radulescu * Data in subsequent SG entries is stored from the 18534ff6846SIoana Radulescu * beginning of the buffer, so we don't need to add the 18634ff6846SIoana Radulescu * sg_offset. 18734ff6846SIoana Radulescu */ 18834ff6846SIoana Radulescu page_offset = ((unsigned long)sg_vaddr & 18934ff6846SIoana Radulescu (PAGE_SIZE - 1)) + 19034ff6846SIoana Radulescu (page_address(page) - page_address(head_page)); 19134ff6846SIoana Radulescu 19234ff6846SIoana Radulescu skb_add_rx_frag(skb, i - 1, head_page, page_offset, 193efa6a7d0SIoana Ciornei sg_length, priv->rx_buf_size); 19434ff6846SIoana Radulescu } 19534ff6846SIoana Radulescu 19634ff6846SIoana Radulescu if (dpaa2_sg_is_final(sge)) 19734ff6846SIoana Radulescu break; 19834ff6846SIoana Radulescu } 19934ff6846SIoana Radulescu 20034ff6846SIoana Radulescu WARN_ONCE(i == DPAA2_ETH_MAX_SG_ENTRIES, "Final bit not set in SGT"); 20134ff6846SIoana Radulescu 20234ff6846SIoana Radulescu /* Count all data buffers + SG table buffer */ 20334ff6846SIoana Radulescu ch->buf_count -= i + 2; 20434ff6846SIoana Radulescu 20534ff6846SIoana Radulescu return skb; 20634ff6846SIoana Radulescu } 20734ff6846SIoana Radulescu 208569375fbSIoana Ciocoi Radulescu /* Free buffers acquired from the buffer pool or which were meant to 209569375fbSIoana Ciocoi Radulescu * be released in the pool 210569375fbSIoana Ciocoi Radulescu */ 2115d8dccf8SIoana Ciornei static void 
dpaa2_eth_free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array, 2125d8dccf8SIoana Ciornei int count) 213569375fbSIoana Ciocoi Radulescu { 214569375fbSIoana Ciocoi Radulescu struct device *dev = priv->net_dev->dev.parent; 215569375fbSIoana Ciocoi Radulescu void *vaddr; 216569375fbSIoana Ciocoi Radulescu int i; 217569375fbSIoana Ciocoi Radulescu 218569375fbSIoana Ciocoi Radulescu for (i = 0; i < count; i++) { 219569375fbSIoana Ciocoi Radulescu vaddr = dpaa2_iova_to_virt(priv->iommu_domain, buf_array[i]); 220efa6a7d0SIoana Ciornei dma_unmap_page(dev, buf_array[i], priv->rx_buf_size, 22118c2e770SIoana Ciocoi Radulescu DMA_BIDIRECTIONAL); 22227c87486SIoana Ciocoi Radulescu free_pages((unsigned long)vaddr, 0); 223569375fbSIoana Ciocoi Radulescu } 224569375fbSIoana Ciocoi Radulescu } 225569375fbSIoana Ciocoi Radulescu 2265d8dccf8SIoana Ciornei static void dpaa2_eth_xdp_release_buf(struct dpaa2_eth_priv *priv, 2275d39dc21SIoana Ciocoi Radulescu struct dpaa2_eth_channel *ch, 2285d39dc21SIoana Ciocoi Radulescu dma_addr_t addr) 2295d39dc21SIoana Ciocoi Radulescu { 230ef17bd7cSIoana Radulescu int retries = 0; 2315d39dc21SIoana Ciocoi Radulescu int err; 2325d39dc21SIoana Ciocoi Radulescu 2335d39dc21SIoana Ciocoi Radulescu ch->xdp.drop_bufs[ch->xdp.drop_cnt++] = addr; 2345d39dc21SIoana Ciocoi Radulescu if (ch->xdp.drop_cnt < DPAA2_ETH_BUFS_PER_CMD) 2355d39dc21SIoana Ciocoi Radulescu return; 2365d39dc21SIoana Ciocoi Radulescu 2375d39dc21SIoana Ciocoi Radulescu while ((err = dpaa2_io_service_release(ch->dpio, priv->bpid, 2385d39dc21SIoana Ciocoi Radulescu ch->xdp.drop_bufs, 239ef17bd7cSIoana Radulescu ch->xdp.drop_cnt)) == -EBUSY) { 240ef17bd7cSIoana Radulescu if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES) 241ef17bd7cSIoana Radulescu break; 2425d39dc21SIoana Ciocoi Radulescu cpu_relax(); 243ef17bd7cSIoana Radulescu } 2445d39dc21SIoana Ciocoi Radulescu 2455d39dc21SIoana Ciocoi Radulescu if (err) { 2465d8dccf8SIoana Ciornei dpaa2_eth_free_bufs(priv, ch->xdp.drop_bufs, 
ch->xdp.drop_cnt); 2475d39dc21SIoana Ciocoi Radulescu ch->buf_count -= ch->xdp.drop_cnt; 2485d39dc21SIoana Ciocoi Radulescu } 2495d39dc21SIoana Ciocoi Radulescu 2505d39dc21SIoana Ciocoi Radulescu ch->xdp.drop_cnt = 0; 2515d39dc21SIoana Ciocoi Radulescu } 2525d39dc21SIoana Ciocoi Radulescu 25338c440b2SIoana Ciornei static int dpaa2_eth_xdp_flush(struct dpaa2_eth_priv *priv, 25438c440b2SIoana Ciornei struct dpaa2_eth_fq *fq, 25538c440b2SIoana Ciornei struct dpaa2_eth_xdp_fds *xdp_fds) 25638c440b2SIoana Ciornei { 25738c440b2SIoana Ciornei int total_enqueued = 0, retries = 0, enqueued; 25838c440b2SIoana Ciornei struct dpaa2_eth_drv_stats *percpu_extras; 25938c440b2SIoana Ciornei int num_fds, err, max_retries; 26038c440b2SIoana Ciornei struct dpaa2_fd *fds; 26138c440b2SIoana Ciornei 26238c440b2SIoana Ciornei percpu_extras = this_cpu_ptr(priv->percpu_extras); 26338c440b2SIoana Ciornei 26438c440b2SIoana Ciornei /* try to enqueue all the FDs until the max number of retries is hit */ 26538c440b2SIoana Ciornei fds = xdp_fds->fds; 26638c440b2SIoana Ciornei num_fds = xdp_fds->num; 26738c440b2SIoana Ciornei max_retries = num_fds * DPAA2_ETH_ENQUEUE_RETRIES; 26838c440b2SIoana Ciornei while (total_enqueued < num_fds && retries < max_retries) { 26938c440b2SIoana Ciornei err = priv->enqueue(priv, fq, &fds[total_enqueued], 27038c440b2SIoana Ciornei 0, num_fds - total_enqueued, &enqueued); 27138c440b2SIoana Ciornei if (err == -EBUSY) { 27238c440b2SIoana Ciornei percpu_extras->tx_portal_busy += ++retries; 27338c440b2SIoana Ciornei continue; 27438c440b2SIoana Ciornei } 27538c440b2SIoana Ciornei total_enqueued += enqueued; 27638c440b2SIoana Ciornei } 27738c440b2SIoana Ciornei xdp_fds->num = 0; 27838c440b2SIoana Ciornei 27938c440b2SIoana Ciornei return total_enqueued; 28038c440b2SIoana Ciornei } 28138c440b2SIoana Ciornei 2825d8dccf8SIoana Ciornei static void dpaa2_eth_xdp_tx_flush(struct dpaa2_eth_priv *priv, 28374a1c059SIoana Ciornei struct dpaa2_eth_channel *ch, 28474a1c059SIoana 
Ciornei struct dpaa2_eth_fq *fq) 28574a1c059SIoana Ciornei { 28674a1c059SIoana Ciornei struct rtnl_link_stats64 *percpu_stats; 28774a1c059SIoana Ciornei struct dpaa2_fd *fds; 28874a1c059SIoana Ciornei int enqueued, i; 28974a1c059SIoana Ciornei 29074a1c059SIoana Ciornei percpu_stats = this_cpu_ptr(priv->percpu_stats); 29174a1c059SIoana Ciornei 29274a1c059SIoana Ciornei // enqueue the array of XDP_TX frames 29374a1c059SIoana Ciornei enqueued = dpaa2_eth_xdp_flush(priv, fq, &fq->xdp_tx_fds); 29474a1c059SIoana Ciornei 29574a1c059SIoana Ciornei /* update statistics */ 29674a1c059SIoana Ciornei percpu_stats->tx_packets += enqueued; 29774a1c059SIoana Ciornei fds = fq->xdp_tx_fds.fds; 29874a1c059SIoana Ciornei for (i = 0; i < enqueued; i++) { 29974a1c059SIoana Ciornei percpu_stats->tx_bytes += dpaa2_fd_get_len(&fds[i]); 30074a1c059SIoana Ciornei ch->stats.xdp_tx++; 30174a1c059SIoana Ciornei } 30274a1c059SIoana Ciornei for (i = enqueued; i < fq->xdp_tx_fds.num; i++) { 3035d8dccf8SIoana Ciornei dpaa2_eth_xdp_release_buf(priv, ch, dpaa2_fd_get_addr(&fds[i])); 30474a1c059SIoana Ciornei percpu_stats->tx_errors++; 30574a1c059SIoana Ciornei ch->stats.xdp_tx_err++; 30674a1c059SIoana Ciornei } 30774a1c059SIoana Ciornei fq->xdp_tx_fds.num = 0; 30874a1c059SIoana Ciornei } 30974a1c059SIoana Ciornei 3105d8dccf8SIoana Ciornei static void dpaa2_eth_xdp_enqueue(struct dpaa2_eth_priv *priv, 31174a1c059SIoana Ciornei struct dpaa2_eth_channel *ch, 31274a1c059SIoana Ciornei struct dpaa2_fd *fd, 31399e43521SIoana Ciocoi Radulescu void *buf_start, u16 queue_id) 31499e43521SIoana Ciocoi Radulescu { 31599e43521SIoana Ciocoi Radulescu struct dpaa2_faead *faead; 31674a1c059SIoana Ciornei struct dpaa2_fd *dest_fd; 31774a1c059SIoana Ciornei struct dpaa2_eth_fq *fq; 31899e43521SIoana Ciocoi Radulescu u32 ctrl, frc; 31999e43521SIoana Ciocoi Radulescu 32099e43521SIoana Ciocoi Radulescu /* Mark the egress frame hardware annotation area as valid */ 32199e43521SIoana Ciocoi Radulescu frc = 
dpaa2_fd_get_frc(fd); 32299e43521SIoana Ciocoi Radulescu dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV); 32399e43521SIoana Ciocoi Radulescu dpaa2_fd_set_ctrl(fd, DPAA2_FD_CTRL_ASAL); 32499e43521SIoana Ciocoi Radulescu 32599e43521SIoana Ciocoi Radulescu /* Instruct hardware to release the FD buffer directly into 32699e43521SIoana Ciocoi Radulescu * the buffer pool once transmission is completed, instead of 32799e43521SIoana Ciocoi Radulescu * sending a Tx confirmation frame to us 32899e43521SIoana Ciocoi Radulescu */ 32999e43521SIoana Ciocoi Radulescu ctrl = DPAA2_FAEAD_A4V | DPAA2_FAEAD_A2V | DPAA2_FAEAD_EBDDV; 33099e43521SIoana Ciocoi Radulescu faead = dpaa2_get_faead(buf_start, false); 33199e43521SIoana Ciocoi Radulescu faead->ctrl = cpu_to_le32(ctrl); 33299e43521SIoana Ciocoi Radulescu faead->conf_fqid = 0; 33399e43521SIoana Ciocoi Radulescu 33499e43521SIoana Ciocoi Radulescu fq = &priv->fq[queue_id]; 33574a1c059SIoana Ciornei dest_fd = &fq->xdp_tx_fds.fds[fq->xdp_tx_fds.num++]; 33674a1c059SIoana Ciornei memcpy(dest_fd, fd, sizeof(*dest_fd)); 33799e43521SIoana Ciocoi Radulescu 33874a1c059SIoana Ciornei if (fq->xdp_tx_fds.num < DEV_MAP_BULK_SIZE) 33974a1c059SIoana Ciornei return; 34074a1c059SIoana Ciornei 3415d8dccf8SIoana Ciornei dpaa2_eth_xdp_tx_flush(priv, ch, fq); 34299e43521SIoana Ciocoi Radulescu } 34399e43521SIoana Ciocoi Radulescu 3445d8dccf8SIoana Ciornei static u32 dpaa2_eth_run_xdp(struct dpaa2_eth_priv *priv, 3457e273a8eSIoana Ciocoi Radulescu struct dpaa2_eth_channel *ch, 34699e43521SIoana Ciocoi Radulescu struct dpaa2_eth_fq *rx_fq, 3477e273a8eSIoana Ciocoi Radulescu struct dpaa2_fd *fd, void *vaddr) 3487e273a8eSIoana Ciocoi Radulescu { 3495d39dc21SIoana Ciocoi Radulescu dma_addr_t addr = dpaa2_fd_get_addr(fd); 3507e273a8eSIoana Ciocoi Radulescu struct bpf_prog *xdp_prog; 3517e273a8eSIoana Ciocoi Radulescu struct xdp_buff xdp; 3527e273a8eSIoana Ciocoi Radulescu u32 xdp_act = XDP_PASS; 35399e43521SIoana Ciocoi Radulescu int err; 35499e43521SIoana 
Ciocoi Radulescu 3557e273a8eSIoana Ciocoi Radulescu rcu_read_lock(); 3567e273a8eSIoana Ciocoi Radulescu 3577e273a8eSIoana Ciocoi Radulescu xdp_prog = READ_ONCE(ch->xdp.prog); 3587e273a8eSIoana Ciocoi Radulescu if (!xdp_prog) 3597e273a8eSIoana Ciocoi Radulescu goto out; 3607e273a8eSIoana Ciocoi Radulescu 3617e273a8eSIoana Ciocoi Radulescu xdp.data = vaddr + dpaa2_fd_get_offset(fd); 3627e273a8eSIoana Ciocoi Radulescu xdp.data_end = xdp.data + dpaa2_fd_get_len(fd); 3637b1eea1aSIoana Ciocoi Radulescu xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM; 3647e273a8eSIoana Ciocoi Radulescu xdp_set_data_meta_invalid(&xdp); 365d678be1dSIoana Radulescu xdp.rxq = &ch->xdp_rxq; 3667e273a8eSIoana Ciocoi Radulescu 3674a9b052aSJesper Dangaard Brouer xdp.frame_sz = DPAA2_ETH_RX_BUF_RAW_SIZE - 3684a9b052aSJesper Dangaard Brouer (dpaa2_fd_get_offset(fd) - XDP_PACKET_HEADROOM); 3694a9b052aSJesper Dangaard Brouer 3707e273a8eSIoana Ciocoi Radulescu xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp); 3717e273a8eSIoana Ciocoi Radulescu 3727b1eea1aSIoana Ciocoi Radulescu /* xdp.data pointer may have changed */ 3737b1eea1aSIoana Ciocoi Radulescu dpaa2_fd_set_offset(fd, xdp.data - vaddr); 3747b1eea1aSIoana Ciocoi Radulescu dpaa2_fd_set_len(fd, xdp.data_end - xdp.data); 3757b1eea1aSIoana Ciocoi Radulescu 3767e273a8eSIoana Ciocoi Radulescu switch (xdp_act) { 3777e273a8eSIoana Ciocoi Radulescu case XDP_PASS: 3787e273a8eSIoana Ciocoi Radulescu break; 37999e43521SIoana Ciocoi Radulescu case XDP_TX: 3805d8dccf8SIoana Ciornei dpaa2_eth_xdp_enqueue(priv, ch, fd, vaddr, rx_fq->flowid); 38199e43521SIoana Ciocoi Radulescu break; 3827e273a8eSIoana Ciocoi Radulescu default: 3837e273a8eSIoana Ciocoi Radulescu bpf_warn_invalid_xdp_action(xdp_act); 384df561f66SGustavo A. R. Silva fallthrough; 3857e273a8eSIoana Ciocoi Radulescu case XDP_ABORTED: 3867e273a8eSIoana Ciocoi Radulescu trace_xdp_exception(priv->net_dev, xdp_prog, xdp_act); 387df561f66SGustavo A. R. 
Silva fallthrough; 3887e273a8eSIoana Ciocoi Radulescu case XDP_DROP: 3895d8dccf8SIoana Ciornei dpaa2_eth_xdp_release_buf(priv, ch, addr); 390a4a7b762SIoana Ciocoi Radulescu ch->stats.xdp_drop++; 3917e273a8eSIoana Ciocoi Radulescu break; 392d678be1dSIoana Radulescu case XDP_REDIRECT: 393d678be1dSIoana Radulescu dma_unmap_page(priv->net_dev->dev.parent, addr, 394efa6a7d0SIoana Ciornei priv->rx_buf_size, DMA_BIDIRECTIONAL); 395d678be1dSIoana Radulescu ch->buf_count--; 3964a9b052aSJesper Dangaard Brouer 3974a9b052aSJesper Dangaard Brouer /* Allow redirect use of full headroom */ 398d678be1dSIoana Radulescu xdp.data_hard_start = vaddr; 3994a9b052aSJesper Dangaard Brouer xdp.frame_sz = DPAA2_ETH_RX_BUF_RAW_SIZE; 4004a9b052aSJesper Dangaard Brouer 401d678be1dSIoana Radulescu err = xdp_do_redirect(priv->net_dev, &xdp, xdp_prog); 402d678be1dSIoana Radulescu if (unlikely(err)) 403d678be1dSIoana Radulescu ch->stats.xdp_drop++; 404d678be1dSIoana Radulescu else 405d678be1dSIoana Radulescu ch->stats.xdp_redirect++; 406d678be1dSIoana Radulescu break; 4077e273a8eSIoana Ciocoi Radulescu } 4087e273a8eSIoana Ciocoi Radulescu 409d678be1dSIoana Radulescu ch->xdp.res |= xdp_act; 4107e273a8eSIoana Ciocoi Radulescu out: 4117e273a8eSIoana Ciocoi Radulescu rcu_read_unlock(); 4127e273a8eSIoana Ciocoi Radulescu return xdp_act; 4137e273a8eSIoana Ciocoi Radulescu } 4147e273a8eSIoana Ciocoi Radulescu 41534ff6846SIoana Radulescu /* Main Rx frame processing routine */ 41634ff6846SIoana Radulescu static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv, 41734ff6846SIoana Radulescu struct dpaa2_eth_channel *ch, 41834ff6846SIoana Radulescu const struct dpaa2_fd *fd, 419dbcdf728SIoana Ciocoi Radulescu struct dpaa2_eth_fq *fq) 42034ff6846SIoana Radulescu { 42134ff6846SIoana Radulescu dma_addr_t addr = dpaa2_fd_get_addr(fd); 42234ff6846SIoana Radulescu u8 fd_format = dpaa2_fd_get_format(fd); 42334ff6846SIoana Radulescu void *vaddr; 42434ff6846SIoana Radulescu struct sk_buff *skb; 42534ff6846SIoana Radulescu 
struct rtnl_link_stats64 *percpu_stats; 42634ff6846SIoana Radulescu struct dpaa2_eth_drv_stats *percpu_extras; 42734ff6846SIoana Radulescu struct device *dev = priv->net_dev->dev.parent; 42834ff6846SIoana Radulescu struct dpaa2_fas *fas; 42934ff6846SIoana Radulescu void *buf_data; 43034ff6846SIoana Radulescu u32 status = 0; 4317e273a8eSIoana Ciocoi Radulescu u32 xdp_act; 43234ff6846SIoana Radulescu 43334ff6846SIoana Radulescu /* Tracing point */ 43434ff6846SIoana Radulescu trace_dpaa2_rx_fd(priv->net_dev, fd); 43534ff6846SIoana Radulescu 43634ff6846SIoana Radulescu vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr); 437efa6a7d0SIoana Ciornei dma_sync_single_for_cpu(dev, addr, priv->rx_buf_size, 43818c2e770SIoana Ciocoi Radulescu DMA_BIDIRECTIONAL); 43934ff6846SIoana Radulescu 44034ff6846SIoana Radulescu fas = dpaa2_get_fas(vaddr, false); 44134ff6846SIoana Radulescu prefetch(fas); 44234ff6846SIoana Radulescu buf_data = vaddr + dpaa2_fd_get_offset(fd); 44334ff6846SIoana Radulescu prefetch(buf_data); 44434ff6846SIoana Radulescu 44534ff6846SIoana Radulescu percpu_stats = this_cpu_ptr(priv->percpu_stats); 44634ff6846SIoana Radulescu percpu_extras = this_cpu_ptr(priv->percpu_extras); 44734ff6846SIoana Radulescu 44834ff6846SIoana Radulescu if (fd_format == dpaa2_fd_single) { 4495d8dccf8SIoana Ciornei xdp_act = dpaa2_eth_run_xdp(priv, ch, fq, (struct dpaa2_fd *)fd, vaddr); 4507e273a8eSIoana Ciocoi Radulescu if (xdp_act != XDP_PASS) { 4517e273a8eSIoana Ciocoi Radulescu percpu_stats->rx_packets++; 4527e273a8eSIoana Ciocoi Radulescu percpu_stats->rx_bytes += dpaa2_fd_get_len(fd); 4537e273a8eSIoana Ciocoi Radulescu return; 4547e273a8eSIoana Ciocoi Radulescu } 4557e273a8eSIoana Ciocoi Radulescu 456efa6a7d0SIoana Ciornei dma_unmap_page(dev, addr, priv->rx_buf_size, 45718c2e770SIoana Ciocoi Radulescu DMA_BIDIRECTIONAL); 4585d8dccf8SIoana Ciornei skb = dpaa2_eth_build_linear_skb(ch, fd, vaddr); 45934ff6846SIoana Radulescu } else if (fd_format == dpaa2_fd_sg) { 4607e273a8eSIoana 
Ciocoi Radulescu WARN_ON(priv->xdp_prog); 4617e273a8eSIoana Ciocoi Radulescu 462efa6a7d0SIoana Ciornei dma_unmap_page(dev, addr, priv->rx_buf_size, 46318c2e770SIoana Ciocoi Radulescu DMA_BIDIRECTIONAL); 4645d8dccf8SIoana Ciornei skb = dpaa2_eth_build_frag_skb(priv, ch, buf_data); 46527c87486SIoana Ciocoi Radulescu free_pages((unsigned long)vaddr, 0); 46634ff6846SIoana Radulescu percpu_extras->rx_sg_frames++; 46734ff6846SIoana Radulescu percpu_extras->rx_sg_bytes += dpaa2_fd_get_len(fd); 46834ff6846SIoana Radulescu } else { 46934ff6846SIoana Radulescu /* We don't support any other format */ 47034ff6846SIoana Radulescu goto err_frame_format; 47134ff6846SIoana Radulescu } 47234ff6846SIoana Radulescu 47334ff6846SIoana Radulescu if (unlikely(!skb)) 47434ff6846SIoana Radulescu goto err_build_skb; 47534ff6846SIoana Radulescu 47634ff6846SIoana Radulescu prefetch(skb->data); 47734ff6846SIoana Radulescu 47834ff6846SIoana Radulescu /* Get the timestamp value */ 47934ff6846SIoana Radulescu if (priv->rx_tstamp) { 48034ff6846SIoana Radulescu struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb); 48134ff6846SIoana Radulescu __le64 *ts = dpaa2_get_ts(vaddr, false); 48234ff6846SIoana Radulescu u64 ns; 48334ff6846SIoana Radulescu 48434ff6846SIoana Radulescu memset(shhwtstamps, 0, sizeof(*shhwtstamps)); 48534ff6846SIoana Radulescu 48634ff6846SIoana Radulescu ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts); 48734ff6846SIoana Radulescu shhwtstamps->hwtstamp = ns_to_ktime(ns); 48834ff6846SIoana Radulescu } 48934ff6846SIoana Radulescu 49034ff6846SIoana Radulescu /* Check if we need to validate the L4 csum */ 49134ff6846SIoana Radulescu if (likely(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV)) { 49234ff6846SIoana Radulescu status = le32_to_cpu(fas->status); 4935d8dccf8SIoana Ciornei dpaa2_eth_validate_rx_csum(priv, status, skb); 49434ff6846SIoana Radulescu } 49534ff6846SIoana Radulescu 49634ff6846SIoana Radulescu skb->protocol = eth_type_trans(skb, priv->net_dev); 497dbcdf728SIoana 
Ciocoi Radulescu skb_record_rx_queue(skb, fq->flowid); 49834ff6846SIoana Radulescu 49934ff6846SIoana Radulescu percpu_stats->rx_packets++; 50034ff6846SIoana Radulescu percpu_stats->rx_bytes += dpaa2_fd_get_len(fd); 50134ff6846SIoana Radulescu 5020a25d92cSIoana Ciornei list_add_tail(&skb->list, ch->rx_list); 50334ff6846SIoana Radulescu 50434ff6846SIoana Radulescu return; 50534ff6846SIoana Radulescu 50634ff6846SIoana Radulescu err_build_skb: 5075d8dccf8SIoana Ciornei dpaa2_eth_free_rx_fd(priv, fd, vaddr); 50834ff6846SIoana Radulescu err_frame_format: 50934ff6846SIoana Radulescu percpu_stats->rx_dropped++; 51034ff6846SIoana Radulescu } 51134ff6846SIoana Radulescu 512061d631fSIoana Ciornei /* Processing of Rx frames received on the error FQ 513061d631fSIoana Ciornei * We check and print the error bits and then free the frame 514061d631fSIoana Ciornei */ 515061d631fSIoana Ciornei static void dpaa2_eth_rx_err(struct dpaa2_eth_priv *priv, 516061d631fSIoana Ciornei struct dpaa2_eth_channel *ch, 517061d631fSIoana Ciornei const struct dpaa2_fd *fd, 518061d631fSIoana Ciornei struct dpaa2_eth_fq *fq __always_unused) 519061d631fSIoana Ciornei { 520061d631fSIoana Ciornei struct device *dev = priv->net_dev->dev.parent; 521061d631fSIoana Ciornei dma_addr_t addr = dpaa2_fd_get_addr(fd); 522061d631fSIoana Ciornei u8 fd_format = dpaa2_fd_get_format(fd); 523061d631fSIoana Ciornei struct rtnl_link_stats64 *percpu_stats; 524061d631fSIoana Ciornei struct dpaa2_eth_trap_item *trap_item; 525061d631fSIoana Ciornei struct dpaa2_fapr *fapr; 526061d631fSIoana Ciornei struct sk_buff *skb; 527061d631fSIoana Ciornei void *buf_data; 528061d631fSIoana Ciornei void *vaddr; 529061d631fSIoana Ciornei 530061d631fSIoana Ciornei vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr); 531061d631fSIoana Ciornei dma_sync_single_for_cpu(dev, addr, priv->rx_buf_size, 532061d631fSIoana Ciornei DMA_BIDIRECTIONAL); 533061d631fSIoana Ciornei 534061d631fSIoana Ciornei buf_data = vaddr + dpaa2_fd_get_offset(fd); 
535061d631fSIoana Ciornei 536061d631fSIoana Ciornei if (fd_format == dpaa2_fd_single) { 537061d631fSIoana Ciornei dma_unmap_page(dev, addr, priv->rx_buf_size, 538061d631fSIoana Ciornei DMA_BIDIRECTIONAL); 539061d631fSIoana Ciornei skb = dpaa2_eth_build_linear_skb(ch, fd, vaddr); 540061d631fSIoana Ciornei } else if (fd_format == dpaa2_fd_sg) { 541061d631fSIoana Ciornei dma_unmap_page(dev, addr, priv->rx_buf_size, 542061d631fSIoana Ciornei DMA_BIDIRECTIONAL); 543061d631fSIoana Ciornei skb = dpaa2_eth_build_frag_skb(priv, ch, buf_data); 544061d631fSIoana Ciornei free_pages((unsigned long)vaddr, 0); 545061d631fSIoana Ciornei } else { 546061d631fSIoana Ciornei /* We don't support any other format */ 547061d631fSIoana Ciornei dpaa2_eth_free_rx_fd(priv, fd, vaddr); 548061d631fSIoana Ciornei goto err_frame_format; 549061d631fSIoana Ciornei } 550061d631fSIoana Ciornei 551061d631fSIoana Ciornei fapr = dpaa2_get_fapr(vaddr, false); 552061d631fSIoana Ciornei trap_item = dpaa2_eth_dl_get_trap(priv, fapr); 553061d631fSIoana Ciornei if (trap_item) 554061d631fSIoana Ciornei devlink_trap_report(priv->devlink, skb, trap_item->trap_ctx, 555061d631fSIoana Ciornei &priv->devlink_port, NULL); 556061d631fSIoana Ciornei consume_skb(skb); 557061d631fSIoana Ciornei 558061d631fSIoana Ciornei err_frame_format: 559061d631fSIoana Ciornei percpu_stats = this_cpu_ptr(priv->percpu_stats); 560061d631fSIoana Ciornei percpu_stats->rx_errors++; 561061d631fSIoana Ciornei ch->buf_count--; 562061d631fSIoana Ciornei } 563061d631fSIoana Ciornei 56434ff6846SIoana Radulescu /* Consume all frames pull-dequeued into the store. This is the simplest way to 56534ff6846SIoana Radulescu * make sure we don't accidentally issue another volatile dequeue which would 56634ff6846SIoana Radulescu * overwrite (leak) frames already in the store. 56734ff6846SIoana Radulescu * 56834ff6846SIoana Radulescu * Observance of NAPI budget is not our concern, leaving that to the caller. 
56934ff6846SIoana Radulescu */ 5705d8dccf8SIoana Ciornei static int dpaa2_eth_consume_frames(struct dpaa2_eth_channel *ch, 571569dac6aSIoana Ciocoi Radulescu struct dpaa2_eth_fq **src) 57234ff6846SIoana Radulescu { 57334ff6846SIoana Radulescu struct dpaa2_eth_priv *priv = ch->priv; 57468049a5fSIoana Ciocoi Radulescu struct dpaa2_eth_fq *fq = NULL; 57534ff6846SIoana Radulescu struct dpaa2_dq *dq; 57634ff6846SIoana Radulescu const struct dpaa2_fd *fd; 577ef17bd7cSIoana Radulescu int cleaned = 0, retries = 0; 57834ff6846SIoana Radulescu int is_last; 57934ff6846SIoana Radulescu 58034ff6846SIoana Radulescu do { 58134ff6846SIoana Radulescu dq = dpaa2_io_store_next(ch->store, &is_last); 58234ff6846SIoana Radulescu if (unlikely(!dq)) { 58334ff6846SIoana Radulescu /* If we're here, we *must* have placed a 58434ff6846SIoana Radulescu * volatile dequeue comnmand, so keep reading through 58534ff6846SIoana Radulescu * the store until we get some sort of valid response 58634ff6846SIoana Radulescu * token (either a valid frame or an "empty dequeue") 58734ff6846SIoana Radulescu */ 588ef17bd7cSIoana Radulescu if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES) { 589ef17bd7cSIoana Radulescu netdev_err_once(priv->net_dev, 590ef17bd7cSIoana Radulescu "Unable to read a valid dequeue response\n"); 591ef17bd7cSIoana Radulescu return -ETIMEDOUT; 592ef17bd7cSIoana Radulescu } 59334ff6846SIoana Radulescu continue; 59434ff6846SIoana Radulescu } 59534ff6846SIoana Radulescu 59634ff6846SIoana Radulescu fd = dpaa2_dq_fd(dq); 59734ff6846SIoana Radulescu fq = (struct dpaa2_eth_fq *)(uintptr_t)dpaa2_dq_fqd_ctx(dq); 59834ff6846SIoana Radulescu 599dbcdf728SIoana Ciocoi Radulescu fq->consume(priv, ch, fd, fq); 60034ff6846SIoana Radulescu cleaned++; 601ef17bd7cSIoana Radulescu retries = 0; 60234ff6846SIoana Radulescu } while (!is_last); 60334ff6846SIoana Radulescu 60468049a5fSIoana Ciocoi Radulescu if (!cleaned) 60568049a5fSIoana Ciocoi Radulescu return 0; 60668049a5fSIoana Ciocoi Radulescu 60768049a5fSIoana 
Ciocoi Radulescu fq->stats.frames += cleaned; 608460fd830SIoana Ciornei ch->stats.frames += cleaned; 60968049a5fSIoana Ciocoi Radulescu 61068049a5fSIoana Ciocoi Radulescu /* A dequeue operation only pulls frames from a single queue 611569dac6aSIoana Ciocoi Radulescu * into the store. Return the frame queue as an out param. 61268049a5fSIoana Ciocoi Radulescu */ 613569dac6aSIoana Ciocoi Radulescu if (src) 614569dac6aSIoana Ciocoi Radulescu *src = fq; 61568049a5fSIoana Ciocoi Radulescu 61634ff6846SIoana Radulescu return cleaned; 61734ff6846SIoana Radulescu } 61834ff6846SIoana Radulescu 619c5521189SYangbo Lu static int dpaa2_eth_ptp_parse(struct sk_buff *skb, 620c5521189SYangbo Lu u8 *msgtype, u8 *twostep, u8 *udp, 621c5521189SYangbo Lu u16 *correction_offset, 622c5521189SYangbo Lu u16 *origintimestamp_offset) 62334ff6846SIoana Radulescu { 624c5521189SYangbo Lu unsigned int ptp_class; 625c5521189SYangbo Lu struct ptp_header *hdr; 626c5521189SYangbo Lu unsigned int type; 627c5521189SYangbo Lu u8 *base; 628c5521189SYangbo Lu 629c5521189SYangbo Lu ptp_class = ptp_classify_raw(skb); 630c5521189SYangbo Lu if (ptp_class == PTP_CLASS_NONE) 631c5521189SYangbo Lu return -EINVAL; 632c5521189SYangbo Lu 633c5521189SYangbo Lu hdr = ptp_parse_header(skb, ptp_class); 634c5521189SYangbo Lu if (!hdr) 635c5521189SYangbo Lu return -EINVAL; 636c5521189SYangbo Lu 637c5521189SYangbo Lu *msgtype = ptp_get_msgtype(hdr, ptp_class); 638c5521189SYangbo Lu *twostep = hdr->flag_field[0] & 0x2; 639c5521189SYangbo Lu 640c5521189SYangbo Lu type = ptp_class & PTP_CLASS_PMASK; 641c5521189SYangbo Lu if (type == PTP_CLASS_IPV4 || 642c5521189SYangbo Lu type == PTP_CLASS_IPV6) 643c5521189SYangbo Lu *udp = 1; 644c5521189SYangbo Lu else 645c5521189SYangbo Lu *udp = 0; 646c5521189SYangbo Lu 647c5521189SYangbo Lu base = skb_mac_header(skb); 648c5521189SYangbo Lu *correction_offset = (u8 *)&hdr->correction - base; 649c5521189SYangbo Lu *origintimestamp_offset = (u8 *)hdr + sizeof(struct ptp_header) - base; 
650c5521189SYangbo Lu 651c5521189SYangbo Lu return 0; 652c5521189SYangbo Lu } 653c5521189SYangbo Lu 654c5521189SYangbo Lu /* Configure the egress frame annotation for timestamp update */ 655c5521189SYangbo Lu static void dpaa2_eth_enable_tx_tstamp(struct dpaa2_eth_priv *priv, 656c5521189SYangbo Lu struct dpaa2_fd *fd, 657c5521189SYangbo Lu void *buf_start, 658c5521189SYangbo Lu struct sk_buff *skb) 659c5521189SYangbo Lu { 660c5521189SYangbo Lu struct ptp_tstamp origin_timestamp; 661c5521189SYangbo Lu struct dpni_single_step_cfg cfg; 662c5521189SYangbo Lu u8 msgtype, twostep, udp; 66334ff6846SIoana Radulescu struct dpaa2_faead *faead; 664c5521189SYangbo Lu struct dpaa2_fas *fas; 665c5521189SYangbo Lu struct timespec64 ts; 666c5521189SYangbo Lu u16 offset1, offset2; 66734ff6846SIoana Radulescu u32 ctrl, frc; 668c5521189SYangbo Lu __le64 *ns; 669c5521189SYangbo Lu u8 *data; 67034ff6846SIoana Radulescu 67134ff6846SIoana Radulescu /* Mark the egress frame annotation area as valid */ 67234ff6846SIoana Radulescu frc = dpaa2_fd_get_frc(fd); 67334ff6846SIoana Radulescu dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV); 67434ff6846SIoana Radulescu 67534ff6846SIoana Radulescu /* Set hardware annotation size */ 67634ff6846SIoana Radulescu ctrl = dpaa2_fd_get_ctrl(fd); 67734ff6846SIoana Radulescu dpaa2_fd_set_ctrl(fd, ctrl | DPAA2_FD_CTRL_ASAL); 67834ff6846SIoana Radulescu 67934ff6846SIoana Radulescu /* enable UPD (update prepanded data) bit in FAEAD field of 68034ff6846SIoana Radulescu * hardware frame annotation area 68134ff6846SIoana Radulescu */ 68234ff6846SIoana Radulescu ctrl = DPAA2_FAEAD_A2V | DPAA2_FAEAD_UPDV | DPAA2_FAEAD_UPD; 68334ff6846SIoana Radulescu faead = dpaa2_get_faead(buf_start, true); 68434ff6846SIoana Radulescu faead->ctrl = cpu_to_le32(ctrl); 685c5521189SYangbo Lu 686c5521189SYangbo Lu if (skb->cb[0] == TX_TSTAMP_ONESTEP_SYNC) { 687c5521189SYangbo Lu if (dpaa2_eth_ptp_parse(skb, &msgtype, &twostep, &udp, 688c5521189SYangbo Lu &offset1, &offset2) || 
6896b6817c5SChristian Eggers msgtype != PTP_MSGTYPE_SYNC || twostep) { 690c5521189SYangbo Lu WARN_ONCE(1, "Bad packet for one-step timestamping\n"); 691c5521189SYangbo Lu return; 692c5521189SYangbo Lu } 693c5521189SYangbo Lu 694c5521189SYangbo Lu /* Mark the frame annotation status as valid */ 695c5521189SYangbo Lu frc = dpaa2_fd_get_frc(fd); 696c5521189SYangbo Lu dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FASV); 697c5521189SYangbo Lu 698c5521189SYangbo Lu /* Mark the PTP flag for one step timestamping */ 699c5521189SYangbo Lu fas = dpaa2_get_fas(buf_start, true); 700c5521189SYangbo Lu fas->status = cpu_to_le32(DPAA2_FAS_PTP); 701c5521189SYangbo Lu 702c5521189SYangbo Lu dpaa2_ptp->caps.gettime64(&dpaa2_ptp->caps, &ts); 703c5521189SYangbo Lu ns = dpaa2_get_ts(buf_start, true); 704c5521189SYangbo Lu *ns = cpu_to_le64(timespec64_to_ns(&ts) / 705c5521189SYangbo Lu DPAA2_PTP_CLK_PERIOD_NS); 706c5521189SYangbo Lu 707c5521189SYangbo Lu /* Update current time to PTP message originTimestamp field */ 708c5521189SYangbo Lu ns_to_ptp_tstamp(&origin_timestamp, le64_to_cpup(ns)); 709c5521189SYangbo Lu data = skb_mac_header(skb); 710c5521189SYangbo Lu *(__be16 *)(data + offset2) = htons(origin_timestamp.sec_msb); 711c5521189SYangbo Lu *(__be32 *)(data + offset2 + 2) = 712c5521189SYangbo Lu htonl(origin_timestamp.sec_lsb); 713c5521189SYangbo Lu *(__be32 *)(data + offset2 + 6) = htonl(origin_timestamp.nsec); 714c5521189SYangbo Lu 715c5521189SYangbo Lu cfg.en = 1; 716c5521189SYangbo Lu cfg.ch_update = udp; 717c5521189SYangbo Lu cfg.offset = offset1; 718c5521189SYangbo Lu cfg.peer_delay = 0; 719c5521189SYangbo Lu 720c5521189SYangbo Lu if (dpni_set_single_step_cfg(priv->mc_io, 0, priv->mc_token, 721c5521189SYangbo Lu &cfg)) 722c5521189SYangbo Lu WARN_ONCE(1, "Failed to set single step register"); 723c5521189SYangbo Lu } 72434ff6846SIoana Radulescu } 72534ff6846SIoana Radulescu 72634ff6846SIoana Radulescu /* Create a frame descriptor based on a fragmented skb */ 7275d8dccf8SIoana Ciornei 
static int dpaa2_eth_build_sg_fd(struct dpaa2_eth_priv *priv, 72834ff6846SIoana Radulescu struct sk_buff *skb, 72964a965deSYangbo Lu struct dpaa2_fd *fd, 73064a965deSYangbo Lu void **swa_addr) 73134ff6846SIoana Radulescu { 73234ff6846SIoana Radulescu struct device *dev = priv->net_dev->dev.parent; 73334ff6846SIoana Radulescu void *sgt_buf = NULL; 73434ff6846SIoana Radulescu dma_addr_t addr; 73534ff6846SIoana Radulescu int nr_frags = skb_shinfo(skb)->nr_frags; 73634ff6846SIoana Radulescu struct dpaa2_sg_entry *sgt; 73734ff6846SIoana Radulescu int i, err; 73834ff6846SIoana Radulescu int sgt_buf_size; 73934ff6846SIoana Radulescu struct scatterlist *scl, *crt_scl; 74034ff6846SIoana Radulescu int num_sg; 74134ff6846SIoana Radulescu int num_dma_bufs; 74234ff6846SIoana Radulescu struct dpaa2_eth_swa *swa; 74334ff6846SIoana Radulescu 74434ff6846SIoana Radulescu /* Create and map scatterlist. 74534ff6846SIoana Radulescu * We don't advertise NETIF_F_FRAGLIST, so skb_to_sgvec() will not have 74634ff6846SIoana Radulescu * to go beyond nr_frags+1. 
74734ff6846SIoana Radulescu * Note: We don't support chained scatterlists 74834ff6846SIoana Radulescu */ 74934ff6846SIoana Radulescu if (unlikely(PAGE_SIZE / sizeof(struct scatterlist) < nr_frags + 1)) 75034ff6846SIoana Radulescu return -EINVAL; 75134ff6846SIoana Radulescu 752d4ceb8deSJulia Lawall scl = kmalloc_array(nr_frags + 1, sizeof(struct scatterlist), GFP_ATOMIC); 75334ff6846SIoana Radulescu if (unlikely(!scl)) 75434ff6846SIoana Radulescu return -ENOMEM; 75534ff6846SIoana Radulescu 75634ff6846SIoana Radulescu sg_init_table(scl, nr_frags + 1); 75734ff6846SIoana Radulescu num_sg = skb_to_sgvec(skb, scl, 0, skb->len); 75837fbbddaSIoana Ciornei if (unlikely(num_sg < 0)) { 75937fbbddaSIoana Ciornei err = -ENOMEM; 76037fbbddaSIoana Ciornei goto dma_map_sg_failed; 76137fbbddaSIoana Ciornei } 76234ff6846SIoana Radulescu num_dma_bufs = dma_map_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL); 76334ff6846SIoana Radulescu if (unlikely(!num_dma_bufs)) { 76434ff6846SIoana Radulescu err = -ENOMEM; 76534ff6846SIoana Radulescu goto dma_map_sg_failed; 76634ff6846SIoana Radulescu } 76734ff6846SIoana Radulescu 76834ff6846SIoana Radulescu /* Prepare the HW SGT structure */ 76934ff6846SIoana Radulescu sgt_buf_size = priv->tx_data_offset + 77034ff6846SIoana Radulescu sizeof(struct dpaa2_sg_entry) * num_dma_bufs; 77190bc6d4bSSebastian Andrzej Siewior sgt_buf = napi_alloc_frag(sgt_buf_size + DPAA2_ETH_TX_BUF_ALIGN); 77234ff6846SIoana Radulescu if (unlikely(!sgt_buf)) { 77334ff6846SIoana Radulescu err = -ENOMEM; 77434ff6846SIoana Radulescu goto sgt_buf_alloc_failed; 77534ff6846SIoana Radulescu } 77634ff6846SIoana Radulescu sgt_buf = PTR_ALIGN(sgt_buf, DPAA2_ETH_TX_BUF_ALIGN); 77734ff6846SIoana Radulescu memset(sgt_buf, 0, sgt_buf_size); 77834ff6846SIoana Radulescu 77934ff6846SIoana Radulescu sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset); 78034ff6846SIoana Radulescu 78134ff6846SIoana Radulescu /* Fill in the HW SGT structure. 
78234ff6846SIoana Radulescu * 78334ff6846SIoana Radulescu * sgt_buf is zeroed out, so the following fields are implicit 78434ff6846SIoana Radulescu * in all sgt entries: 78534ff6846SIoana Radulescu * - offset is 0 78634ff6846SIoana Radulescu * - format is 'dpaa2_sg_single' 78734ff6846SIoana Radulescu */ 78834ff6846SIoana Radulescu for_each_sg(scl, crt_scl, num_dma_bufs, i) { 78934ff6846SIoana Radulescu dpaa2_sg_set_addr(&sgt[i], sg_dma_address(crt_scl)); 79034ff6846SIoana Radulescu dpaa2_sg_set_len(&sgt[i], sg_dma_len(crt_scl)); 79134ff6846SIoana Radulescu } 79234ff6846SIoana Radulescu dpaa2_sg_set_final(&sgt[i - 1], true); 79334ff6846SIoana Radulescu 79434ff6846SIoana Radulescu /* Store the skb backpointer in the SGT buffer. 79534ff6846SIoana Radulescu * Fit the scatterlist and the number of buffers alongside the 79634ff6846SIoana Radulescu * skb backpointer in the software annotation area. We'll need 79734ff6846SIoana Radulescu * all of them on Tx Conf. 79834ff6846SIoana Radulescu */ 79964a965deSYangbo Lu *swa_addr = (void *)sgt_buf; 80034ff6846SIoana Radulescu swa = (struct dpaa2_eth_swa *)sgt_buf; 801e3fdf6baSIoana Radulescu swa->type = DPAA2_ETH_SWA_SG; 802e3fdf6baSIoana Radulescu swa->sg.skb = skb; 803e3fdf6baSIoana Radulescu swa->sg.scl = scl; 804e3fdf6baSIoana Radulescu swa->sg.num_sg = num_sg; 805e3fdf6baSIoana Radulescu swa->sg.sgt_size = sgt_buf_size; 80634ff6846SIoana Radulescu 80734ff6846SIoana Radulescu /* Separately map the SGT buffer */ 80834ff6846SIoana Radulescu addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL); 80934ff6846SIoana Radulescu if (unlikely(dma_mapping_error(dev, addr))) { 81034ff6846SIoana Radulescu err = -ENOMEM; 81134ff6846SIoana Radulescu goto dma_map_single_failed; 81234ff6846SIoana Radulescu } 81334ff6846SIoana Radulescu dpaa2_fd_set_offset(fd, priv->tx_data_offset); 81434ff6846SIoana Radulescu dpaa2_fd_set_format(fd, dpaa2_fd_sg); 81534ff6846SIoana Radulescu dpaa2_fd_set_addr(fd, addr); 81634ff6846SIoana 
Radulescu dpaa2_fd_set_len(fd, skb->len); 817b948c8c6SIoana Radulescu dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA); 81834ff6846SIoana Radulescu 81934ff6846SIoana Radulescu return 0; 82034ff6846SIoana Radulescu 82134ff6846SIoana Radulescu dma_map_single_failed: 82234ff6846SIoana Radulescu skb_free_frag(sgt_buf); 82334ff6846SIoana Radulescu sgt_buf_alloc_failed: 82434ff6846SIoana Radulescu dma_unmap_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL); 82534ff6846SIoana Radulescu dma_map_sg_failed: 82634ff6846SIoana Radulescu kfree(scl); 82734ff6846SIoana Radulescu return err; 82834ff6846SIoana Radulescu } 82934ff6846SIoana Radulescu 830d70446eeSIoana Ciornei /* Create a SG frame descriptor based on a linear skb. 831d70446eeSIoana Ciornei * 832d70446eeSIoana Ciornei * This function is used on the Tx path when the skb headroom is not large 833d70446eeSIoana Ciornei * enough for the HW requirements, thus instead of realloc-ing the skb we 834d70446eeSIoana Ciornei * create a SG frame descriptor with only one entry. 
835d70446eeSIoana Ciornei */ 8365d8dccf8SIoana Ciornei static int dpaa2_eth_build_sg_fd_single_buf(struct dpaa2_eth_priv *priv, 837d70446eeSIoana Ciornei struct sk_buff *skb, 83864a965deSYangbo Lu struct dpaa2_fd *fd, 83964a965deSYangbo Lu void **swa_addr) 840d70446eeSIoana Ciornei { 841d70446eeSIoana Ciornei struct device *dev = priv->net_dev->dev.parent; 842d70446eeSIoana Ciornei struct dpaa2_eth_sgt_cache *sgt_cache; 843d70446eeSIoana Ciornei struct dpaa2_sg_entry *sgt; 844d70446eeSIoana Ciornei struct dpaa2_eth_swa *swa; 845d70446eeSIoana Ciornei dma_addr_t addr, sgt_addr; 846d70446eeSIoana Ciornei void *sgt_buf = NULL; 847d70446eeSIoana Ciornei int sgt_buf_size; 848d70446eeSIoana Ciornei int err; 849d70446eeSIoana Ciornei 850d70446eeSIoana Ciornei /* Prepare the HW SGT structure */ 851d70446eeSIoana Ciornei sgt_cache = this_cpu_ptr(priv->sgt_cache); 852d70446eeSIoana Ciornei sgt_buf_size = priv->tx_data_offset + sizeof(struct dpaa2_sg_entry); 853d70446eeSIoana Ciornei 854d70446eeSIoana Ciornei if (sgt_cache->count == 0) 855d70446eeSIoana Ciornei sgt_buf = kzalloc(sgt_buf_size + DPAA2_ETH_TX_BUF_ALIGN, 856d70446eeSIoana Ciornei GFP_ATOMIC); 857d70446eeSIoana Ciornei else 858d70446eeSIoana Ciornei sgt_buf = sgt_cache->buf[--sgt_cache->count]; 859d70446eeSIoana Ciornei if (unlikely(!sgt_buf)) 860d70446eeSIoana Ciornei return -ENOMEM; 861d70446eeSIoana Ciornei 862d70446eeSIoana Ciornei sgt_buf = PTR_ALIGN(sgt_buf, DPAA2_ETH_TX_BUF_ALIGN); 863d70446eeSIoana Ciornei sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset); 864d70446eeSIoana Ciornei 865d70446eeSIoana Ciornei addr = dma_map_single(dev, skb->data, skb->len, DMA_BIDIRECTIONAL); 866d70446eeSIoana Ciornei if (unlikely(dma_mapping_error(dev, addr))) { 867d70446eeSIoana Ciornei err = -ENOMEM; 868d70446eeSIoana Ciornei goto data_map_failed; 869d70446eeSIoana Ciornei } 870d70446eeSIoana Ciornei 871d70446eeSIoana Ciornei /* Fill in the HW SGT structure */ 872d70446eeSIoana Ciornei dpaa2_sg_set_addr(sgt, 
addr); 873d70446eeSIoana Ciornei dpaa2_sg_set_len(sgt, skb->len); 874d70446eeSIoana Ciornei dpaa2_sg_set_final(sgt, true); 875d70446eeSIoana Ciornei 876d70446eeSIoana Ciornei /* Store the skb backpointer in the SGT buffer */ 87764a965deSYangbo Lu *swa_addr = (void *)sgt_buf; 878d70446eeSIoana Ciornei swa = (struct dpaa2_eth_swa *)sgt_buf; 879d70446eeSIoana Ciornei swa->type = DPAA2_ETH_SWA_SINGLE; 880d70446eeSIoana Ciornei swa->single.skb = skb; 88154a57d1cSIoana Ciornei swa->single.sgt_size = sgt_buf_size; 882d70446eeSIoana Ciornei 883d70446eeSIoana Ciornei /* Separately map the SGT buffer */ 884d70446eeSIoana Ciornei sgt_addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL); 885d70446eeSIoana Ciornei if (unlikely(dma_mapping_error(dev, sgt_addr))) { 886d70446eeSIoana Ciornei err = -ENOMEM; 887d70446eeSIoana Ciornei goto sgt_map_failed; 888d70446eeSIoana Ciornei } 889d70446eeSIoana Ciornei 890d70446eeSIoana Ciornei dpaa2_fd_set_offset(fd, priv->tx_data_offset); 891d70446eeSIoana Ciornei dpaa2_fd_set_format(fd, dpaa2_fd_sg); 892d70446eeSIoana Ciornei dpaa2_fd_set_addr(fd, sgt_addr); 893d70446eeSIoana Ciornei dpaa2_fd_set_len(fd, skb->len); 894d70446eeSIoana Ciornei dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA); 895d70446eeSIoana Ciornei 896d70446eeSIoana Ciornei return 0; 897d70446eeSIoana Ciornei 898d70446eeSIoana Ciornei sgt_map_failed: 899d70446eeSIoana Ciornei dma_unmap_single(dev, addr, skb->len, DMA_BIDIRECTIONAL); 900d70446eeSIoana Ciornei data_map_failed: 901d70446eeSIoana Ciornei if (sgt_cache->count >= DPAA2_ETH_SGT_CACHE_SIZE) 902d70446eeSIoana Ciornei kfree(sgt_buf); 903d70446eeSIoana Ciornei else 904d70446eeSIoana Ciornei sgt_cache->buf[sgt_cache->count++] = sgt_buf; 905d70446eeSIoana Ciornei 906d70446eeSIoana Ciornei return err; 907d70446eeSIoana Ciornei } 908d70446eeSIoana Ciornei 90934ff6846SIoana Radulescu /* Create a frame descriptor based on a linear skb */ 9105d8dccf8SIoana Ciornei static int dpaa2_eth_build_single_fd(struct dpaa2_eth_priv 
*priv, 91134ff6846SIoana Radulescu struct sk_buff *skb, 91264a965deSYangbo Lu struct dpaa2_fd *fd, 91364a965deSYangbo Lu void **swa_addr) 91434ff6846SIoana Radulescu { 91534ff6846SIoana Radulescu struct device *dev = priv->net_dev->dev.parent; 91634ff6846SIoana Radulescu u8 *buffer_start, *aligned_start; 917e3fdf6baSIoana Radulescu struct dpaa2_eth_swa *swa; 91834ff6846SIoana Radulescu dma_addr_t addr; 91934ff6846SIoana Radulescu 9201cf773bdSYangbo Lu buffer_start = skb->data - dpaa2_eth_needed_headroom(skb); 92134ff6846SIoana Radulescu 92234ff6846SIoana Radulescu /* If there's enough room to align the FD address, do it. 92334ff6846SIoana Radulescu * It will help hardware optimize accesses. 92434ff6846SIoana Radulescu */ 92534ff6846SIoana Radulescu aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN, 92634ff6846SIoana Radulescu DPAA2_ETH_TX_BUF_ALIGN); 92734ff6846SIoana Radulescu if (aligned_start >= skb->head) 92834ff6846SIoana Radulescu buffer_start = aligned_start; 92934ff6846SIoana Radulescu 93034ff6846SIoana Radulescu /* Store a backpointer to the skb at the beginning of the buffer 93134ff6846SIoana Radulescu * (in the private data area) such that we can release it 93234ff6846SIoana Radulescu * on Tx confirm 93334ff6846SIoana Radulescu */ 93464a965deSYangbo Lu *swa_addr = (void *)buffer_start; 935e3fdf6baSIoana Radulescu swa = (struct dpaa2_eth_swa *)buffer_start; 936e3fdf6baSIoana Radulescu swa->type = DPAA2_ETH_SWA_SINGLE; 937e3fdf6baSIoana Radulescu swa->single.skb = skb; 93834ff6846SIoana Radulescu 93934ff6846SIoana Radulescu addr = dma_map_single(dev, buffer_start, 94034ff6846SIoana Radulescu skb_tail_pointer(skb) - buffer_start, 94134ff6846SIoana Radulescu DMA_BIDIRECTIONAL); 94234ff6846SIoana Radulescu if (unlikely(dma_mapping_error(dev, addr))) 94334ff6846SIoana Radulescu return -ENOMEM; 94434ff6846SIoana Radulescu 94534ff6846SIoana Radulescu dpaa2_fd_set_addr(fd, addr); 94634ff6846SIoana Radulescu dpaa2_fd_set_offset(fd, (u16)(skb->data - 
buffer_start)); 94734ff6846SIoana Radulescu dpaa2_fd_set_len(fd, skb->len); 94834ff6846SIoana Radulescu dpaa2_fd_set_format(fd, dpaa2_fd_single); 949b948c8c6SIoana Radulescu dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA); 95034ff6846SIoana Radulescu 95134ff6846SIoana Radulescu return 0; 95234ff6846SIoana Radulescu } 95334ff6846SIoana Radulescu 95434ff6846SIoana Radulescu /* FD freeing routine on the Tx path 95534ff6846SIoana Radulescu * 95634ff6846SIoana Radulescu * DMA-unmap and free FD and possibly SGT buffer allocated on Tx. The skb 95734ff6846SIoana Radulescu * back-pointed to is also freed. 95834ff6846SIoana Radulescu * This can be called either from dpaa2_eth_tx_conf() or on the error path of 95934ff6846SIoana Radulescu * dpaa2_eth_tx(). 96034ff6846SIoana Radulescu */ 961c5521189SYangbo Lu static void dpaa2_eth_free_tx_fd(struct dpaa2_eth_priv *priv, 962d678be1dSIoana Radulescu struct dpaa2_eth_fq *fq, 9630723a3aeSIoana Ciocoi Radulescu const struct dpaa2_fd *fd, bool in_napi) 96434ff6846SIoana Radulescu { 96534ff6846SIoana Radulescu struct device *dev = priv->net_dev->dev.parent; 966d70446eeSIoana Ciornei dma_addr_t fd_addr, sg_addr; 967d678be1dSIoana Radulescu struct sk_buff *skb = NULL; 96834ff6846SIoana Radulescu unsigned char *buffer_start; 96934ff6846SIoana Radulescu struct dpaa2_eth_swa *swa; 97034ff6846SIoana Radulescu u8 fd_format = dpaa2_fd_get_format(fd); 971d678be1dSIoana Radulescu u32 fd_len = dpaa2_fd_get_len(fd); 97234ff6846SIoana Radulescu 973d70446eeSIoana Ciornei struct dpaa2_eth_sgt_cache *sgt_cache; 974d70446eeSIoana Ciornei struct dpaa2_sg_entry *sgt; 975d70446eeSIoana Ciornei 97634ff6846SIoana Radulescu fd_addr = dpaa2_fd_get_addr(fd); 977e3fdf6baSIoana Radulescu buffer_start = dpaa2_iova_to_virt(priv->iommu_domain, fd_addr); 978e3fdf6baSIoana Radulescu swa = (struct dpaa2_eth_swa *)buffer_start; 97934ff6846SIoana Radulescu 98034ff6846SIoana Radulescu if (fd_format == dpaa2_fd_single) { 981d678be1dSIoana Radulescu if (swa->type == 
DPAA2_ETH_SWA_SINGLE) { 982e3fdf6baSIoana Radulescu skb = swa->single.skb; 983d678be1dSIoana Radulescu /* Accessing the skb buffer is safe before dma unmap, 984d678be1dSIoana Radulescu * because we didn't map the actual skb shell. 98534ff6846SIoana Radulescu */ 98634ff6846SIoana Radulescu dma_unmap_single(dev, fd_addr, 98734ff6846SIoana Radulescu skb_tail_pointer(skb) - buffer_start, 98834ff6846SIoana Radulescu DMA_BIDIRECTIONAL); 989d678be1dSIoana Radulescu } else { 990d678be1dSIoana Radulescu WARN_ONCE(swa->type != DPAA2_ETH_SWA_XDP, "Wrong SWA type"); 991d678be1dSIoana Radulescu dma_unmap_single(dev, fd_addr, swa->xdp.dma_size, 992d678be1dSIoana Radulescu DMA_BIDIRECTIONAL); 993d678be1dSIoana Radulescu } 99434ff6846SIoana Radulescu } else if (fd_format == dpaa2_fd_sg) { 995d70446eeSIoana Ciornei if (swa->type == DPAA2_ETH_SWA_SG) { 996e3fdf6baSIoana Radulescu skb = swa->sg.skb; 99734ff6846SIoana Radulescu 99834ff6846SIoana Radulescu /* Unmap the scatterlist */ 999e3fdf6baSIoana Radulescu dma_unmap_sg(dev, swa->sg.scl, swa->sg.num_sg, 1000e3fdf6baSIoana Radulescu DMA_BIDIRECTIONAL); 1001e3fdf6baSIoana Radulescu kfree(swa->sg.scl); 100234ff6846SIoana Radulescu 100334ff6846SIoana Radulescu /* Unmap the SGT buffer */ 1004e3fdf6baSIoana Radulescu dma_unmap_single(dev, fd_addr, swa->sg.sgt_size, 100534ff6846SIoana Radulescu DMA_BIDIRECTIONAL); 100634ff6846SIoana Radulescu } else { 1007d70446eeSIoana Ciornei skb = swa->single.skb; 1008d70446eeSIoana Ciornei 1009d70446eeSIoana Ciornei /* Unmap the SGT Buffer */ 1010d70446eeSIoana Ciornei dma_unmap_single(dev, fd_addr, swa->single.sgt_size, 1011d70446eeSIoana Ciornei DMA_BIDIRECTIONAL); 1012d70446eeSIoana Ciornei 1013d70446eeSIoana Ciornei sgt = (struct dpaa2_sg_entry *)(buffer_start + 1014d70446eeSIoana Ciornei priv->tx_data_offset); 1015d70446eeSIoana Ciornei sg_addr = dpaa2_sg_get_addr(sgt); 1016d70446eeSIoana Ciornei dma_unmap_single(dev, sg_addr, skb->len, DMA_BIDIRECTIONAL); 1017d70446eeSIoana Ciornei } 
1018d70446eeSIoana Ciornei } else { 101934ff6846SIoana Radulescu netdev_dbg(priv->net_dev, "Invalid FD format\n"); 102034ff6846SIoana Radulescu return; 102134ff6846SIoana Radulescu } 102234ff6846SIoana Radulescu 1023d678be1dSIoana Radulescu if (swa->type != DPAA2_ETH_SWA_XDP && in_napi) { 1024d678be1dSIoana Radulescu fq->dq_frames++; 1025d678be1dSIoana Radulescu fq->dq_bytes += fd_len; 1026d678be1dSIoana Radulescu } 1027d678be1dSIoana Radulescu 1028d678be1dSIoana Radulescu if (swa->type == DPAA2_ETH_SWA_XDP) { 1029d678be1dSIoana Radulescu xdp_return_frame(swa->xdp.xdpf); 1030d678be1dSIoana Radulescu return; 1031d678be1dSIoana Radulescu } 1032d678be1dSIoana Radulescu 103334ff6846SIoana Radulescu /* Get the timestamp value */ 10341cf773bdSYangbo Lu if (skb->cb[0] == TX_TSTAMP) { 103534ff6846SIoana Radulescu struct skb_shared_hwtstamps shhwtstamps; 1036e3fdf6baSIoana Radulescu __le64 *ts = dpaa2_get_ts(buffer_start, true); 103734ff6846SIoana Radulescu u64 ns; 103834ff6846SIoana Radulescu 103934ff6846SIoana Radulescu memset(&shhwtstamps, 0, sizeof(shhwtstamps)); 104034ff6846SIoana Radulescu 104134ff6846SIoana Radulescu ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts); 104234ff6846SIoana Radulescu shhwtstamps.hwtstamp = ns_to_ktime(ns); 104334ff6846SIoana Radulescu skb_tstamp_tx(skb, &shhwtstamps); 1044c5521189SYangbo Lu } else if (skb->cb[0] == TX_TSTAMP_ONESTEP_SYNC) { 1045c5521189SYangbo Lu mutex_unlock(&priv->onestep_tstamp_lock); 104634ff6846SIoana Radulescu } 104734ff6846SIoana Radulescu 104834ff6846SIoana Radulescu /* Free SGT buffer allocated on tx */ 1049d70446eeSIoana Ciornei if (fd_format != dpaa2_fd_single) { 1050d70446eeSIoana Ciornei sgt_cache = this_cpu_ptr(priv->sgt_cache); 1051d70446eeSIoana Ciornei if (swa->type == DPAA2_ETH_SWA_SG) { 1052e3fdf6baSIoana Radulescu skb_free_frag(buffer_start); 1053d70446eeSIoana Ciornei } else { 1054d70446eeSIoana Ciornei if (sgt_cache->count >= DPAA2_ETH_SGT_CACHE_SIZE) 1055d70446eeSIoana Ciornei kfree(buffer_start); 
1056d70446eeSIoana Ciornei else 1057d70446eeSIoana Ciornei sgt_cache->buf[sgt_cache->count++] = buffer_start; 1058d70446eeSIoana Ciornei } 1059d70446eeSIoana Ciornei } 106034ff6846SIoana Radulescu 106134ff6846SIoana Radulescu /* Move on with skb release */ 10620723a3aeSIoana Ciocoi Radulescu napi_consume_skb(skb, in_napi); 106334ff6846SIoana Radulescu } 106434ff6846SIoana Radulescu 1065c5521189SYangbo Lu static netdev_tx_t __dpaa2_eth_tx(struct sk_buff *skb, 1066c5521189SYangbo Lu struct net_device *net_dev) 106734ff6846SIoana Radulescu { 106834ff6846SIoana Radulescu struct dpaa2_eth_priv *priv = netdev_priv(net_dev); 106934ff6846SIoana Radulescu struct dpaa2_fd fd; 107034ff6846SIoana Radulescu struct rtnl_link_stats64 *percpu_stats; 107134ff6846SIoana Radulescu struct dpaa2_eth_drv_stats *percpu_extras; 107234ff6846SIoana Radulescu struct dpaa2_eth_fq *fq; 1073569dac6aSIoana Ciocoi Radulescu struct netdev_queue *nq; 107434ff6846SIoana Radulescu u16 queue_mapping; 107534ff6846SIoana Radulescu unsigned int needed_headroom; 1076569dac6aSIoana Ciocoi Radulescu u32 fd_len; 1077ab1e6de2SIoana Radulescu u8 prio = 0; 107834ff6846SIoana Radulescu int err, i; 107964a965deSYangbo Lu void *swa; 108034ff6846SIoana Radulescu 108134ff6846SIoana Radulescu percpu_stats = this_cpu_ptr(priv->percpu_stats); 108234ff6846SIoana Radulescu percpu_extras = this_cpu_ptr(priv->percpu_extras); 108334ff6846SIoana Radulescu 10841cf773bdSYangbo Lu needed_headroom = dpaa2_eth_needed_headroom(skb); 108534ff6846SIoana Radulescu 108634ff6846SIoana Radulescu /* We'll be holding a back-reference to the skb until Tx Confirmation; 108734ff6846SIoana Radulescu * we don't want that overwritten by a concurrent Tx with a cloned skb. 
108834ff6846SIoana Radulescu */ 108934ff6846SIoana Radulescu skb = skb_unshare(skb, GFP_ATOMIC); 109034ff6846SIoana Radulescu if (unlikely(!skb)) { 109134ff6846SIoana Radulescu /* skb_unshare() has already freed the skb */ 109234ff6846SIoana Radulescu percpu_stats->tx_dropped++; 109334ff6846SIoana Radulescu return NETDEV_TX_OK; 109434ff6846SIoana Radulescu } 109534ff6846SIoana Radulescu 109634ff6846SIoana Radulescu /* Setup the FD fields */ 109734ff6846SIoana Radulescu memset(&fd, 0, sizeof(fd)); 109834ff6846SIoana Radulescu 109934ff6846SIoana Radulescu if (skb_is_nonlinear(skb)) { 110064a965deSYangbo Lu err = dpaa2_eth_build_sg_fd(priv, skb, &fd, &swa); 110134ff6846SIoana Radulescu percpu_extras->tx_sg_frames++; 110234ff6846SIoana Radulescu percpu_extras->tx_sg_bytes += skb->len; 1103d70446eeSIoana Ciornei } else if (skb_headroom(skb) < needed_headroom) { 110464a965deSYangbo Lu err = dpaa2_eth_build_sg_fd_single_buf(priv, skb, &fd, &swa); 1105d70446eeSIoana Ciornei percpu_extras->tx_sg_frames++; 1106d70446eeSIoana Ciornei percpu_extras->tx_sg_bytes += skb->len; 11074c96c0acSIoana Ciornei percpu_extras->tx_converted_sg_frames++; 11084c96c0acSIoana Ciornei percpu_extras->tx_converted_sg_bytes += skb->len; 110934ff6846SIoana Radulescu } else { 111064a965deSYangbo Lu err = dpaa2_eth_build_single_fd(priv, skb, &fd, &swa); 111134ff6846SIoana Radulescu } 111234ff6846SIoana Radulescu 111334ff6846SIoana Radulescu if (unlikely(err)) { 111434ff6846SIoana Radulescu percpu_stats->tx_dropped++; 111534ff6846SIoana Radulescu goto err_build_fd; 111634ff6846SIoana Radulescu } 111734ff6846SIoana Radulescu 1118c5521189SYangbo Lu if (skb->cb[0]) 1119c5521189SYangbo Lu dpaa2_eth_enable_tx_tstamp(priv, &fd, swa, skb); 112064a965deSYangbo Lu 112134ff6846SIoana Radulescu /* Tracing point */ 112234ff6846SIoana Radulescu trace_dpaa2_tx_fd(net_dev, &fd); 112334ff6846SIoana Radulescu 112434ff6846SIoana Radulescu /* TxConf FQ selection relies on queue id from the stack. 
112534ff6846SIoana Radulescu * In case of a forwarded frame from another DPNI interface, we choose 112634ff6846SIoana Radulescu * a queue affined to the same core that processed the Rx frame 112734ff6846SIoana Radulescu */ 112834ff6846SIoana Radulescu queue_mapping = skb_get_queue_mapping(skb); 1129ab1e6de2SIoana Radulescu 1130ab1e6de2SIoana Radulescu if (net_dev->num_tc) { 1131ab1e6de2SIoana Radulescu prio = netdev_txq_to_tc(net_dev, queue_mapping); 1132ab1e6de2SIoana Radulescu /* Hardware interprets priority level 0 as being the highest, 1133ab1e6de2SIoana Radulescu * so we need to do a reverse mapping to the netdev tc index 1134ab1e6de2SIoana Radulescu */ 1135ab1e6de2SIoana Radulescu prio = net_dev->num_tc - prio - 1; 1136ab1e6de2SIoana Radulescu /* We have only one FQ array entry for all Tx hardware queues 1137ab1e6de2SIoana Radulescu * with the same flow id (but different priority levels) 1138ab1e6de2SIoana Radulescu */ 1139ab1e6de2SIoana Radulescu queue_mapping %= dpaa2_eth_queue_count(priv); 1140ab1e6de2SIoana Radulescu } 114134ff6846SIoana Radulescu fq = &priv->fq[queue_mapping]; 11428c838f53SIoana Ciornei 11438c838f53SIoana Ciornei fd_len = dpaa2_fd_get_len(&fd); 11448c838f53SIoana Ciornei nq = netdev_get_tx_queue(net_dev, queue_mapping); 11458c838f53SIoana Ciornei netdev_tx_sent_queue(nq, fd_len); 11468c838f53SIoana Ciornei 11478c838f53SIoana Ciornei /* Everything that happens after this enqueues might race with 11488c838f53SIoana Ciornei * the Tx confirmation callback for this frame 11498c838f53SIoana Ciornei */ 115034ff6846SIoana Radulescu for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) { 11516ff80447SIoana Ciornei err = priv->enqueue(priv, fq, &fd, prio, 1, NULL); 115234ff6846SIoana Radulescu if (err != -EBUSY) 115334ff6846SIoana Radulescu break; 115434ff6846SIoana Radulescu } 115534ff6846SIoana Radulescu percpu_extras->tx_portal_busy += i; 115634ff6846SIoana Radulescu if (unlikely(err < 0)) { 115734ff6846SIoana Radulescu percpu_stats->tx_errors++; 
115834ff6846SIoana Radulescu /* Clean up everything, including freeing the skb */ 11595d8dccf8SIoana Ciornei dpaa2_eth_free_tx_fd(priv, fq, &fd, false); 11608c838f53SIoana Ciornei netdev_tx_completed_queue(nq, 1, fd_len); 116134ff6846SIoana Radulescu } else { 116234ff6846SIoana Radulescu percpu_stats->tx_packets++; 1163569dac6aSIoana Ciocoi Radulescu percpu_stats->tx_bytes += fd_len; 116434ff6846SIoana Radulescu } 116534ff6846SIoana Radulescu 116634ff6846SIoana Radulescu return NETDEV_TX_OK; 116734ff6846SIoana Radulescu 116834ff6846SIoana Radulescu err_build_fd: 116934ff6846SIoana Radulescu dev_kfree_skb(skb); 117034ff6846SIoana Radulescu 117134ff6846SIoana Radulescu return NETDEV_TX_OK; 117234ff6846SIoana Radulescu } 117334ff6846SIoana Radulescu 1174c5521189SYangbo Lu static void dpaa2_eth_tx_onestep_tstamp(struct work_struct *work) 1175c5521189SYangbo Lu { 1176c5521189SYangbo Lu struct dpaa2_eth_priv *priv = container_of(work, struct dpaa2_eth_priv, 1177c5521189SYangbo Lu tx_onestep_tstamp); 1178c5521189SYangbo Lu struct sk_buff *skb; 1179c5521189SYangbo Lu 1180c5521189SYangbo Lu while (true) { 1181c5521189SYangbo Lu skb = skb_dequeue(&priv->tx_skbs); 1182c5521189SYangbo Lu if (!skb) 1183c5521189SYangbo Lu return; 1184c5521189SYangbo Lu 1185c5521189SYangbo Lu /* Lock just before TX one-step timestamping packet, 1186c5521189SYangbo Lu * and release the lock in dpaa2_eth_free_tx_fd when 1187c5521189SYangbo Lu * confirm the packet has been sent on hardware, or 1188c5521189SYangbo Lu * when clean up during transmit failure. 
1189c5521189SYangbo Lu */ 1190c5521189SYangbo Lu mutex_lock(&priv->onestep_tstamp_lock); 1191c5521189SYangbo Lu __dpaa2_eth_tx(skb, priv->net_dev); 1192c5521189SYangbo Lu } 1193c5521189SYangbo Lu } 1194c5521189SYangbo Lu 1195c5521189SYangbo Lu static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev) 1196c5521189SYangbo Lu { 1197c5521189SYangbo Lu struct dpaa2_eth_priv *priv = netdev_priv(net_dev); 1198c5521189SYangbo Lu u8 msgtype, twostep, udp; 1199c5521189SYangbo Lu u16 offset1, offset2; 1200c5521189SYangbo Lu 1201c5521189SYangbo Lu /* Utilize skb->cb[0] for timestamping request per skb */ 1202c5521189SYangbo Lu skb->cb[0] = 0; 1203c5521189SYangbo Lu 1204c5521189SYangbo Lu if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && dpaa2_ptp) { 1205c5521189SYangbo Lu if (priv->tx_tstamp_type == HWTSTAMP_TX_ON) 1206c5521189SYangbo Lu skb->cb[0] = TX_TSTAMP; 1207c5521189SYangbo Lu else if (priv->tx_tstamp_type == HWTSTAMP_TX_ONESTEP_SYNC) 1208c5521189SYangbo Lu skb->cb[0] = TX_TSTAMP_ONESTEP_SYNC; 1209c5521189SYangbo Lu } 1210c5521189SYangbo Lu 1211c5521189SYangbo Lu /* TX for one-step timestamping PTP Sync packet */ 1212c5521189SYangbo Lu if (skb->cb[0] == TX_TSTAMP_ONESTEP_SYNC) { 1213c5521189SYangbo Lu if (!dpaa2_eth_ptp_parse(skb, &msgtype, &twostep, &udp, 1214c5521189SYangbo Lu &offset1, &offset2)) 12156b6817c5SChristian Eggers if (msgtype == PTP_MSGTYPE_SYNC && twostep == 0) { 1216c5521189SYangbo Lu skb_queue_tail(&priv->tx_skbs, skb); 1217c5521189SYangbo Lu queue_work(priv->dpaa2_ptp_wq, 1218c5521189SYangbo Lu &priv->tx_onestep_tstamp); 1219c5521189SYangbo Lu return NETDEV_TX_OK; 1220c5521189SYangbo Lu } 1221c5521189SYangbo Lu /* Use two-step timestamping if not one-step timestamping 1222c5521189SYangbo Lu * PTP Sync packet 1223c5521189SYangbo Lu */ 1224c5521189SYangbo Lu skb->cb[0] = TX_TSTAMP; 1225c5521189SYangbo Lu } 1226c5521189SYangbo Lu 1227c5521189SYangbo Lu /* TX for other packets */ 1228c5521189SYangbo Lu return __dpaa2_eth_tx(skb, 
net_dev); 1229c5521189SYangbo Lu } 1230c5521189SYangbo Lu 123134ff6846SIoana Radulescu /* Tx confirmation frame processing routine */ 123234ff6846SIoana Radulescu static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv, 1233b00c898cSIoana Ciornei struct dpaa2_eth_channel *ch __always_unused, 123434ff6846SIoana Radulescu const struct dpaa2_fd *fd, 1235569dac6aSIoana Ciocoi Radulescu struct dpaa2_eth_fq *fq) 123634ff6846SIoana Radulescu { 123734ff6846SIoana Radulescu struct rtnl_link_stats64 *percpu_stats; 123834ff6846SIoana Radulescu struct dpaa2_eth_drv_stats *percpu_extras; 1239569dac6aSIoana Ciocoi Radulescu u32 fd_len = dpaa2_fd_get_len(fd); 124034ff6846SIoana Radulescu u32 fd_errors; 124134ff6846SIoana Radulescu 124234ff6846SIoana Radulescu /* Tracing point */ 124334ff6846SIoana Radulescu trace_dpaa2_tx_conf_fd(priv->net_dev, fd); 124434ff6846SIoana Radulescu 124534ff6846SIoana Radulescu percpu_extras = this_cpu_ptr(priv->percpu_extras); 124634ff6846SIoana Radulescu percpu_extras->tx_conf_frames++; 1247569dac6aSIoana Ciocoi Radulescu percpu_extras->tx_conf_bytes += fd_len; 1248569dac6aSIoana Ciocoi Radulescu 124934ff6846SIoana Radulescu /* Check frame errors in the FD field */ 125034ff6846SIoana Radulescu fd_errors = dpaa2_fd_get_ctrl(fd) & DPAA2_FD_TX_ERR_MASK; 12515d8dccf8SIoana Ciornei dpaa2_eth_free_tx_fd(priv, fq, fd, true); 125234ff6846SIoana Radulescu 125334ff6846SIoana Radulescu if (likely(!fd_errors)) 125434ff6846SIoana Radulescu return; 125534ff6846SIoana Radulescu 125634ff6846SIoana Radulescu if (net_ratelimit()) 125734ff6846SIoana Radulescu netdev_dbg(priv->net_dev, "TX frame FD error: 0x%08x\n", 125834ff6846SIoana Radulescu fd_errors); 125934ff6846SIoana Radulescu 126034ff6846SIoana Radulescu percpu_stats = this_cpu_ptr(priv->percpu_stats); 126134ff6846SIoana Radulescu /* Tx-conf logically pertains to the egress path. 
*/ 126234ff6846SIoana Radulescu percpu_stats->tx_errors++; 126334ff6846SIoana Radulescu } 126434ff6846SIoana Radulescu 12655d8dccf8SIoana Ciornei static int dpaa2_eth_set_rx_csum(struct dpaa2_eth_priv *priv, bool enable) 126634ff6846SIoana Radulescu { 126734ff6846SIoana Radulescu int err; 126834ff6846SIoana Radulescu 126934ff6846SIoana Radulescu err = dpni_set_offload(priv->mc_io, 0, priv->mc_token, 127034ff6846SIoana Radulescu DPNI_OFF_RX_L3_CSUM, enable); 127134ff6846SIoana Radulescu if (err) { 127234ff6846SIoana Radulescu netdev_err(priv->net_dev, 127334ff6846SIoana Radulescu "dpni_set_offload(RX_L3_CSUM) failed\n"); 127434ff6846SIoana Radulescu return err; 127534ff6846SIoana Radulescu } 127634ff6846SIoana Radulescu 127734ff6846SIoana Radulescu err = dpni_set_offload(priv->mc_io, 0, priv->mc_token, 127834ff6846SIoana Radulescu DPNI_OFF_RX_L4_CSUM, enable); 127934ff6846SIoana Radulescu if (err) { 128034ff6846SIoana Radulescu netdev_err(priv->net_dev, 128134ff6846SIoana Radulescu "dpni_set_offload(RX_L4_CSUM) failed\n"); 128234ff6846SIoana Radulescu return err; 128334ff6846SIoana Radulescu } 128434ff6846SIoana Radulescu 128534ff6846SIoana Radulescu return 0; 128634ff6846SIoana Radulescu } 128734ff6846SIoana Radulescu 12885d8dccf8SIoana Ciornei static int dpaa2_eth_set_tx_csum(struct dpaa2_eth_priv *priv, bool enable) 128934ff6846SIoana Radulescu { 129034ff6846SIoana Radulescu int err; 129134ff6846SIoana Radulescu 129234ff6846SIoana Radulescu err = dpni_set_offload(priv->mc_io, 0, priv->mc_token, 129334ff6846SIoana Radulescu DPNI_OFF_TX_L3_CSUM, enable); 129434ff6846SIoana Radulescu if (err) { 129534ff6846SIoana Radulescu netdev_err(priv->net_dev, "dpni_set_offload(TX_L3_CSUM) failed\n"); 129634ff6846SIoana Radulescu return err; 129734ff6846SIoana Radulescu } 129834ff6846SIoana Radulescu 129934ff6846SIoana Radulescu err = dpni_set_offload(priv->mc_io, 0, priv->mc_token, 130034ff6846SIoana Radulescu DPNI_OFF_TX_L4_CSUM, enable); 130134ff6846SIoana Radulescu if (err) 
{ 130234ff6846SIoana Radulescu netdev_err(priv->net_dev, "dpni_set_offload(TX_L4_CSUM) failed\n"); 130334ff6846SIoana Radulescu return err; 130434ff6846SIoana Radulescu } 130534ff6846SIoana Radulescu 130634ff6846SIoana Radulescu return 0; 130734ff6846SIoana Radulescu } 130834ff6846SIoana Radulescu 130934ff6846SIoana Radulescu /* Perform a single release command to add buffers 131034ff6846SIoana Radulescu * to the specified buffer pool 131134ff6846SIoana Radulescu */ 13125d8dccf8SIoana Ciornei static int dpaa2_eth_add_bufs(struct dpaa2_eth_priv *priv, 131334ff6846SIoana Radulescu struct dpaa2_eth_channel *ch, u16 bpid) 131434ff6846SIoana Radulescu { 131534ff6846SIoana Radulescu struct device *dev = priv->net_dev->dev.parent; 131634ff6846SIoana Radulescu u64 buf_array[DPAA2_ETH_BUFS_PER_CMD]; 131727c87486SIoana Ciocoi Radulescu struct page *page; 131834ff6846SIoana Radulescu dma_addr_t addr; 1319ef17bd7cSIoana Radulescu int retries = 0; 132034ff6846SIoana Radulescu int i, err; 132134ff6846SIoana Radulescu 132234ff6846SIoana Radulescu for (i = 0; i < DPAA2_ETH_BUFS_PER_CMD; i++) { 132334ff6846SIoana Radulescu /* Allocate buffer visible to WRIOP + skb shared info + 132434ff6846SIoana Radulescu * alignment padding 132534ff6846SIoana Radulescu */ 132627c87486SIoana Ciocoi Radulescu /* allocate one page for each Rx buffer. 
WRIOP sees 132727c87486SIoana Ciocoi Radulescu * the entire page except for a tailroom reserved for 132827c87486SIoana Ciocoi Radulescu * skb shared info 132927c87486SIoana Ciocoi Radulescu */ 133027c87486SIoana Ciocoi Radulescu page = dev_alloc_pages(0); 133127c87486SIoana Ciocoi Radulescu if (!page) 133234ff6846SIoana Radulescu goto err_alloc; 133334ff6846SIoana Radulescu 1334efa6a7d0SIoana Ciornei addr = dma_map_page(dev, page, 0, priv->rx_buf_size, 133518c2e770SIoana Ciocoi Radulescu DMA_BIDIRECTIONAL); 133634ff6846SIoana Radulescu if (unlikely(dma_mapping_error(dev, addr))) 133734ff6846SIoana Radulescu goto err_map; 133834ff6846SIoana Radulescu 133934ff6846SIoana Radulescu buf_array[i] = addr; 134034ff6846SIoana Radulescu 134134ff6846SIoana Radulescu /* tracing point */ 134234ff6846SIoana Radulescu trace_dpaa2_eth_buf_seed(priv->net_dev, 134327c87486SIoana Ciocoi Radulescu page, DPAA2_ETH_RX_BUF_RAW_SIZE, 1344efa6a7d0SIoana Ciornei addr, priv->rx_buf_size, 134534ff6846SIoana Radulescu bpid); 134634ff6846SIoana Radulescu } 134734ff6846SIoana Radulescu 134834ff6846SIoana Radulescu release_bufs: 134934ff6846SIoana Radulescu /* In case the portal is busy, retry until successful */ 135034ff6846SIoana Radulescu while ((err = dpaa2_io_service_release(ch->dpio, bpid, 1351ef17bd7cSIoana Radulescu buf_array, i)) == -EBUSY) { 1352ef17bd7cSIoana Radulescu if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES) 1353ef17bd7cSIoana Radulescu break; 135434ff6846SIoana Radulescu cpu_relax(); 1355ef17bd7cSIoana Radulescu } 135634ff6846SIoana Radulescu 135734ff6846SIoana Radulescu /* If release command failed, clean up and bail out; 135834ff6846SIoana Radulescu * not much else we can do about it 135934ff6846SIoana Radulescu */ 136034ff6846SIoana Radulescu if (err) { 13615d8dccf8SIoana Ciornei dpaa2_eth_free_bufs(priv, buf_array, i); 136234ff6846SIoana Radulescu return 0; 136334ff6846SIoana Radulescu } 136434ff6846SIoana Radulescu 136534ff6846SIoana Radulescu return i; 136634ff6846SIoana 
Radulescu 136734ff6846SIoana Radulescu err_map: 136827c87486SIoana Ciocoi Radulescu __free_pages(page, 0); 136934ff6846SIoana Radulescu err_alloc: 137034ff6846SIoana Radulescu /* If we managed to allocate at least some buffers, 137134ff6846SIoana Radulescu * release them to hardware 137234ff6846SIoana Radulescu */ 137334ff6846SIoana Radulescu if (i) 137434ff6846SIoana Radulescu goto release_bufs; 137534ff6846SIoana Radulescu 137634ff6846SIoana Radulescu return 0; 137734ff6846SIoana Radulescu } 137834ff6846SIoana Radulescu 13795d8dccf8SIoana Ciornei static int dpaa2_eth_seed_pool(struct dpaa2_eth_priv *priv, u16 bpid) 138034ff6846SIoana Radulescu { 138134ff6846SIoana Radulescu int i, j; 138234ff6846SIoana Radulescu int new_count; 138334ff6846SIoana Radulescu 138434ff6846SIoana Radulescu for (j = 0; j < priv->num_channels; j++) { 138534ff6846SIoana Radulescu for (i = 0; i < DPAA2_ETH_NUM_BUFS; 138634ff6846SIoana Radulescu i += DPAA2_ETH_BUFS_PER_CMD) { 13875d8dccf8SIoana Ciornei new_count = dpaa2_eth_add_bufs(priv, priv->channel[j], bpid); 138834ff6846SIoana Radulescu priv->channel[j]->buf_count += new_count; 138934ff6846SIoana Radulescu 139034ff6846SIoana Radulescu if (new_count < DPAA2_ETH_BUFS_PER_CMD) { 139134ff6846SIoana Radulescu return -ENOMEM; 139234ff6846SIoana Radulescu } 139334ff6846SIoana Radulescu } 139434ff6846SIoana Radulescu } 139534ff6846SIoana Radulescu 139634ff6846SIoana Radulescu return 0; 139734ff6846SIoana Radulescu } 139834ff6846SIoana Radulescu 1399d0ea5cbdSJesse Brandeburg /* 140034ff6846SIoana Radulescu * Drain the specified number of buffers from the DPNI's private buffer pool. 
140134ff6846SIoana Radulescu * @count must not exceeed DPAA2_ETH_BUFS_PER_CMD 140234ff6846SIoana Radulescu */ 14035d8dccf8SIoana Ciornei static void dpaa2_eth_drain_bufs(struct dpaa2_eth_priv *priv, int count) 140434ff6846SIoana Radulescu { 140534ff6846SIoana Radulescu u64 buf_array[DPAA2_ETH_BUFS_PER_CMD]; 1406ef17bd7cSIoana Radulescu int retries = 0; 140734ff6846SIoana Radulescu int ret; 140834ff6846SIoana Radulescu 140934ff6846SIoana Radulescu do { 141034ff6846SIoana Radulescu ret = dpaa2_io_service_acquire(NULL, priv->bpid, 141134ff6846SIoana Radulescu buf_array, count); 141234ff6846SIoana Radulescu if (ret < 0) { 1413ef17bd7cSIoana Radulescu if (ret == -EBUSY && 14140e5ad75bSIoana Ciornei retries++ < DPAA2_ETH_SWP_BUSY_RETRIES) 1415ef17bd7cSIoana Radulescu continue; 141634ff6846SIoana Radulescu netdev_err(priv->net_dev, "dpaa2_io_service_acquire() failed\n"); 141734ff6846SIoana Radulescu return; 141834ff6846SIoana Radulescu } 14195d8dccf8SIoana Ciornei dpaa2_eth_free_bufs(priv, buf_array, ret); 1420ef17bd7cSIoana Radulescu retries = 0; 142134ff6846SIoana Radulescu } while (ret); 142234ff6846SIoana Radulescu } 142334ff6846SIoana Radulescu 14245d8dccf8SIoana Ciornei static void dpaa2_eth_drain_pool(struct dpaa2_eth_priv *priv) 142534ff6846SIoana Radulescu { 142634ff6846SIoana Radulescu int i; 142734ff6846SIoana Radulescu 14285d8dccf8SIoana Ciornei dpaa2_eth_drain_bufs(priv, DPAA2_ETH_BUFS_PER_CMD); 14295d8dccf8SIoana Ciornei dpaa2_eth_drain_bufs(priv, 1); 143034ff6846SIoana Radulescu 143134ff6846SIoana Radulescu for (i = 0; i < priv->num_channels; i++) 143234ff6846SIoana Radulescu priv->channel[i]->buf_count = 0; 143334ff6846SIoana Radulescu } 143434ff6846SIoana Radulescu 143534ff6846SIoana Radulescu /* Function is called from softirq context only, so we don't need to guard 143634ff6846SIoana Radulescu * the access to percpu count 143734ff6846SIoana Radulescu */ 14385d8dccf8SIoana Ciornei static int dpaa2_eth_refill_pool(struct dpaa2_eth_priv *priv, 
143934ff6846SIoana Radulescu struct dpaa2_eth_channel *ch, 144034ff6846SIoana Radulescu u16 bpid) 144134ff6846SIoana Radulescu { 144234ff6846SIoana Radulescu int new_count; 144334ff6846SIoana Radulescu 144434ff6846SIoana Radulescu if (likely(ch->buf_count >= DPAA2_ETH_REFILL_THRESH)) 144534ff6846SIoana Radulescu return 0; 144634ff6846SIoana Radulescu 144734ff6846SIoana Radulescu do { 14485d8dccf8SIoana Ciornei new_count = dpaa2_eth_add_bufs(priv, ch, bpid); 144934ff6846SIoana Radulescu if (unlikely(!new_count)) { 145034ff6846SIoana Radulescu /* Out of memory; abort for now, we'll try later on */ 145134ff6846SIoana Radulescu break; 145234ff6846SIoana Radulescu } 145334ff6846SIoana Radulescu ch->buf_count += new_count; 145434ff6846SIoana Radulescu } while (ch->buf_count < DPAA2_ETH_NUM_BUFS); 145534ff6846SIoana Radulescu 145634ff6846SIoana Radulescu if (unlikely(ch->buf_count < DPAA2_ETH_NUM_BUFS)) 145734ff6846SIoana Radulescu return -ENOMEM; 145834ff6846SIoana Radulescu 145934ff6846SIoana Radulescu return 0; 146034ff6846SIoana Radulescu } 146134ff6846SIoana Radulescu 1462d70446eeSIoana Ciornei static void dpaa2_eth_sgt_cache_drain(struct dpaa2_eth_priv *priv) 1463d70446eeSIoana Ciornei { 1464d70446eeSIoana Ciornei struct dpaa2_eth_sgt_cache *sgt_cache; 1465d70446eeSIoana Ciornei u16 count; 1466d70446eeSIoana Ciornei int k, i; 1467d70446eeSIoana Ciornei 14680fe665d4SIoana Ciornei for_each_possible_cpu(k) { 1469d70446eeSIoana Ciornei sgt_cache = per_cpu_ptr(priv->sgt_cache, k); 1470d70446eeSIoana Ciornei count = sgt_cache->count; 1471d70446eeSIoana Ciornei 1472d70446eeSIoana Ciornei for (i = 0; i < count; i++) 1473d70446eeSIoana Ciornei kfree(sgt_cache->buf[i]); 1474d70446eeSIoana Ciornei sgt_cache->count = 0; 1475d70446eeSIoana Ciornei } 1476d70446eeSIoana Ciornei } 1477d70446eeSIoana Ciornei 14785d8dccf8SIoana Ciornei static int dpaa2_eth_pull_channel(struct dpaa2_eth_channel *ch) 147934ff6846SIoana Radulescu { 148034ff6846SIoana Radulescu int err; 
148134ff6846SIoana Radulescu int dequeues = -1; 148234ff6846SIoana Radulescu 148334ff6846SIoana Radulescu /* Retry while portal is busy */ 148434ff6846SIoana Radulescu do { 148534ff6846SIoana Radulescu err = dpaa2_io_service_pull_channel(ch->dpio, ch->ch_id, 148634ff6846SIoana Radulescu ch->store); 148734ff6846SIoana Radulescu dequeues++; 148834ff6846SIoana Radulescu cpu_relax(); 1489ef17bd7cSIoana Radulescu } while (err == -EBUSY && dequeues < DPAA2_ETH_SWP_BUSY_RETRIES); 149034ff6846SIoana Radulescu 149134ff6846SIoana Radulescu ch->stats.dequeue_portal_busy += dequeues; 149234ff6846SIoana Radulescu if (unlikely(err)) 149334ff6846SIoana Radulescu ch->stats.pull_err++; 149434ff6846SIoana Radulescu 149534ff6846SIoana Radulescu return err; 149634ff6846SIoana Radulescu } 149734ff6846SIoana Radulescu 149834ff6846SIoana Radulescu /* NAPI poll routine 149934ff6846SIoana Radulescu * 150034ff6846SIoana Radulescu * Frames are dequeued from the QMan channel associated with this NAPI context. 150134ff6846SIoana Radulescu * Rx, Tx confirmation and (if configured) Rx error frames all count 150234ff6846SIoana Radulescu * towards the NAPI budget. 
150334ff6846SIoana Radulescu */ 150434ff6846SIoana Radulescu static int dpaa2_eth_poll(struct napi_struct *napi, int budget) 150534ff6846SIoana Radulescu { 150634ff6846SIoana Radulescu struct dpaa2_eth_channel *ch; 150734ff6846SIoana Radulescu struct dpaa2_eth_priv *priv; 150868049a5fSIoana Ciocoi Radulescu int rx_cleaned = 0, txconf_cleaned = 0; 1509569dac6aSIoana Ciocoi Radulescu struct dpaa2_eth_fq *fq, *txc_fq = NULL; 1510569dac6aSIoana Ciocoi Radulescu struct netdev_queue *nq; 1511569dac6aSIoana Ciocoi Radulescu int store_cleaned, work_done; 15120a25d92cSIoana Ciornei struct list_head rx_list; 1513ef17bd7cSIoana Radulescu int retries = 0; 151474a1c059SIoana Ciornei u16 flowid; 151534ff6846SIoana Radulescu int err; 151634ff6846SIoana Radulescu 151734ff6846SIoana Radulescu ch = container_of(napi, struct dpaa2_eth_channel, napi); 1518d678be1dSIoana Radulescu ch->xdp.res = 0; 151934ff6846SIoana Radulescu priv = ch->priv; 152034ff6846SIoana Radulescu 15210a25d92cSIoana Ciornei INIT_LIST_HEAD(&rx_list); 15220a25d92cSIoana Ciornei ch->rx_list = &rx_list; 15230a25d92cSIoana Ciornei 152468049a5fSIoana Ciocoi Radulescu do { 15255d8dccf8SIoana Ciornei err = dpaa2_eth_pull_channel(ch); 152634ff6846SIoana Radulescu if (unlikely(err)) 152734ff6846SIoana Radulescu break; 152834ff6846SIoana Radulescu 152934ff6846SIoana Radulescu /* Refill pool if appropriate */ 15305d8dccf8SIoana Ciornei dpaa2_eth_refill_pool(priv, ch, priv->bpid); 153134ff6846SIoana Radulescu 15325d8dccf8SIoana Ciornei store_cleaned = dpaa2_eth_consume_frames(ch, &fq); 1533ef17bd7cSIoana Radulescu if (store_cleaned <= 0) 1534569dac6aSIoana Ciocoi Radulescu break; 1535569dac6aSIoana Ciocoi Radulescu if (fq->type == DPAA2_RX_FQ) { 153668049a5fSIoana Ciocoi Radulescu rx_cleaned += store_cleaned; 153774a1c059SIoana Ciornei flowid = fq->flowid; 1538569dac6aSIoana Ciocoi Radulescu } else { 153968049a5fSIoana Ciocoi Radulescu txconf_cleaned += store_cleaned; 1540569dac6aSIoana Ciocoi Radulescu /* We have a single 
Tx conf FQ on this channel */ 1541569dac6aSIoana Ciocoi Radulescu txc_fq = fq; 1542569dac6aSIoana Ciocoi Radulescu } 154334ff6846SIoana Radulescu 154468049a5fSIoana Ciocoi Radulescu /* If we either consumed the whole NAPI budget with Rx frames 154568049a5fSIoana Ciocoi Radulescu * or we reached the Tx confirmations threshold, we're done. 154634ff6846SIoana Radulescu */ 154768049a5fSIoana Ciocoi Radulescu if (rx_cleaned >= budget || 1548569dac6aSIoana Ciocoi Radulescu txconf_cleaned >= DPAA2_ETH_TXCONF_PER_NAPI) { 1549569dac6aSIoana Ciocoi Radulescu work_done = budget; 1550569dac6aSIoana Ciocoi Radulescu goto out; 1551569dac6aSIoana Ciocoi Radulescu } 155268049a5fSIoana Ciocoi Radulescu } while (store_cleaned); 155334ff6846SIoana Radulescu 155468049a5fSIoana Ciocoi Radulescu /* We didn't consume the entire budget, so finish napi and 155568049a5fSIoana Ciocoi Radulescu * re-enable data availability notifications 155668049a5fSIoana Ciocoi Radulescu */ 155768049a5fSIoana Ciocoi Radulescu napi_complete_done(napi, rx_cleaned); 155834ff6846SIoana Radulescu do { 155934ff6846SIoana Radulescu err = dpaa2_io_service_rearm(ch->dpio, &ch->nctx); 156034ff6846SIoana Radulescu cpu_relax(); 1561ef17bd7cSIoana Radulescu } while (err == -EBUSY && retries++ < DPAA2_ETH_SWP_BUSY_RETRIES); 156234ff6846SIoana Radulescu WARN_ONCE(err, "CDAN notifications rearm failed on core %d", 156334ff6846SIoana Radulescu ch->nctx.desired_cpu); 156434ff6846SIoana Radulescu 1565569dac6aSIoana Ciocoi Radulescu work_done = max(rx_cleaned, 1); 1566569dac6aSIoana Ciocoi Radulescu 1567569dac6aSIoana Ciocoi Radulescu out: 15680a25d92cSIoana Ciornei netif_receive_skb_list(ch->rx_list); 15690a25d92cSIoana Ciornei 1570d678be1dSIoana Radulescu if (txc_fq && txc_fq->dq_frames) { 1571569dac6aSIoana Ciocoi Radulescu nq = netdev_get_tx_queue(priv->net_dev, txc_fq->flowid); 1572569dac6aSIoana Ciocoi Radulescu netdev_tx_completed_queue(nq, txc_fq->dq_frames, 1573569dac6aSIoana Ciocoi Radulescu txc_fq->dq_bytes); 
1574569dac6aSIoana Ciocoi Radulescu txc_fq->dq_frames = 0; 1575569dac6aSIoana Ciocoi Radulescu txc_fq->dq_bytes = 0; 1576569dac6aSIoana Ciocoi Radulescu } 1577569dac6aSIoana Ciocoi Radulescu 1578d678be1dSIoana Radulescu if (ch->xdp.res & XDP_REDIRECT) 1579d678be1dSIoana Radulescu xdp_do_flush_map(); 158074a1c059SIoana Ciornei else if (rx_cleaned && ch->xdp.res & XDP_TX) 15815d8dccf8SIoana Ciornei dpaa2_eth_xdp_tx_flush(priv, ch, &priv->fq[flowid]); 1582d678be1dSIoana Radulescu 1583569dac6aSIoana Ciocoi Radulescu return work_done; 158434ff6846SIoana Radulescu } 158534ff6846SIoana Radulescu 15865d8dccf8SIoana Ciornei static void dpaa2_eth_enable_ch_napi(struct dpaa2_eth_priv *priv) 158734ff6846SIoana Radulescu { 158834ff6846SIoana Radulescu struct dpaa2_eth_channel *ch; 158934ff6846SIoana Radulescu int i; 159034ff6846SIoana Radulescu 159134ff6846SIoana Radulescu for (i = 0; i < priv->num_channels; i++) { 159234ff6846SIoana Radulescu ch = priv->channel[i]; 159334ff6846SIoana Radulescu napi_enable(&ch->napi); 159434ff6846SIoana Radulescu } 159534ff6846SIoana Radulescu } 159634ff6846SIoana Radulescu 15975d8dccf8SIoana Ciornei static void dpaa2_eth_disable_ch_napi(struct dpaa2_eth_priv *priv) 159834ff6846SIoana Radulescu { 159934ff6846SIoana Radulescu struct dpaa2_eth_channel *ch; 160034ff6846SIoana Radulescu int i; 160134ff6846SIoana Radulescu 160234ff6846SIoana Radulescu for (i = 0; i < priv->num_channels; i++) { 160334ff6846SIoana Radulescu ch = priv->channel[i]; 160434ff6846SIoana Radulescu napi_disable(&ch->napi); 160534ff6846SIoana Radulescu } 160634ff6846SIoana Radulescu } 160734ff6846SIoana Radulescu 160807beb165SIoana Ciornei void dpaa2_eth_set_rx_taildrop(struct dpaa2_eth_priv *priv, 160907beb165SIoana Ciornei bool tx_pause, bool pfc) 16108eb3cef8SIoana Radulescu { 16118eb3cef8SIoana Radulescu struct dpni_taildrop td = {0}; 1612685e39eaSIoana Radulescu struct dpaa2_eth_fq *fq; 16138eb3cef8SIoana Radulescu int i, err; 16148eb3cef8SIoana Radulescu 
161507beb165SIoana Ciornei /* FQ taildrop: threshold is in bytes, per frame queue. Enabled if 161607beb165SIoana Ciornei * flow control is disabled (as it might interfere with either the 161707beb165SIoana Ciornei * buffer pool depletion trigger for pause frames or with the group 161807beb165SIoana Ciornei * congestion trigger for PFC frames) 161907beb165SIoana Ciornei */ 16202c8d1c8dSIoana Radulescu td.enable = !tx_pause; 162107beb165SIoana Ciornei if (priv->rx_fqtd_enabled == td.enable) 162207beb165SIoana Ciornei goto set_cgtd; 16238eb3cef8SIoana Radulescu 16242c8d1c8dSIoana Radulescu td.threshold = DPAA2_ETH_FQ_TAILDROP_THRESH; 16252c8d1c8dSIoana Radulescu td.units = DPNI_CONGESTION_UNIT_BYTES; 16268eb3cef8SIoana Radulescu 16278eb3cef8SIoana Radulescu for (i = 0; i < priv->num_fqs; i++) { 1628685e39eaSIoana Radulescu fq = &priv->fq[i]; 1629685e39eaSIoana Radulescu if (fq->type != DPAA2_RX_FQ) 16308eb3cef8SIoana Radulescu continue; 16318eb3cef8SIoana Radulescu err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token, 1632685e39eaSIoana Radulescu DPNI_CP_QUEUE, DPNI_QUEUE_RX, 1633685e39eaSIoana Radulescu fq->tc, fq->flowid, &td); 16348eb3cef8SIoana Radulescu if (err) { 16358eb3cef8SIoana Radulescu netdev_err(priv->net_dev, 16362c8d1c8dSIoana Radulescu "dpni_set_taildrop(FQ) failed\n"); 16372c8d1c8dSIoana Radulescu return; 16388eb3cef8SIoana Radulescu } 16398eb3cef8SIoana Radulescu } 16408eb3cef8SIoana Radulescu 164107beb165SIoana Ciornei priv->rx_fqtd_enabled = td.enable; 164207beb165SIoana Ciornei 164307beb165SIoana Ciornei set_cgtd: 16442c8d1c8dSIoana Radulescu /* Congestion group taildrop: threshold is in frames, per group 16452c8d1c8dSIoana Radulescu * of FQs belonging to the same traffic class 164607beb165SIoana Ciornei * Enabled if general Tx pause disabled or if PFCs are enabled 164707beb165SIoana Ciornei * (congestion group threhsold for PFC generation is lower than the 164807beb165SIoana Ciornei * CG taildrop threshold, so it won't interfere with it; we 
also 164907beb165SIoana Ciornei * want frames in non-PFC enabled traffic classes to be kept in check) 16502c8d1c8dSIoana Radulescu */ 165107beb165SIoana Ciornei td.enable = !tx_pause || (tx_pause && pfc); 165207beb165SIoana Ciornei if (priv->rx_cgtd_enabled == td.enable) 165307beb165SIoana Ciornei return; 165407beb165SIoana Ciornei 16552c8d1c8dSIoana Radulescu td.threshold = DPAA2_ETH_CG_TAILDROP_THRESH(priv); 16562c8d1c8dSIoana Radulescu td.units = DPNI_CONGESTION_UNIT_FRAMES; 16572c8d1c8dSIoana Radulescu for (i = 0; i < dpaa2_eth_tc_count(priv); i++) { 16582c8d1c8dSIoana Radulescu err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token, 16592c8d1c8dSIoana Radulescu DPNI_CP_GROUP, DPNI_QUEUE_RX, 16602c8d1c8dSIoana Radulescu i, 0, &td); 16612c8d1c8dSIoana Radulescu if (err) { 16622c8d1c8dSIoana Radulescu netdev_err(priv->net_dev, 16632c8d1c8dSIoana Radulescu "dpni_set_taildrop(CG) failed\n"); 16642c8d1c8dSIoana Radulescu return; 16652c8d1c8dSIoana Radulescu } 16662c8d1c8dSIoana Radulescu } 16672c8d1c8dSIoana Radulescu 166807beb165SIoana Ciornei priv->rx_cgtd_enabled = td.enable; 16698eb3cef8SIoana Radulescu } 16708eb3cef8SIoana Radulescu 16715d8dccf8SIoana Ciornei static int dpaa2_eth_link_state_update(struct dpaa2_eth_priv *priv) 167234ff6846SIoana Radulescu { 167385b7a342SIoana Ciornei struct dpni_link_state state = {0}; 16748eb3cef8SIoana Radulescu bool tx_pause; 167534ff6846SIoana Radulescu int err; 167634ff6846SIoana Radulescu 167734ff6846SIoana Radulescu err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state); 167834ff6846SIoana Radulescu if (unlikely(err)) { 167934ff6846SIoana Radulescu netdev_err(priv->net_dev, 168034ff6846SIoana Radulescu "dpni_get_link_state() failed\n"); 168134ff6846SIoana Radulescu return err; 168234ff6846SIoana Radulescu } 168334ff6846SIoana Radulescu 16848eb3cef8SIoana Radulescu /* If Tx pause frame settings have changed, we need to update 16858eb3cef8SIoana Radulescu * Rx FQ taildrop configuration as well. 
We configure taildrop 16868eb3cef8SIoana Radulescu * only when pause frame generation is disabled. 16878eb3cef8SIoana Radulescu */ 1688ad054f26SIoana Radulescu tx_pause = dpaa2_eth_tx_pause_enabled(state.options); 168907beb165SIoana Ciornei dpaa2_eth_set_rx_taildrop(priv, tx_pause, priv->pfc_enabled); 16908eb3cef8SIoana Radulescu 169171947923SIoana Ciornei /* When we manage the MAC/PHY using phylink there is no need 169271947923SIoana Ciornei * to manually update the netif_carrier. 169371947923SIoana Ciornei */ 169471947923SIoana Ciornei if (priv->mac) 169571947923SIoana Ciornei goto out; 169671947923SIoana Ciornei 169734ff6846SIoana Radulescu /* Chech link state; speed / duplex changes are not treated yet */ 169834ff6846SIoana Radulescu if (priv->link_state.up == state.up) 1699cce62943SIoana Radulescu goto out; 170034ff6846SIoana Radulescu 170134ff6846SIoana Radulescu if (state.up) { 170234ff6846SIoana Radulescu netif_carrier_on(priv->net_dev); 170334ff6846SIoana Radulescu netif_tx_start_all_queues(priv->net_dev); 170434ff6846SIoana Radulescu } else { 170534ff6846SIoana Radulescu netif_tx_stop_all_queues(priv->net_dev); 170634ff6846SIoana Radulescu netif_carrier_off(priv->net_dev); 170734ff6846SIoana Radulescu } 170834ff6846SIoana Radulescu 170934ff6846SIoana Radulescu netdev_info(priv->net_dev, "Link Event: state %s\n", 171034ff6846SIoana Radulescu state.up ? 
"up" : "down"); 171134ff6846SIoana Radulescu 1712cce62943SIoana Radulescu out: 1713cce62943SIoana Radulescu priv->link_state = state; 1714cce62943SIoana Radulescu 171534ff6846SIoana Radulescu return 0; 171634ff6846SIoana Radulescu } 171734ff6846SIoana Radulescu 171834ff6846SIoana Radulescu static int dpaa2_eth_open(struct net_device *net_dev) 171934ff6846SIoana Radulescu { 172034ff6846SIoana Radulescu struct dpaa2_eth_priv *priv = netdev_priv(net_dev); 172134ff6846SIoana Radulescu int err; 172234ff6846SIoana Radulescu 17235d8dccf8SIoana Ciornei err = dpaa2_eth_seed_pool(priv, priv->bpid); 172434ff6846SIoana Radulescu if (err) { 172534ff6846SIoana Radulescu /* Not much to do; the buffer pool, though not filled up, 172634ff6846SIoana Radulescu * may still contain some buffers which would enable us 172734ff6846SIoana Radulescu * to limp on. 172834ff6846SIoana Radulescu */ 172934ff6846SIoana Radulescu netdev_err(net_dev, "Buffer seeding failed for DPBP %d (bpid=%d)\n", 173034ff6846SIoana Radulescu priv->dpbp_dev->obj_desc.id, priv->bpid); 173134ff6846SIoana Radulescu } 173234ff6846SIoana Radulescu 173371947923SIoana Ciornei if (!priv->mac) { 173471947923SIoana Ciornei /* We'll only start the txqs when the link is actually ready; 173571947923SIoana Ciornei * make sure we don't race against the link up notification, 173671947923SIoana Ciornei * which may come immediately after dpni_enable(); 173734ff6846SIoana Radulescu */ 173834ff6846SIoana Radulescu netif_tx_stop_all_queues(net_dev); 173971947923SIoana Ciornei 174071947923SIoana Ciornei /* Also, explicitly set carrier off, otherwise 174171947923SIoana Ciornei * netif_carrier_ok() will return true and cause 'ip link show' 174271947923SIoana Ciornei * to report the LOWER_UP flag, even though the link 174371947923SIoana Ciornei * notification wasn't even received. 
174434ff6846SIoana Radulescu */ 174534ff6846SIoana Radulescu netif_carrier_off(net_dev); 174671947923SIoana Ciornei } 17475d8dccf8SIoana Ciornei dpaa2_eth_enable_ch_napi(priv); 174834ff6846SIoana Radulescu 174934ff6846SIoana Radulescu err = dpni_enable(priv->mc_io, 0, priv->mc_token); 175034ff6846SIoana Radulescu if (err < 0) { 175134ff6846SIoana Radulescu netdev_err(net_dev, "dpni_enable() failed\n"); 175234ff6846SIoana Radulescu goto enable_err; 175334ff6846SIoana Radulescu } 175434ff6846SIoana Radulescu 17554c33a5bdSIoana Ciornei if (priv->mac) 175671947923SIoana Ciornei phylink_start(priv->mac->phylink); 175734ff6846SIoana Radulescu 175834ff6846SIoana Radulescu return 0; 175934ff6846SIoana Radulescu 176034ff6846SIoana Radulescu enable_err: 17615d8dccf8SIoana Ciornei dpaa2_eth_disable_ch_napi(priv); 17625d8dccf8SIoana Ciornei dpaa2_eth_drain_pool(priv); 176334ff6846SIoana Radulescu return err; 176434ff6846SIoana Radulescu } 176534ff6846SIoana Radulescu 176668d74315SIoana Ciocoi Radulescu /* Total number of in-flight frames on ingress queues */ 17675d8dccf8SIoana Ciornei static u32 dpaa2_eth_ingress_fq_count(struct dpaa2_eth_priv *priv) 176834ff6846SIoana Radulescu { 176968d74315SIoana Ciocoi Radulescu struct dpaa2_eth_fq *fq; 177068d74315SIoana Ciocoi Radulescu u32 fcnt = 0, bcnt = 0, total = 0; 177168d74315SIoana Ciocoi Radulescu int i, err; 177234ff6846SIoana Radulescu 177368d74315SIoana Ciocoi Radulescu for (i = 0; i < priv->num_fqs; i++) { 177468d74315SIoana Ciocoi Radulescu fq = &priv->fq[i]; 177568d74315SIoana Ciocoi Radulescu err = dpaa2_io_query_fq_count(NULL, fq->fqid, &fcnt, &bcnt); 177668d74315SIoana Ciocoi Radulescu if (err) { 177768d74315SIoana Ciocoi Radulescu netdev_warn(priv->net_dev, "query_fq_count failed"); 177868d74315SIoana Ciocoi Radulescu break; 177968d74315SIoana Ciocoi Radulescu } 178068d74315SIoana Ciocoi Radulescu total += fcnt; 178168d74315SIoana Ciocoi Radulescu } 178234ff6846SIoana Radulescu 178334ff6846SIoana Radulescu return 
total; 178434ff6846SIoana Radulescu } 178534ff6846SIoana Radulescu 17865d8dccf8SIoana Ciornei static void dpaa2_eth_wait_for_ingress_fq_empty(struct dpaa2_eth_priv *priv) 178734ff6846SIoana Radulescu { 178868d74315SIoana Ciocoi Radulescu int retries = 10; 178968d74315SIoana Ciocoi Radulescu u32 pending; 179034ff6846SIoana Radulescu 179168d74315SIoana Ciocoi Radulescu do { 17925d8dccf8SIoana Ciornei pending = dpaa2_eth_ingress_fq_count(priv); 179368d74315SIoana Ciocoi Radulescu if (pending) 179468d74315SIoana Ciocoi Radulescu msleep(100); 179568d74315SIoana Ciocoi Radulescu } while (pending && --retries); 179634ff6846SIoana Radulescu } 179734ff6846SIoana Radulescu 179852b6a4ffSIoana Radulescu #define DPNI_TX_PENDING_VER_MAJOR 7 179952b6a4ffSIoana Radulescu #define DPNI_TX_PENDING_VER_MINOR 13 18005d8dccf8SIoana Ciornei static void dpaa2_eth_wait_for_egress_fq_empty(struct dpaa2_eth_priv *priv) 180152b6a4ffSIoana Radulescu { 180252b6a4ffSIoana Radulescu union dpni_statistics stats; 180352b6a4ffSIoana Radulescu int retries = 10; 180452b6a4ffSIoana Radulescu int err; 180552b6a4ffSIoana Radulescu 180652b6a4ffSIoana Radulescu if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_TX_PENDING_VER_MAJOR, 180752b6a4ffSIoana Radulescu DPNI_TX_PENDING_VER_MINOR) < 0) 180852b6a4ffSIoana Radulescu goto out; 180952b6a4ffSIoana Radulescu 181052b6a4ffSIoana Radulescu do { 181152b6a4ffSIoana Radulescu err = dpni_get_statistics(priv->mc_io, 0, priv->mc_token, 6, 181252b6a4ffSIoana Radulescu &stats); 181352b6a4ffSIoana Radulescu if (err) 181452b6a4ffSIoana Radulescu goto out; 181552b6a4ffSIoana Radulescu if (stats.page_6.tx_pending_frames == 0) 181652b6a4ffSIoana Radulescu return; 181752b6a4ffSIoana Radulescu } while (--retries); 181852b6a4ffSIoana Radulescu 181952b6a4ffSIoana Radulescu out: 182052b6a4ffSIoana Radulescu msleep(500); 182152b6a4ffSIoana Radulescu } 182252b6a4ffSIoana Radulescu 182334ff6846SIoana Radulescu static int dpaa2_eth_stop(struct net_device *net_dev) 182434ff6846SIoana Radulescu 
{ 182534ff6846SIoana Radulescu struct dpaa2_eth_priv *priv = netdev_priv(net_dev); 182685b7a342SIoana Ciornei int dpni_enabled = 0; 182734ff6846SIoana Radulescu int retries = 10; 182834ff6846SIoana Radulescu 182971947923SIoana Ciornei if (!priv->mac) { 183034ff6846SIoana Radulescu netif_tx_stop_all_queues(net_dev); 183134ff6846SIoana Radulescu netif_carrier_off(net_dev); 183271947923SIoana Ciornei } else { 183371947923SIoana Ciornei phylink_stop(priv->mac->phylink); 183471947923SIoana Ciornei } 183534ff6846SIoana Radulescu 183668d74315SIoana Ciocoi Radulescu /* On dpni_disable(), the MC firmware will: 183768d74315SIoana Ciocoi Radulescu * - stop MAC Rx and wait for all Rx frames to be enqueued to software 183868d74315SIoana Ciocoi Radulescu * - cut off WRIOP dequeues from egress FQs and wait until transmission 183968d74315SIoana Ciocoi Radulescu * of all in flight Tx frames is finished (and corresponding Tx conf 184068d74315SIoana Ciocoi Radulescu * frames are enqueued back to software) 184168d74315SIoana Ciocoi Radulescu * 184268d74315SIoana Ciocoi Radulescu * Before calling dpni_disable(), we wait for all Tx frames to arrive 184368d74315SIoana Ciocoi Radulescu * on WRIOP. After it finishes, wait until all remaining frames on Rx 184468d74315SIoana Ciocoi Radulescu * and Tx conf queues are consumed on NAPI poll. 
184534ff6846SIoana Radulescu */ 18465d8dccf8SIoana Ciornei dpaa2_eth_wait_for_egress_fq_empty(priv); 184768d74315SIoana Ciocoi Radulescu 184834ff6846SIoana Radulescu do { 184934ff6846SIoana Radulescu dpni_disable(priv->mc_io, 0, priv->mc_token); 185034ff6846SIoana Radulescu dpni_is_enabled(priv->mc_io, 0, priv->mc_token, &dpni_enabled); 185134ff6846SIoana Radulescu if (dpni_enabled) 185234ff6846SIoana Radulescu /* Allow the hardware some slack */ 185334ff6846SIoana Radulescu msleep(100); 185434ff6846SIoana Radulescu } while (dpni_enabled && --retries); 185534ff6846SIoana Radulescu if (!retries) { 185634ff6846SIoana Radulescu netdev_warn(net_dev, "Retry count exceeded disabling DPNI\n"); 185734ff6846SIoana Radulescu /* Must go on and disable NAPI nonetheless, so we don't crash at 185834ff6846SIoana Radulescu * the next "ifconfig up" 185934ff6846SIoana Radulescu */ 186034ff6846SIoana Radulescu } 186134ff6846SIoana Radulescu 18625d8dccf8SIoana Ciornei dpaa2_eth_wait_for_ingress_fq_empty(priv); 18635d8dccf8SIoana Ciornei dpaa2_eth_disable_ch_napi(priv); 186434ff6846SIoana Radulescu 186534ff6846SIoana Radulescu /* Empty the buffer pool */ 18665d8dccf8SIoana Ciornei dpaa2_eth_drain_pool(priv); 186734ff6846SIoana Radulescu 1868d70446eeSIoana Ciornei /* Empty the Scatter-Gather Buffer cache */ 1869d70446eeSIoana Ciornei dpaa2_eth_sgt_cache_drain(priv); 1870d70446eeSIoana Ciornei 187134ff6846SIoana Radulescu return 0; 187234ff6846SIoana Radulescu } 187334ff6846SIoana Radulescu 187434ff6846SIoana Radulescu static int dpaa2_eth_set_addr(struct net_device *net_dev, void *addr) 187534ff6846SIoana Radulescu { 187634ff6846SIoana Radulescu struct dpaa2_eth_priv *priv = netdev_priv(net_dev); 187734ff6846SIoana Radulescu struct device *dev = net_dev->dev.parent; 187834ff6846SIoana Radulescu int err; 187934ff6846SIoana Radulescu 188034ff6846SIoana Radulescu err = eth_mac_addr(net_dev, addr); 188134ff6846SIoana Radulescu if (err < 0) { 188234ff6846SIoana Radulescu dev_err(dev, 
"eth_mac_addr() failed (%d)\n", err); 188334ff6846SIoana Radulescu return err; 188434ff6846SIoana Radulescu } 188534ff6846SIoana Radulescu 188634ff6846SIoana Radulescu err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token, 188734ff6846SIoana Radulescu net_dev->dev_addr); 188834ff6846SIoana Radulescu if (err) { 188934ff6846SIoana Radulescu dev_err(dev, "dpni_set_primary_mac_addr() failed (%d)\n", err); 189034ff6846SIoana Radulescu return err; 189134ff6846SIoana Radulescu } 189234ff6846SIoana Radulescu 189334ff6846SIoana Radulescu return 0; 189434ff6846SIoana Radulescu } 189534ff6846SIoana Radulescu 189634ff6846SIoana Radulescu /** Fill in counters maintained by the GPP driver. These may be different from 189734ff6846SIoana Radulescu * the hardware counters obtained by ethtool. 189834ff6846SIoana Radulescu */ 189934ff6846SIoana Radulescu static void dpaa2_eth_get_stats(struct net_device *net_dev, 190034ff6846SIoana Radulescu struct rtnl_link_stats64 *stats) 190134ff6846SIoana Radulescu { 190234ff6846SIoana Radulescu struct dpaa2_eth_priv *priv = netdev_priv(net_dev); 190334ff6846SIoana Radulescu struct rtnl_link_stats64 *percpu_stats; 190434ff6846SIoana Radulescu u64 *cpustats; 190534ff6846SIoana Radulescu u64 *netstats = (u64 *)stats; 190634ff6846SIoana Radulescu int i, j; 190734ff6846SIoana Radulescu int num = sizeof(struct rtnl_link_stats64) / sizeof(u64); 190834ff6846SIoana Radulescu 190934ff6846SIoana Radulescu for_each_possible_cpu(i) { 191034ff6846SIoana Radulescu percpu_stats = per_cpu_ptr(priv->percpu_stats, i); 191134ff6846SIoana Radulescu cpustats = (u64 *)percpu_stats; 191234ff6846SIoana Radulescu for (j = 0; j < num; j++) 191334ff6846SIoana Radulescu netstats[j] += cpustats[j]; 191434ff6846SIoana Radulescu } 191534ff6846SIoana Radulescu } 191634ff6846SIoana Radulescu 191734ff6846SIoana Radulescu /* Copy mac unicast addresses from @net_dev to @priv. 191834ff6846SIoana Radulescu * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable. 
191934ff6846SIoana Radulescu */ 19205d8dccf8SIoana Ciornei static void dpaa2_eth_add_uc_hw_addr(const struct net_device *net_dev, 192134ff6846SIoana Radulescu struct dpaa2_eth_priv *priv) 192234ff6846SIoana Radulescu { 192334ff6846SIoana Radulescu struct netdev_hw_addr *ha; 192434ff6846SIoana Radulescu int err; 192534ff6846SIoana Radulescu 192634ff6846SIoana Radulescu netdev_for_each_uc_addr(ha, net_dev) { 192734ff6846SIoana Radulescu err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token, 192834ff6846SIoana Radulescu ha->addr); 192934ff6846SIoana Radulescu if (err) 193034ff6846SIoana Radulescu netdev_warn(priv->net_dev, 193134ff6846SIoana Radulescu "Could not add ucast MAC %pM to the filtering table (err %d)\n", 193234ff6846SIoana Radulescu ha->addr, err); 193334ff6846SIoana Radulescu } 193434ff6846SIoana Radulescu } 193534ff6846SIoana Radulescu 193634ff6846SIoana Radulescu /* Copy mac multicast addresses from @net_dev to @priv 193734ff6846SIoana Radulescu * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable. 
193834ff6846SIoana Radulescu */ 19395d8dccf8SIoana Ciornei static void dpaa2_eth_add_mc_hw_addr(const struct net_device *net_dev, 194034ff6846SIoana Radulescu struct dpaa2_eth_priv *priv) 194134ff6846SIoana Radulescu { 194234ff6846SIoana Radulescu struct netdev_hw_addr *ha; 194334ff6846SIoana Radulescu int err; 194434ff6846SIoana Radulescu 194534ff6846SIoana Radulescu netdev_for_each_mc_addr(ha, net_dev) { 194634ff6846SIoana Radulescu err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token, 194734ff6846SIoana Radulescu ha->addr); 194834ff6846SIoana Radulescu if (err) 194934ff6846SIoana Radulescu netdev_warn(priv->net_dev, 195034ff6846SIoana Radulescu "Could not add mcast MAC %pM to the filtering table (err %d)\n", 195134ff6846SIoana Radulescu ha->addr, err); 195234ff6846SIoana Radulescu } 195334ff6846SIoana Radulescu } 195434ff6846SIoana Radulescu 195534ff6846SIoana Radulescu static void dpaa2_eth_set_rx_mode(struct net_device *net_dev) 195634ff6846SIoana Radulescu { 195734ff6846SIoana Radulescu struct dpaa2_eth_priv *priv = netdev_priv(net_dev); 195834ff6846SIoana Radulescu int uc_count = netdev_uc_count(net_dev); 195934ff6846SIoana Radulescu int mc_count = netdev_mc_count(net_dev); 196034ff6846SIoana Radulescu u8 max_mac = priv->dpni_attrs.mac_filter_entries; 196134ff6846SIoana Radulescu u32 options = priv->dpni_attrs.options; 196234ff6846SIoana Radulescu u16 mc_token = priv->mc_token; 196334ff6846SIoana Radulescu struct fsl_mc_io *mc_io = priv->mc_io; 196434ff6846SIoana Radulescu int err; 196534ff6846SIoana Radulescu 196634ff6846SIoana Radulescu /* Basic sanity checks; these probably indicate a misconfiguration */ 196734ff6846SIoana Radulescu if (options & DPNI_OPT_NO_MAC_FILTER && max_mac != 0) 196834ff6846SIoana Radulescu netdev_info(net_dev, 196934ff6846SIoana Radulescu "mac_filter_entries=%d, DPNI_OPT_NO_MAC_FILTER option must be disabled\n", 197034ff6846SIoana Radulescu max_mac); 197134ff6846SIoana Radulescu 197234ff6846SIoana Radulescu /* Force promiscuous 
if the uc or mc counts exceed our capabilities. */ 197334ff6846SIoana Radulescu if (uc_count > max_mac) { 197434ff6846SIoana Radulescu netdev_info(net_dev, 197534ff6846SIoana Radulescu "Unicast addr count reached %d, max allowed is %d; forcing promisc\n", 197634ff6846SIoana Radulescu uc_count, max_mac); 197734ff6846SIoana Radulescu goto force_promisc; 197834ff6846SIoana Radulescu } 197934ff6846SIoana Radulescu if (mc_count + uc_count > max_mac) { 198034ff6846SIoana Radulescu netdev_info(net_dev, 198134ff6846SIoana Radulescu "Unicast + multicast addr count reached %d, max allowed is %d; forcing promisc\n", 198234ff6846SIoana Radulescu uc_count + mc_count, max_mac); 198334ff6846SIoana Radulescu goto force_mc_promisc; 198434ff6846SIoana Radulescu } 198534ff6846SIoana Radulescu 198634ff6846SIoana Radulescu /* Adjust promisc settings due to flag combinations */ 198734ff6846SIoana Radulescu if (net_dev->flags & IFF_PROMISC) 198834ff6846SIoana Radulescu goto force_promisc; 198934ff6846SIoana Radulescu if (net_dev->flags & IFF_ALLMULTI) { 199034ff6846SIoana Radulescu /* First, rebuild unicast filtering table. This should be done 199134ff6846SIoana Radulescu * in promisc mode, in order to avoid frame loss while we 199234ff6846SIoana Radulescu * progressively add entries to the table. 199334ff6846SIoana Radulescu * We don't know whether we had been in promisc already, and 199434ff6846SIoana Radulescu * making an MC call to find out is expensive; so set uc promisc 199534ff6846SIoana Radulescu * nonetheless. 199634ff6846SIoana Radulescu */ 199734ff6846SIoana Radulescu err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1); 199834ff6846SIoana Radulescu if (err) 199934ff6846SIoana Radulescu netdev_warn(net_dev, "Can't set uc promisc\n"); 200034ff6846SIoana Radulescu 200134ff6846SIoana Radulescu /* Actual uc table reconstruction. 
*/ 200234ff6846SIoana Radulescu err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 0); 200334ff6846SIoana Radulescu if (err) 200434ff6846SIoana Radulescu netdev_warn(net_dev, "Can't clear uc filters\n"); 20055d8dccf8SIoana Ciornei dpaa2_eth_add_uc_hw_addr(net_dev, priv); 200634ff6846SIoana Radulescu 200734ff6846SIoana Radulescu /* Finally, clear uc promisc and set mc promisc as requested. */ 200834ff6846SIoana Radulescu err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0); 200934ff6846SIoana Radulescu if (err) 201034ff6846SIoana Radulescu netdev_warn(net_dev, "Can't clear uc promisc\n"); 201134ff6846SIoana Radulescu goto force_mc_promisc; 201234ff6846SIoana Radulescu } 201334ff6846SIoana Radulescu 201434ff6846SIoana Radulescu /* Neither unicast, nor multicast promisc will be on... eventually. 201534ff6846SIoana Radulescu * For now, rebuild mac filtering tables while forcing both of them on. 201634ff6846SIoana Radulescu */ 201734ff6846SIoana Radulescu err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1); 201834ff6846SIoana Radulescu if (err) 201934ff6846SIoana Radulescu netdev_warn(net_dev, "Can't set uc promisc (%d)\n", err); 202034ff6846SIoana Radulescu err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1); 202134ff6846SIoana Radulescu if (err) 202234ff6846SIoana Radulescu netdev_warn(net_dev, "Can't set mc promisc (%d)\n", err); 202334ff6846SIoana Radulescu 202434ff6846SIoana Radulescu /* Actual mac filtering tables reconstruction */ 202534ff6846SIoana Radulescu err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 1); 202634ff6846SIoana Radulescu if (err) 202734ff6846SIoana Radulescu netdev_warn(net_dev, "Can't clear mac filters\n"); 20285d8dccf8SIoana Ciornei dpaa2_eth_add_mc_hw_addr(net_dev, priv); 20295d8dccf8SIoana Ciornei dpaa2_eth_add_uc_hw_addr(net_dev, priv); 203034ff6846SIoana Radulescu 203134ff6846SIoana Radulescu /* Now we can clear both ucast and mcast promisc, without risking 203234ff6846SIoana Radulescu * to drop legitimate frames anymore. 
203334ff6846SIoana Radulescu */ 203434ff6846SIoana Radulescu err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0); 203534ff6846SIoana Radulescu if (err) 203634ff6846SIoana Radulescu netdev_warn(net_dev, "Can't clear ucast promisc\n"); 203734ff6846SIoana Radulescu err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 0); 203834ff6846SIoana Radulescu if (err) 203934ff6846SIoana Radulescu netdev_warn(net_dev, "Can't clear mcast promisc\n"); 204034ff6846SIoana Radulescu 204134ff6846SIoana Radulescu return; 204234ff6846SIoana Radulescu 204334ff6846SIoana Radulescu force_promisc: 204434ff6846SIoana Radulescu err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1); 204534ff6846SIoana Radulescu if (err) 204634ff6846SIoana Radulescu netdev_warn(net_dev, "Can't set ucast promisc\n"); 204734ff6846SIoana Radulescu force_mc_promisc: 204834ff6846SIoana Radulescu err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1); 204934ff6846SIoana Radulescu if (err) 205034ff6846SIoana Radulescu netdev_warn(net_dev, "Can't set mcast promisc\n"); 205134ff6846SIoana Radulescu } 205234ff6846SIoana Radulescu 205334ff6846SIoana Radulescu static int dpaa2_eth_set_features(struct net_device *net_dev, 205434ff6846SIoana Radulescu netdev_features_t features) 205534ff6846SIoana Radulescu { 205634ff6846SIoana Radulescu struct dpaa2_eth_priv *priv = netdev_priv(net_dev); 205734ff6846SIoana Radulescu netdev_features_t changed = features ^ net_dev->features; 205834ff6846SIoana Radulescu bool enable; 205934ff6846SIoana Radulescu int err; 206034ff6846SIoana Radulescu 206134ff6846SIoana Radulescu if (changed & NETIF_F_RXCSUM) { 206234ff6846SIoana Radulescu enable = !!(features & NETIF_F_RXCSUM); 20635d8dccf8SIoana Ciornei err = dpaa2_eth_set_rx_csum(priv, enable); 206434ff6846SIoana Radulescu if (err) 206534ff6846SIoana Radulescu return err; 206634ff6846SIoana Radulescu } 206734ff6846SIoana Radulescu 206834ff6846SIoana Radulescu if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) { 206934ff6846SIoana 
Radulescu enable = !!(features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)); 20705d8dccf8SIoana Ciornei err = dpaa2_eth_set_tx_csum(priv, enable); 207134ff6846SIoana Radulescu if (err) 207234ff6846SIoana Radulescu return err; 207334ff6846SIoana Radulescu } 207434ff6846SIoana Radulescu 207534ff6846SIoana Radulescu return 0; 207634ff6846SIoana Radulescu } 207734ff6846SIoana Radulescu 207834ff6846SIoana Radulescu static int dpaa2_eth_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 207934ff6846SIoana Radulescu { 208034ff6846SIoana Radulescu struct dpaa2_eth_priv *priv = netdev_priv(dev); 208134ff6846SIoana Radulescu struct hwtstamp_config config; 208234ff6846SIoana Radulescu 2083c5521189SYangbo Lu if (!dpaa2_ptp) 2084c5521189SYangbo Lu return -EINVAL; 2085c5521189SYangbo Lu 208634ff6846SIoana Radulescu if (copy_from_user(&config, rq->ifr_data, sizeof(config))) 208734ff6846SIoana Radulescu return -EFAULT; 208834ff6846SIoana Radulescu 208934ff6846SIoana Radulescu switch (config.tx_type) { 209034ff6846SIoana Radulescu case HWTSTAMP_TX_OFF: 209134ff6846SIoana Radulescu case HWTSTAMP_TX_ON: 2092c5521189SYangbo Lu case HWTSTAMP_TX_ONESTEP_SYNC: 20931cf773bdSYangbo Lu priv->tx_tstamp_type = config.tx_type; 209434ff6846SIoana Radulescu break; 209534ff6846SIoana Radulescu default: 209634ff6846SIoana Radulescu return -ERANGE; 209734ff6846SIoana Radulescu } 209834ff6846SIoana Radulescu 209934ff6846SIoana Radulescu if (config.rx_filter == HWTSTAMP_FILTER_NONE) { 210034ff6846SIoana Radulescu priv->rx_tstamp = false; 210134ff6846SIoana Radulescu } else { 210234ff6846SIoana Radulescu priv->rx_tstamp = true; 210334ff6846SIoana Radulescu /* TS is set for all frame types, not only those requested */ 210434ff6846SIoana Radulescu config.rx_filter = HWTSTAMP_FILTER_ALL; 210534ff6846SIoana Radulescu } 210634ff6846SIoana Radulescu 210734ff6846SIoana Radulescu return copy_to_user(rq->ifr_data, &config, sizeof(config)) ? 
210834ff6846SIoana Radulescu -EFAULT : 0; 210934ff6846SIoana Radulescu } 211034ff6846SIoana Radulescu 211134ff6846SIoana Radulescu static int dpaa2_eth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 211234ff6846SIoana Radulescu { 21134a84182aSRussell King struct dpaa2_eth_priv *priv = netdev_priv(dev); 21144a84182aSRussell King 211534ff6846SIoana Radulescu if (cmd == SIOCSHWTSTAMP) 211634ff6846SIoana Radulescu return dpaa2_eth_ts_ioctl(dev, rq, cmd); 211734ff6846SIoana Radulescu 21184a84182aSRussell King if (priv->mac) 21194a84182aSRussell King return phylink_mii_ioctl(priv->mac->phylink, rq, cmd); 21204a84182aSRussell King 21214a84182aSRussell King return -EOPNOTSUPP; 212234ff6846SIoana Radulescu } 212334ff6846SIoana Radulescu 21247e273a8eSIoana Ciocoi Radulescu static bool xdp_mtu_valid(struct dpaa2_eth_priv *priv, int mtu) 21257e273a8eSIoana Ciocoi Radulescu { 21267e273a8eSIoana Ciocoi Radulescu int mfl, linear_mfl; 21277e273a8eSIoana Ciocoi Radulescu 21287e273a8eSIoana Ciocoi Radulescu mfl = DPAA2_ETH_L2_MAX_FRM(mtu); 2129efa6a7d0SIoana Ciornei linear_mfl = priv->rx_buf_size - DPAA2_ETH_RX_HWA_SIZE - 21307b1eea1aSIoana Ciocoi Radulescu dpaa2_eth_rx_head_room(priv) - XDP_PACKET_HEADROOM; 21317e273a8eSIoana Ciocoi Radulescu 21327e273a8eSIoana Ciocoi Radulescu if (mfl > linear_mfl) { 21337e273a8eSIoana Ciocoi Radulescu netdev_warn(priv->net_dev, "Maximum MTU for XDP is %d\n", 21347e273a8eSIoana Ciocoi Radulescu linear_mfl - VLAN_ETH_HLEN); 21357e273a8eSIoana Ciocoi Radulescu return false; 21367e273a8eSIoana Ciocoi Radulescu } 21377e273a8eSIoana Ciocoi Radulescu 21387e273a8eSIoana Ciocoi Radulescu return true; 21397e273a8eSIoana Ciocoi Radulescu } 21407e273a8eSIoana Ciocoi Radulescu 21415d8dccf8SIoana Ciornei static int dpaa2_eth_set_rx_mfl(struct dpaa2_eth_priv *priv, int mtu, bool has_xdp) 21427e273a8eSIoana Ciocoi Radulescu { 21437e273a8eSIoana Ciocoi Radulescu int mfl, err; 21447e273a8eSIoana Ciocoi Radulescu 21457e273a8eSIoana Ciocoi Radulescu /* We 
enforce a maximum Rx frame length based on MTU only if we have 21467e273a8eSIoana Ciocoi Radulescu * an XDP program attached (in order to avoid Rx S/G frames). 21477e273a8eSIoana Ciocoi Radulescu * Otherwise, we accept all incoming frames as long as they are not 21487e273a8eSIoana Ciocoi Radulescu * larger than maximum size supported in hardware 21497e273a8eSIoana Ciocoi Radulescu */ 21507e273a8eSIoana Ciocoi Radulescu if (has_xdp) 21517e273a8eSIoana Ciocoi Radulescu mfl = DPAA2_ETH_L2_MAX_FRM(mtu); 21527e273a8eSIoana Ciocoi Radulescu else 21537e273a8eSIoana Ciocoi Radulescu mfl = DPAA2_ETH_MFL; 21547e273a8eSIoana Ciocoi Radulescu 21557e273a8eSIoana Ciocoi Radulescu err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token, mfl); 21567e273a8eSIoana Ciocoi Radulescu if (err) { 21577e273a8eSIoana Ciocoi Radulescu netdev_err(priv->net_dev, "dpni_set_max_frame_length failed\n"); 21587e273a8eSIoana Ciocoi Radulescu return err; 21597e273a8eSIoana Ciocoi Radulescu } 21607e273a8eSIoana Ciocoi Radulescu 21617e273a8eSIoana Ciocoi Radulescu return 0; 21627e273a8eSIoana Ciocoi Radulescu } 21637e273a8eSIoana Ciocoi Radulescu 21647e273a8eSIoana Ciocoi Radulescu static int dpaa2_eth_change_mtu(struct net_device *dev, int new_mtu) 21657e273a8eSIoana Ciocoi Radulescu { 21667e273a8eSIoana Ciocoi Radulescu struct dpaa2_eth_priv *priv = netdev_priv(dev); 21677e273a8eSIoana Ciocoi Radulescu int err; 21687e273a8eSIoana Ciocoi Radulescu 21697e273a8eSIoana Ciocoi Radulescu if (!priv->xdp_prog) 21707e273a8eSIoana Ciocoi Radulescu goto out; 21717e273a8eSIoana Ciocoi Radulescu 21727e273a8eSIoana Ciocoi Radulescu if (!xdp_mtu_valid(priv, new_mtu)) 21737e273a8eSIoana Ciocoi Radulescu return -EINVAL; 21747e273a8eSIoana Ciocoi Radulescu 21755d8dccf8SIoana Ciornei err = dpaa2_eth_set_rx_mfl(priv, new_mtu, true); 21767e273a8eSIoana Ciocoi Radulescu if (err) 21777e273a8eSIoana Ciocoi Radulescu return err; 21787e273a8eSIoana Ciocoi Radulescu 21797e273a8eSIoana Ciocoi Radulescu out: 
21807e273a8eSIoana Ciocoi Radulescu dev->mtu = new_mtu; 21817e273a8eSIoana Ciocoi Radulescu return 0; 21827e273a8eSIoana Ciocoi Radulescu } 21837e273a8eSIoana Ciocoi Radulescu 21845d8dccf8SIoana Ciornei static int dpaa2_eth_update_rx_buffer_headroom(struct dpaa2_eth_priv *priv, bool has_xdp) 21857b1eea1aSIoana Ciocoi Radulescu { 21867b1eea1aSIoana Ciocoi Radulescu struct dpni_buffer_layout buf_layout = {0}; 21877b1eea1aSIoana Ciocoi Radulescu int err; 21887b1eea1aSIoana Ciocoi Radulescu 21897b1eea1aSIoana Ciocoi Radulescu err = dpni_get_buffer_layout(priv->mc_io, 0, priv->mc_token, 21907b1eea1aSIoana Ciocoi Radulescu DPNI_QUEUE_RX, &buf_layout); 21917b1eea1aSIoana Ciocoi Radulescu if (err) { 21927b1eea1aSIoana Ciocoi Radulescu netdev_err(priv->net_dev, "dpni_get_buffer_layout failed\n"); 21937b1eea1aSIoana Ciocoi Radulescu return err; 21947b1eea1aSIoana Ciocoi Radulescu } 21957b1eea1aSIoana Ciocoi Radulescu 21967b1eea1aSIoana Ciocoi Radulescu /* Reserve extra headroom for XDP header size changes */ 21977b1eea1aSIoana Ciocoi Radulescu buf_layout.data_head_room = dpaa2_eth_rx_head_room(priv) + 21987b1eea1aSIoana Ciocoi Radulescu (has_xdp ? 
XDP_PACKET_HEADROOM : 0); 21997b1eea1aSIoana Ciocoi Radulescu buf_layout.options = DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM; 22007b1eea1aSIoana Ciocoi Radulescu err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token, 22017b1eea1aSIoana Ciocoi Radulescu DPNI_QUEUE_RX, &buf_layout); 22027b1eea1aSIoana Ciocoi Radulescu if (err) { 22037b1eea1aSIoana Ciocoi Radulescu netdev_err(priv->net_dev, "dpni_set_buffer_layout failed\n"); 22047b1eea1aSIoana Ciocoi Radulescu return err; 22057b1eea1aSIoana Ciocoi Radulescu } 22067b1eea1aSIoana Ciocoi Radulescu 22077b1eea1aSIoana Ciocoi Radulescu return 0; 22087b1eea1aSIoana Ciocoi Radulescu } 22097b1eea1aSIoana Ciocoi Radulescu 22105d8dccf8SIoana Ciornei static int dpaa2_eth_setup_xdp(struct net_device *dev, struct bpf_prog *prog) 22117e273a8eSIoana Ciocoi Radulescu { 22127e273a8eSIoana Ciocoi Radulescu struct dpaa2_eth_priv *priv = netdev_priv(dev); 22137e273a8eSIoana Ciocoi Radulescu struct dpaa2_eth_channel *ch; 22147e273a8eSIoana Ciocoi Radulescu struct bpf_prog *old; 22157e273a8eSIoana Ciocoi Radulescu bool up, need_update; 22167e273a8eSIoana Ciocoi Radulescu int i, err; 22177e273a8eSIoana Ciocoi Radulescu 22187e273a8eSIoana Ciocoi Radulescu if (prog && !xdp_mtu_valid(priv, dev->mtu)) 22197e273a8eSIoana Ciocoi Radulescu return -EINVAL; 22207e273a8eSIoana Ciocoi Radulescu 222185192dbfSAndrii Nakryiko if (prog) 222285192dbfSAndrii Nakryiko bpf_prog_add(prog, priv->num_channels); 22237e273a8eSIoana Ciocoi Radulescu 22247e273a8eSIoana Ciocoi Radulescu up = netif_running(dev); 22257e273a8eSIoana Ciocoi Radulescu need_update = (!!priv->xdp_prog != !!prog); 22267e273a8eSIoana Ciocoi Radulescu 22277e273a8eSIoana Ciocoi Radulescu if (up) 22287e273a8eSIoana Ciocoi Radulescu dpaa2_eth_stop(dev); 22297e273a8eSIoana Ciocoi Radulescu 22307b1eea1aSIoana Ciocoi Radulescu /* While in xdp mode, enforce a maximum Rx frame size based on MTU. 
22317b1eea1aSIoana Ciocoi Radulescu * Also, when switching between xdp/non-xdp modes we need to reconfigure 22327b1eea1aSIoana Ciocoi Radulescu * our Rx buffer layout. Buffer pool was drained on dpaa2_eth_stop, 22337b1eea1aSIoana Ciocoi Radulescu * so we are sure no old format buffers will be used from now on. 22347b1eea1aSIoana Ciocoi Radulescu */ 22357e273a8eSIoana Ciocoi Radulescu if (need_update) { 22365d8dccf8SIoana Ciornei err = dpaa2_eth_set_rx_mfl(priv, dev->mtu, !!prog); 22377e273a8eSIoana Ciocoi Radulescu if (err) 22387e273a8eSIoana Ciocoi Radulescu goto out_err; 22395d8dccf8SIoana Ciornei err = dpaa2_eth_update_rx_buffer_headroom(priv, !!prog); 22407b1eea1aSIoana Ciocoi Radulescu if (err) 22417b1eea1aSIoana Ciocoi Radulescu goto out_err; 22427e273a8eSIoana Ciocoi Radulescu } 22437e273a8eSIoana Ciocoi Radulescu 22447e273a8eSIoana Ciocoi Radulescu old = xchg(&priv->xdp_prog, prog); 22457e273a8eSIoana Ciocoi Radulescu if (old) 22467e273a8eSIoana Ciocoi Radulescu bpf_prog_put(old); 22477e273a8eSIoana Ciocoi Radulescu 22487e273a8eSIoana Ciocoi Radulescu for (i = 0; i < priv->num_channels; i++) { 22497e273a8eSIoana Ciocoi Radulescu ch = priv->channel[i]; 22507e273a8eSIoana Ciocoi Radulescu old = xchg(&ch->xdp.prog, prog); 22517e273a8eSIoana Ciocoi Radulescu if (old) 22527e273a8eSIoana Ciocoi Radulescu bpf_prog_put(old); 22537e273a8eSIoana Ciocoi Radulescu } 22547e273a8eSIoana Ciocoi Radulescu 22557e273a8eSIoana Ciocoi Radulescu if (up) { 22567e273a8eSIoana Ciocoi Radulescu err = dpaa2_eth_open(dev); 22577e273a8eSIoana Ciocoi Radulescu if (err) 22587e273a8eSIoana Ciocoi Radulescu return err; 22597e273a8eSIoana Ciocoi Radulescu } 22607e273a8eSIoana Ciocoi Radulescu 22617e273a8eSIoana Ciocoi Radulescu return 0; 22627e273a8eSIoana Ciocoi Radulescu 22637e273a8eSIoana Ciocoi Radulescu out_err: 22647e273a8eSIoana Ciocoi Radulescu if (prog) 22657e273a8eSIoana Ciocoi Radulescu bpf_prog_sub(prog, priv->num_channels); 22667e273a8eSIoana Ciocoi Radulescu if (up) 
22677e273a8eSIoana Ciocoi Radulescu dpaa2_eth_open(dev); 22687e273a8eSIoana Ciocoi Radulescu 22697e273a8eSIoana Ciocoi Radulescu return err; 22707e273a8eSIoana Ciocoi Radulescu } 22717e273a8eSIoana Ciocoi Radulescu 22727e273a8eSIoana Ciocoi Radulescu static int dpaa2_eth_xdp(struct net_device *dev, struct netdev_bpf *xdp) 22737e273a8eSIoana Ciocoi Radulescu { 22747e273a8eSIoana Ciocoi Radulescu switch (xdp->command) { 22757e273a8eSIoana Ciocoi Radulescu case XDP_SETUP_PROG: 22765d8dccf8SIoana Ciornei return dpaa2_eth_setup_xdp(dev, xdp->prog); 22777e273a8eSIoana Ciocoi Radulescu default: 22787e273a8eSIoana Ciocoi Radulescu return -EINVAL; 22797e273a8eSIoana Ciocoi Radulescu } 22807e273a8eSIoana Ciocoi Radulescu 22817e273a8eSIoana Ciocoi Radulescu return 0; 22827e273a8eSIoana Ciocoi Radulescu } 22837e273a8eSIoana Ciocoi Radulescu 22846aa40b9eSIoana Ciornei static int dpaa2_eth_xdp_create_fd(struct net_device *net_dev, 22856aa40b9eSIoana Ciornei struct xdp_frame *xdpf, 22866aa40b9eSIoana Ciornei struct dpaa2_fd *fd) 2287d678be1dSIoana Radulescu { 2288d678be1dSIoana Radulescu struct device *dev = net_dev->dev.parent; 2289d678be1dSIoana Radulescu unsigned int needed_headroom; 2290d678be1dSIoana Radulescu struct dpaa2_eth_swa *swa; 2291d678be1dSIoana Radulescu void *buffer_start, *aligned_start; 2292d678be1dSIoana Radulescu dma_addr_t addr; 2293d678be1dSIoana Radulescu 2294d678be1dSIoana Radulescu /* We require a minimum headroom to be able to transmit the frame. 
2295d678be1dSIoana Radulescu * Otherwise return an error and let the original net_device handle it 2296d678be1dSIoana Radulescu */ 22971cf773bdSYangbo Lu needed_headroom = dpaa2_eth_needed_headroom(NULL); 2298d678be1dSIoana Radulescu if (xdpf->headroom < needed_headroom) 2299d678be1dSIoana Radulescu return -EINVAL; 2300d678be1dSIoana Radulescu 2301d678be1dSIoana Radulescu /* Setup the FD fields */ 23026aa40b9eSIoana Ciornei memset(fd, 0, sizeof(*fd)); 2303d678be1dSIoana Radulescu 2304d678be1dSIoana Radulescu /* Align FD address, if possible */ 2305d678be1dSIoana Radulescu buffer_start = xdpf->data - needed_headroom; 2306d678be1dSIoana Radulescu aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN, 2307d678be1dSIoana Radulescu DPAA2_ETH_TX_BUF_ALIGN); 2308d678be1dSIoana Radulescu if (aligned_start >= xdpf->data - xdpf->headroom) 2309d678be1dSIoana Radulescu buffer_start = aligned_start; 2310d678be1dSIoana Radulescu 2311d678be1dSIoana Radulescu swa = (struct dpaa2_eth_swa *)buffer_start; 2312d678be1dSIoana Radulescu /* fill in necessary fields here */ 2313d678be1dSIoana Radulescu swa->type = DPAA2_ETH_SWA_XDP; 2314d678be1dSIoana Radulescu swa->xdp.dma_size = xdpf->data + xdpf->len - buffer_start; 2315d678be1dSIoana Radulescu swa->xdp.xdpf = xdpf; 2316d678be1dSIoana Radulescu 2317d678be1dSIoana Radulescu addr = dma_map_single(dev, buffer_start, 2318d678be1dSIoana Radulescu swa->xdp.dma_size, 2319d678be1dSIoana Radulescu DMA_BIDIRECTIONAL); 23206aa40b9eSIoana Ciornei if (unlikely(dma_mapping_error(dev, addr))) 2321d678be1dSIoana Radulescu return -ENOMEM; 2322d678be1dSIoana Radulescu 23236aa40b9eSIoana Ciornei dpaa2_fd_set_addr(fd, addr); 23246aa40b9eSIoana Ciornei dpaa2_fd_set_offset(fd, xdpf->data - buffer_start); 23256aa40b9eSIoana Ciornei dpaa2_fd_set_len(fd, xdpf->len); 23266aa40b9eSIoana Ciornei dpaa2_fd_set_format(fd, dpaa2_fd_single); 23276aa40b9eSIoana Ciornei dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA); 2328d678be1dSIoana Radulescu 2329d678be1dSIoana 
Radulescu return 0; 2330d678be1dSIoana Radulescu } 2331d678be1dSIoana Radulescu 2332d678be1dSIoana Radulescu static int dpaa2_eth_xdp_xmit(struct net_device *net_dev, int n, 2333d678be1dSIoana Radulescu struct xdp_frame **frames, u32 flags) 2334d678be1dSIoana Radulescu { 23356aa40b9eSIoana Ciornei struct dpaa2_eth_priv *priv = netdev_priv(net_dev); 233638c440b2SIoana Ciornei struct dpaa2_eth_xdp_fds *xdp_redirect_fds; 23376aa40b9eSIoana Ciornei struct rtnl_link_stats64 *percpu_stats; 23386aa40b9eSIoana Ciornei struct dpaa2_eth_fq *fq; 23398665d978SIoana Ciornei struct dpaa2_fd *fds; 234038c440b2SIoana Ciornei int enqueued, i, err; 2341d678be1dSIoana Radulescu 2342d678be1dSIoana Radulescu if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) 2343d678be1dSIoana Radulescu return -EINVAL; 2344d678be1dSIoana Radulescu 2345d678be1dSIoana Radulescu if (!netif_running(net_dev)) 2346d678be1dSIoana Radulescu return -ENETDOWN; 2347d678be1dSIoana Radulescu 23488665d978SIoana Ciornei fq = &priv->fq[smp_processor_id()]; 234938c440b2SIoana Ciornei xdp_redirect_fds = &fq->xdp_redirect_fds; 235038c440b2SIoana Ciornei fds = xdp_redirect_fds->fds; 23518665d978SIoana Ciornei 23526aa40b9eSIoana Ciornei percpu_stats = this_cpu_ptr(priv->percpu_stats); 23536aa40b9eSIoana Ciornei 23548665d978SIoana Ciornei /* create a FD for each xdp_frame in the list received */ 2355d678be1dSIoana Radulescu for (i = 0; i < n; i++) { 23568665d978SIoana Ciornei err = dpaa2_eth_xdp_create_fd(net_dev, frames[i], &fds[i]); 23578665d978SIoana Ciornei if (err) 23586aa40b9eSIoana Ciornei break; 23596aa40b9eSIoana Ciornei } 236038c440b2SIoana Ciornei xdp_redirect_fds->num = i; 23616aa40b9eSIoana Ciornei 236238c440b2SIoana Ciornei /* enqueue all the frame descriptors */ 236338c440b2SIoana Ciornei enqueued = dpaa2_eth_xdp_flush(priv, fq, xdp_redirect_fds); 2364d678be1dSIoana Radulescu 23658665d978SIoana Ciornei /* update statistics */ 236638c440b2SIoana Ciornei percpu_stats->tx_packets += enqueued; 236738c440b2SIoana Ciornei 
for (i = 0; i < enqueued; i++) 23688665d978SIoana Ciornei percpu_stats->tx_bytes += dpaa2_fd_get_len(&fds[i]); 236938c440b2SIoana Ciornei for (i = enqueued; i < n; i++) 23708665d978SIoana Ciornei xdp_return_frame_rx_napi(frames[i]); 23718665d978SIoana Ciornei 237238c440b2SIoana Ciornei return enqueued; 2373d678be1dSIoana Radulescu } 2374d678be1dSIoana Radulescu 237506d5b179SIoana Radulescu static int update_xps(struct dpaa2_eth_priv *priv) 237606d5b179SIoana Radulescu { 237706d5b179SIoana Radulescu struct net_device *net_dev = priv->net_dev; 237806d5b179SIoana Radulescu struct cpumask xps_mask; 237906d5b179SIoana Radulescu struct dpaa2_eth_fq *fq; 2380ab1e6de2SIoana Radulescu int i, num_queues, netdev_queues; 238106d5b179SIoana Radulescu int err = 0; 238206d5b179SIoana Radulescu 238306d5b179SIoana Radulescu num_queues = dpaa2_eth_queue_count(priv); 2384ab1e6de2SIoana Radulescu netdev_queues = (net_dev->num_tc ? : 1) * num_queues; 238506d5b179SIoana Radulescu 238606d5b179SIoana Radulescu /* The first <num_queues> entries in priv->fq array are Tx/Tx conf 238706d5b179SIoana Radulescu * queues, so only process those 238806d5b179SIoana Radulescu */ 2389ab1e6de2SIoana Radulescu for (i = 0; i < netdev_queues; i++) { 2390ab1e6de2SIoana Radulescu fq = &priv->fq[i % num_queues]; 239106d5b179SIoana Radulescu 239206d5b179SIoana Radulescu cpumask_clear(&xps_mask); 239306d5b179SIoana Radulescu cpumask_set_cpu(fq->target_cpu, &xps_mask); 239406d5b179SIoana Radulescu 239506d5b179SIoana Radulescu err = netif_set_xps_queue(net_dev, &xps_mask, i); 239606d5b179SIoana Radulescu if (err) { 239706d5b179SIoana Radulescu netdev_warn_once(net_dev, "Error setting XPS queue\n"); 239806d5b179SIoana Radulescu break; 239906d5b179SIoana Radulescu } 240006d5b179SIoana Radulescu } 240106d5b179SIoana Radulescu 240206d5b179SIoana Radulescu return err; 240306d5b179SIoana Radulescu } 240406d5b179SIoana Radulescu 2405e3ec13beSIoana Ciornei static int dpaa2_eth_setup_mqprio(struct net_device *net_dev, 
2406e3ec13beSIoana Ciornei struct tc_mqprio_qopt *mqprio) 2407ab1e6de2SIoana Radulescu { 2408ab1e6de2SIoana Radulescu struct dpaa2_eth_priv *priv = netdev_priv(net_dev); 2409ab1e6de2SIoana Radulescu u8 num_tc, num_queues; 2410ab1e6de2SIoana Radulescu int i; 2411ab1e6de2SIoana Radulescu 2412ab1e6de2SIoana Radulescu mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; 2413ab1e6de2SIoana Radulescu num_queues = dpaa2_eth_queue_count(priv); 2414ab1e6de2SIoana Radulescu num_tc = mqprio->num_tc; 2415ab1e6de2SIoana Radulescu 2416ab1e6de2SIoana Radulescu if (num_tc == net_dev->num_tc) 2417ab1e6de2SIoana Radulescu return 0; 2418ab1e6de2SIoana Radulescu 2419ab1e6de2SIoana Radulescu if (num_tc > dpaa2_eth_tc_count(priv)) { 2420ab1e6de2SIoana Radulescu netdev_err(net_dev, "Max %d traffic classes supported\n", 2421ab1e6de2SIoana Radulescu dpaa2_eth_tc_count(priv)); 2422b89c1e6bSJesper Dangaard Brouer return -EOPNOTSUPP; 2423ab1e6de2SIoana Radulescu } 2424ab1e6de2SIoana Radulescu 2425ab1e6de2SIoana Radulescu if (!num_tc) { 2426ab1e6de2SIoana Radulescu netdev_reset_tc(net_dev); 2427ab1e6de2SIoana Radulescu netif_set_real_num_tx_queues(net_dev, num_queues); 2428ab1e6de2SIoana Radulescu goto out; 2429ab1e6de2SIoana Radulescu } 2430ab1e6de2SIoana Radulescu 2431ab1e6de2SIoana Radulescu netdev_set_num_tc(net_dev, num_tc); 2432ab1e6de2SIoana Radulescu netif_set_real_num_tx_queues(net_dev, num_tc * num_queues); 2433ab1e6de2SIoana Radulescu 2434ab1e6de2SIoana Radulescu for (i = 0; i < num_tc; i++) 2435ab1e6de2SIoana Radulescu netdev_set_tc_queue(net_dev, i, num_queues, i * num_queues); 2436ab1e6de2SIoana Radulescu 2437ab1e6de2SIoana Radulescu out: 2438ab1e6de2SIoana Radulescu update_xps(priv); 2439ab1e6de2SIoana Radulescu 2440ab1e6de2SIoana Radulescu return 0; 2441ab1e6de2SIoana Radulescu } 2442ab1e6de2SIoana Radulescu 24433657cdafSIoana Ciornei #define bps_to_mbits(rate) (div_u64((rate), 1000000) * 8) 24443657cdafSIoana Ciornei 24453657cdafSIoana Ciornei static int dpaa2_eth_setup_tbf(struct 
net_device *net_dev, struct tc_tbf_qopt_offload *p) 24463657cdafSIoana Ciornei {
/* NOTE(review): for commands other than TC_TBF_REPLACE (e.g. destroy) the
 * zero-initialized shaping configs below are what gets programmed —
 * presumably this clears the shaper; confirm against the DPNI API docs.
 */
24473657cdafSIoana Ciornei struct tc_tbf_qopt_offload_replace_params *cfg = &p->replace_params; 24483657cdafSIoana Ciornei struct dpaa2_eth_priv *priv = netdev_priv(net_dev); 24493657cdafSIoana Ciornei struct dpni_tx_shaping_cfg tx_cr_shaper = { 0 }; 24503657cdafSIoana Ciornei struct dpni_tx_shaping_cfg tx_er_shaper = { 0 }; 24513657cdafSIoana Ciornei int err; 24523657cdafSIoana Ciornei 24533657cdafSIoana Ciornei if (p->command == TC_TBF_STATS) 24543657cdafSIoana Ciornei return -EOPNOTSUPP; 24553657cdafSIoana Ciornei 24563657cdafSIoana Ciornei /* Only per port Tx shaping */ 24573657cdafSIoana Ciornei if (p->parent != TC_H_ROOT) 24583657cdafSIoana Ciornei return -EOPNOTSUPP; 24593657cdafSIoana Ciornei 24603657cdafSIoana Ciornei if (p->command == TC_TBF_REPLACE) { 24613657cdafSIoana Ciornei if (cfg->max_size > DPAA2_ETH_MAX_BURST_SIZE) { 24623657cdafSIoana Ciornei netdev_err(net_dev, "burst size cannot be greater than %d\n", 24633657cdafSIoana Ciornei DPAA2_ETH_MAX_BURST_SIZE); 24643657cdafSIoana Ciornei return -EINVAL; 24653657cdafSIoana Ciornei } 24663657cdafSIoana Ciornei 24673657cdafSIoana Ciornei tx_cr_shaper.max_burst_size = cfg->max_size; 24683657cdafSIoana Ciornei /* The TBF interface is in bytes/s, whereas DPAA2 expects the 24693657cdafSIoana Ciornei * rate in Mbits/s 24703657cdafSIoana Ciornei */ 24713657cdafSIoana Ciornei tx_cr_shaper.rate_limit = bps_to_mbits(cfg->rate.rate_bytes_ps); 24723657cdafSIoana Ciornei } 24733657cdafSIoana Ciornei 24743657cdafSIoana Ciornei err = dpni_set_tx_shaping(priv->mc_io, 0, priv->mc_token, &tx_cr_shaper, 24753657cdafSIoana Ciornei &tx_er_shaper, 0); 24763657cdafSIoana Ciornei if (err) { 24773657cdafSIoana Ciornei netdev_err(net_dev, "dpni_set_tx_shaping() = %d\n", err); 24783657cdafSIoana Ciornei return err; 24793657cdafSIoana Ciornei } 24803657cdafSIoana Ciornei 24813657cdafSIoana Ciornei return 0; 24823657cdafSIoana Ciornei }
/* ndo_setup_tc dispatcher: route qdisc offload requests to the MQPRIO or TBF
 * handler above; everything else is not offloaded.
 */
24833657cdafSIoana Ciornei 2484e3ec13beSIoana Ciornei static int dpaa2_eth_setup_tc(struct net_device *net_dev, 2485e3ec13beSIoana Ciornei enum tc_setup_type type, void *type_data) 2486e3ec13beSIoana Ciornei { 2487e3ec13beSIoana Ciornei switch (type) { 2488e3ec13beSIoana Ciornei case TC_SETUP_QDISC_MQPRIO: 2489e3ec13beSIoana Ciornei return dpaa2_eth_setup_mqprio(net_dev, type_data); 24903657cdafSIoana Ciornei case TC_SETUP_QDISC_TBF: 24913657cdafSIoana Ciornei return dpaa2_eth_setup_tbf(net_dev, type_data); 2492e3ec13beSIoana Ciornei default: 2493e3ec13beSIoana Ciornei return -EOPNOTSUPP; 2494e3ec13beSIoana Ciornei } 2495e3ec13beSIoana Ciornei } 2496e3ec13beSIoana Ciornei
/* Net device callbacks for the DPAA2 Ethernet interface. */
249734ff6846SIoana Radulescu static const struct net_device_ops dpaa2_eth_ops = { 249834ff6846SIoana Radulescu .ndo_open = dpaa2_eth_open, 249934ff6846SIoana Radulescu .ndo_start_xmit = dpaa2_eth_tx, 250034ff6846SIoana Radulescu .ndo_stop = dpaa2_eth_stop, 250134ff6846SIoana Radulescu .ndo_set_mac_address = dpaa2_eth_set_addr, 250234ff6846SIoana Radulescu .ndo_get_stats64 = dpaa2_eth_get_stats, 250334ff6846SIoana Radulescu .ndo_set_rx_mode = dpaa2_eth_set_rx_mode, 250434ff6846SIoana Radulescu .ndo_set_features = dpaa2_eth_set_features, 250534ff6846SIoana Radulescu .ndo_do_ioctl = dpaa2_eth_ioctl, 25067e273a8eSIoana Ciocoi Radulescu .ndo_change_mtu = dpaa2_eth_change_mtu, 25077e273a8eSIoana Ciocoi Radulescu .ndo_bpf = dpaa2_eth_xdp, 2508d678be1dSIoana Radulescu .ndo_xdp_xmit = dpaa2_eth_xdp_xmit, 2509ab1e6de2SIoana Radulescu .ndo_setup_tc = dpaa2_eth_setup_tc, 251034ff6846SIoana Radulescu }; 251134ff6846SIoana Radulescu
/* CDAN (Channel Data Availability Notification) callback: invoked by the
 * DPIO service when frames arrive on a channel; recover the channel from the
 * embedded notification context and kick NAPI.
 */
25125d8dccf8SIoana Ciornei static void dpaa2_eth_cdan_cb(struct dpaa2_io_notification_ctx *ctx) 251334ff6846SIoana Radulescu { 251434ff6846SIoana Radulescu struct dpaa2_eth_channel *ch; 251534ff6846SIoana Radulescu 251634ff6846SIoana Radulescu ch = container_of(ctx, struct dpaa2_eth_channel, nctx); 251734ff6846SIoana Radulescu 251834ff6846SIoana Radulescu /* Update NAPI statistics
*/ 251934ff6846SIoana Radulescu ch->stats.cdan++; 252034ff6846SIoana Radulescu 25216c33ae1aSJiafei Pan napi_schedule(&ch->napi); 252234ff6846SIoana Radulescu } 252334ff6846SIoana Radulescu 252434ff6846SIoana Radulescu /* Allocate and configure a DPCON object */
/* Returns a valid fsl_mc_device or an ERR_PTR; -ENXIO from the allocator is
 * mapped to -EPROBE_DEFER so probing retries once DPCON objects appear.
 */
25255d8dccf8SIoana Ciornei static struct fsl_mc_device *dpaa2_eth_setup_dpcon(struct dpaa2_eth_priv *priv) 252634ff6846SIoana Radulescu { 252734ff6846SIoana Radulescu struct fsl_mc_device *dpcon; 252834ff6846SIoana Radulescu struct device *dev = priv->net_dev->dev.parent; 252934ff6846SIoana Radulescu int err; 253034ff6846SIoana Radulescu 253134ff6846SIoana Radulescu err = fsl_mc_object_allocate(to_fsl_mc_device(dev), 253234ff6846SIoana Radulescu FSL_MC_POOL_DPCON, &dpcon); 253334ff6846SIoana Radulescu if (err) { 2534d7f5a9d8SIoana Ciornei if (err == -ENXIO) 2535d7f5a9d8SIoana Ciornei err = -EPROBE_DEFER; 2536d7f5a9d8SIoana Ciornei else 253734ff6846SIoana Radulescu dev_info(dev, "Not enough DPCONs, will go on as-is\n"); 2538d7f5a9d8SIoana Ciornei return ERR_PTR(err); 253934ff6846SIoana Radulescu } 254034ff6846SIoana Radulescu 254134ff6846SIoana Radulescu err = dpcon_open(priv->mc_io, 0, dpcon->obj_desc.id, &dpcon->mc_handle); 254234ff6846SIoana Radulescu if (err) { 254334ff6846SIoana Radulescu dev_err(dev, "dpcon_open() failed\n"); 254434ff6846SIoana Radulescu goto free; 254534ff6846SIoana Radulescu } 254634ff6846SIoana Radulescu 254734ff6846SIoana Radulescu err = dpcon_reset(priv->mc_io, 0, dpcon->mc_handle); 254834ff6846SIoana Radulescu if (err) { 254934ff6846SIoana Radulescu dev_err(dev, "dpcon_reset() failed\n"); 255034ff6846SIoana Radulescu goto close; 255134ff6846SIoana Radulescu } 255234ff6846SIoana Radulescu 255334ff6846SIoana Radulescu err = dpcon_enable(priv->mc_io, 0, dpcon->mc_handle); 255434ff6846SIoana Radulescu if (err) { 255534ff6846SIoana Radulescu dev_err(dev, "dpcon_enable() failed\n"); 255634ff6846SIoana Radulescu goto close; 255734ff6846SIoana Radulescu } 255834ff6846SIoana Radulescu 
255934ff6846SIoana Radulescu return dpcon; 256034ff6846SIoana Radulescu 256134ff6846SIoana Radulescu close: 256234ff6846SIoana Radulescu dpcon_close(priv->mc_io, 0, dpcon->mc_handle); 256334ff6846SIoana Radulescu free: 256434ff6846SIoana Radulescu fsl_mc_object_free(dpcon); 256534ff6846SIoana Radulescu 256602afa9c6SYueHaibing return ERR_PTR(err); 256734ff6846SIoana Radulescu } 256834ff6846SIoana Radulescu
/* Undo dpaa2_eth_setup_dpcon(): disable, close and release the DPCON. */
25695d8dccf8SIoana Ciornei static void dpaa2_eth_free_dpcon(struct dpaa2_eth_priv *priv, 257034ff6846SIoana Radulescu struct fsl_mc_device *dpcon) 257134ff6846SIoana Radulescu { 257234ff6846SIoana Radulescu dpcon_disable(priv->mc_io, 0, dpcon->mc_handle); 257334ff6846SIoana Radulescu dpcon_close(priv->mc_io, 0, dpcon->mc_handle); 257434ff6846SIoana Radulescu fsl_mc_object_free(dpcon); 257534ff6846SIoana Radulescu } 257634ff6846SIoana Radulescu
/* Allocate a software channel backed by a DPCON object and cache its
 * attributes. Returns NULL on kzalloc() failure but an ERR_PTR on DPCON
 * errors — callers must use IS_ERR_OR_NULL() (see dpaa2_eth_setup_dpio()).
 */
25775d8dccf8SIoana Ciornei static struct dpaa2_eth_channel *dpaa2_eth_alloc_channel(struct dpaa2_eth_priv *priv) 257834ff6846SIoana Radulescu { 257934ff6846SIoana Radulescu struct dpaa2_eth_channel *channel; 258034ff6846SIoana Radulescu struct dpcon_attr attr; 258134ff6846SIoana Radulescu struct device *dev = priv->net_dev->dev.parent; 258234ff6846SIoana Radulescu int err; 258334ff6846SIoana Radulescu 258434ff6846SIoana Radulescu channel = kzalloc(sizeof(*channel), GFP_KERNEL); 258534ff6846SIoana Radulescu if (!channel) 258634ff6846SIoana Radulescu return NULL; 258734ff6846SIoana Radulescu 25885d8dccf8SIoana Ciornei channel->dpcon = dpaa2_eth_setup_dpcon(priv); 258902afa9c6SYueHaibing if (IS_ERR(channel->dpcon)) { 259002afa9c6SYueHaibing err = PTR_ERR(channel->dpcon); 259134ff6846SIoana Radulescu goto err_setup; 2592d7f5a9d8SIoana Ciornei } 259334ff6846SIoana Radulescu 259434ff6846SIoana Radulescu err = dpcon_get_attributes(priv->mc_io, 0, channel->dpcon->mc_handle, 259534ff6846SIoana Radulescu &attr); 259634ff6846SIoana Radulescu if (err) { 259734ff6846SIoana Radulescu dev_err(dev, "dpcon_get_attributes() failed\n"); 
259834ff6846SIoana Radulescu goto err_get_attr; 259934ff6846SIoana Radulescu } 260034ff6846SIoana Radulescu 260134ff6846SIoana Radulescu channel->dpcon_id = attr.id; 260234ff6846SIoana Radulescu channel->ch_id = attr.qbman_ch_id; 260334ff6846SIoana Radulescu channel->priv = priv; 260434ff6846SIoana Radulescu 260534ff6846SIoana Radulescu return channel; 260634ff6846SIoana Radulescu 260734ff6846SIoana Radulescu err_get_attr: 26085d8dccf8SIoana Ciornei dpaa2_eth_free_dpcon(priv, channel->dpcon); 260934ff6846SIoana Radulescu err_setup: 261034ff6846SIoana Radulescu kfree(channel); 2611d7f5a9d8SIoana Ciornei return ERR_PTR(err); 261234ff6846SIoana Radulescu } 261334ff6846SIoana Radulescu
/* Release a channel and its backing DPCON (counterpart of alloc_channel). */
26145d8dccf8SIoana Ciornei static void dpaa2_eth_free_channel(struct dpaa2_eth_priv *priv, 261534ff6846SIoana Radulescu struct dpaa2_eth_channel *channel) 261634ff6846SIoana Radulescu { 26175d8dccf8SIoana Ciornei dpaa2_eth_free_dpcon(priv, channel->dpcon); 261834ff6846SIoana Radulescu kfree(channel); 261934ff6846SIoana Radulescu } 262034ff6846SIoana Radulescu 262134ff6846SIoana Radulescu /* DPIO setup: allocate and configure QBMan channels, setup core affinity 262234ff6846SIoana Radulescu * and register data availability notifications 262334ff6846SIoana Radulescu */ 26245d8dccf8SIoana Ciornei static int dpaa2_eth_setup_dpio(struct dpaa2_eth_priv *priv) 262534ff6846SIoana Radulescu { 262634ff6846SIoana Radulescu struct dpaa2_io_notification_ctx *nctx; 262734ff6846SIoana Radulescu struct dpaa2_eth_channel *channel; 262834ff6846SIoana Radulescu struct dpcon_notification_cfg dpcon_notif_cfg; 262934ff6846SIoana Radulescu struct device *dev = priv->net_dev->dev.parent; 263034ff6846SIoana Radulescu int i, err; 263134ff6846SIoana Radulescu 263234ff6846SIoana Radulescu /* We want the ability to spread ingress traffic (RX, TX conf) to as 263334ff6846SIoana Radulescu * many cores as possible, so we need one channel for each core 263434ff6846SIoana Radulescu * (unless there's fewer queues than
cores, in which case the extra 263534ff6846SIoana Radulescu * channels would be wasted). 263634ff6846SIoana Radulescu * Allocate one channel per core and register it to the core's 263734ff6846SIoana Radulescu * affine DPIO. If not enough channels are available for all cores 263834ff6846SIoana Radulescu * or if some cores don't have an affine DPIO, there will be no 263934ff6846SIoana Radulescu * ingress frame processing on those cores. 264034ff6846SIoana Radulescu */ 264134ff6846SIoana Radulescu cpumask_clear(&priv->dpio_cpumask); 264234ff6846SIoana Radulescu for_each_online_cpu(i) { 264334ff6846SIoana Radulescu /* Try to allocate a channel */ 26445d8dccf8SIoana Ciornei channel = dpaa2_eth_alloc_channel(priv); 2645d7f5a9d8SIoana Ciornei if (IS_ERR_OR_NULL(channel)) { 2646bd8460faSIoana Radulescu err = PTR_ERR_OR_ZERO(channel); 2647d7f5a9d8SIoana Ciornei if (err != -EPROBE_DEFER) 264834ff6846SIoana Radulescu dev_info(dev, 264934ff6846SIoana Radulescu "No affine channel for cpu %d and above\n", i); 265034ff6846SIoana Radulescu goto err_alloc_ch; 265134ff6846SIoana Radulescu } 265234ff6846SIoana Radulescu 265334ff6846SIoana Radulescu priv->channel[priv->num_channels] = channel; 265434ff6846SIoana Radulescu
/* Fill in the notification context: dpaa2_eth_cdan_cb() will fire when
 * this channel has data available, preferably on CPU i.
 */
265534ff6846SIoana Radulescu nctx = &channel->nctx; 265634ff6846SIoana Radulescu nctx->is_cdan = 1; 26575d8dccf8SIoana Ciornei nctx->cb = dpaa2_eth_cdan_cb; 265834ff6846SIoana Radulescu nctx->id = channel->ch_id; 265934ff6846SIoana Radulescu nctx->desired_cpu = i; 266034ff6846SIoana Radulescu 266134ff6846SIoana Radulescu /* Register the new context */ 266234ff6846SIoana Radulescu channel->dpio = dpaa2_io_service_select(i); 266347441f7fSIoana Ciornei err = dpaa2_io_service_register(channel->dpio, nctx, dev); 266434ff6846SIoana Radulescu if (err) { 266534ff6846SIoana Radulescu dev_dbg(dev, "No affine DPIO for cpu %d\n", i); 266634ff6846SIoana Radulescu /* If no affine DPIO for this core, there's probably 266734ff6846SIoana Radulescu * none available for next cores
either. Signal we want 266834ff6846SIoana Radulescu * to retry later, in case the DPIO devices weren't 266934ff6846SIoana Radulescu * probed yet. 267034ff6846SIoana Radulescu */ 267134ff6846SIoana Radulescu err = -EPROBE_DEFER; 267234ff6846SIoana Radulescu goto err_service_reg; 267334ff6846SIoana Radulescu } 267434ff6846SIoana Radulescu 267534ff6846SIoana Radulescu /* Register DPCON notification with MC */ 267634ff6846SIoana Radulescu dpcon_notif_cfg.dpio_id = nctx->dpio_id; 267734ff6846SIoana Radulescu dpcon_notif_cfg.priority = 0; 267834ff6846SIoana Radulescu dpcon_notif_cfg.user_ctx = nctx->qman64; 267934ff6846SIoana Radulescu err = dpcon_set_notification(priv->mc_io, 0, 268034ff6846SIoana Radulescu channel->dpcon->mc_handle, 268134ff6846SIoana Radulescu &dpcon_notif_cfg); 268234ff6846SIoana Radulescu if (err) { 268334ff6846SIoana Radulescu dev_err(dev, "dpcon_set_notification failed()\n"); 268434ff6846SIoana Radulescu goto err_set_cdan; 268534ff6846SIoana Radulescu } 268634ff6846SIoana Radulescu 268734ff6846SIoana Radulescu /* If we managed to allocate a channel and also found an affine 268834ff6846SIoana Radulescu * DPIO for this core, add it to the final mask 268934ff6846SIoana Radulescu */ 269034ff6846SIoana Radulescu cpumask_set_cpu(i, &priv->dpio_cpumask); 269134ff6846SIoana Radulescu priv->num_channels++; 269234ff6846SIoana Radulescu 269334ff6846SIoana Radulescu /* Stop if we already have enough channels to accommodate all 269434ff6846SIoana Radulescu * RX and TX conf queues 269534ff6846SIoana Radulescu */ 2696b0e4f37bSIoana Ciocoi Radulescu if (priv->num_channels == priv->dpni_attrs.num_queues) 269734ff6846SIoana Radulescu break; 269834ff6846SIoana Radulescu } 269934ff6846SIoana Radulescu 270034ff6846SIoana Radulescu return 0; 270134ff6846SIoana Radulescu 270234ff6846SIoana Radulescu err_set_cdan: 270347441f7fSIoana Ciornei dpaa2_io_service_deregister(channel->dpio, nctx, dev); 270434ff6846SIoana Radulescu err_service_reg: 27055d8dccf8SIoana Ciornei
/* Error unwinding continues below: free the failing channel, then either
 * roll back everything (-EPROBE_DEFER) or keep the channels acquired so far.
 */
dpaa2_eth_free_channel(priv, channel); 270634ff6846SIoana Radulescu err_alloc_ch: 27075aa4277dSIoana Ciornei if (err == -EPROBE_DEFER) { 27085aa4277dSIoana Ciornei for (i = 0; i < priv->num_channels; i++) { 27095aa4277dSIoana Ciornei channel = priv->channel[i]; 27105aa4277dSIoana Ciornei nctx = &channel->nctx; 27115aa4277dSIoana Ciornei dpaa2_io_service_deregister(channel->dpio, nctx, dev); 27125d8dccf8SIoana Ciornei dpaa2_eth_free_channel(priv, channel); 27135aa4277dSIoana Ciornei } 27145aa4277dSIoana Ciornei priv->num_channels = 0; 2715d7f5a9d8SIoana Ciornei return err; 27165aa4277dSIoana Ciornei } 2717d7f5a9d8SIoana Ciornei 271834ff6846SIoana Radulescu if (cpumask_empty(&priv->dpio_cpumask)) { 271934ff6846SIoana Radulescu dev_err(dev, "No cpu with an affine DPIO/DPCON\n"); 2720d7f5a9d8SIoana Ciornei return -ENODEV; 272134ff6846SIoana Radulescu } 272234ff6846SIoana Radulescu 272334ff6846SIoana Radulescu dev_info(dev, "Cores %*pbl available for processing ingress traffic\n", 272434ff6846SIoana Radulescu cpumask_pr_args(&priv->dpio_cpumask)); 272534ff6846SIoana Radulescu 272634ff6846SIoana Radulescu return 0; 272734ff6846SIoana Radulescu } 272834ff6846SIoana Radulescu
/* Teardown counterpart of dpaa2_eth_setup_dpio(): deregister every channel's
 * CDAN notification context and free the channel.
 */
27295d8dccf8SIoana Ciornei static void dpaa2_eth_free_dpio(struct dpaa2_eth_priv *priv) 273034ff6846SIoana Radulescu { 273147441f7fSIoana Ciornei struct device *dev = priv->net_dev->dev.parent; 273234ff6846SIoana Radulescu struct dpaa2_eth_channel *ch; 273347441f7fSIoana Ciornei int i; 273434ff6846SIoana Radulescu 273534ff6846SIoana Radulescu /* deregister CDAN notifications and free channels */ 273634ff6846SIoana Radulescu for (i = 0; i < priv->num_channels; i++) { 273734ff6846SIoana Radulescu ch = priv->channel[i]; 273834ff6846SIoana Radulescu dpaa2_io_service_deregister(ch->dpio, &ch->nctx, dev); 27395d8dccf8SIoana Ciornei dpaa2_eth_free_channel(priv, ch); 274034ff6846SIoana Radulescu } 274134ff6846SIoana Radulescu } 274234ff6846SIoana Radulescu
/* Look up the channel whose notification context requested a given CPU
 * (definition continues on the next line).
 */
27435d8dccf8SIoana Ciornei static struct 
dpaa2_eth_channel *dpaa2_eth_get_affine_channel(struct dpaa2_eth_priv *priv, 274434ff6846SIoana Radulescu int cpu) 274534ff6846SIoana Radulescu { 274634ff6846SIoana Radulescu struct device *dev = priv->net_dev->dev.parent; 274734ff6846SIoana Radulescu int i; 274834ff6846SIoana Radulescu 274934ff6846SIoana Radulescu for (i = 0; i < priv->num_channels; i++) 275034ff6846SIoana Radulescu if (priv->channel[i]->nctx.desired_cpu == cpu) 275134ff6846SIoana Radulescu return priv->channel[i]; 275234ff6846SIoana Radulescu 275334ff6846SIoana Radulescu /* We should never get here. Issue a warning and return 275434ff6846SIoana Radulescu * the first channel, because it's still better than nothing 275534ff6846SIoana Radulescu */ 275634ff6846SIoana Radulescu dev_warn(dev, "No affine channel found for cpu %d\n", cpu); 275734ff6846SIoana Radulescu 275834ff6846SIoana Radulescu return priv->channel[0]; 275934ff6846SIoana Radulescu } 276034ff6846SIoana Radulescu
/* Distribute the Rx/Rx-err and Tx-conf frame queues round-robin over the
 * CPUs in dpio_cpumask and bind each FQ to its CPU's affine channel; finishes
 * by refreshing the XPS maps via update_xps().
 */
27615d8dccf8SIoana Ciornei static void dpaa2_eth_set_fq_affinity(struct dpaa2_eth_priv *priv) 276234ff6846SIoana Radulescu { 276334ff6846SIoana Radulescu struct device *dev = priv->net_dev->dev.parent; 276434ff6846SIoana Radulescu struct dpaa2_eth_fq *fq; 276534ff6846SIoana Radulescu int rx_cpu, txc_cpu; 276606d5b179SIoana Radulescu int i; 276734ff6846SIoana Radulescu 276834ff6846SIoana Radulescu /* For each FQ, pick one channel/CPU to deliver frames to. 276934ff6846SIoana Radulescu * This may well change at runtime, either through irqbalance or 277034ff6846SIoana Radulescu * through direct user intervention.
277134ff6846SIoana Radulescu */
/* Rx and Tx-conf queues advance through the cpumask independently, each
 * wrapping back to the first CPU when the mask is exhausted.
 */
277234ff6846SIoana Radulescu rx_cpu = txc_cpu = cpumask_first(&priv->dpio_cpumask); 277334ff6846SIoana Radulescu 277434ff6846SIoana Radulescu for (i = 0; i < priv->num_fqs; i++) { 277534ff6846SIoana Radulescu fq = &priv->fq[i]; 277634ff6846SIoana Radulescu switch (fq->type) { 277734ff6846SIoana Radulescu case DPAA2_RX_FQ: 2778061d631fSIoana Ciornei case DPAA2_RX_ERR_FQ: 277934ff6846SIoana Radulescu fq->target_cpu = rx_cpu; 278034ff6846SIoana Radulescu rx_cpu = cpumask_next(rx_cpu, &priv->dpio_cpumask); 278134ff6846SIoana Radulescu if (rx_cpu >= nr_cpu_ids) 278234ff6846SIoana Radulescu rx_cpu = cpumask_first(&priv->dpio_cpumask); 278334ff6846SIoana Radulescu break; 278434ff6846SIoana Radulescu case DPAA2_TX_CONF_FQ: 278534ff6846SIoana Radulescu fq->target_cpu = txc_cpu; 278634ff6846SIoana Radulescu txc_cpu = cpumask_next(txc_cpu, &priv->dpio_cpumask); 278734ff6846SIoana Radulescu if (txc_cpu >= nr_cpu_ids) 278834ff6846SIoana Radulescu txc_cpu = cpumask_first(&priv->dpio_cpumask); 278934ff6846SIoana Radulescu break; 279034ff6846SIoana Radulescu default: 279134ff6846SIoana Radulescu dev_err(dev, "Unknown FQ type: %d\n", fq->type); 279234ff6846SIoana Radulescu } 27935d8dccf8SIoana Ciornei fq->channel = dpaa2_eth_get_affine_channel(priv, fq->target_cpu); 279434ff6846SIoana Radulescu } 279534ff6846SIoana Radulescu 279606d5b179SIoana Radulescu update_xps(priv); 279734ff6846SIoana Radulescu } 279834ff6846SIoana Radulescu
/* Populate priv->fq: Tx-conf FQs first, then one set of Rx FQs per traffic
 * class, then the single Rx-error FQ; finally assign per-FQ CPU affinity.
 */
27995d8dccf8SIoana Ciornei static void dpaa2_eth_setup_fqs(struct dpaa2_eth_priv *priv) 280034ff6846SIoana Radulescu { 2801685e39eaSIoana Radulescu int i, j; 280234ff6846SIoana Radulescu 280334ff6846SIoana Radulescu /* We have one TxConf FQ per Tx flow. 280434ff6846SIoana Radulescu * The number of Tx and Rx queues is the same. 280534ff6846SIoana Radulescu * Tx queues come first in the fq array.
280634ff6846SIoana Radulescu */ 280734ff6846SIoana Radulescu for (i = 0; i < dpaa2_eth_queue_count(priv); i++) { 280834ff6846SIoana Radulescu priv->fq[priv->num_fqs].type = DPAA2_TX_CONF_FQ; 280934ff6846SIoana Radulescu priv->fq[priv->num_fqs].consume = dpaa2_eth_tx_conf; 281034ff6846SIoana Radulescu priv->fq[priv->num_fqs++].flowid = (u16)i; 281134ff6846SIoana Radulescu } 281234ff6846SIoana Radulescu 2813685e39eaSIoana Radulescu for (j = 0; j < dpaa2_eth_tc_count(priv); j++) { 281434ff6846SIoana Radulescu for (i = 0; i < dpaa2_eth_queue_count(priv); i++) { 281534ff6846SIoana Radulescu priv->fq[priv->num_fqs].type = DPAA2_RX_FQ; 281634ff6846SIoana Radulescu priv->fq[priv->num_fqs].consume = dpaa2_eth_rx; 2817685e39eaSIoana Radulescu priv->fq[priv->num_fqs].tc = (u8)j; 281834ff6846SIoana Radulescu priv->fq[priv->num_fqs++].flowid = (u16)i; 281934ff6846SIoana Radulescu } 2820685e39eaSIoana Radulescu } 282134ff6846SIoana Radulescu 2822061d631fSIoana Ciornei /* We have exactly one Rx error queue per DPNI */ 2823061d631fSIoana Ciornei priv->fq[priv->num_fqs].type = DPAA2_RX_ERR_FQ; 2824061d631fSIoana Ciornei priv->fq[priv->num_fqs++].consume = dpaa2_eth_rx_err; 2825061d631fSIoana Ciornei 282634ff6846SIoana Radulescu /* For each FQ, decide on which core to process incoming frames */ 28275d8dccf8SIoana Ciornei dpaa2_eth_set_fq_affinity(priv); 282834ff6846SIoana Radulescu } 282934ff6846SIoana Radulescu 283034ff6846SIoana Radulescu /* Allocate and configure one buffer pool for each interface */
/* Like the DPCON path, -ENXIO becomes -EPROBE_DEFER so the probe can retry
 * once DPBP objects are available; on success caches the buffer pool id in
 * priv->bpid.
 */
28315d8dccf8SIoana Ciornei static int dpaa2_eth_setup_dpbp(struct dpaa2_eth_priv *priv) 283234ff6846SIoana Radulescu { 283334ff6846SIoana Radulescu int err; 283434ff6846SIoana Radulescu struct fsl_mc_device *dpbp_dev; 283534ff6846SIoana Radulescu struct device *dev = priv->net_dev->dev.parent; 283634ff6846SIoana Radulescu struct dpbp_attr dpbp_attrs; 283734ff6846SIoana Radulescu 283834ff6846SIoana Radulescu err = fsl_mc_object_allocate(to_fsl_mc_device(dev), FSL_MC_POOL_DPBP, 
283934ff6846SIoana Radulescu &dpbp_dev); 284034ff6846SIoana Radulescu if (err) { 2841d7f5a9d8SIoana Ciornei if (err == -ENXIO) 2842d7f5a9d8SIoana Ciornei err = -EPROBE_DEFER; 2843d7f5a9d8SIoana Ciornei else 284434ff6846SIoana Radulescu dev_err(dev, "DPBP device allocation failed\n"); 284534ff6846SIoana Radulescu return err; 284634ff6846SIoana Radulescu } 284734ff6846SIoana Radulescu 284834ff6846SIoana Radulescu priv->dpbp_dev = dpbp_dev; 284934ff6846SIoana Radulescu 285034ff6846SIoana Radulescu err = dpbp_open(priv->mc_io, 0, priv->dpbp_dev->obj_desc.id, 285134ff6846SIoana Radulescu &dpbp_dev->mc_handle); 285234ff6846SIoana Radulescu if (err) { 285334ff6846SIoana Radulescu dev_err(dev, "dpbp_open() failed\n"); 285434ff6846SIoana Radulescu goto err_open; 285534ff6846SIoana Radulescu } 285634ff6846SIoana Radulescu 285734ff6846SIoana Radulescu err = dpbp_reset(priv->mc_io, 0, dpbp_dev->mc_handle); 285834ff6846SIoana Radulescu if (err) { 285934ff6846SIoana Radulescu dev_err(dev, "dpbp_reset() failed\n"); 286034ff6846SIoana Radulescu goto err_reset; 286134ff6846SIoana Radulescu } 286234ff6846SIoana Radulescu 286334ff6846SIoana Radulescu err = dpbp_enable(priv->mc_io, 0, dpbp_dev->mc_handle); 286434ff6846SIoana Radulescu if (err) { 286534ff6846SIoana Radulescu dev_err(dev, "dpbp_enable() failed\n"); 286634ff6846SIoana Radulescu goto err_enable; 286734ff6846SIoana Radulescu } 286834ff6846SIoana Radulescu 286934ff6846SIoana Radulescu err = dpbp_get_attributes(priv->mc_io, 0, dpbp_dev->mc_handle, 287034ff6846SIoana Radulescu &dpbp_attrs); 287134ff6846SIoana Radulescu if (err) { 287234ff6846SIoana Radulescu dev_err(dev, "dpbp_get_attributes() failed\n"); 287334ff6846SIoana Radulescu goto err_get_attr; 287434ff6846SIoana Radulescu } 287534ff6846SIoana Radulescu priv->bpid = dpbp_attrs.bpid; 287634ff6846SIoana Radulescu 287734ff6846SIoana Radulescu return 0; 287834ff6846SIoana Radulescu 287934ff6846SIoana Radulescu err_get_attr: 288034ff6846SIoana Radulescu
/* goto-based unwinding in reverse acquisition order: disable, close, free. */
dpbp_disable(priv->mc_io, 0, dpbp_dev->mc_handle); 288134ff6846SIoana Radulescu err_enable: 288234ff6846SIoana Radulescu err_reset: 288334ff6846SIoana Radulescu dpbp_close(priv->mc_io, 0, dpbp_dev->mc_handle); 288434ff6846SIoana Radulescu err_open: 288534ff6846SIoana Radulescu fsl_mc_object_free(dpbp_dev); 288634ff6846SIoana Radulescu 288734ff6846SIoana Radulescu return err; 288834ff6846SIoana Radulescu } 288934ff6846SIoana Radulescu
/* Teardown counterpart of dpaa2_eth_setup_dpbp(): drain remaining buffers
 * from the pool, then disable, close and free the DPBP object.
 */
28905d8dccf8SIoana Ciornei static void dpaa2_eth_free_dpbp(struct dpaa2_eth_priv *priv) 289134ff6846SIoana Radulescu { 28925d8dccf8SIoana Ciornei dpaa2_eth_drain_pool(priv); 289334ff6846SIoana Radulescu dpbp_disable(priv->mc_io, 0, priv->dpbp_dev->mc_handle); 289434ff6846SIoana Radulescu dpbp_close(priv->mc_io, 0, priv->dpbp_dev->mc_handle); 289534ff6846SIoana Radulescu fsl_mc_object_free(priv->dpbp_dev); 289634ff6846SIoana Radulescu } 289734ff6846SIoana Radulescu
/* Program the Tx, Tx-conf and Rx hardware buffer layouts (headroom,
 * alignment, timestamp/frame-status passing) and cache the resulting Tx data
 * offset and Rx buffer size in priv. Returns 0 or a dpni_* error.
 */
28985d8dccf8SIoana Ciornei static int dpaa2_eth_set_buffer_layout(struct dpaa2_eth_priv *priv) 289934ff6846SIoana Radulescu { 290034ff6846SIoana Radulescu struct device *dev = priv->net_dev->dev.parent; 290134ff6846SIoana Radulescu struct dpni_buffer_layout buf_layout = {0}; 290227c87486SIoana Ciocoi Radulescu u16 rx_buf_align; 290334ff6846SIoana Radulescu int err; 290434ff6846SIoana Radulescu 290534ff6846SIoana Radulescu /* We need to check for WRIOP version 1.0.0, but depending on the MC 290634ff6846SIoana Radulescu * version, this number is not always provided correctly on rev1. 290734ff6846SIoana Radulescu * We need to check for both alternatives in this situation.
290834ff6846SIoana Radulescu */
/* Pick the Rx buffer alignment required by this WRIOP revision, then derive
 * the usable Rx buffer size from it.
 */
290934ff6846SIoana Radulescu if (priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(0, 0, 0) || 291034ff6846SIoana Radulescu priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(1, 0, 0)) 291127c87486SIoana Ciocoi Radulescu rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN_REV1; 291234ff6846SIoana Radulescu else 291327c87486SIoana Ciocoi Radulescu rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN; 291434ff6846SIoana Radulescu 2915efa6a7d0SIoana Ciornei /* We need to ensure that the buffer size seen by WRIOP is a multiple 2916efa6a7d0SIoana Ciornei * of 64 or 256 bytes depending on the WRIOP version. 2917efa6a7d0SIoana Ciornei */ 2918efa6a7d0SIoana Ciornei priv->rx_buf_size = ALIGN_DOWN(DPAA2_ETH_RX_BUF_SIZE, rx_buf_align); 2919efa6a7d0SIoana Ciornei 292034ff6846SIoana Radulescu /* tx buffer */ 292134ff6846SIoana Radulescu buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE; 292234ff6846SIoana Radulescu buf_layout.pass_timestamp = true; 2923c5521189SYangbo Lu buf_layout.pass_frame_status = true; 292434ff6846SIoana Radulescu buf_layout.options = DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE | 2925c5521189SYangbo Lu DPNI_BUF_LAYOUT_OPT_TIMESTAMP | 2926c5521189SYangbo Lu DPNI_BUF_LAYOUT_OPT_FRAME_STATUS; 292734ff6846SIoana Radulescu err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token, 292834ff6846SIoana Radulescu DPNI_QUEUE_TX, &buf_layout); 292934ff6846SIoana Radulescu if (err) { 293034ff6846SIoana Radulescu dev_err(dev, "dpni_set_buffer_layout(TX) failed\n"); 293134ff6846SIoana Radulescu return err; 293234ff6846SIoana Radulescu } 293334ff6846SIoana Radulescu 293434ff6846SIoana Radulescu /* tx-confirm buffer */ 2935c5521189SYangbo Lu buf_layout.options = DPNI_BUF_LAYOUT_OPT_TIMESTAMP | 2936c5521189SYangbo Lu DPNI_BUF_LAYOUT_OPT_FRAME_STATUS; 293734ff6846SIoana Radulescu err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token, 293834ff6846SIoana Radulescu DPNI_QUEUE_TX_CONFIRM, &buf_layout); 293934ff6846SIoana Radulescu if (err) { 294034ff6846SIoana 
Radulescu dev_err(dev, "dpni_set_buffer_layout(TX_CONF) failed\n"); 294134ff6846SIoana Radulescu return err; 294234ff6846SIoana Radulescu } 294334ff6846SIoana Radulescu 294434ff6846SIoana Radulescu /* Now that we've set our tx buffer layout, retrieve the minimum 294534ff6846SIoana Radulescu * required tx data offset. 294634ff6846SIoana Radulescu */ 294734ff6846SIoana Radulescu err = dpni_get_tx_data_offset(priv->mc_io, 0, priv->mc_token, 294834ff6846SIoana Radulescu &priv->tx_data_offset); 294934ff6846SIoana Radulescu if (err) { 295034ff6846SIoana Radulescu dev_err(dev, "dpni_get_tx_data_offset() failed\n"); 295134ff6846SIoana Radulescu return err; 295234ff6846SIoana Radulescu } 295334ff6846SIoana Radulescu 295434ff6846SIoana Radulescu if ((priv->tx_data_offset % 64) != 0) 295534ff6846SIoana Radulescu dev_warn(dev, "Tx data offset (%d) not a multiple of 64B\n", 295634ff6846SIoana Radulescu priv->tx_data_offset); 295734ff6846SIoana Radulescu 295834ff6846SIoana Radulescu /* rx buffer */ 295934ff6846SIoana Radulescu buf_layout.pass_frame_status = true; 296034ff6846SIoana Radulescu buf_layout.pass_parser_result = true; 296127c87486SIoana Ciocoi Radulescu buf_layout.data_align = rx_buf_align; 296234ff6846SIoana Radulescu buf_layout.data_head_room = dpaa2_eth_rx_head_room(priv); 296334ff6846SIoana Radulescu buf_layout.private_data_size = 0; 296434ff6846SIoana Radulescu buf_layout.options = DPNI_BUF_LAYOUT_OPT_PARSER_RESULT | 296534ff6846SIoana Radulescu DPNI_BUF_LAYOUT_OPT_FRAME_STATUS | 296634ff6846SIoana Radulescu DPNI_BUF_LAYOUT_OPT_DATA_ALIGN | 296734ff6846SIoana Radulescu DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM | 296834ff6846SIoana Radulescu DPNI_BUF_LAYOUT_OPT_TIMESTAMP; 296934ff6846SIoana Radulescu err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token, 297034ff6846SIoana Radulescu DPNI_QUEUE_RX, &buf_layout); 297134ff6846SIoana Radulescu if (err) { 297234ff6846SIoana Radulescu dev_err(dev, "dpni_set_buffer_layout(RX) failed\n"); 297334ff6846SIoana Radulescu
/* All three queue-type layouts programmed; the function returns below. */
return err; 297434ff6846SIoana Radulescu } 297534ff6846SIoana Radulescu 297634ff6846SIoana Radulescu return 0; 297734ff6846SIoana Radulescu } 297834ff6846SIoana Radulescu
/* DPNI firmware version that first supports direct FQID-based enqueue;
 * compared against below to pick the enqueue callback.
 */
29791fa0f68cSIoana Ciocoi Radulescu #define DPNI_ENQUEUE_FQID_VER_MAJOR 7 29801fa0f68cSIoana Ciocoi Radulescu #define DPNI_ENQUEUE_FQID_VER_MINOR 9 29811fa0f68cSIoana Ciocoi Radulescu
/* QDID-based enqueue for older DPNI firmware: submits one frame descriptor
 * through the Tx queuing destination; reports one frame enqueued on success.
 */
29821fa0f68cSIoana Ciocoi Radulescu static inline int dpaa2_eth_enqueue_qd(struct dpaa2_eth_priv *priv, 29831fa0f68cSIoana Ciocoi Radulescu struct dpaa2_eth_fq *fq, 298448c0481eSIoana Ciornei struct dpaa2_fd *fd, u8 prio, 29856ff80447SIoana Ciornei u32 num_frames __always_unused, 298648c0481eSIoana Ciornei int *frames_enqueued) 29871fa0f68cSIoana Ciocoi Radulescu { 298848c0481eSIoana Ciornei int err; 298948c0481eSIoana Ciornei 299048c0481eSIoana Ciornei err = dpaa2_io_service_enqueue_qd(fq->channel->dpio, 29911fa0f68cSIoana Ciocoi Radulescu priv->tx_qdid, prio, 29921fa0f68cSIoana Ciocoi Radulescu fq->tx_qdbin, fd); 299348c0481eSIoana Ciornei if (!err && frames_enqueued) 299448c0481eSIoana Ciornei *frames_enqueued = 1; 299548c0481eSIoana Ciornei return err; 29961fa0f68cSIoana Ciocoi Radulescu } 29971fa0f68cSIoana Ciocoi Radulescu
/* FQID-based multi-frame enqueue: the io service returns the number of
 * frames accepted (0 meaning the portal was busy, mapped to -EBUSY).
 */
29986ff80447SIoana Ciornei static inline int dpaa2_eth_enqueue_fq_multiple(struct dpaa2_eth_priv *priv, 29991fa0f68cSIoana Ciocoi Radulescu struct dpaa2_eth_fq *fq, 30006ff80447SIoana Ciornei struct dpaa2_fd *fd, 30016ff80447SIoana Ciornei u8 prio, u32 num_frames, 300248c0481eSIoana Ciornei int *frames_enqueued) 30031fa0f68cSIoana Ciocoi Radulescu { 300448c0481eSIoana Ciornei int err; 300548c0481eSIoana Ciornei 30066ff80447SIoana Ciornei err = dpaa2_io_service_enqueue_multiple_fq(fq->channel->dpio, 30076ff80447SIoana Ciornei fq->tx_fqid[prio], 30086ff80447SIoana Ciornei fd, num_frames); 30096ff80447SIoana Ciornei 30106ff80447SIoana Ciornei if (err == 0) 30116ff80447SIoana Ciornei return -EBUSY; 30126ff80447SIoana Ciornei 30136ff80447SIoana Ciornei if (frames_enqueued) 30146ff80447SIoana 
Ciornei *frames_enqueued = err; 30156ff80447SIoana Ciornei return 0; 30161fa0f68cSIoana Ciocoi Radulescu } 30171fa0f68cSIoana Ciocoi Radulescu
/* Select the enqueue callback based on DPNI firmware support: QDID-based for
 * firmware older than DPNI_ENQUEUE_FQID_VER_*, FQID-based otherwise.
 */
30185d8dccf8SIoana Ciornei static void dpaa2_eth_set_enqueue_mode(struct dpaa2_eth_priv *priv) 30191fa0f68cSIoana Ciocoi Radulescu { 30201fa0f68cSIoana Ciocoi Radulescu if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_ENQUEUE_FQID_VER_MAJOR, 30211fa0f68cSIoana Ciocoi Radulescu DPNI_ENQUEUE_FQID_VER_MINOR) < 0) 30221fa0f68cSIoana Ciocoi Radulescu priv->enqueue = dpaa2_eth_enqueue_qd; 30231fa0f68cSIoana Ciocoi Radulescu else 30246ff80447SIoana Ciornei priv->enqueue = dpaa2_eth_enqueue_fq_multiple; 30251fa0f68cSIoana Ciocoi Radulescu }
30261fa0f68cSIoana Ciocoi Radulescu
/* Enable symmetric Rx/Tx pause frames by default, preserving any other link
 * options already configured; caches the result in priv->link_state.
 */
30275d8dccf8SIoana Ciornei static int dpaa2_eth_set_pause(struct dpaa2_eth_priv *priv) 30288eb3cef8SIoana Radulescu { 30298eb3cef8SIoana Radulescu struct device *dev = priv->net_dev->dev.parent; 30308eb3cef8SIoana Radulescu struct dpni_link_cfg link_cfg = {0}; 30318eb3cef8SIoana Radulescu int err; 30328eb3cef8SIoana Radulescu 30338eb3cef8SIoana Radulescu /* Get the default link options so we don't override other flags */ 30348eb3cef8SIoana Radulescu err = dpni_get_link_cfg(priv->mc_io, 0, priv->mc_token, &link_cfg); 30358eb3cef8SIoana Radulescu if (err) { 30368eb3cef8SIoana Radulescu dev_err(dev, "dpni_get_link_cfg() failed\n"); 30378eb3cef8SIoana Radulescu return err; 30388eb3cef8SIoana Radulescu } 30398eb3cef8SIoana Radulescu 30408eb3cef8SIoana Radulescu /* By default, enable both Rx and Tx pause frames */ 30418eb3cef8SIoana Radulescu link_cfg.options |= DPNI_LINK_OPT_PAUSE; 30428eb3cef8SIoana Radulescu link_cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE; 30438eb3cef8SIoana Radulescu err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &link_cfg); 30448eb3cef8SIoana Radulescu if (err) { 30458eb3cef8SIoana Radulescu dev_err(dev, "dpni_set_link_cfg() failed\n"); 30468eb3cef8SIoana Radulescu return err; 30478eb3cef8SIoana Radulescu } 30488eb3cef8SIoana Radulescu 
30498eb3cef8SIoana Radulescu priv->link_state.options = link_cfg.options; 30508eb3cef8SIoana Radulescu 30518eb3cef8SIoana Radulescu return 0; 30528eb3cef8SIoana Radulescu } 30538eb3cef8SIoana Radulescu 30545d8dccf8SIoana Ciornei static void dpaa2_eth_update_tx_fqids(struct dpaa2_eth_priv *priv) 3055a690af4fSIoana Radulescu { 3056a690af4fSIoana Radulescu struct dpni_queue_id qid = {0}; 3057a690af4fSIoana Radulescu struct dpaa2_eth_fq *fq; 3058a690af4fSIoana Radulescu struct dpni_queue queue; 3059a690af4fSIoana Radulescu int i, j, err; 3060a690af4fSIoana Radulescu 3061a690af4fSIoana Radulescu /* We only use Tx FQIDs for FQID-based enqueue, so check 3062a690af4fSIoana Radulescu * if DPNI version supports it before updating FQIDs 3063a690af4fSIoana Radulescu */ 3064a690af4fSIoana Radulescu if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_ENQUEUE_FQID_VER_MAJOR, 3065a690af4fSIoana Radulescu DPNI_ENQUEUE_FQID_VER_MINOR) < 0) 3066a690af4fSIoana Radulescu return; 3067a690af4fSIoana Radulescu 3068a690af4fSIoana Radulescu for (i = 0; i < priv->num_fqs; i++) { 3069a690af4fSIoana Radulescu fq = &priv->fq[i]; 3070a690af4fSIoana Radulescu if (fq->type != DPAA2_TX_CONF_FQ) 3071a690af4fSIoana Radulescu continue; 3072a690af4fSIoana Radulescu for (j = 0; j < dpaa2_eth_tc_count(priv); j++) { 3073a690af4fSIoana Radulescu err = dpni_get_queue(priv->mc_io, 0, priv->mc_token, 3074a690af4fSIoana Radulescu DPNI_QUEUE_TX, j, fq->flowid, 3075a690af4fSIoana Radulescu &queue, &qid); 3076a690af4fSIoana Radulescu if (err) 3077a690af4fSIoana Radulescu goto out_err; 3078a690af4fSIoana Radulescu 3079a690af4fSIoana Radulescu fq->tx_fqid[j] = qid.fqid; 3080a690af4fSIoana Radulescu if (fq->tx_fqid[j] == 0) 3081a690af4fSIoana Radulescu goto out_err; 3082a690af4fSIoana Radulescu } 3083a690af4fSIoana Radulescu } 3084a690af4fSIoana Radulescu 30856ff80447SIoana Ciornei priv->enqueue = dpaa2_eth_enqueue_fq_multiple; 3086a690af4fSIoana Radulescu 3087a690af4fSIoana Radulescu return; 3088a690af4fSIoana Radulescu 
3089a690af4fSIoana Radulescu out_err: 3090a690af4fSIoana Radulescu netdev_info(priv->net_dev, 3091a690af4fSIoana Radulescu "Error reading Tx FQID, fallback to QDID-based enqueue\n"); 3092a690af4fSIoana Radulescu priv->enqueue = dpaa2_eth_enqueue_qd; 3093a690af4fSIoana Radulescu } 3094a690af4fSIoana Radulescu 30956aa90fe2SIoana Radulescu /* Configure ingress classification based on VLAN PCP */ 30965d8dccf8SIoana Ciornei static int dpaa2_eth_set_vlan_qos(struct dpaa2_eth_priv *priv) 30976aa90fe2SIoana Radulescu { 30986aa90fe2SIoana Radulescu struct device *dev = priv->net_dev->dev.parent; 30996aa90fe2SIoana Radulescu struct dpkg_profile_cfg kg_cfg = {0}; 31006aa90fe2SIoana Radulescu struct dpni_qos_tbl_cfg qos_cfg = {0}; 31016aa90fe2SIoana Radulescu struct dpni_rule_cfg key_params; 31026aa90fe2SIoana Radulescu void *dma_mem, *key, *mask; 31036aa90fe2SIoana Radulescu u8 key_size = 2; /* VLAN TCI field */ 31046aa90fe2SIoana Radulescu int i, pcp, err; 31056aa90fe2SIoana Radulescu 31066aa90fe2SIoana Radulescu /* VLAN-based classification only makes sense if we have multiple 31076aa90fe2SIoana Radulescu * traffic classes. 
31086aa90fe2SIoana Radulescu * Also, we need to extract just the 3-bit PCP field from the VLAN 31096aa90fe2SIoana Radulescu * header and we can only do that by using a mask 31106aa90fe2SIoana Radulescu */ 31116aa90fe2SIoana Radulescu if (dpaa2_eth_tc_count(priv) == 1 || !dpaa2_eth_fs_mask_enabled(priv)) { 31126aa90fe2SIoana Radulescu dev_dbg(dev, "VLAN-based QoS classification not supported\n"); 31136aa90fe2SIoana Radulescu return -EOPNOTSUPP; 31146aa90fe2SIoana Radulescu } 31156aa90fe2SIoana Radulescu 31166aa90fe2SIoana Radulescu dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL); 31176aa90fe2SIoana Radulescu if (!dma_mem) 31186aa90fe2SIoana Radulescu return -ENOMEM; 31196aa90fe2SIoana Radulescu 31206aa90fe2SIoana Radulescu kg_cfg.num_extracts = 1; 31216aa90fe2SIoana Radulescu kg_cfg.extracts[0].type = DPKG_EXTRACT_FROM_HDR; 31226aa90fe2SIoana Radulescu kg_cfg.extracts[0].extract.from_hdr.prot = NET_PROT_VLAN; 31236aa90fe2SIoana Radulescu kg_cfg.extracts[0].extract.from_hdr.type = DPKG_FULL_FIELD; 31246aa90fe2SIoana Radulescu kg_cfg.extracts[0].extract.from_hdr.field = NH_FLD_VLAN_TCI; 31256aa90fe2SIoana Radulescu 31266aa90fe2SIoana Radulescu err = dpni_prepare_key_cfg(&kg_cfg, dma_mem); 31276aa90fe2SIoana Radulescu if (err) { 31286aa90fe2SIoana Radulescu dev_err(dev, "dpni_prepare_key_cfg failed\n"); 31296aa90fe2SIoana Radulescu goto out_free_tbl; 31306aa90fe2SIoana Radulescu } 31316aa90fe2SIoana Radulescu 31326aa90fe2SIoana Radulescu /* set QoS table */ 31336aa90fe2SIoana Radulescu qos_cfg.default_tc = 0; 31346aa90fe2SIoana Radulescu qos_cfg.discard_on_miss = 0; 31356aa90fe2SIoana Radulescu qos_cfg.key_cfg_iova = dma_map_single(dev, dma_mem, 31366aa90fe2SIoana Radulescu DPAA2_CLASSIFIER_DMA_SIZE, 31376aa90fe2SIoana Radulescu DMA_TO_DEVICE); 31386aa90fe2SIoana Radulescu if (dma_mapping_error(dev, qos_cfg.key_cfg_iova)) { 31396aa90fe2SIoana Radulescu dev_err(dev, "QoS table DMA mapping failed\n"); 31406aa90fe2SIoana Radulescu err = -ENOMEM; 
31416aa90fe2SIoana Radulescu goto out_free_tbl; 31426aa90fe2SIoana Radulescu } 31436aa90fe2SIoana Radulescu 31446aa90fe2SIoana Radulescu err = dpni_set_qos_table(priv->mc_io, 0, priv->mc_token, &qos_cfg); 31456aa90fe2SIoana Radulescu if (err) { 31466aa90fe2SIoana Radulescu dev_err(dev, "dpni_set_qos_table failed\n"); 31476aa90fe2SIoana Radulescu goto out_unmap_tbl; 31486aa90fe2SIoana Radulescu } 31496aa90fe2SIoana Radulescu 31506aa90fe2SIoana Radulescu /* Add QoS table entries */ 31516aa90fe2SIoana Radulescu key = kzalloc(key_size * 2, GFP_KERNEL); 31526aa90fe2SIoana Radulescu if (!key) { 31536aa90fe2SIoana Radulescu err = -ENOMEM; 31546aa90fe2SIoana Radulescu goto out_unmap_tbl; 31556aa90fe2SIoana Radulescu } 31566aa90fe2SIoana Radulescu mask = key + key_size; 31576aa90fe2SIoana Radulescu *(__be16 *)mask = cpu_to_be16(VLAN_PRIO_MASK); 31586aa90fe2SIoana Radulescu 31596aa90fe2SIoana Radulescu key_params.key_iova = dma_map_single(dev, key, key_size * 2, 31606aa90fe2SIoana Radulescu DMA_TO_DEVICE); 31616aa90fe2SIoana Radulescu if (dma_mapping_error(dev, key_params.key_iova)) { 31626aa90fe2SIoana Radulescu dev_err(dev, "Qos table entry DMA mapping failed\n"); 31636aa90fe2SIoana Radulescu err = -ENOMEM; 31646aa90fe2SIoana Radulescu goto out_free_key; 31656aa90fe2SIoana Radulescu } 31666aa90fe2SIoana Radulescu 31676aa90fe2SIoana Radulescu key_params.mask_iova = key_params.key_iova + key_size; 31686aa90fe2SIoana Radulescu key_params.key_size = key_size; 31696aa90fe2SIoana Radulescu 31706aa90fe2SIoana Radulescu /* We add rules for PCP-based distribution starting with highest 31716aa90fe2SIoana Radulescu * priority (VLAN PCP = 7). 
If this DPNI doesn't have enough traffic 31726aa90fe2SIoana Radulescu * classes to accommodate all priority levels, the lowest ones end up 31736aa90fe2SIoana Radulescu * on TC 0 which was configured as default 31746aa90fe2SIoana Radulescu */ 31756aa90fe2SIoana Radulescu for (i = dpaa2_eth_tc_count(priv) - 1, pcp = 7; i >= 0; i--, pcp--) { 31766aa90fe2SIoana Radulescu *(__be16 *)key = cpu_to_be16(pcp << VLAN_PRIO_SHIFT); 31776aa90fe2SIoana Radulescu dma_sync_single_for_device(dev, key_params.key_iova, 31786aa90fe2SIoana Radulescu key_size * 2, DMA_TO_DEVICE); 31796aa90fe2SIoana Radulescu 31806aa90fe2SIoana Radulescu err = dpni_add_qos_entry(priv->mc_io, 0, priv->mc_token, 31816aa90fe2SIoana Radulescu &key_params, i, i); 31826aa90fe2SIoana Radulescu if (err) { 31836aa90fe2SIoana Radulescu dev_err(dev, "dpni_add_qos_entry failed\n"); 31846aa90fe2SIoana Radulescu dpni_clear_qos_table(priv->mc_io, 0, priv->mc_token); 31856aa90fe2SIoana Radulescu goto out_unmap_key; 31866aa90fe2SIoana Radulescu } 31876aa90fe2SIoana Radulescu } 31886aa90fe2SIoana Radulescu 31896aa90fe2SIoana Radulescu priv->vlan_cls_enabled = true; 31906aa90fe2SIoana Radulescu 31916aa90fe2SIoana Radulescu /* Table and key memory is not persistent, clean everything up after 31926aa90fe2SIoana Radulescu * configuration is finished 31936aa90fe2SIoana Radulescu */ 31946aa90fe2SIoana Radulescu out_unmap_key: 31956aa90fe2SIoana Radulescu dma_unmap_single(dev, key_params.key_iova, key_size * 2, DMA_TO_DEVICE); 31966aa90fe2SIoana Radulescu out_free_key: 31976aa90fe2SIoana Radulescu kfree(key); 31986aa90fe2SIoana Radulescu out_unmap_tbl: 31996aa90fe2SIoana Radulescu dma_unmap_single(dev, qos_cfg.key_cfg_iova, DPAA2_CLASSIFIER_DMA_SIZE, 32006aa90fe2SIoana Radulescu DMA_TO_DEVICE); 32016aa90fe2SIoana Radulescu out_free_tbl: 32026aa90fe2SIoana Radulescu kfree(dma_mem); 32036aa90fe2SIoana Radulescu 32046aa90fe2SIoana Radulescu return err; 32056aa90fe2SIoana Radulescu } 32066aa90fe2SIoana Radulescu 320734ff6846SIoana 
Radulescu /* Configure the DPNI object this interface is associated with */ 32085d8dccf8SIoana Ciornei static int dpaa2_eth_setup_dpni(struct fsl_mc_device *ls_dev) 320934ff6846SIoana Radulescu { 321034ff6846SIoana Radulescu struct device *dev = &ls_dev->dev; 321134ff6846SIoana Radulescu struct dpaa2_eth_priv *priv; 321234ff6846SIoana Radulescu struct net_device *net_dev; 321334ff6846SIoana Radulescu int err; 321434ff6846SIoana Radulescu 321534ff6846SIoana Radulescu net_dev = dev_get_drvdata(dev); 321634ff6846SIoana Radulescu priv = netdev_priv(net_dev); 321734ff6846SIoana Radulescu 321834ff6846SIoana Radulescu /* get a handle for the DPNI object */ 321934ff6846SIoana Radulescu err = dpni_open(priv->mc_io, 0, ls_dev->obj_desc.id, &priv->mc_token); 322034ff6846SIoana Radulescu if (err) { 322134ff6846SIoana Radulescu dev_err(dev, "dpni_open() failed\n"); 322234ff6846SIoana Radulescu return err; 322334ff6846SIoana Radulescu } 322434ff6846SIoana Radulescu 322534ff6846SIoana Radulescu /* Check if we can work with this DPNI object */ 322634ff6846SIoana Radulescu err = dpni_get_api_version(priv->mc_io, 0, &priv->dpni_ver_major, 322734ff6846SIoana Radulescu &priv->dpni_ver_minor); 322834ff6846SIoana Radulescu if (err) { 322934ff6846SIoana Radulescu dev_err(dev, "dpni_get_api_version() failed\n"); 323034ff6846SIoana Radulescu goto close; 323134ff6846SIoana Radulescu } 323234ff6846SIoana Radulescu if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_VER_MAJOR, DPNI_VER_MINOR) < 0) { 323334ff6846SIoana Radulescu dev_err(dev, "DPNI version %u.%u not supported, need >= %u.%u\n", 323434ff6846SIoana Radulescu priv->dpni_ver_major, priv->dpni_ver_minor, 323534ff6846SIoana Radulescu DPNI_VER_MAJOR, DPNI_VER_MINOR); 323634ff6846SIoana Radulescu err = -ENOTSUPP; 323734ff6846SIoana Radulescu goto close; 323834ff6846SIoana Radulescu } 323934ff6846SIoana Radulescu 324034ff6846SIoana Radulescu ls_dev->mc_io = priv->mc_io; 324134ff6846SIoana Radulescu ls_dev->mc_handle = priv->mc_token; 
324234ff6846SIoana Radulescu 324334ff6846SIoana Radulescu err = dpni_reset(priv->mc_io, 0, priv->mc_token); 324434ff6846SIoana Radulescu if (err) { 324534ff6846SIoana Radulescu dev_err(dev, "dpni_reset() failed\n"); 324634ff6846SIoana Radulescu goto close; 324734ff6846SIoana Radulescu } 324834ff6846SIoana Radulescu 324934ff6846SIoana Radulescu err = dpni_get_attributes(priv->mc_io, 0, priv->mc_token, 325034ff6846SIoana Radulescu &priv->dpni_attrs); 325134ff6846SIoana Radulescu if (err) { 325234ff6846SIoana Radulescu dev_err(dev, "dpni_get_attributes() failed (err=%d)\n", err); 325334ff6846SIoana Radulescu goto close; 325434ff6846SIoana Radulescu } 325534ff6846SIoana Radulescu 32565d8dccf8SIoana Ciornei err = dpaa2_eth_set_buffer_layout(priv); 325734ff6846SIoana Radulescu if (err) 325834ff6846SIoana Radulescu goto close; 325934ff6846SIoana Radulescu 32605d8dccf8SIoana Ciornei dpaa2_eth_set_enqueue_mode(priv); 32611fa0f68cSIoana Ciocoi Radulescu 32628eb3cef8SIoana Radulescu /* Enable pause frame support */ 32638eb3cef8SIoana Radulescu if (dpaa2_eth_has_pause_support(priv)) { 32645d8dccf8SIoana Ciornei err = dpaa2_eth_set_pause(priv); 32658eb3cef8SIoana Radulescu if (err) 32668eb3cef8SIoana Radulescu goto close; 32678eb3cef8SIoana Radulescu } 32688eb3cef8SIoana Radulescu 32695d8dccf8SIoana Ciornei err = dpaa2_eth_set_vlan_qos(priv); 32706aa90fe2SIoana Radulescu if (err && err != -EOPNOTSUPP) 32716aa90fe2SIoana Radulescu goto close; 32726aa90fe2SIoana Radulescu 32739334d5baSXu Wang priv->cls_rules = devm_kcalloc(dev, dpaa2_eth_fs_count(priv), 32749334d5baSXu Wang sizeof(struct dpaa2_eth_cls_rule), 32759334d5baSXu Wang GFP_KERNEL); 327697fff7c8SWei Yongjun if (!priv->cls_rules) { 327797fff7c8SWei Yongjun err = -ENOMEM; 3278afb90dbbSIoana Radulescu goto close; 327997fff7c8SWei Yongjun } 3280afb90dbbSIoana Radulescu 328134ff6846SIoana Radulescu return 0; 328234ff6846SIoana Radulescu 328334ff6846SIoana Radulescu close: 328434ff6846SIoana Radulescu dpni_close(priv->mc_io, 
0, priv->mc_token); 328534ff6846SIoana Radulescu 328634ff6846SIoana Radulescu return err; 328734ff6846SIoana Radulescu } 328834ff6846SIoana Radulescu 32895d8dccf8SIoana Ciornei static void dpaa2_eth_free_dpni(struct dpaa2_eth_priv *priv) 329034ff6846SIoana Radulescu { 329134ff6846SIoana Radulescu int err; 329234ff6846SIoana Radulescu 329334ff6846SIoana Radulescu err = dpni_reset(priv->mc_io, 0, priv->mc_token); 329434ff6846SIoana Radulescu if (err) 329534ff6846SIoana Radulescu netdev_warn(priv->net_dev, "dpni_reset() failed (err %d)\n", 329634ff6846SIoana Radulescu err); 329734ff6846SIoana Radulescu 329834ff6846SIoana Radulescu dpni_close(priv->mc_io, 0, priv->mc_token); 329934ff6846SIoana Radulescu } 330034ff6846SIoana Radulescu 33015d8dccf8SIoana Ciornei static int dpaa2_eth_setup_rx_flow(struct dpaa2_eth_priv *priv, 330234ff6846SIoana Radulescu struct dpaa2_eth_fq *fq) 330334ff6846SIoana Radulescu { 330434ff6846SIoana Radulescu struct device *dev = priv->net_dev->dev.parent; 330534ff6846SIoana Radulescu struct dpni_queue queue; 330634ff6846SIoana Radulescu struct dpni_queue_id qid; 330734ff6846SIoana Radulescu int err; 330834ff6846SIoana Radulescu 330934ff6846SIoana Radulescu err = dpni_get_queue(priv->mc_io, 0, priv->mc_token, 3310685e39eaSIoana Radulescu DPNI_QUEUE_RX, fq->tc, fq->flowid, &queue, &qid); 331134ff6846SIoana Radulescu if (err) { 331234ff6846SIoana Radulescu dev_err(dev, "dpni_get_queue(RX) failed\n"); 331334ff6846SIoana Radulescu return err; 331434ff6846SIoana Radulescu } 331534ff6846SIoana Radulescu 331634ff6846SIoana Radulescu fq->fqid = qid.fqid; 331734ff6846SIoana Radulescu 331834ff6846SIoana Radulescu queue.destination.id = fq->channel->dpcon_id; 331934ff6846SIoana Radulescu queue.destination.type = DPNI_DEST_DPCON; 332034ff6846SIoana Radulescu queue.destination.priority = 1; 332134ff6846SIoana Radulescu queue.user_context = (u64)(uintptr_t)fq; 332234ff6846SIoana Radulescu err = dpni_set_queue(priv->mc_io, 0, priv->mc_token, 
3323685e39eaSIoana Radulescu DPNI_QUEUE_RX, fq->tc, fq->flowid, 332416fa1cf1SIoana Radulescu DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST, 332534ff6846SIoana Radulescu &queue); 332634ff6846SIoana Radulescu if (err) { 332734ff6846SIoana Radulescu dev_err(dev, "dpni_set_queue(RX) failed\n"); 332834ff6846SIoana Radulescu return err; 332934ff6846SIoana Radulescu } 333034ff6846SIoana Radulescu 3331d678be1dSIoana Radulescu /* xdp_rxq setup */ 3332685e39eaSIoana Radulescu /* only once for each channel */ 3333685e39eaSIoana Radulescu if (fq->tc > 0) 3334685e39eaSIoana Radulescu return 0; 3335685e39eaSIoana Radulescu 3336d678be1dSIoana Radulescu err = xdp_rxq_info_reg(&fq->channel->xdp_rxq, priv->net_dev, 3337b02e5a0eSBjörn Töpel fq->flowid, 0); 3338d678be1dSIoana Radulescu if (err) { 3339d678be1dSIoana Radulescu dev_err(dev, "xdp_rxq_info_reg failed\n"); 3340d678be1dSIoana Radulescu return err; 3341d678be1dSIoana Radulescu } 3342d678be1dSIoana Radulescu 3343d678be1dSIoana Radulescu err = xdp_rxq_info_reg_mem_model(&fq->channel->xdp_rxq, 3344d678be1dSIoana Radulescu MEM_TYPE_PAGE_ORDER0, NULL); 3345d678be1dSIoana Radulescu if (err) { 3346d678be1dSIoana Radulescu dev_err(dev, "xdp_rxq_info_reg_mem_model failed\n"); 3347d678be1dSIoana Radulescu return err; 3348d678be1dSIoana Radulescu } 3349d678be1dSIoana Radulescu 335034ff6846SIoana Radulescu return 0; 335134ff6846SIoana Radulescu } 335234ff6846SIoana Radulescu 33535d8dccf8SIoana Ciornei static int dpaa2_eth_setup_tx_flow(struct dpaa2_eth_priv *priv, 335434ff6846SIoana Radulescu struct dpaa2_eth_fq *fq) 335534ff6846SIoana Radulescu { 335634ff6846SIoana Radulescu struct device *dev = priv->net_dev->dev.parent; 335734ff6846SIoana Radulescu struct dpni_queue queue; 335834ff6846SIoana Radulescu struct dpni_queue_id qid; 335915c87f6bSIoana Radulescu int i, err; 336034ff6846SIoana Radulescu 336115c87f6bSIoana Radulescu for (i = 0; i < dpaa2_eth_tc_count(priv); i++) { 336234ff6846SIoana Radulescu err = dpni_get_queue(priv->mc_io, 
0, priv->mc_token, 336315c87f6bSIoana Radulescu DPNI_QUEUE_TX, i, fq->flowid, 336415c87f6bSIoana Radulescu &queue, &qid); 336534ff6846SIoana Radulescu if (err) { 336634ff6846SIoana Radulescu dev_err(dev, "dpni_get_queue(TX) failed\n"); 336734ff6846SIoana Radulescu return err; 336834ff6846SIoana Radulescu } 336915c87f6bSIoana Radulescu fq->tx_fqid[i] = qid.fqid; 337015c87f6bSIoana Radulescu } 337134ff6846SIoana Radulescu 337215c87f6bSIoana Radulescu /* All Tx queues belonging to the same flowid have the same qdbin */ 337334ff6846SIoana Radulescu fq->tx_qdbin = qid.qdbin; 337434ff6846SIoana Radulescu 337534ff6846SIoana Radulescu err = dpni_get_queue(priv->mc_io, 0, priv->mc_token, 337634ff6846SIoana Radulescu DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid, 337734ff6846SIoana Radulescu &queue, &qid); 337834ff6846SIoana Radulescu if (err) { 337934ff6846SIoana Radulescu dev_err(dev, "dpni_get_queue(TX_CONF) failed\n"); 338034ff6846SIoana Radulescu return err; 338134ff6846SIoana Radulescu } 338234ff6846SIoana Radulescu 338334ff6846SIoana Radulescu fq->fqid = qid.fqid; 338434ff6846SIoana Radulescu 338534ff6846SIoana Radulescu queue.destination.id = fq->channel->dpcon_id; 338634ff6846SIoana Radulescu queue.destination.type = DPNI_DEST_DPCON; 338734ff6846SIoana Radulescu queue.destination.priority = 0; 338834ff6846SIoana Radulescu queue.user_context = (u64)(uintptr_t)fq; 338934ff6846SIoana Radulescu err = dpni_set_queue(priv->mc_io, 0, priv->mc_token, 339034ff6846SIoana Radulescu DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid, 339134ff6846SIoana Radulescu DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST, 339234ff6846SIoana Radulescu &queue); 339334ff6846SIoana Radulescu if (err) { 339434ff6846SIoana Radulescu dev_err(dev, "dpni_set_queue(TX_CONF) failed\n"); 339534ff6846SIoana Radulescu return err; 339634ff6846SIoana Radulescu } 339734ff6846SIoana Radulescu 339834ff6846SIoana Radulescu return 0; 339934ff6846SIoana Radulescu } 340034ff6846SIoana Radulescu 3401061d631fSIoana Ciornei static int 
setup_rx_err_flow(struct dpaa2_eth_priv *priv, 3402061d631fSIoana Ciornei struct dpaa2_eth_fq *fq) 3403061d631fSIoana Ciornei { 3404061d631fSIoana Ciornei struct device *dev = priv->net_dev->dev.parent; 3405061d631fSIoana Ciornei struct dpni_queue q = { { 0 } }; 3406061d631fSIoana Ciornei struct dpni_queue_id qid; 3407061d631fSIoana Ciornei u8 q_opt = DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST; 3408061d631fSIoana Ciornei int err; 3409061d631fSIoana Ciornei 3410061d631fSIoana Ciornei err = dpni_get_queue(priv->mc_io, 0, priv->mc_token, 3411061d631fSIoana Ciornei DPNI_QUEUE_RX_ERR, 0, 0, &q, &qid); 3412061d631fSIoana Ciornei if (err) { 3413061d631fSIoana Ciornei dev_err(dev, "dpni_get_queue() failed (%d)\n", err); 3414061d631fSIoana Ciornei return err; 3415061d631fSIoana Ciornei } 3416061d631fSIoana Ciornei 3417061d631fSIoana Ciornei fq->fqid = qid.fqid; 3418061d631fSIoana Ciornei 3419061d631fSIoana Ciornei q.destination.id = fq->channel->dpcon_id; 3420061d631fSIoana Ciornei q.destination.type = DPNI_DEST_DPCON; 3421061d631fSIoana Ciornei q.destination.priority = 1; 3422061d631fSIoana Ciornei q.user_context = (u64)(uintptr_t)fq; 3423061d631fSIoana Ciornei err = dpni_set_queue(priv->mc_io, 0, priv->mc_token, 3424061d631fSIoana Ciornei DPNI_QUEUE_RX_ERR, 0, 0, q_opt, &q); 3425061d631fSIoana Ciornei if (err) { 3426061d631fSIoana Ciornei dev_err(dev, "dpni_set_queue() failed (%d)\n", err); 3427061d631fSIoana Ciornei return err; 3428061d631fSIoana Ciornei } 3429061d631fSIoana Ciornei 3430061d631fSIoana Ciornei return 0; 3431061d631fSIoana Ciornei } 3432061d631fSIoana Ciornei 3433edad8d26SIoana Ciocoi Radulescu /* Supported header fields for Rx hash distribution key */ 3434f76c483aSIoana Radulescu static const struct dpaa2_eth_dist_fields dist_fields[] = { 343534ff6846SIoana Radulescu { 3436edad8d26SIoana Ciocoi Radulescu /* L2 header */ 3437edad8d26SIoana Ciocoi Radulescu .rxnfc_field = RXH_L2DA, 3438edad8d26SIoana Ciocoi Radulescu .cls_prot = NET_PROT_ETH, 
3439edad8d26SIoana Ciocoi Radulescu .cls_field = NH_FLD_ETH_DA, 34403a1e6b84SIoana Ciocoi Radulescu .id = DPAA2_ETH_DIST_ETHDST, 3441edad8d26SIoana Ciocoi Radulescu .size = 6, 3442edad8d26SIoana Ciocoi Radulescu }, { 3443afb90dbbSIoana Radulescu .cls_prot = NET_PROT_ETH, 3444afb90dbbSIoana Radulescu .cls_field = NH_FLD_ETH_SA, 34453a1e6b84SIoana Ciocoi Radulescu .id = DPAA2_ETH_DIST_ETHSRC, 3446afb90dbbSIoana Radulescu .size = 6, 3447afb90dbbSIoana Radulescu }, { 3448afb90dbbSIoana Radulescu /* This is the last ethertype field parsed: 3449afb90dbbSIoana Radulescu * depending on frame format, it can be the MAC ethertype 3450afb90dbbSIoana Radulescu * or the VLAN etype. 3451afb90dbbSIoana Radulescu */ 3452afb90dbbSIoana Radulescu .cls_prot = NET_PROT_ETH, 3453afb90dbbSIoana Radulescu .cls_field = NH_FLD_ETH_TYPE, 34543a1e6b84SIoana Ciocoi Radulescu .id = DPAA2_ETH_DIST_ETHTYPE, 3455afb90dbbSIoana Radulescu .size = 2, 3456afb90dbbSIoana Radulescu }, { 3457edad8d26SIoana Ciocoi Radulescu /* VLAN header */ 3458edad8d26SIoana Ciocoi Radulescu .rxnfc_field = RXH_VLAN, 3459edad8d26SIoana Ciocoi Radulescu .cls_prot = NET_PROT_VLAN, 3460edad8d26SIoana Ciocoi Radulescu .cls_field = NH_FLD_VLAN_TCI, 34613a1e6b84SIoana Ciocoi Radulescu .id = DPAA2_ETH_DIST_VLAN, 3462edad8d26SIoana Ciocoi Radulescu .size = 2, 3463edad8d26SIoana Ciocoi Radulescu }, { 346434ff6846SIoana Radulescu /* IP header */ 346534ff6846SIoana Radulescu .rxnfc_field = RXH_IP_SRC, 346634ff6846SIoana Radulescu .cls_prot = NET_PROT_IP, 346734ff6846SIoana Radulescu .cls_field = NH_FLD_IP_SRC, 34683a1e6b84SIoana Ciocoi Radulescu .id = DPAA2_ETH_DIST_IPSRC, 346934ff6846SIoana Radulescu .size = 4, 347034ff6846SIoana Radulescu }, { 347134ff6846SIoana Radulescu .rxnfc_field = RXH_IP_DST, 347234ff6846SIoana Radulescu .cls_prot = NET_PROT_IP, 347334ff6846SIoana Radulescu .cls_field = NH_FLD_IP_DST, 34743a1e6b84SIoana Ciocoi Radulescu .id = DPAA2_ETH_DIST_IPDST, 347534ff6846SIoana Radulescu .size = 4, 347634ff6846SIoana 
Radulescu }, { 347734ff6846SIoana Radulescu .rxnfc_field = RXH_L3_PROTO, 347834ff6846SIoana Radulescu .cls_prot = NET_PROT_IP, 347934ff6846SIoana Radulescu .cls_field = NH_FLD_IP_PROTO, 34803a1e6b84SIoana Ciocoi Radulescu .id = DPAA2_ETH_DIST_IPPROTO, 348134ff6846SIoana Radulescu .size = 1, 348234ff6846SIoana Radulescu }, { 348334ff6846SIoana Radulescu /* Using UDP ports, this is functionally equivalent to raw 348434ff6846SIoana Radulescu * byte pairs from L4 header. 348534ff6846SIoana Radulescu */ 348634ff6846SIoana Radulescu .rxnfc_field = RXH_L4_B_0_1, 348734ff6846SIoana Radulescu .cls_prot = NET_PROT_UDP, 348834ff6846SIoana Radulescu .cls_field = NH_FLD_UDP_PORT_SRC, 34893a1e6b84SIoana Ciocoi Radulescu .id = DPAA2_ETH_DIST_L4SRC, 349034ff6846SIoana Radulescu .size = 2, 349134ff6846SIoana Radulescu }, { 349234ff6846SIoana Radulescu .rxnfc_field = RXH_L4_B_2_3, 349334ff6846SIoana Radulescu .cls_prot = NET_PROT_UDP, 349434ff6846SIoana Radulescu .cls_field = NH_FLD_UDP_PORT_DST, 34953a1e6b84SIoana Ciocoi Radulescu .id = DPAA2_ETH_DIST_L4DST, 349634ff6846SIoana Radulescu .size = 2, 349734ff6846SIoana Radulescu }, 349834ff6846SIoana Radulescu }; 349934ff6846SIoana Radulescu 3500df85aeb9SIoana Radulescu /* Configure the Rx hash key using the legacy API */ 35015d8dccf8SIoana Ciornei static int dpaa2_eth_config_legacy_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key) 3502df85aeb9SIoana Radulescu { 3503df85aeb9SIoana Radulescu struct device *dev = priv->net_dev->dev.parent; 3504df85aeb9SIoana Radulescu struct dpni_rx_tc_dist_cfg dist_cfg; 3505685e39eaSIoana Radulescu int i, err = 0; 3506df85aeb9SIoana Radulescu 3507df85aeb9SIoana Radulescu memset(&dist_cfg, 0, sizeof(dist_cfg)); 3508df85aeb9SIoana Radulescu 3509df85aeb9SIoana Radulescu dist_cfg.key_cfg_iova = key; 3510df85aeb9SIoana Radulescu dist_cfg.dist_size = dpaa2_eth_queue_count(priv); 3511df85aeb9SIoana Radulescu dist_cfg.dist_mode = DPNI_DIST_MODE_HASH; 3512df85aeb9SIoana Radulescu 3513685e39eaSIoana 
Radulescu for (i = 0; i < dpaa2_eth_tc_count(priv); i++) { 3514685e39eaSIoana Radulescu err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token, 3515685e39eaSIoana Radulescu i, &dist_cfg); 3516685e39eaSIoana Radulescu if (err) { 3517df85aeb9SIoana Radulescu dev_err(dev, "dpni_set_rx_tc_dist failed\n"); 3518685e39eaSIoana Radulescu break; 3519685e39eaSIoana Radulescu } 3520685e39eaSIoana Radulescu } 3521df85aeb9SIoana Radulescu 3522df85aeb9SIoana Radulescu return err; 3523df85aeb9SIoana Radulescu } 3524df85aeb9SIoana Radulescu 3525df85aeb9SIoana Radulescu /* Configure the Rx hash key using the new API */ 35265d8dccf8SIoana Ciornei static int dpaa2_eth_config_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key) 3527df85aeb9SIoana Radulescu { 3528df85aeb9SIoana Radulescu struct device *dev = priv->net_dev->dev.parent; 3529df85aeb9SIoana Radulescu struct dpni_rx_dist_cfg dist_cfg; 3530685e39eaSIoana Radulescu int i, err = 0; 3531df85aeb9SIoana Radulescu 3532df85aeb9SIoana Radulescu memset(&dist_cfg, 0, sizeof(dist_cfg)); 3533df85aeb9SIoana Radulescu 3534df85aeb9SIoana Radulescu dist_cfg.key_cfg_iova = key; 3535df85aeb9SIoana Radulescu dist_cfg.dist_size = dpaa2_eth_queue_count(priv); 3536df85aeb9SIoana Radulescu dist_cfg.enable = 1; 3537df85aeb9SIoana Radulescu 3538685e39eaSIoana Radulescu for (i = 0; i < dpaa2_eth_tc_count(priv); i++) { 3539685e39eaSIoana Radulescu dist_cfg.tc = i; 3540685e39eaSIoana Radulescu err = dpni_set_rx_hash_dist(priv->mc_io, 0, priv->mc_token, 3541685e39eaSIoana Radulescu &dist_cfg); 3542685e39eaSIoana Radulescu if (err) { 3543df85aeb9SIoana Radulescu dev_err(dev, "dpni_set_rx_hash_dist failed\n"); 3544685e39eaSIoana Radulescu break; 3545685e39eaSIoana Radulescu } 35465e29c16fSIonut-robert Aron 35475e29c16fSIonut-robert Aron /* If the flow steering / hashing key is shared between all 35485e29c16fSIonut-robert Aron * traffic classes, install it just once 35495e29c16fSIonut-robert Aron */ 35505e29c16fSIonut-robert Aron if 
(priv->dpni_attrs.options & DPNI_OPT_SHARED_FS) 35515e29c16fSIonut-robert Aron break; 3552685e39eaSIoana Radulescu } 3553df85aeb9SIoana Radulescu 3554df85aeb9SIoana Radulescu return err; 3555df85aeb9SIoana Radulescu } 3556df85aeb9SIoana Radulescu 35574aaaf9b9SIoana Radulescu /* Configure the Rx flow classification key */ 35585d8dccf8SIoana Ciornei static int dpaa2_eth_config_cls_key(struct dpaa2_eth_priv *priv, dma_addr_t key) 35594aaaf9b9SIoana Radulescu { 35604aaaf9b9SIoana Radulescu struct device *dev = priv->net_dev->dev.parent; 35614aaaf9b9SIoana Radulescu struct dpni_rx_dist_cfg dist_cfg; 3562685e39eaSIoana Radulescu int i, err = 0; 35634aaaf9b9SIoana Radulescu 35644aaaf9b9SIoana Radulescu memset(&dist_cfg, 0, sizeof(dist_cfg)); 35654aaaf9b9SIoana Radulescu 35664aaaf9b9SIoana Radulescu dist_cfg.key_cfg_iova = key; 35674aaaf9b9SIoana Radulescu dist_cfg.dist_size = dpaa2_eth_queue_count(priv); 35684aaaf9b9SIoana Radulescu dist_cfg.enable = 1; 35694aaaf9b9SIoana Radulescu 3570685e39eaSIoana Radulescu for (i = 0; i < dpaa2_eth_tc_count(priv); i++) { 3571685e39eaSIoana Radulescu dist_cfg.tc = i; 3572685e39eaSIoana Radulescu err = dpni_set_rx_fs_dist(priv->mc_io, 0, priv->mc_token, 3573685e39eaSIoana Radulescu &dist_cfg); 3574685e39eaSIoana Radulescu if (err) { 35754aaaf9b9SIoana Radulescu dev_err(dev, "dpni_set_rx_fs_dist failed\n"); 3576685e39eaSIoana Radulescu break; 3577685e39eaSIoana Radulescu } 35785e29c16fSIonut-robert Aron 35795e29c16fSIonut-robert Aron /* If the flow steering / hashing key is shared between all 35805e29c16fSIonut-robert Aron * traffic classes, install it just once 35815e29c16fSIonut-robert Aron */ 35825e29c16fSIonut-robert Aron if (priv->dpni_attrs.options & DPNI_OPT_SHARED_FS) 35835e29c16fSIonut-robert Aron break; 3584685e39eaSIoana Radulescu } 35854aaaf9b9SIoana Radulescu 35864aaaf9b9SIoana Radulescu return err; 35874aaaf9b9SIoana Radulescu } 35884aaaf9b9SIoana Radulescu 3589afb90dbbSIoana Radulescu /* Size of the Rx flow classification 
key */ 35902d680237SIoana Ciocoi Radulescu int dpaa2_eth_cls_key_size(u64 fields) 3591afb90dbbSIoana Radulescu { 3592afb90dbbSIoana Radulescu int i, size = 0; 3593afb90dbbSIoana Radulescu 35942d680237SIoana Ciocoi Radulescu for (i = 0; i < ARRAY_SIZE(dist_fields); i++) { 35952d680237SIoana Ciocoi Radulescu if (!(fields & dist_fields[i].id)) 35962d680237SIoana Ciocoi Radulescu continue; 3597afb90dbbSIoana Radulescu size += dist_fields[i].size; 35982d680237SIoana Ciocoi Radulescu } 3599afb90dbbSIoana Radulescu 3600afb90dbbSIoana Radulescu return size; 3601afb90dbbSIoana Radulescu } 3602afb90dbbSIoana Radulescu 3603afb90dbbSIoana Radulescu /* Offset of header field in Rx classification key */ 3604afb90dbbSIoana Radulescu int dpaa2_eth_cls_fld_off(int prot, int field) 3605afb90dbbSIoana Radulescu { 3606afb90dbbSIoana Radulescu int i, off = 0; 3607afb90dbbSIoana Radulescu 3608afb90dbbSIoana Radulescu for (i = 0; i < ARRAY_SIZE(dist_fields); i++) { 3609afb90dbbSIoana Radulescu if (dist_fields[i].cls_prot == prot && 3610afb90dbbSIoana Radulescu dist_fields[i].cls_field == field) 3611afb90dbbSIoana Radulescu return off; 3612afb90dbbSIoana Radulescu off += dist_fields[i].size; 3613afb90dbbSIoana Radulescu } 3614afb90dbbSIoana Radulescu 3615afb90dbbSIoana Radulescu WARN_ONCE(1, "Unsupported header field used for Rx flow cls\n"); 3616afb90dbbSIoana Radulescu return 0; 3617afb90dbbSIoana Radulescu } 3618afb90dbbSIoana Radulescu 36192d680237SIoana Ciocoi Radulescu /* Prune unused fields from the classification rule. 
36202d680237SIoana Ciocoi Radulescu * Used when masking is not supported 36212d680237SIoana Ciocoi Radulescu */ 36222d680237SIoana Ciocoi Radulescu void dpaa2_eth_cls_trim_rule(void *key_mem, u64 fields) 36232d680237SIoana Ciocoi Radulescu { 36242d680237SIoana Ciocoi Radulescu int off = 0, new_off = 0; 36252d680237SIoana Ciocoi Radulescu int i, size; 36262d680237SIoana Ciocoi Radulescu 36272d680237SIoana Ciocoi Radulescu for (i = 0; i < ARRAY_SIZE(dist_fields); i++) { 36282d680237SIoana Ciocoi Radulescu size = dist_fields[i].size; 36292d680237SIoana Ciocoi Radulescu if (dist_fields[i].id & fields) { 36302d680237SIoana Ciocoi Radulescu memcpy(key_mem + new_off, key_mem + off, size); 36312d680237SIoana Ciocoi Radulescu new_off += size; 36322d680237SIoana Ciocoi Radulescu } 36332d680237SIoana Ciocoi Radulescu off += size; 36342d680237SIoana Ciocoi Radulescu } 36352d680237SIoana Ciocoi Radulescu } 36362d680237SIoana Ciocoi Radulescu 36374aaaf9b9SIoana Radulescu /* Set Rx distribution (hash or flow classification) key 363834ff6846SIoana Radulescu * flags is a combination of RXH_ bits 363934ff6846SIoana Radulescu */ 36403233c151SIoana Ciornei static int dpaa2_eth_set_dist_key(struct net_device *net_dev, 36414aaaf9b9SIoana Radulescu enum dpaa2_eth_rx_dist type, u64 flags) 364234ff6846SIoana Radulescu { 364334ff6846SIoana Radulescu struct device *dev = net_dev->dev.parent; 364434ff6846SIoana Radulescu struct dpaa2_eth_priv *priv = netdev_priv(net_dev); 364534ff6846SIoana Radulescu struct dpkg_profile_cfg cls_cfg; 3646edad8d26SIoana Ciocoi Radulescu u32 rx_hash_fields = 0; 3647df85aeb9SIoana Radulescu dma_addr_t key_iova; 364834ff6846SIoana Radulescu u8 *dma_mem; 364934ff6846SIoana Radulescu int i; 365034ff6846SIoana Radulescu int err = 0; 365134ff6846SIoana Radulescu 365234ff6846SIoana Radulescu memset(&cls_cfg, 0, sizeof(cls_cfg)); 365334ff6846SIoana Radulescu 3654f76c483aSIoana Radulescu for (i = 0; i < ARRAY_SIZE(dist_fields); i++) { 365534ff6846SIoana Radulescu struct 
dpkg_extract *key = 365634ff6846SIoana Radulescu &cls_cfg.extracts[cls_cfg.num_extracts]; 365734ff6846SIoana Radulescu 36582d680237SIoana Ciocoi Radulescu /* For both Rx hashing and classification keys 36592d680237SIoana Ciocoi Radulescu * we set only the selected fields. 36604aaaf9b9SIoana Radulescu */ 36613a1e6b84SIoana Ciocoi Radulescu if (!(flags & dist_fields[i].id)) 366234ff6846SIoana Radulescu continue; 36632d680237SIoana Ciocoi Radulescu if (type == DPAA2_ETH_RX_DIST_HASH) 36644aaaf9b9SIoana Radulescu rx_hash_fields |= dist_fields[i].rxnfc_field; 366534ff6846SIoana Radulescu 366634ff6846SIoana Radulescu if (cls_cfg.num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) { 366734ff6846SIoana Radulescu dev_err(dev, "error adding key extraction rule, too many rules?\n"); 366834ff6846SIoana Radulescu return -E2BIG; 366934ff6846SIoana Radulescu } 367034ff6846SIoana Radulescu 367134ff6846SIoana Radulescu key->type = DPKG_EXTRACT_FROM_HDR; 3672f76c483aSIoana Radulescu key->extract.from_hdr.prot = dist_fields[i].cls_prot; 367334ff6846SIoana Radulescu key->extract.from_hdr.type = DPKG_FULL_FIELD; 3674f76c483aSIoana Radulescu key->extract.from_hdr.field = dist_fields[i].cls_field; 367534ff6846SIoana Radulescu cls_cfg.num_extracts++; 367634ff6846SIoana Radulescu } 367734ff6846SIoana Radulescu 367834ff6846SIoana Radulescu dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL); 367934ff6846SIoana Radulescu if (!dma_mem) 368034ff6846SIoana Radulescu return -ENOMEM; 368134ff6846SIoana Radulescu 368234ff6846SIoana Radulescu err = dpni_prepare_key_cfg(&cls_cfg, dma_mem); 368334ff6846SIoana Radulescu if (err) { 368434ff6846SIoana Radulescu dev_err(dev, "dpni_prepare_key_cfg error %d\n", err); 3685df85aeb9SIoana Radulescu goto free_key; 368634ff6846SIoana Radulescu } 368734ff6846SIoana Radulescu 368834ff6846SIoana Radulescu /* Prepare for setting the rx dist */ 3689df85aeb9SIoana Radulescu key_iova = dma_map_single(dev, dma_mem, DPAA2_CLASSIFIER_DMA_SIZE, 369034ff6846SIoana Radulescu 
DMA_TO_DEVICE); 3691df85aeb9SIoana Radulescu if (dma_mapping_error(dev, key_iova)) { 369234ff6846SIoana Radulescu dev_err(dev, "DMA mapping failed\n"); 369334ff6846SIoana Radulescu err = -ENOMEM; 3694df85aeb9SIoana Radulescu goto free_key; 369534ff6846SIoana Radulescu } 369634ff6846SIoana Radulescu 36974aaaf9b9SIoana Radulescu if (type == DPAA2_ETH_RX_DIST_HASH) { 3698df85aeb9SIoana Radulescu if (dpaa2_eth_has_legacy_dist(priv)) 36995d8dccf8SIoana Ciornei err = dpaa2_eth_config_legacy_hash_key(priv, key_iova); 3700edad8d26SIoana Ciocoi Radulescu else 37015d8dccf8SIoana Ciornei err = dpaa2_eth_config_hash_key(priv, key_iova); 37024aaaf9b9SIoana Radulescu } else { 37035d8dccf8SIoana Ciornei err = dpaa2_eth_config_cls_key(priv, key_iova); 37044aaaf9b9SIoana Radulescu } 3705df85aeb9SIoana Radulescu 3706df85aeb9SIoana Radulescu dma_unmap_single(dev, key_iova, DPAA2_CLASSIFIER_DMA_SIZE, 3707df85aeb9SIoana Radulescu DMA_TO_DEVICE); 37084aaaf9b9SIoana Radulescu if (!err && type == DPAA2_ETH_RX_DIST_HASH) 3709edad8d26SIoana Ciocoi Radulescu priv->rx_hash_fields = rx_hash_fields; 371034ff6846SIoana Radulescu 3711df85aeb9SIoana Radulescu free_key: 371234ff6846SIoana Radulescu kfree(dma_mem); 371334ff6846SIoana Radulescu return err; 371434ff6846SIoana Radulescu } 371534ff6846SIoana Radulescu 37164aaaf9b9SIoana Radulescu int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags) 37174aaaf9b9SIoana Radulescu { 37184aaaf9b9SIoana Radulescu struct dpaa2_eth_priv *priv = netdev_priv(net_dev); 37193a1e6b84SIoana Ciocoi Radulescu u64 key = 0; 37203a1e6b84SIoana Ciocoi Radulescu int i; 37214aaaf9b9SIoana Radulescu 37224aaaf9b9SIoana Radulescu if (!dpaa2_eth_hash_enabled(priv)) 37234aaaf9b9SIoana Radulescu return -EOPNOTSUPP; 37244aaaf9b9SIoana Radulescu 37253a1e6b84SIoana Ciocoi Radulescu for (i = 0; i < ARRAY_SIZE(dist_fields); i++) 37263a1e6b84SIoana Ciocoi Radulescu if (dist_fields[i].rxnfc_field & flags) 37273a1e6b84SIoana Ciocoi Radulescu key |= dist_fields[i].id; 
37283a1e6b84SIoana Ciocoi Radulescu 37293a1e6b84SIoana Ciocoi Radulescu return dpaa2_eth_set_dist_key(net_dev, DPAA2_ETH_RX_DIST_HASH, key); 37304aaaf9b9SIoana Radulescu } 37314aaaf9b9SIoana Radulescu 37322d680237SIoana Ciocoi Radulescu int dpaa2_eth_set_cls(struct net_device *net_dev, u64 flags) 37332d680237SIoana Ciocoi Radulescu { 37342d680237SIoana Ciocoi Radulescu return dpaa2_eth_set_dist_key(net_dev, DPAA2_ETH_RX_DIST_CLS, flags); 37352d680237SIoana Ciocoi Radulescu } 37362d680237SIoana Ciocoi Radulescu 37372d680237SIoana Ciocoi Radulescu static int dpaa2_eth_set_default_cls(struct dpaa2_eth_priv *priv) 37384aaaf9b9SIoana Radulescu { 37394aaaf9b9SIoana Radulescu struct device *dev = priv->net_dev->dev.parent; 3740df8e249bSIoana Ciocoi Radulescu int err; 37414aaaf9b9SIoana Radulescu 37424aaaf9b9SIoana Radulescu /* Check if we actually support Rx flow classification */ 37434aaaf9b9SIoana Radulescu if (dpaa2_eth_has_legacy_dist(priv)) { 37444aaaf9b9SIoana Radulescu dev_dbg(dev, "Rx cls not supported by current MC version\n"); 37454aaaf9b9SIoana Radulescu return -EOPNOTSUPP; 37464aaaf9b9SIoana Radulescu } 37474aaaf9b9SIoana Radulescu 37482d680237SIoana Ciocoi Radulescu if (!dpaa2_eth_fs_enabled(priv)) { 37494aaaf9b9SIoana Radulescu dev_dbg(dev, "Rx cls disabled in DPNI options\n"); 37504aaaf9b9SIoana Radulescu return -EOPNOTSUPP; 37514aaaf9b9SIoana Radulescu } 37524aaaf9b9SIoana Radulescu 37534aaaf9b9SIoana Radulescu if (!dpaa2_eth_hash_enabled(priv)) { 37544aaaf9b9SIoana Radulescu dev_dbg(dev, "Rx cls disabled for single queue DPNIs\n"); 37554aaaf9b9SIoana Radulescu return -EOPNOTSUPP; 37564aaaf9b9SIoana Radulescu } 37574aaaf9b9SIoana Radulescu 37582d680237SIoana Ciocoi Radulescu /* If there is no support for masking in the classification table, 37592d680237SIoana Ciocoi Radulescu * we don't set a default key, as it will depend on the rules 37602d680237SIoana Ciocoi Radulescu * added by the user at runtime. 
37612d680237SIoana Ciocoi Radulescu */ 37622d680237SIoana Ciocoi Radulescu if (!dpaa2_eth_fs_mask_enabled(priv)) 37632d680237SIoana Ciocoi Radulescu goto out; 37642d680237SIoana Ciocoi Radulescu 37652d680237SIoana Ciocoi Radulescu err = dpaa2_eth_set_cls(priv->net_dev, DPAA2_ETH_DIST_ALL); 3766df8e249bSIoana Ciocoi Radulescu if (err) 3767df8e249bSIoana Ciocoi Radulescu return err; 3768df8e249bSIoana Ciocoi Radulescu 37692d680237SIoana Ciocoi Radulescu out: 37704aaaf9b9SIoana Radulescu priv->rx_cls_enabled = 1; 37714aaaf9b9SIoana Radulescu 3772df8e249bSIoana Ciocoi Radulescu return 0; 37734aaaf9b9SIoana Radulescu } 37744aaaf9b9SIoana Radulescu 377534ff6846SIoana Radulescu /* Bind the DPNI to its needed objects and resources: buffer pool, DPIOs, 377634ff6846SIoana Radulescu * frame queues and channels 377734ff6846SIoana Radulescu */ 37785d8dccf8SIoana Ciornei static int dpaa2_eth_bind_dpni(struct dpaa2_eth_priv *priv) 377934ff6846SIoana Radulescu { 378034ff6846SIoana Radulescu struct net_device *net_dev = priv->net_dev; 378134ff6846SIoana Radulescu struct device *dev = net_dev->dev.parent; 378234ff6846SIoana Radulescu struct dpni_pools_cfg pools_params; 378334ff6846SIoana Radulescu struct dpni_error_cfg err_cfg; 378434ff6846SIoana Radulescu int err = 0; 378534ff6846SIoana Radulescu int i; 378634ff6846SIoana Radulescu 378734ff6846SIoana Radulescu pools_params.num_dpbp = 1; 378834ff6846SIoana Radulescu pools_params.pools[0].dpbp_id = priv->dpbp_dev->obj_desc.id; 378934ff6846SIoana Radulescu pools_params.pools[0].backup_pool = 0; 3790efa6a7d0SIoana Ciornei pools_params.pools[0].buffer_size = priv->rx_buf_size; 379134ff6846SIoana Radulescu err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params); 379234ff6846SIoana Radulescu if (err) { 379334ff6846SIoana Radulescu dev_err(dev, "dpni_set_pools() failed\n"); 379434ff6846SIoana Radulescu return err; 379534ff6846SIoana Radulescu } 379634ff6846SIoana Radulescu 379734ff6846SIoana Radulescu /* have the interface 
implicitly distribute traffic based on 379834ff6846SIoana Radulescu * the default hash key 379934ff6846SIoana Radulescu */ 380034ff6846SIoana Radulescu err = dpaa2_eth_set_hash(net_dev, DPAA2_RXH_DEFAULT); 3801edad8d26SIoana Ciocoi Radulescu if (err && err != -EOPNOTSUPP) 380234ff6846SIoana Radulescu dev_err(dev, "Failed to configure hashing\n"); 380334ff6846SIoana Radulescu 38044aaaf9b9SIoana Radulescu /* Configure the flow classification key; it includes all 38054aaaf9b9SIoana Radulescu * supported header fields and cannot be modified at runtime 38064aaaf9b9SIoana Radulescu */ 38072d680237SIoana Ciocoi Radulescu err = dpaa2_eth_set_default_cls(priv); 38084aaaf9b9SIoana Radulescu if (err && err != -EOPNOTSUPP) 38094aaaf9b9SIoana Radulescu dev_err(dev, "Failed to configure Rx classification key\n"); 38104aaaf9b9SIoana Radulescu 381134ff6846SIoana Radulescu /* Configure handling of error frames */ 381234ff6846SIoana Radulescu err_cfg.errors = DPAA2_FAS_RX_ERR_MASK; 381334ff6846SIoana Radulescu err_cfg.set_frame_annotation = 1; 381434ff6846SIoana Radulescu err_cfg.error_action = DPNI_ERROR_ACTION_DISCARD; 381534ff6846SIoana Radulescu err = dpni_set_errors_behavior(priv->mc_io, 0, priv->mc_token, 381634ff6846SIoana Radulescu &err_cfg); 381734ff6846SIoana Radulescu if (err) { 381834ff6846SIoana Radulescu dev_err(dev, "dpni_set_errors_behavior failed\n"); 381934ff6846SIoana Radulescu return err; 382034ff6846SIoana Radulescu } 382134ff6846SIoana Radulescu 382234ff6846SIoana Radulescu /* Configure Rx and Tx conf queues to generate CDANs */ 382334ff6846SIoana Radulescu for (i = 0; i < priv->num_fqs; i++) { 382434ff6846SIoana Radulescu switch (priv->fq[i].type) { 382534ff6846SIoana Radulescu case DPAA2_RX_FQ: 38265d8dccf8SIoana Ciornei err = dpaa2_eth_setup_rx_flow(priv, &priv->fq[i]); 382734ff6846SIoana Radulescu break; 382834ff6846SIoana Radulescu case DPAA2_TX_CONF_FQ: 38295d8dccf8SIoana Ciornei err = dpaa2_eth_setup_tx_flow(priv, &priv->fq[i]); 383034ff6846SIoana 
Radulescu break; 3831061d631fSIoana Ciornei case DPAA2_RX_ERR_FQ: 3832061d631fSIoana Ciornei err = setup_rx_err_flow(priv, &priv->fq[i]); 3833061d631fSIoana Ciornei break; 383434ff6846SIoana Radulescu default: 383534ff6846SIoana Radulescu dev_err(dev, "Invalid FQ type %d\n", priv->fq[i].type); 383634ff6846SIoana Radulescu return -EINVAL; 383734ff6846SIoana Radulescu } 383834ff6846SIoana Radulescu if (err) 383934ff6846SIoana Radulescu return err; 384034ff6846SIoana Radulescu } 384134ff6846SIoana Radulescu 384234ff6846SIoana Radulescu err = dpni_get_qdid(priv->mc_io, 0, priv->mc_token, 384334ff6846SIoana Radulescu DPNI_QUEUE_TX, &priv->tx_qdid); 384434ff6846SIoana Radulescu if (err) { 384534ff6846SIoana Radulescu dev_err(dev, "dpni_get_qdid() failed\n"); 384634ff6846SIoana Radulescu return err; 384734ff6846SIoana Radulescu } 384834ff6846SIoana Radulescu 384934ff6846SIoana Radulescu return 0; 385034ff6846SIoana Radulescu } 385134ff6846SIoana Radulescu 385234ff6846SIoana Radulescu /* Allocate rings for storing incoming frame descriptors */ 38535d8dccf8SIoana Ciornei static int dpaa2_eth_alloc_rings(struct dpaa2_eth_priv *priv) 385434ff6846SIoana Radulescu { 385534ff6846SIoana Radulescu struct net_device *net_dev = priv->net_dev; 385634ff6846SIoana Radulescu struct device *dev = net_dev->dev.parent; 385734ff6846SIoana Radulescu int i; 385834ff6846SIoana Radulescu 385934ff6846SIoana Radulescu for (i = 0; i < priv->num_channels; i++) { 386034ff6846SIoana Radulescu priv->channel[i]->store = 386134ff6846SIoana Radulescu dpaa2_io_store_create(DPAA2_ETH_STORE_SIZE, dev); 386234ff6846SIoana Radulescu if (!priv->channel[i]->store) { 386334ff6846SIoana Radulescu netdev_err(net_dev, "dpaa2_io_store_create() failed\n"); 386434ff6846SIoana Radulescu goto err_ring; 386534ff6846SIoana Radulescu } 386634ff6846SIoana Radulescu } 386734ff6846SIoana Radulescu 386834ff6846SIoana Radulescu return 0; 386934ff6846SIoana Radulescu 387034ff6846SIoana Radulescu err_ring: 387134ff6846SIoana 
Radulescu for (i = 0; i < priv->num_channels; i++) { 387234ff6846SIoana Radulescu if (!priv->channel[i]->store) 387334ff6846SIoana Radulescu break; 387434ff6846SIoana Radulescu dpaa2_io_store_destroy(priv->channel[i]->store); 387534ff6846SIoana Radulescu } 387634ff6846SIoana Radulescu 387734ff6846SIoana Radulescu return -ENOMEM; 387834ff6846SIoana Radulescu } 387934ff6846SIoana Radulescu 38805d8dccf8SIoana Ciornei static void dpaa2_eth_free_rings(struct dpaa2_eth_priv *priv) 388134ff6846SIoana Radulescu { 388234ff6846SIoana Radulescu int i; 388334ff6846SIoana Radulescu 388434ff6846SIoana Radulescu for (i = 0; i < priv->num_channels; i++) 388534ff6846SIoana Radulescu dpaa2_io_store_destroy(priv->channel[i]->store); 388634ff6846SIoana Radulescu } 388734ff6846SIoana Radulescu 38885d8dccf8SIoana Ciornei static int dpaa2_eth_set_mac_addr(struct dpaa2_eth_priv *priv) 388934ff6846SIoana Radulescu { 389034ff6846SIoana Radulescu struct net_device *net_dev = priv->net_dev; 389134ff6846SIoana Radulescu struct device *dev = net_dev->dev.parent; 389234ff6846SIoana Radulescu u8 mac_addr[ETH_ALEN], dpni_mac_addr[ETH_ALEN]; 389334ff6846SIoana Radulescu int err; 389434ff6846SIoana Radulescu 389534ff6846SIoana Radulescu /* Get firmware address, if any */ 389634ff6846SIoana Radulescu err = dpni_get_port_mac_addr(priv->mc_io, 0, priv->mc_token, mac_addr); 389734ff6846SIoana Radulescu if (err) { 389834ff6846SIoana Radulescu dev_err(dev, "dpni_get_port_mac_addr() failed\n"); 389934ff6846SIoana Radulescu return err; 390034ff6846SIoana Radulescu } 390134ff6846SIoana Radulescu 390234ff6846SIoana Radulescu /* Get DPNI attributes address, if any */ 390334ff6846SIoana Radulescu err = dpni_get_primary_mac_addr(priv->mc_io, 0, priv->mc_token, 390434ff6846SIoana Radulescu dpni_mac_addr); 390534ff6846SIoana Radulescu if (err) { 390634ff6846SIoana Radulescu dev_err(dev, "dpni_get_primary_mac_addr() failed\n"); 390734ff6846SIoana Radulescu return err; 390834ff6846SIoana Radulescu } 
390934ff6846SIoana Radulescu 391034ff6846SIoana Radulescu /* First check if firmware has any address configured by bootloader */ 391134ff6846SIoana Radulescu if (!is_zero_ether_addr(mac_addr)) { 391234ff6846SIoana Radulescu /* If the DPMAC addr != DPNI addr, update it */ 391334ff6846SIoana Radulescu if (!ether_addr_equal(mac_addr, dpni_mac_addr)) { 391434ff6846SIoana Radulescu err = dpni_set_primary_mac_addr(priv->mc_io, 0, 391534ff6846SIoana Radulescu priv->mc_token, 391634ff6846SIoana Radulescu mac_addr); 391734ff6846SIoana Radulescu if (err) { 391834ff6846SIoana Radulescu dev_err(dev, "dpni_set_primary_mac_addr() failed\n"); 391934ff6846SIoana Radulescu return err; 392034ff6846SIoana Radulescu } 392134ff6846SIoana Radulescu } 392234ff6846SIoana Radulescu memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len); 392334ff6846SIoana Radulescu } else if (is_zero_ether_addr(dpni_mac_addr)) { 392434ff6846SIoana Radulescu /* No MAC address configured, fill in net_dev->dev_addr 392534ff6846SIoana Radulescu * with a random one 392634ff6846SIoana Radulescu */ 392734ff6846SIoana Radulescu eth_hw_addr_random(net_dev); 392834ff6846SIoana Radulescu dev_dbg_once(dev, "device(s) have all-zero hwaddr, replaced with random\n"); 392934ff6846SIoana Radulescu 393034ff6846SIoana Radulescu err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token, 393134ff6846SIoana Radulescu net_dev->dev_addr); 393234ff6846SIoana Radulescu if (err) { 393334ff6846SIoana Radulescu dev_err(dev, "dpni_set_primary_mac_addr() failed\n"); 393434ff6846SIoana Radulescu return err; 393534ff6846SIoana Radulescu } 393634ff6846SIoana Radulescu 393734ff6846SIoana Radulescu /* Override NET_ADDR_RANDOM set by eth_hw_addr_random(); for all 393834ff6846SIoana Radulescu * practical purposes, this will be our "permanent" mac address, 393934ff6846SIoana Radulescu * at least until the next reboot. This move will also permit 394034ff6846SIoana Radulescu * register_netdevice() to properly fill up net_dev->perm_addr. 
394134ff6846SIoana Radulescu */ 394234ff6846SIoana Radulescu net_dev->addr_assign_type = NET_ADDR_PERM; 394334ff6846SIoana Radulescu } else { 394434ff6846SIoana Radulescu /* NET_ADDR_PERM is default, all we have to do is 394534ff6846SIoana Radulescu * fill in the device addr. 394634ff6846SIoana Radulescu */ 394734ff6846SIoana Radulescu memcpy(net_dev->dev_addr, dpni_mac_addr, net_dev->addr_len); 394834ff6846SIoana Radulescu } 394934ff6846SIoana Radulescu 395034ff6846SIoana Radulescu return 0; 395134ff6846SIoana Radulescu } 395234ff6846SIoana Radulescu 39535d8dccf8SIoana Ciornei static int dpaa2_eth_netdev_init(struct net_device *net_dev) 395434ff6846SIoana Radulescu { 395534ff6846SIoana Radulescu struct device *dev = net_dev->dev.parent; 395634ff6846SIoana Radulescu struct dpaa2_eth_priv *priv = netdev_priv(net_dev); 395734ff6846SIoana Radulescu u32 options = priv->dpni_attrs.options; 395834ff6846SIoana Radulescu u64 supported = 0, not_supported = 0; 395934ff6846SIoana Radulescu u8 bcast_addr[ETH_ALEN]; 396034ff6846SIoana Radulescu u8 num_queues; 396134ff6846SIoana Radulescu int err; 396234ff6846SIoana Radulescu 396334ff6846SIoana Radulescu net_dev->netdev_ops = &dpaa2_eth_ops; 396434ff6846SIoana Radulescu net_dev->ethtool_ops = &dpaa2_ethtool_ops; 396534ff6846SIoana Radulescu 39665d8dccf8SIoana Ciornei err = dpaa2_eth_set_mac_addr(priv); 396734ff6846SIoana Radulescu if (err) 396834ff6846SIoana Radulescu return err; 396934ff6846SIoana Radulescu 397034ff6846SIoana Radulescu /* Explicitly add the broadcast address to the MAC filtering table */ 397134ff6846SIoana Radulescu eth_broadcast_addr(bcast_addr); 397234ff6846SIoana Radulescu err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token, bcast_addr); 397334ff6846SIoana Radulescu if (err) { 397434ff6846SIoana Radulescu dev_err(dev, "dpni_add_mac_addr() failed\n"); 397534ff6846SIoana Radulescu return err; 397634ff6846SIoana Radulescu } 397734ff6846SIoana Radulescu 397834ff6846SIoana Radulescu /* Set MTU upper limit; 
lower limit is 68B (default value) */ 397934ff6846SIoana Radulescu net_dev->max_mtu = DPAA2_ETH_MAX_MTU; 398034ff6846SIoana Radulescu err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token, 398134ff6846SIoana Radulescu DPAA2_ETH_MFL); 398234ff6846SIoana Radulescu if (err) { 398334ff6846SIoana Radulescu dev_err(dev, "dpni_set_max_frame_length() failed\n"); 398434ff6846SIoana Radulescu return err; 398534ff6846SIoana Radulescu } 398634ff6846SIoana Radulescu 398734ff6846SIoana Radulescu /* Set actual number of queues in the net device */ 398834ff6846SIoana Radulescu num_queues = dpaa2_eth_queue_count(priv); 398934ff6846SIoana Radulescu err = netif_set_real_num_tx_queues(net_dev, num_queues); 399034ff6846SIoana Radulescu if (err) { 399134ff6846SIoana Radulescu dev_err(dev, "netif_set_real_num_tx_queues() failed\n"); 399234ff6846SIoana Radulescu return err; 399334ff6846SIoana Radulescu } 399434ff6846SIoana Radulescu err = netif_set_real_num_rx_queues(net_dev, num_queues); 399534ff6846SIoana Radulescu if (err) { 399634ff6846SIoana Radulescu dev_err(dev, "netif_set_real_num_rx_queues() failed\n"); 399734ff6846SIoana Radulescu return err; 399834ff6846SIoana Radulescu } 399934ff6846SIoana Radulescu 400034ff6846SIoana Radulescu /* Capabilities listing */ 400134ff6846SIoana Radulescu supported |= IFF_LIVE_ADDR_CHANGE; 400234ff6846SIoana Radulescu 400334ff6846SIoana Radulescu if (options & DPNI_OPT_NO_MAC_FILTER) 400434ff6846SIoana Radulescu not_supported |= IFF_UNICAST_FLT; 400534ff6846SIoana Radulescu else 400634ff6846SIoana Radulescu supported |= IFF_UNICAST_FLT; 400734ff6846SIoana Radulescu 400834ff6846SIoana Radulescu net_dev->priv_flags |= supported; 400934ff6846SIoana Radulescu net_dev->priv_flags &= ~not_supported; 401034ff6846SIoana Radulescu 401134ff6846SIoana Radulescu /* Features */ 401234ff6846SIoana Radulescu net_dev->features = NETIF_F_RXCSUM | 401334ff6846SIoana Radulescu NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 401434ff6846SIoana Radulescu NETIF_F_SG | 
NETIF_F_HIGHDMA | 40153657cdafSIoana Ciornei NETIF_F_LLTX | NETIF_F_HW_TC; 401634ff6846SIoana Radulescu net_dev->hw_features = net_dev->features; 401734ff6846SIoana Radulescu 401834ff6846SIoana Radulescu return 0; 401934ff6846SIoana Radulescu } 402034ff6846SIoana Radulescu 40215d8dccf8SIoana Ciornei static int dpaa2_eth_poll_link_state(void *arg) 402234ff6846SIoana Radulescu { 402334ff6846SIoana Radulescu struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)arg; 402434ff6846SIoana Radulescu int err; 402534ff6846SIoana Radulescu 402634ff6846SIoana Radulescu while (!kthread_should_stop()) { 40275d8dccf8SIoana Ciornei err = dpaa2_eth_link_state_update(priv); 402834ff6846SIoana Radulescu if (unlikely(err)) 402934ff6846SIoana Radulescu return err; 403034ff6846SIoana Radulescu 403134ff6846SIoana Radulescu msleep(DPAA2_ETH_LINK_STATE_REFRESH); 403234ff6846SIoana Radulescu } 403334ff6846SIoana Radulescu 403434ff6846SIoana Radulescu return 0; 403534ff6846SIoana Radulescu } 403634ff6846SIoana Radulescu 403771947923SIoana Ciornei static int dpaa2_eth_connect_mac(struct dpaa2_eth_priv *priv) 403871947923SIoana Ciornei { 403971947923SIoana Ciornei struct fsl_mc_device *dpni_dev, *dpmac_dev; 404071947923SIoana Ciornei struct dpaa2_mac *mac; 404171947923SIoana Ciornei int err; 404271947923SIoana Ciornei 404371947923SIoana Ciornei dpni_dev = to_fsl_mc_device(priv->net_dev->dev.parent); 404471947923SIoana Ciornei dpmac_dev = fsl_mc_get_endpoint(dpni_dev); 4045841eb401SIoana Ciornei if (IS_ERR_OR_NULL(dpmac_dev) || dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type) 404671947923SIoana Ciornei return 0; 404771947923SIoana Ciornei 404871947923SIoana Ciornei if (dpaa2_mac_is_type_fixed(dpmac_dev, priv->mc_io)) 404971947923SIoana Ciornei return 0; 405071947923SIoana Ciornei 405171947923SIoana Ciornei mac = kzalloc(sizeof(struct dpaa2_mac), GFP_KERNEL); 405271947923SIoana Ciornei if (!mac) 405371947923SIoana Ciornei return -ENOMEM; 405471947923SIoana Ciornei 405571947923SIoana Ciornei 
mac->mc_dev = dpmac_dev; 405671947923SIoana Ciornei mac->mc_io = priv->mc_io; 405771947923SIoana Ciornei mac->net_dev = priv->net_dev; 405871947923SIoana Ciornei 4059*095dca16SIoana Ciornei err = dpaa2_mac_open(mac); 4060*095dca16SIoana Ciornei if (err) 4061*095dca16SIoana Ciornei goto err_free_mac; 4062*095dca16SIoana Ciornei 406371947923SIoana Ciornei err = dpaa2_mac_connect(mac); 406471947923SIoana Ciornei if (err) { 406571947923SIoana Ciornei netdev_err(priv->net_dev, "Error connecting to the MAC endpoint\n"); 4066*095dca16SIoana Ciornei goto err_close_mac; 406771947923SIoana Ciornei } 406871947923SIoana Ciornei priv->mac = mac; 406971947923SIoana Ciornei 407071947923SIoana Ciornei return 0; 4071*095dca16SIoana Ciornei 4072*095dca16SIoana Ciornei err_close_mac: 4073*095dca16SIoana Ciornei dpaa2_mac_close(mac); 4074*095dca16SIoana Ciornei err_free_mac: 4075*095dca16SIoana Ciornei kfree(mac); 4076*095dca16SIoana Ciornei return err; 407771947923SIoana Ciornei } 407871947923SIoana Ciornei 407971947923SIoana Ciornei static void dpaa2_eth_disconnect_mac(struct dpaa2_eth_priv *priv) 408071947923SIoana Ciornei { 408171947923SIoana Ciornei if (!priv->mac) 408271947923SIoana Ciornei return; 408371947923SIoana Ciornei 408471947923SIoana Ciornei dpaa2_mac_disconnect(priv->mac); 4085*095dca16SIoana Ciornei dpaa2_mac_close(priv->mac); 408671947923SIoana Ciornei kfree(priv->mac); 408771947923SIoana Ciornei priv->mac = NULL; 408871947923SIoana Ciornei } 408971947923SIoana Ciornei 409034ff6846SIoana Radulescu static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg) 409134ff6846SIoana Radulescu { 409234ff6846SIoana Radulescu u32 status = ~0; 409334ff6846SIoana Radulescu struct device *dev = (struct device *)arg; 409434ff6846SIoana Radulescu struct fsl_mc_device *dpni_dev = to_fsl_mc_device(dev); 409534ff6846SIoana Radulescu struct net_device *net_dev = dev_get_drvdata(dev); 409671947923SIoana Ciornei struct dpaa2_eth_priv *priv = netdev_priv(net_dev); 
409734ff6846SIoana Radulescu int err; 409834ff6846SIoana Radulescu 409934ff6846SIoana Radulescu err = dpni_get_irq_status(dpni_dev->mc_io, 0, dpni_dev->mc_handle, 410034ff6846SIoana Radulescu DPNI_IRQ_INDEX, &status); 410134ff6846SIoana Radulescu if (unlikely(err)) { 410234ff6846SIoana Radulescu netdev_err(net_dev, "Can't get irq status (err %d)\n", err); 410334ff6846SIoana Radulescu return IRQ_HANDLED; 410434ff6846SIoana Radulescu } 410534ff6846SIoana Radulescu 410634ff6846SIoana Radulescu if (status & DPNI_IRQ_EVENT_LINK_CHANGED) 41075d8dccf8SIoana Ciornei dpaa2_eth_link_state_update(netdev_priv(net_dev)); 410834ff6846SIoana Radulescu 4109f5c3fffaSIoana Ciornei if (status & DPNI_IRQ_EVENT_ENDPOINT_CHANGED) { 41105d8dccf8SIoana Ciornei dpaa2_eth_set_mac_addr(netdev_priv(net_dev)); 41115d8dccf8SIoana Ciornei dpaa2_eth_update_tx_fqids(priv); 411271947923SIoana Ciornei 411371947923SIoana Ciornei rtnl_lock(); 411471947923SIoana Ciornei if (priv->mac) 411571947923SIoana Ciornei dpaa2_eth_disconnect_mac(priv); 411671947923SIoana Ciornei else 411771947923SIoana Ciornei dpaa2_eth_connect_mac(priv); 411871947923SIoana Ciornei rtnl_unlock(); 4119f5c3fffaSIoana Ciornei } 41208398b375SFlorin Chiculita 412134ff6846SIoana Radulescu return IRQ_HANDLED; 412234ff6846SIoana Radulescu } 412334ff6846SIoana Radulescu 41245d8dccf8SIoana Ciornei static int dpaa2_eth_setup_irqs(struct fsl_mc_device *ls_dev) 412534ff6846SIoana Radulescu { 412634ff6846SIoana Radulescu int err = 0; 412734ff6846SIoana Radulescu struct fsl_mc_device_irq *irq; 412834ff6846SIoana Radulescu 412934ff6846SIoana Radulescu err = fsl_mc_allocate_irqs(ls_dev); 413034ff6846SIoana Radulescu if (err) { 413134ff6846SIoana Radulescu dev_err(&ls_dev->dev, "MC irqs allocation failed\n"); 413234ff6846SIoana Radulescu return err; 413334ff6846SIoana Radulescu } 413434ff6846SIoana Radulescu 413534ff6846SIoana Radulescu irq = ls_dev->irqs[0]; 413634ff6846SIoana Radulescu err = devm_request_threaded_irq(&ls_dev->dev, 
irq->msi_desc->irq, 413734ff6846SIoana Radulescu NULL, dpni_irq0_handler_thread, 413834ff6846SIoana Radulescu IRQF_NO_SUSPEND | IRQF_ONESHOT, 413934ff6846SIoana Radulescu dev_name(&ls_dev->dev), &ls_dev->dev); 414034ff6846SIoana Radulescu if (err < 0) { 414134ff6846SIoana Radulescu dev_err(&ls_dev->dev, "devm_request_threaded_irq(): %d\n", err); 414234ff6846SIoana Radulescu goto free_mc_irq; 414334ff6846SIoana Radulescu } 414434ff6846SIoana Radulescu 414534ff6846SIoana Radulescu err = dpni_set_irq_mask(ls_dev->mc_io, 0, ls_dev->mc_handle, 41468398b375SFlorin Chiculita DPNI_IRQ_INDEX, DPNI_IRQ_EVENT_LINK_CHANGED | 41478398b375SFlorin Chiculita DPNI_IRQ_EVENT_ENDPOINT_CHANGED); 414834ff6846SIoana Radulescu if (err < 0) { 414934ff6846SIoana Radulescu dev_err(&ls_dev->dev, "dpni_set_irq_mask(): %d\n", err); 415034ff6846SIoana Radulescu goto free_irq; 415134ff6846SIoana Radulescu } 415234ff6846SIoana Radulescu 415334ff6846SIoana Radulescu err = dpni_set_irq_enable(ls_dev->mc_io, 0, ls_dev->mc_handle, 415434ff6846SIoana Radulescu DPNI_IRQ_INDEX, 1); 415534ff6846SIoana Radulescu if (err < 0) { 415634ff6846SIoana Radulescu dev_err(&ls_dev->dev, "dpni_set_irq_enable(): %d\n", err); 415734ff6846SIoana Radulescu goto free_irq; 415834ff6846SIoana Radulescu } 415934ff6846SIoana Radulescu 416034ff6846SIoana Radulescu return 0; 416134ff6846SIoana Radulescu 416234ff6846SIoana Radulescu free_irq: 416334ff6846SIoana Radulescu devm_free_irq(&ls_dev->dev, irq->msi_desc->irq, &ls_dev->dev); 416434ff6846SIoana Radulescu free_mc_irq: 416534ff6846SIoana Radulescu fsl_mc_free_irqs(ls_dev); 416634ff6846SIoana Radulescu 416734ff6846SIoana Radulescu return err; 416834ff6846SIoana Radulescu } 416934ff6846SIoana Radulescu 41705d8dccf8SIoana Ciornei static void dpaa2_eth_add_ch_napi(struct dpaa2_eth_priv *priv) 417134ff6846SIoana Radulescu { 417234ff6846SIoana Radulescu int i; 417334ff6846SIoana Radulescu struct dpaa2_eth_channel *ch; 417434ff6846SIoana Radulescu 417534ff6846SIoana Radulescu for 
(i = 0; i < priv->num_channels; i++) { 417634ff6846SIoana Radulescu ch = priv->channel[i]; 417734ff6846SIoana Radulescu /* NAPI weight *MUST* be a multiple of DPAA2_ETH_STORE_SIZE */ 417834ff6846SIoana Radulescu netif_napi_add(priv->net_dev, &ch->napi, dpaa2_eth_poll, 417934ff6846SIoana Radulescu NAPI_POLL_WEIGHT); 418034ff6846SIoana Radulescu } 418134ff6846SIoana Radulescu } 418234ff6846SIoana Radulescu 41835d8dccf8SIoana Ciornei static void dpaa2_eth_del_ch_napi(struct dpaa2_eth_priv *priv) 418434ff6846SIoana Radulescu { 418534ff6846SIoana Radulescu int i; 418634ff6846SIoana Radulescu struct dpaa2_eth_channel *ch; 418734ff6846SIoana Radulescu 418834ff6846SIoana Radulescu for (i = 0; i < priv->num_channels; i++) { 418934ff6846SIoana Radulescu ch = priv->channel[i]; 419034ff6846SIoana Radulescu netif_napi_del(&ch->napi); 419134ff6846SIoana Radulescu } 419234ff6846SIoana Radulescu } 419334ff6846SIoana Radulescu 419434ff6846SIoana Radulescu static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev) 419534ff6846SIoana Radulescu { 419634ff6846SIoana Radulescu struct device *dev; 419734ff6846SIoana Radulescu struct net_device *net_dev = NULL; 419834ff6846SIoana Radulescu struct dpaa2_eth_priv *priv = NULL; 419934ff6846SIoana Radulescu int err = 0; 420034ff6846SIoana Radulescu 420134ff6846SIoana Radulescu dev = &dpni_dev->dev; 420234ff6846SIoana Radulescu 420334ff6846SIoana Radulescu /* Net device */ 4204ab1e6de2SIoana Radulescu net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA2_ETH_MAX_NETDEV_QUEUES); 420534ff6846SIoana Radulescu if (!net_dev) { 420634ff6846SIoana Radulescu dev_err(dev, "alloc_etherdev_mq() failed\n"); 420734ff6846SIoana Radulescu return -ENOMEM; 420834ff6846SIoana Radulescu } 420934ff6846SIoana Radulescu 421034ff6846SIoana Radulescu SET_NETDEV_DEV(net_dev, dev); 421134ff6846SIoana Radulescu dev_set_drvdata(dev, net_dev); 421234ff6846SIoana Radulescu 421334ff6846SIoana Radulescu priv = netdev_priv(net_dev); 421434ff6846SIoana Radulescu priv->net_dev = 
net_dev; 421534ff6846SIoana Radulescu 421634ff6846SIoana Radulescu priv->iommu_domain = iommu_get_domain_for_dev(dev); 421734ff6846SIoana Radulescu 42181cf773bdSYangbo Lu priv->tx_tstamp_type = HWTSTAMP_TX_OFF; 42191cf773bdSYangbo Lu priv->rx_tstamp = false; 42201cf773bdSYangbo Lu 4221c5521189SYangbo Lu priv->dpaa2_ptp_wq = alloc_workqueue("dpaa2_ptp_wq", 0, 0); 4222c5521189SYangbo Lu if (!priv->dpaa2_ptp_wq) { 4223c5521189SYangbo Lu err = -ENOMEM; 4224c5521189SYangbo Lu goto err_wq_alloc; 4225c5521189SYangbo Lu } 4226c5521189SYangbo Lu 4227c5521189SYangbo Lu INIT_WORK(&priv->tx_onestep_tstamp, dpaa2_eth_tx_onestep_tstamp); 4228c5521189SYangbo Lu 4229c5521189SYangbo Lu skb_queue_head_init(&priv->tx_skbs); 4230c5521189SYangbo Lu 423134ff6846SIoana Radulescu /* Obtain a MC portal */ 423234ff6846SIoana Radulescu err = fsl_mc_portal_allocate(dpni_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL, 423334ff6846SIoana Radulescu &priv->mc_io); 423434ff6846SIoana Radulescu if (err) { 423534ff6846SIoana Radulescu if (err == -ENXIO) 423634ff6846SIoana Radulescu err = -EPROBE_DEFER; 423734ff6846SIoana Radulescu else 423834ff6846SIoana Radulescu dev_err(dev, "MC portal allocation failed\n"); 423934ff6846SIoana Radulescu goto err_portal_alloc; 424034ff6846SIoana Radulescu } 424134ff6846SIoana Radulescu 424234ff6846SIoana Radulescu /* MC objects initialization and configuration */ 42435d8dccf8SIoana Ciornei err = dpaa2_eth_setup_dpni(dpni_dev); 424434ff6846SIoana Radulescu if (err) 424534ff6846SIoana Radulescu goto err_dpni_setup; 424634ff6846SIoana Radulescu 42475d8dccf8SIoana Ciornei err = dpaa2_eth_setup_dpio(priv); 424834ff6846SIoana Radulescu if (err) 424934ff6846SIoana Radulescu goto err_dpio_setup; 425034ff6846SIoana Radulescu 42515d8dccf8SIoana Ciornei dpaa2_eth_setup_fqs(priv); 425234ff6846SIoana Radulescu 42535d8dccf8SIoana Ciornei err = dpaa2_eth_setup_dpbp(priv); 425434ff6846SIoana Radulescu if (err) 425534ff6846SIoana Radulescu goto err_dpbp_setup; 425634ff6846SIoana Radulescu 
42575d8dccf8SIoana Ciornei err = dpaa2_eth_bind_dpni(priv); 425834ff6846SIoana Radulescu if (err) 425934ff6846SIoana Radulescu goto err_bind; 426034ff6846SIoana Radulescu 426134ff6846SIoana Radulescu /* Add a NAPI context for each channel */ 42625d8dccf8SIoana Ciornei dpaa2_eth_add_ch_napi(priv); 426334ff6846SIoana Radulescu 426434ff6846SIoana Radulescu /* Percpu statistics */ 426534ff6846SIoana Radulescu priv->percpu_stats = alloc_percpu(*priv->percpu_stats); 426634ff6846SIoana Radulescu if (!priv->percpu_stats) { 426734ff6846SIoana Radulescu dev_err(dev, "alloc_percpu(percpu_stats) failed\n"); 426834ff6846SIoana Radulescu err = -ENOMEM; 426934ff6846SIoana Radulescu goto err_alloc_percpu_stats; 427034ff6846SIoana Radulescu } 427134ff6846SIoana Radulescu priv->percpu_extras = alloc_percpu(*priv->percpu_extras); 427234ff6846SIoana Radulescu if (!priv->percpu_extras) { 427334ff6846SIoana Radulescu dev_err(dev, "alloc_percpu(percpu_extras) failed\n"); 427434ff6846SIoana Radulescu err = -ENOMEM; 427534ff6846SIoana Radulescu goto err_alloc_percpu_extras; 427634ff6846SIoana Radulescu } 427734ff6846SIoana Radulescu 4278d70446eeSIoana Ciornei priv->sgt_cache = alloc_percpu(*priv->sgt_cache); 4279d70446eeSIoana Ciornei if (!priv->sgt_cache) { 4280d70446eeSIoana Ciornei dev_err(dev, "alloc_percpu(sgt_cache) failed\n"); 4281d70446eeSIoana Ciornei err = -ENOMEM; 4282d70446eeSIoana Ciornei goto err_alloc_sgt_cache; 4283d70446eeSIoana Ciornei } 4284d70446eeSIoana Ciornei 42855d8dccf8SIoana Ciornei err = dpaa2_eth_netdev_init(net_dev); 428634ff6846SIoana Radulescu if (err) 428734ff6846SIoana Radulescu goto err_netdev_init; 428834ff6846SIoana Radulescu 428934ff6846SIoana Radulescu /* Configure checksum offload based on current interface flags */ 42905d8dccf8SIoana Ciornei err = dpaa2_eth_set_rx_csum(priv, !!(net_dev->features & NETIF_F_RXCSUM)); 429134ff6846SIoana Radulescu if (err) 429234ff6846SIoana Radulescu goto err_csum; 429334ff6846SIoana Radulescu 42945d8dccf8SIoana Ciornei 
err = dpaa2_eth_set_tx_csum(priv, 42955d8dccf8SIoana Ciornei !!(net_dev->features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))); 429634ff6846SIoana Radulescu if (err) 429734ff6846SIoana Radulescu goto err_csum; 429834ff6846SIoana Radulescu 42995d8dccf8SIoana Ciornei err = dpaa2_eth_alloc_rings(priv); 430034ff6846SIoana Radulescu if (err) 430134ff6846SIoana Radulescu goto err_alloc_rings; 430234ff6846SIoana Radulescu 4303f395b69fSIoana Ciornei #ifdef CONFIG_FSL_DPAA2_ETH_DCB 4304f395b69fSIoana Ciornei if (dpaa2_eth_has_pause_support(priv) && priv->vlan_cls_enabled) { 4305f395b69fSIoana Ciornei priv->dcbx_mode = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE; 4306f395b69fSIoana Ciornei net_dev->dcbnl_ops = &dpaa2_eth_dcbnl_ops; 4307f395b69fSIoana Ciornei } else { 4308f395b69fSIoana Ciornei dev_dbg(dev, "PFC not supported\n"); 4309f395b69fSIoana Ciornei } 4310f395b69fSIoana Ciornei #endif 4311f395b69fSIoana Ciornei 43125d8dccf8SIoana Ciornei err = dpaa2_eth_setup_irqs(dpni_dev); 431334ff6846SIoana Radulescu if (err) { 431434ff6846SIoana Radulescu netdev_warn(net_dev, "Failed to set link interrupt, fall back to polling\n"); 43155d8dccf8SIoana Ciornei priv->poll_thread = kthread_run(dpaa2_eth_poll_link_state, priv, 431634ff6846SIoana Radulescu "%s_poll_link", net_dev->name); 431734ff6846SIoana Radulescu if (IS_ERR(priv->poll_thread)) { 431834ff6846SIoana Radulescu dev_err(dev, "Error starting polling thread\n"); 431934ff6846SIoana Radulescu goto err_poll_thread; 432034ff6846SIoana Radulescu } 432134ff6846SIoana Radulescu priv->do_link_poll = true; 432234ff6846SIoana Radulescu } 432334ff6846SIoana Radulescu 432471947923SIoana Ciornei err = dpaa2_eth_connect_mac(priv); 432571947923SIoana Ciornei if (err) 432671947923SIoana Ciornei goto err_connect_mac; 432771947923SIoana Ciornei 4328ceeb03adSIoana Ciornei err = dpaa2_eth_dl_register(priv); 4329ceeb03adSIoana Ciornei if (err) 4330ceeb03adSIoana Ciornei goto err_dl_register; 4331ceeb03adSIoana Ciornei 4332061d631fSIoana Ciornei err 
= dpaa2_eth_dl_traps_register(priv); 4333061d631fSIoana Ciornei if (err) 4334061d631fSIoana Ciornei goto err_dl_trap_register; 4335061d631fSIoana Ciornei 4336ceeb03adSIoana Ciornei err = dpaa2_eth_dl_port_add(priv); 4337ceeb03adSIoana Ciornei if (err) 4338ceeb03adSIoana Ciornei goto err_dl_port_add; 4339ceeb03adSIoana Ciornei 434034ff6846SIoana Radulescu err = register_netdev(net_dev); 434134ff6846SIoana Radulescu if (err < 0) { 434234ff6846SIoana Radulescu dev_err(dev, "register_netdev() failed\n"); 434334ff6846SIoana Radulescu goto err_netdev_reg; 434434ff6846SIoana Radulescu } 434534ff6846SIoana Radulescu 4346091a19eaSIoana Radulescu #ifdef CONFIG_DEBUG_FS 4347091a19eaSIoana Radulescu dpaa2_dbg_add(priv); 4348091a19eaSIoana Radulescu #endif 4349091a19eaSIoana Radulescu 435034ff6846SIoana Radulescu dev_info(dev, "Probed interface %s\n", net_dev->name); 435134ff6846SIoana Radulescu return 0; 435234ff6846SIoana Radulescu 435334ff6846SIoana Radulescu err_netdev_reg: 4354ceeb03adSIoana Ciornei dpaa2_eth_dl_port_del(priv); 4355ceeb03adSIoana Ciornei err_dl_port_add: 4356061d631fSIoana Ciornei dpaa2_eth_dl_traps_unregister(priv); 4357061d631fSIoana Ciornei err_dl_trap_register: 4358ceeb03adSIoana Ciornei dpaa2_eth_dl_unregister(priv); 4359ceeb03adSIoana Ciornei err_dl_register: 436071947923SIoana Ciornei dpaa2_eth_disconnect_mac(priv); 436171947923SIoana Ciornei err_connect_mac: 436234ff6846SIoana Radulescu if (priv->do_link_poll) 436334ff6846SIoana Radulescu kthread_stop(priv->poll_thread); 436434ff6846SIoana Radulescu else 436534ff6846SIoana Radulescu fsl_mc_free_irqs(dpni_dev); 436634ff6846SIoana Radulescu err_poll_thread: 43675d8dccf8SIoana Ciornei dpaa2_eth_free_rings(priv); 436834ff6846SIoana Radulescu err_alloc_rings: 436934ff6846SIoana Radulescu err_csum: 437034ff6846SIoana Radulescu err_netdev_init: 4371d70446eeSIoana Ciornei free_percpu(priv->sgt_cache); 4372d70446eeSIoana Ciornei err_alloc_sgt_cache: 437334ff6846SIoana Radulescu 
free_percpu(priv->percpu_extras); 437434ff6846SIoana Radulescu err_alloc_percpu_extras: 437534ff6846SIoana Radulescu free_percpu(priv->percpu_stats); 437634ff6846SIoana Radulescu err_alloc_percpu_stats: 43775d8dccf8SIoana Ciornei dpaa2_eth_del_ch_napi(priv); 437834ff6846SIoana Radulescu err_bind: 43795d8dccf8SIoana Ciornei dpaa2_eth_free_dpbp(priv); 438034ff6846SIoana Radulescu err_dpbp_setup: 43815d8dccf8SIoana Ciornei dpaa2_eth_free_dpio(priv); 438234ff6846SIoana Radulescu err_dpio_setup: 43835d8dccf8SIoana Ciornei dpaa2_eth_free_dpni(priv); 438434ff6846SIoana Radulescu err_dpni_setup: 438534ff6846SIoana Radulescu fsl_mc_portal_free(priv->mc_io); 438634ff6846SIoana Radulescu err_portal_alloc: 4387c5521189SYangbo Lu destroy_workqueue(priv->dpaa2_ptp_wq); 4388c5521189SYangbo Lu err_wq_alloc: 438934ff6846SIoana Radulescu dev_set_drvdata(dev, NULL); 439034ff6846SIoana Radulescu free_netdev(net_dev); 439134ff6846SIoana Radulescu 439234ff6846SIoana Radulescu return err; 439334ff6846SIoana Radulescu } 439434ff6846SIoana Radulescu 439534ff6846SIoana Radulescu static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev) 439634ff6846SIoana Radulescu { 439734ff6846SIoana Radulescu struct device *dev; 439834ff6846SIoana Radulescu struct net_device *net_dev; 439934ff6846SIoana Radulescu struct dpaa2_eth_priv *priv; 440034ff6846SIoana Radulescu 440134ff6846SIoana Radulescu dev = &ls_dev->dev; 440234ff6846SIoana Radulescu net_dev = dev_get_drvdata(dev); 440334ff6846SIoana Radulescu priv = netdev_priv(net_dev); 440434ff6846SIoana Radulescu 4405091a19eaSIoana Radulescu #ifdef CONFIG_DEBUG_FS 4406091a19eaSIoana Radulescu dpaa2_dbg_remove(priv); 4407091a19eaSIoana Radulescu #endif 440871947923SIoana Ciornei rtnl_lock(); 440971947923SIoana Ciornei dpaa2_eth_disconnect_mac(priv); 441071947923SIoana Ciornei rtnl_unlock(); 441171947923SIoana Ciornei 441234ff6846SIoana Radulescu unregister_netdev(net_dev); 441334ff6846SIoana Radulescu 4414ceeb03adSIoana Ciornei dpaa2_eth_dl_port_del(priv); 
4415061d631fSIoana Ciornei dpaa2_eth_dl_traps_unregister(priv); 4416ceeb03adSIoana Ciornei dpaa2_eth_dl_unregister(priv); 4417ceeb03adSIoana Ciornei 441834ff6846SIoana Radulescu if (priv->do_link_poll) 441934ff6846SIoana Radulescu kthread_stop(priv->poll_thread); 442034ff6846SIoana Radulescu else 442134ff6846SIoana Radulescu fsl_mc_free_irqs(ls_dev); 442234ff6846SIoana Radulescu 44235d8dccf8SIoana Ciornei dpaa2_eth_free_rings(priv); 4424d70446eeSIoana Ciornei free_percpu(priv->sgt_cache); 442534ff6846SIoana Radulescu free_percpu(priv->percpu_stats); 442634ff6846SIoana Radulescu free_percpu(priv->percpu_extras); 442734ff6846SIoana Radulescu 44285d8dccf8SIoana Ciornei dpaa2_eth_del_ch_napi(priv); 44295d8dccf8SIoana Ciornei dpaa2_eth_free_dpbp(priv); 44305d8dccf8SIoana Ciornei dpaa2_eth_free_dpio(priv); 44315d8dccf8SIoana Ciornei dpaa2_eth_free_dpni(priv); 443234ff6846SIoana Radulescu 443334ff6846SIoana Radulescu fsl_mc_portal_free(priv->mc_io); 443434ff6846SIoana Radulescu 443534ff6846SIoana Radulescu free_netdev(net_dev); 443634ff6846SIoana Radulescu 443734ff6846SIoana Radulescu dev_dbg(net_dev->dev.parent, "Removed interface %s\n", net_dev->name); 443834ff6846SIoana Radulescu 443934ff6846SIoana Radulescu return 0; 444034ff6846SIoana Radulescu } 444134ff6846SIoana Radulescu 444234ff6846SIoana Radulescu static const struct fsl_mc_device_id dpaa2_eth_match_id_table[] = { 444334ff6846SIoana Radulescu { 444434ff6846SIoana Radulescu .vendor = FSL_MC_VENDOR_FREESCALE, 444534ff6846SIoana Radulescu .obj_type = "dpni", 444634ff6846SIoana Radulescu }, 444734ff6846SIoana Radulescu { .vendor = 0x0 } 444834ff6846SIoana Radulescu }; 444934ff6846SIoana Radulescu MODULE_DEVICE_TABLE(fslmc, dpaa2_eth_match_id_table); 445034ff6846SIoana Radulescu 445134ff6846SIoana Radulescu static struct fsl_mc_driver dpaa2_eth_driver = { 445234ff6846SIoana Radulescu .driver = { 445334ff6846SIoana Radulescu .name = KBUILD_MODNAME, 445434ff6846SIoana Radulescu .owner = THIS_MODULE, 445534ff6846SIoana 
Radulescu }, 445634ff6846SIoana Radulescu .probe = dpaa2_eth_probe, 445734ff6846SIoana Radulescu .remove = dpaa2_eth_remove, 445834ff6846SIoana Radulescu .match_id_table = dpaa2_eth_match_id_table 445934ff6846SIoana Radulescu }; 446034ff6846SIoana Radulescu 4461091a19eaSIoana Radulescu static int __init dpaa2_eth_driver_init(void) 4462091a19eaSIoana Radulescu { 4463091a19eaSIoana Radulescu int err; 4464091a19eaSIoana Radulescu 4465091a19eaSIoana Radulescu dpaa2_eth_dbg_init(); 4466091a19eaSIoana Radulescu err = fsl_mc_driver_register(&dpaa2_eth_driver); 4467091a19eaSIoana Radulescu if (err) { 4468091a19eaSIoana Radulescu dpaa2_eth_dbg_exit(); 4469091a19eaSIoana Radulescu return err; 4470091a19eaSIoana Radulescu } 4471091a19eaSIoana Radulescu 4472091a19eaSIoana Radulescu return 0; 4473091a19eaSIoana Radulescu } 4474091a19eaSIoana Radulescu 4475091a19eaSIoana Radulescu static void __exit dpaa2_eth_driver_exit(void) 4476091a19eaSIoana Radulescu { 4477091a19eaSIoana Radulescu dpaa2_eth_dbg_exit(); 4478091a19eaSIoana Radulescu fsl_mc_driver_unregister(&dpaa2_eth_driver); 4479091a19eaSIoana Radulescu } 4480091a19eaSIoana Radulescu 4481091a19eaSIoana Radulescu module_init(dpaa2_eth_driver_init); 4482091a19eaSIoana Radulescu module_exit(dpaa2_eth_driver_exit); 4483