134ff6846SIoana Radulescu // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) 234ff6846SIoana Radulescu /* Copyright 2014-2016 Freescale Semiconductor Inc. 348c0481eSIoana Ciornei * Copyright 2016-2020 NXP 434ff6846SIoana Radulescu */ 534ff6846SIoana Radulescu #include <linux/init.h> 634ff6846SIoana Radulescu #include <linux/module.h> 734ff6846SIoana Radulescu #include <linux/platform_device.h> 834ff6846SIoana Radulescu #include <linux/etherdevice.h> 934ff6846SIoana Radulescu #include <linux/of_net.h> 1034ff6846SIoana Radulescu #include <linux/interrupt.h> 1134ff6846SIoana Radulescu #include <linux/msi.h> 1234ff6846SIoana Radulescu #include <linux/kthread.h> 1334ff6846SIoana Radulescu #include <linux/iommu.h> 1434ff6846SIoana Radulescu #include <linux/net_tstamp.h> 1534ff6846SIoana Radulescu #include <linux/fsl/mc.h> 167e273a8eSIoana Ciocoi Radulescu #include <linux/bpf.h> 177e273a8eSIoana Ciocoi Radulescu #include <linux/bpf_trace.h> 1834ff6846SIoana Radulescu #include <net/sock.h> 1934ff6846SIoana Radulescu 2034ff6846SIoana Radulescu #include "dpaa2-eth.h" 2134ff6846SIoana Radulescu 2234ff6846SIoana Radulescu /* CREATE_TRACE_POINTS only needs to be defined once. Other dpa files 2334ff6846SIoana Radulescu * using trace events only need to #include <trace/events/sched.h> 2434ff6846SIoana Radulescu */ 2534ff6846SIoana Radulescu #define CREATE_TRACE_POINTS 2634ff6846SIoana Radulescu #include "dpaa2-eth-trace.h" 2734ff6846SIoana Radulescu 2834ff6846SIoana Radulescu MODULE_LICENSE("Dual BSD/GPL"); 2934ff6846SIoana Radulescu MODULE_AUTHOR("Freescale Semiconductor, Inc"); 3034ff6846SIoana Radulescu MODULE_DESCRIPTION("Freescale DPAA2 Ethernet Driver"); 3134ff6846SIoana Radulescu 3234ff6846SIoana Radulescu static void *dpaa2_iova_to_virt(struct iommu_domain *domain, 3334ff6846SIoana Radulescu dma_addr_t iova_addr) 3434ff6846SIoana Radulescu { 3534ff6846SIoana Radulescu phys_addr_t phys_addr; 3634ff6846SIoana Radulescu 3734ff6846SIoana Radulescu phys_addr = domain ? 
iommu_iova_to_phys(domain, iova_addr) : iova_addr; 3834ff6846SIoana Radulescu 3934ff6846SIoana Radulescu return phys_to_virt(phys_addr); 4034ff6846SIoana Radulescu } 4134ff6846SIoana Radulescu 4234ff6846SIoana Radulescu static void validate_rx_csum(struct dpaa2_eth_priv *priv, 4334ff6846SIoana Radulescu u32 fd_status, 4434ff6846SIoana Radulescu struct sk_buff *skb) 4534ff6846SIoana Radulescu { 4634ff6846SIoana Radulescu skb_checksum_none_assert(skb); 4734ff6846SIoana Radulescu 4834ff6846SIoana Radulescu /* HW checksum validation is disabled, nothing to do here */ 4934ff6846SIoana Radulescu if (!(priv->net_dev->features & NETIF_F_RXCSUM)) 5034ff6846SIoana Radulescu return; 5134ff6846SIoana Radulescu 5234ff6846SIoana Radulescu /* Read checksum validation bits */ 5334ff6846SIoana Radulescu if (!((fd_status & DPAA2_FAS_L3CV) && 5434ff6846SIoana Radulescu (fd_status & DPAA2_FAS_L4CV))) 5534ff6846SIoana Radulescu return; 5634ff6846SIoana Radulescu 5734ff6846SIoana Radulescu /* Inform the stack there's no need to compute L3/L4 csum anymore */ 5834ff6846SIoana Radulescu skb->ip_summed = CHECKSUM_UNNECESSARY; 5934ff6846SIoana Radulescu } 6034ff6846SIoana Radulescu 6134ff6846SIoana Radulescu /* Free a received FD. 6234ff6846SIoana Radulescu * Not to be used for Tx conf FDs or on any other paths. 
6334ff6846SIoana Radulescu */ 6434ff6846SIoana Radulescu static void free_rx_fd(struct dpaa2_eth_priv *priv, 6534ff6846SIoana Radulescu const struct dpaa2_fd *fd, 6634ff6846SIoana Radulescu void *vaddr) 6734ff6846SIoana Radulescu { 6834ff6846SIoana Radulescu struct device *dev = priv->net_dev->dev.parent; 6934ff6846SIoana Radulescu dma_addr_t addr = dpaa2_fd_get_addr(fd); 7034ff6846SIoana Radulescu u8 fd_format = dpaa2_fd_get_format(fd); 7134ff6846SIoana Radulescu struct dpaa2_sg_entry *sgt; 7234ff6846SIoana Radulescu void *sg_vaddr; 7334ff6846SIoana Radulescu int i; 7434ff6846SIoana Radulescu 7534ff6846SIoana Radulescu /* If single buffer frame, just free the data buffer */ 7634ff6846SIoana Radulescu if (fd_format == dpaa2_fd_single) 7734ff6846SIoana Radulescu goto free_buf; 7834ff6846SIoana Radulescu else if (fd_format != dpaa2_fd_sg) 7934ff6846SIoana Radulescu /* We don't support any other format */ 8034ff6846SIoana Radulescu return; 8134ff6846SIoana Radulescu 8234ff6846SIoana Radulescu /* For S/G frames, we first need to free all SG entries 8334ff6846SIoana Radulescu * except the first one, which was taken care of already 8434ff6846SIoana Radulescu */ 8534ff6846SIoana Radulescu sgt = vaddr + dpaa2_fd_get_offset(fd); 8634ff6846SIoana Radulescu for (i = 1; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) { 8734ff6846SIoana Radulescu addr = dpaa2_sg_get_addr(&sgt[i]); 8834ff6846SIoana Radulescu sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr); 89efa6a7d0SIoana Ciornei dma_unmap_page(dev, addr, priv->rx_buf_size, 9018c2e770SIoana Ciocoi Radulescu DMA_BIDIRECTIONAL); 9134ff6846SIoana Radulescu 9227c87486SIoana Ciocoi Radulescu free_pages((unsigned long)sg_vaddr, 0); 9334ff6846SIoana Radulescu if (dpaa2_sg_is_final(&sgt[i])) 9434ff6846SIoana Radulescu break; 9534ff6846SIoana Radulescu } 9634ff6846SIoana Radulescu 9734ff6846SIoana Radulescu free_buf: 9827c87486SIoana Ciocoi Radulescu free_pages((unsigned long)vaddr, 0); 9934ff6846SIoana Radulescu } 10034ff6846SIoana 
Radulescu 10134ff6846SIoana Radulescu /* Build a linear skb based on a single-buffer frame descriptor */ 102fdb6ca9eSIoana Ciornei static struct sk_buff *build_linear_skb(struct dpaa2_eth_channel *ch, 10334ff6846SIoana Radulescu const struct dpaa2_fd *fd, 10434ff6846SIoana Radulescu void *fd_vaddr) 10534ff6846SIoana Radulescu { 10634ff6846SIoana Radulescu struct sk_buff *skb = NULL; 10734ff6846SIoana Radulescu u16 fd_offset = dpaa2_fd_get_offset(fd); 10834ff6846SIoana Radulescu u32 fd_length = dpaa2_fd_get_len(fd); 10934ff6846SIoana Radulescu 11034ff6846SIoana Radulescu ch->buf_count--; 11134ff6846SIoana Radulescu 11227c87486SIoana Ciocoi Radulescu skb = build_skb(fd_vaddr, DPAA2_ETH_RX_BUF_RAW_SIZE); 11334ff6846SIoana Radulescu if (unlikely(!skb)) 11434ff6846SIoana Radulescu return NULL; 11534ff6846SIoana Radulescu 11634ff6846SIoana Radulescu skb_reserve(skb, fd_offset); 11734ff6846SIoana Radulescu skb_put(skb, fd_length); 11834ff6846SIoana Radulescu 11934ff6846SIoana Radulescu return skb; 12034ff6846SIoana Radulescu } 12134ff6846SIoana Radulescu 12234ff6846SIoana Radulescu /* Build a non linear (fragmented) skb based on a S/G table */ 12334ff6846SIoana Radulescu static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv, 12434ff6846SIoana Radulescu struct dpaa2_eth_channel *ch, 12534ff6846SIoana Radulescu struct dpaa2_sg_entry *sgt) 12634ff6846SIoana Radulescu { 12734ff6846SIoana Radulescu struct sk_buff *skb = NULL; 12834ff6846SIoana Radulescu struct device *dev = priv->net_dev->dev.parent; 12934ff6846SIoana Radulescu void *sg_vaddr; 13034ff6846SIoana Radulescu dma_addr_t sg_addr; 13134ff6846SIoana Radulescu u16 sg_offset; 13234ff6846SIoana Radulescu u32 sg_length; 13334ff6846SIoana Radulescu struct page *page, *head_page; 13434ff6846SIoana Radulescu int page_offset; 13534ff6846SIoana Radulescu int i; 13634ff6846SIoana Radulescu 13734ff6846SIoana Radulescu for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) { 13834ff6846SIoana Radulescu struct dpaa2_sg_entry 
*sge = &sgt[i]; 13934ff6846SIoana Radulescu 14034ff6846SIoana Radulescu /* NOTE: We only support SG entries in dpaa2_sg_single format, 14134ff6846SIoana Radulescu * but this is the only format we may receive from HW anyway 14234ff6846SIoana Radulescu */ 14334ff6846SIoana Radulescu 14434ff6846SIoana Radulescu /* Get the address and length from the S/G entry */ 14534ff6846SIoana Radulescu sg_addr = dpaa2_sg_get_addr(sge); 14634ff6846SIoana Radulescu sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, sg_addr); 147efa6a7d0SIoana Ciornei dma_unmap_page(dev, sg_addr, priv->rx_buf_size, 14818c2e770SIoana Ciocoi Radulescu DMA_BIDIRECTIONAL); 14934ff6846SIoana Radulescu 15034ff6846SIoana Radulescu sg_length = dpaa2_sg_get_len(sge); 15134ff6846SIoana Radulescu 15234ff6846SIoana Radulescu if (i == 0) { 15334ff6846SIoana Radulescu /* We build the skb around the first data buffer */ 15427c87486SIoana Ciocoi Radulescu skb = build_skb(sg_vaddr, DPAA2_ETH_RX_BUF_RAW_SIZE); 15534ff6846SIoana Radulescu if (unlikely(!skb)) { 15634ff6846SIoana Radulescu /* Free the first SG entry now, since we already 15734ff6846SIoana Radulescu * unmapped it and obtained the virtual address 15834ff6846SIoana Radulescu */ 15927c87486SIoana Ciocoi Radulescu free_pages((unsigned long)sg_vaddr, 0); 16034ff6846SIoana Radulescu 16134ff6846SIoana Radulescu /* We still need to subtract the buffers used 16234ff6846SIoana Radulescu * by this FD from our software counter 16334ff6846SIoana Radulescu */ 16434ff6846SIoana Radulescu while (!dpaa2_sg_is_final(&sgt[i]) && 16534ff6846SIoana Radulescu i < DPAA2_ETH_MAX_SG_ENTRIES) 16634ff6846SIoana Radulescu i++; 16734ff6846SIoana Radulescu break; 16834ff6846SIoana Radulescu } 16934ff6846SIoana Radulescu 17034ff6846SIoana Radulescu sg_offset = dpaa2_sg_get_offset(sge); 17134ff6846SIoana Radulescu skb_reserve(skb, sg_offset); 17234ff6846SIoana Radulescu skb_put(skb, sg_length); 17334ff6846SIoana Radulescu } else { 17434ff6846SIoana Radulescu /* Rest of the data buffers 
are stored as skb frags */ 17534ff6846SIoana Radulescu page = virt_to_page(sg_vaddr); 17634ff6846SIoana Radulescu head_page = virt_to_head_page(sg_vaddr); 17734ff6846SIoana Radulescu 17834ff6846SIoana Radulescu /* Offset in page (which may be compound). 17934ff6846SIoana Radulescu * Data in subsequent SG entries is stored from the 18034ff6846SIoana Radulescu * beginning of the buffer, so we don't need to add the 18134ff6846SIoana Radulescu * sg_offset. 18234ff6846SIoana Radulescu */ 18334ff6846SIoana Radulescu page_offset = ((unsigned long)sg_vaddr & 18434ff6846SIoana Radulescu (PAGE_SIZE - 1)) + 18534ff6846SIoana Radulescu (page_address(page) - page_address(head_page)); 18634ff6846SIoana Radulescu 18734ff6846SIoana Radulescu skb_add_rx_frag(skb, i - 1, head_page, page_offset, 188efa6a7d0SIoana Ciornei sg_length, priv->rx_buf_size); 18934ff6846SIoana Radulescu } 19034ff6846SIoana Radulescu 19134ff6846SIoana Radulescu if (dpaa2_sg_is_final(sge)) 19234ff6846SIoana Radulescu break; 19334ff6846SIoana Radulescu } 19434ff6846SIoana Radulescu 19534ff6846SIoana Radulescu WARN_ONCE(i == DPAA2_ETH_MAX_SG_ENTRIES, "Final bit not set in SGT"); 19634ff6846SIoana Radulescu 19734ff6846SIoana Radulescu /* Count all data buffers + SG table buffer */ 19834ff6846SIoana Radulescu ch->buf_count -= i + 2; 19934ff6846SIoana Radulescu 20034ff6846SIoana Radulescu return skb; 20134ff6846SIoana Radulescu } 20234ff6846SIoana Radulescu 203569375fbSIoana Ciocoi Radulescu /* Free buffers acquired from the buffer pool or which were meant to 204569375fbSIoana Ciocoi Radulescu * be released in the pool 205569375fbSIoana Ciocoi Radulescu */ 206569375fbSIoana Ciocoi Radulescu static void free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array, int count) 207569375fbSIoana Ciocoi Radulescu { 208569375fbSIoana Ciocoi Radulescu struct device *dev = priv->net_dev->dev.parent; 209569375fbSIoana Ciocoi Radulescu void *vaddr; 210569375fbSIoana Ciocoi Radulescu int i; 211569375fbSIoana Ciocoi Radulescu 
212569375fbSIoana Ciocoi Radulescu for (i = 0; i < count; i++) { 213569375fbSIoana Ciocoi Radulescu vaddr = dpaa2_iova_to_virt(priv->iommu_domain, buf_array[i]); 214efa6a7d0SIoana Ciornei dma_unmap_page(dev, buf_array[i], priv->rx_buf_size, 21518c2e770SIoana Ciocoi Radulescu DMA_BIDIRECTIONAL); 21627c87486SIoana Ciocoi Radulescu free_pages((unsigned long)vaddr, 0); 217569375fbSIoana Ciocoi Radulescu } 218569375fbSIoana Ciocoi Radulescu } 219569375fbSIoana Ciocoi Radulescu 2205d39dc21SIoana Ciocoi Radulescu static void xdp_release_buf(struct dpaa2_eth_priv *priv, 2215d39dc21SIoana Ciocoi Radulescu struct dpaa2_eth_channel *ch, 2225d39dc21SIoana Ciocoi Radulescu dma_addr_t addr) 2235d39dc21SIoana Ciocoi Radulescu { 224ef17bd7cSIoana Radulescu int retries = 0; 2255d39dc21SIoana Ciocoi Radulescu int err; 2265d39dc21SIoana Ciocoi Radulescu 2275d39dc21SIoana Ciocoi Radulescu ch->xdp.drop_bufs[ch->xdp.drop_cnt++] = addr; 2285d39dc21SIoana Ciocoi Radulescu if (ch->xdp.drop_cnt < DPAA2_ETH_BUFS_PER_CMD) 2295d39dc21SIoana Ciocoi Radulescu return; 2305d39dc21SIoana Ciocoi Radulescu 2315d39dc21SIoana Ciocoi Radulescu while ((err = dpaa2_io_service_release(ch->dpio, priv->bpid, 2325d39dc21SIoana Ciocoi Radulescu ch->xdp.drop_bufs, 233ef17bd7cSIoana Radulescu ch->xdp.drop_cnt)) == -EBUSY) { 234ef17bd7cSIoana Radulescu if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES) 235ef17bd7cSIoana Radulescu break; 2365d39dc21SIoana Ciocoi Radulescu cpu_relax(); 237ef17bd7cSIoana Radulescu } 2385d39dc21SIoana Ciocoi Radulescu 2395d39dc21SIoana Ciocoi Radulescu if (err) { 2405d39dc21SIoana Ciocoi Radulescu free_bufs(priv, ch->xdp.drop_bufs, ch->xdp.drop_cnt); 2415d39dc21SIoana Ciocoi Radulescu ch->buf_count -= ch->xdp.drop_cnt; 2425d39dc21SIoana Ciocoi Radulescu } 2435d39dc21SIoana Ciocoi Radulescu 2445d39dc21SIoana Ciocoi Radulescu ch->xdp.drop_cnt = 0; 2455d39dc21SIoana Ciocoi Radulescu } 2465d39dc21SIoana Ciocoi Radulescu 24738c440b2SIoana Ciornei static int dpaa2_eth_xdp_flush(struct 
dpaa2_eth_priv *priv, 24838c440b2SIoana Ciornei struct dpaa2_eth_fq *fq, 24938c440b2SIoana Ciornei struct dpaa2_eth_xdp_fds *xdp_fds) 25038c440b2SIoana Ciornei { 25138c440b2SIoana Ciornei int total_enqueued = 0, retries = 0, enqueued; 25238c440b2SIoana Ciornei struct dpaa2_eth_drv_stats *percpu_extras; 25338c440b2SIoana Ciornei int num_fds, err, max_retries; 25438c440b2SIoana Ciornei struct dpaa2_fd *fds; 25538c440b2SIoana Ciornei 25638c440b2SIoana Ciornei percpu_extras = this_cpu_ptr(priv->percpu_extras); 25738c440b2SIoana Ciornei 25838c440b2SIoana Ciornei /* try to enqueue all the FDs until the max number of retries is hit */ 25938c440b2SIoana Ciornei fds = xdp_fds->fds; 26038c440b2SIoana Ciornei num_fds = xdp_fds->num; 26138c440b2SIoana Ciornei max_retries = num_fds * DPAA2_ETH_ENQUEUE_RETRIES; 26238c440b2SIoana Ciornei while (total_enqueued < num_fds && retries < max_retries) { 26338c440b2SIoana Ciornei err = priv->enqueue(priv, fq, &fds[total_enqueued], 26438c440b2SIoana Ciornei 0, num_fds - total_enqueued, &enqueued); 26538c440b2SIoana Ciornei if (err == -EBUSY) { 26638c440b2SIoana Ciornei percpu_extras->tx_portal_busy += ++retries; 26738c440b2SIoana Ciornei continue; 26838c440b2SIoana Ciornei } 26938c440b2SIoana Ciornei total_enqueued += enqueued; 27038c440b2SIoana Ciornei } 27138c440b2SIoana Ciornei xdp_fds->num = 0; 27238c440b2SIoana Ciornei 27338c440b2SIoana Ciornei return total_enqueued; 27438c440b2SIoana Ciornei } 27538c440b2SIoana Ciornei 27674a1c059SIoana Ciornei static void xdp_tx_flush(struct dpaa2_eth_priv *priv, 27774a1c059SIoana Ciornei struct dpaa2_eth_channel *ch, 27874a1c059SIoana Ciornei struct dpaa2_eth_fq *fq) 27974a1c059SIoana Ciornei { 28074a1c059SIoana Ciornei struct rtnl_link_stats64 *percpu_stats; 28174a1c059SIoana Ciornei struct dpaa2_fd *fds; 28274a1c059SIoana Ciornei int enqueued, i; 28374a1c059SIoana Ciornei 28474a1c059SIoana Ciornei percpu_stats = this_cpu_ptr(priv->percpu_stats); 28574a1c059SIoana Ciornei 28674a1c059SIoana 
Ciornei // enqueue the array of XDP_TX frames 28774a1c059SIoana Ciornei enqueued = dpaa2_eth_xdp_flush(priv, fq, &fq->xdp_tx_fds); 28874a1c059SIoana Ciornei 28974a1c059SIoana Ciornei /* update statistics */ 29074a1c059SIoana Ciornei percpu_stats->tx_packets += enqueued; 29174a1c059SIoana Ciornei fds = fq->xdp_tx_fds.fds; 29274a1c059SIoana Ciornei for (i = 0; i < enqueued; i++) { 29374a1c059SIoana Ciornei percpu_stats->tx_bytes += dpaa2_fd_get_len(&fds[i]); 29474a1c059SIoana Ciornei ch->stats.xdp_tx++; 29574a1c059SIoana Ciornei } 29674a1c059SIoana Ciornei for (i = enqueued; i < fq->xdp_tx_fds.num; i++) { 29774a1c059SIoana Ciornei xdp_release_buf(priv, ch, dpaa2_fd_get_addr(&fds[i])); 29874a1c059SIoana Ciornei percpu_stats->tx_errors++; 29974a1c059SIoana Ciornei ch->stats.xdp_tx_err++; 30074a1c059SIoana Ciornei } 30174a1c059SIoana Ciornei fq->xdp_tx_fds.num = 0; 30274a1c059SIoana Ciornei } 30374a1c059SIoana Ciornei 30474a1c059SIoana Ciornei static void xdp_enqueue(struct dpaa2_eth_priv *priv, 30574a1c059SIoana Ciornei struct dpaa2_eth_channel *ch, 30674a1c059SIoana Ciornei struct dpaa2_fd *fd, 30799e43521SIoana Ciocoi Radulescu void *buf_start, u16 queue_id) 30899e43521SIoana Ciocoi Radulescu { 30999e43521SIoana Ciocoi Radulescu struct dpaa2_faead *faead; 31074a1c059SIoana Ciornei struct dpaa2_fd *dest_fd; 31174a1c059SIoana Ciornei struct dpaa2_eth_fq *fq; 31299e43521SIoana Ciocoi Radulescu u32 ctrl, frc; 31399e43521SIoana Ciocoi Radulescu 31499e43521SIoana Ciocoi Radulescu /* Mark the egress frame hardware annotation area as valid */ 31599e43521SIoana Ciocoi Radulescu frc = dpaa2_fd_get_frc(fd); 31699e43521SIoana Ciocoi Radulescu dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV); 31799e43521SIoana Ciocoi Radulescu dpaa2_fd_set_ctrl(fd, DPAA2_FD_CTRL_ASAL); 31899e43521SIoana Ciocoi Radulescu 31999e43521SIoana Ciocoi Radulescu /* Instruct hardware to release the FD buffer directly into 32099e43521SIoana Ciocoi Radulescu * the buffer pool once transmission is completed, 
instead of 32199e43521SIoana Ciocoi Radulescu * sending a Tx confirmation frame to us 32299e43521SIoana Ciocoi Radulescu */ 32399e43521SIoana Ciocoi Radulescu ctrl = DPAA2_FAEAD_A4V | DPAA2_FAEAD_A2V | DPAA2_FAEAD_EBDDV; 32499e43521SIoana Ciocoi Radulescu faead = dpaa2_get_faead(buf_start, false); 32599e43521SIoana Ciocoi Radulescu faead->ctrl = cpu_to_le32(ctrl); 32699e43521SIoana Ciocoi Radulescu faead->conf_fqid = 0; 32799e43521SIoana Ciocoi Radulescu 32899e43521SIoana Ciocoi Radulescu fq = &priv->fq[queue_id]; 32974a1c059SIoana Ciornei dest_fd = &fq->xdp_tx_fds.fds[fq->xdp_tx_fds.num++]; 33074a1c059SIoana Ciornei memcpy(dest_fd, fd, sizeof(*dest_fd)); 33199e43521SIoana Ciocoi Radulescu 33274a1c059SIoana Ciornei if (fq->xdp_tx_fds.num < DEV_MAP_BULK_SIZE) 33374a1c059SIoana Ciornei return; 33474a1c059SIoana Ciornei 33574a1c059SIoana Ciornei xdp_tx_flush(priv, ch, fq); 33699e43521SIoana Ciocoi Radulescu } 33799e43521SIoana Ciocoi Radulescu 3387e273a8eSIoana Ciocoi Radulescu static u32 run_xdp(struct dpaa2_eth_priv *priv, 3397e273a8eSIoana Ciocoi Radulescu struct dpaa2_eth_channel *ch, 34099e43521SIoana Ciocoi Radulescu struct dpaa2_eth_fq *rx_fq, 3417e273a8eSIoana Ciocoi Radulescu struct dpaa2_fd *fd, void *vaddr) 3427e273a8eSIoana Ciocoi Radulescu { 3435d39dc21SIoana Ciocoi Radulescu dma_addr_t addr = dpaa2_fd_get_addr(fd); 3447e273a8eSIoana Ciocoi Radulescu struct bpf_prog *xdp_prog; 3457e273a8eSIoana Ciocoi Radulescu struct xdp_buff xdp; 3467e273a8eSIoana Ciocoi Radulescu u32 xdp_act = XDP_PASS; 34799e43521SIoana Ciocoi Radulescu int err; 34899e43521SIoana Ciocoi Radulescu 3497e273a8eSIoana Ciocoi Radulescu rcu_read_lock(); 3507e273a8eSIoana Ciocoi Radulescu 3517e273a8eSIoana Ciocoi Radulescu xdp_prog = READ_ONCE(ch->xdp.prog); 3527e273a8eSIoana Ciocoi Radulescu if (!xdp_prog) 3537e273a8eSIoana Ciocoi Radulescu goto out; 3547e273a8eSIoana Ciocoi Radulescu 3557e273a8eSIoana Ciocoi Radulescu xdp.data = vaddr + dpaa2_fd_get_offset(fd); 3567e273a8eSIoana Ciocoi 
Radulescu xdp.data_end = xdp.data + dpaa2_fd_get_len(fd); 3577b1eea1aSIoana Ciocoi Radulescu xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM; 3587e273a8eSIoana Ciocoi Radulescu xdp_set_data_meta_invalid(&xdp); 359d678be1dSIoana Radulescu xdp.rxq = &ch->xdp_rxq; 3607e273a8eSIoana Ciocoi Radulescu 3614a9b052aSJesper Dangaard Brouer xdp.frame_sz = DPAA2_ETH_RX_BUF_RAW_SIZE - 3624a9b052aSJesper Dangaard Brouer (dpaa2_fd_get_offset(fd) - XDP_PACKET_HEADROOM); 3634a9b052aSJesper Dangaard Brouer 3647e273a8eSIoana Ciocoi Radulescu xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp); 3657e273a8eSIoana Ciocoi Radulescu 3667b1eea1aSIoana Ciocoi Radulescu /* xdp.data pointer may have changed */ 3677b1eea1aSIoana Ciocoi Radulescu dpaa2_fd_set_offset(fd, xdp.data - vaddr); 3687b1eea1aSIoana Ciocoi Radulescu dpaa2_fd_set_len(fd, xdp.data_end - xdp.data); 3697b1eea1aSIoana Ciocoi Radulescu 3707e273a8eSIoana Ciocoi Radulescu switch (xdp_act) { 3717e273a8eSIoana Ciocoi Radulescu case XDP_PASS: 3727e273a8eSIoana Ciocoi Radulescu break; 37399e43521SIoana Ciocoi Radulescu case XDP_TX: 37474a1c059SIoana Ciornei xdp_enqueue(priv, ch, fd, vaddr, rx_fq->flowid); 37599e43521SIoana Ciocoi Radulescu break; 3767e273a8eSIoana Ciocoi Radulescu default: 3777e273a8eSIoana Ciocoi Radulescu bpf_warn_invalid_xdp_action(xdp_act); 378c1cb11bcSIoana Ciocoi Radulescu /* fall through */ 3797e273a8eSIoana Ciocoi Radulescu case XDP_ABORTED: 3807e273a8eSIoana Ciocoi Radulescu trace_xdp_exception(priv->net_dev, xdp_prog, xdp_act); 381c1cb11bcSIoana Ciocoi Radulescu /* fall through */ 3827e273a8eSIoana Ciocoi Radulescu case XDP_DROP: 3835d39dc21SIoana Ciocoi Radulescu xdp_release_buf(priv, ch, addr); 384a4a7b762SIoana Ciocoi Radulescu ch->stats.xdp_drop++; 3857e273a8eSIoana Ciocoi Radulescu break; 386d678be1dSIoana Radulescu case XDP_REDIRECT: 387d678be1dSIoana Radulescu dma_unmap_page(priv->net_dev->dev.parent, addr, 388efa6a7d0SIoana Ciornei priv->rx_buf_size, DMA_BIDIRECTIONAL); 389d678be1dSIoana Radulescu 
ch->buf_count--; 3904a9b052aSJesper Dangaard Brouer 3914a9b052aSJesper Dangaard Brouer /* Allow redirect use of full headroom */ 392d678be1dSIoana Radulescu xdp.data_hard_start = vaddr; 3934a9b052aSJesper Dangaard Brouer xdp.frame_sz = DPAA2_ETH_RX_BUF_RAW_SIZE; 3944a9b052aSJesper Dangaard Brouer 395d678be1dSIoana Radulescu err = xdp_do_redirect(priv->net_dev, &xdp, xdp_prog); 396d678be1dSIoana Radulescu if (unlikely(err)) 397d678be1dSIoana Radulescu ch->stats.xdp_drop++; 398d678be1dSIoana Radulescu else 399d678be1dSIoana Radulescu ch->stats.xdp_redirect++; 400d678be1dSIoana Radulescu break; 4017e273a8eSIoana Ciocoi Radulescu } 4027e273a8eSIoana Ciocoi Radulescu 403d678be1dSIoana Radulescu ch->xdp.res |= xdp_act; 4047e273a8eSIoana Ciocoi Radulescu out: 4057e273a8eSIoana Ciocoi Radulescu rcu_read_unlock(); 4067e273a8eSIoana Ciocoi Radulescu return xdp_act; 4077e273a8eSIoana Ciocoi Radulescu } 4087e273a8eSIoana Ciocoi Radulescu 40934ff6846SIoana Radulescu /* Main Rx frame processing routine */ 41034ff6846SIoana Radulescu static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv, 41134ff6846SIoana Radulescu struct dpaa2_eth_channel *ch, 41234ff6846SIoana Radulescu const struct dpaa2_fd *fd, 413dbcdf728SIoana Ciocoi Radulescu struct dpaa2_eth_fq *fq) 41434ff6846SIoana Radulescu { 41534ff6846SIoana Radulescu dma_addr_t addr = dpaa2_fd_get_addr(fd); 41634ff6846SIoana Radulescu u8 fd_format = dpaa2_fd_get_format(fd); 41734ff6846SIoana Radulescu void *vaddr; 41834ff6846SIoana Radulescu struct sk_buff *skb; 41934ff6846SIoana Radulescu struct rtnl_link_stats64 *percpu_stats; 42034ff6846SIoana Radulescu struct dpaa2_eth_drv_stats *percpu_extras; 42134ff6846SIoana Radulescu struct device *dev = priv->net_dev->dev.parent; 42234ff6846SIoana Radulescu struct dpaa2_fas *fas; 42334ff6846SIoana Radulescu void *buf_data; 42434ff6846SIoana Radulescu u32 status = 0; 4257e273a8eSIoana Ciocoi Radulescu u32 xdp_act; 42634ff6846SIoana Radulescu 42734ff6846SIoana Radulescu /* Tracing point */ 
42834ff6846SIoana Radulescu trace_dpaa2_rx_fd(priv->net_dev, fd); 42934ff6846SIoana Radulescu 43034ff6846SIoana Radulescu vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr); 431efa6a7d0SIoana Ciornei dma_sync_single_for_cpu(dev, addr, priv->rx_buf_size, 43218c2e770SIoana Ciocoi Radulescu DMA_BIDIRECTIONAL); 43334ff6846SIoana Radulescu 43434ff6846SIoana Radulescu fas = dpaa2_get_fas(vaddr, false); 43534ff6846SIoana Radulescu prefetch(fas); 43634ff6846SIoana Radulescu buf_data = vaddr + dpaa2_fd_get_offset(fd); 43734ff6846SIoana Radulescu prefetch(buf_data); 43834ff6846SIoana Radulescu 43934ff6846SIoana Radulescu percpu_stats = this_cpu_ptr(priv->percpu_stats); 44034ff6846SIoana Radulescu percpu_extras = this_cpu_ptr(priv->percpu_extras); 44134ff6846SIoana Radulescu 44234ff6846SIoana Radulescu if (fd_format == dpaa2_fd_single) { 44399e43521SIoana Ciocoi Radulescu xdp_act = run_xdp(priv, ch, fq, (struct dpaa2_fd *)fd, vaddr); 4447e273a8eSIoana Ciocoi Radulescu if (xdp_act != XDP_PASS) { 4457e273a8eSIoana Ciocoi Radulescu percpu_stats->rx_packets++; 4467e273a8eSIoana Ciocoi Radulescu percpu_stats->rx_bytes += dpaa2_fd_get_len(fd); 4477e273a8eSIoana Ciocoi Radulescu return; 4487e273a8eSIoana Ciocoi Radulescu } 4497e273a8eSIoana Ciocoi Radulescu 450efa6a7d0SIoana Ciornei dma_unmap_page(dev, addr, priv->rx_buf_size, 45118c2e770SIoana Ciocoi Radulescu DMA_BIDIRECTIONAL); 452fdb6ca9eSIoana Ciornei skb = build_linear_skb(ch, fd, vaddr); 45334ff6846SIoana Radulescu } else if (fd_format == dpaa2_fd_sg) { 4547e273a8eSIoana Ciocoi Radulescu WARN_ON(priv->xdp_prog); 4557e273a8eSIoana Ciocoi Radulescu 456efa6a7d0SIoana Ciornei dma_unmap_page(dev, addr, priv->rx_buf_size, 45718c2e770SIoana Ciocoi Radulescu DMA_BIDIRECTIONAL); 45834ff6846SIoana Radulescu skb = build_frag_skb(priv, ch, buf_data); 45927c87486SIoana Ciocoi Radulescu free_pages((unsigned long)vaddr, 0); 46034ff6846SIoana Radulescu percpu_extras->rx_sg_frames++; 46134ff6846SIoana Radulescu percpu_extras->rx_sg_bytes += 
dpaa2_fd_get_len(fd); 46234ff6846SIoana Radulescu } else { 46334ff6846SIoana Radulescu /* We don't support any other format */ 46434ff6846SIoana Radulescu goto err_frame_format; 46534ff6846SIoana Radulescu } 46634ff6846SIoana Radulescu 46734ff6846SIoana Radulescu if (unlikely(!skb)) 46834ff6846SIoana Radulescu goto err_build_skb; 46934ff6846SIoana Radulescu 47034ff6846SIoana Radulescu prefetch(skb->data); 47134ff6846SIoana Radulescu 47234ff6846SIoana Radulescu /* Get the timestamp value */ 47334ff6846SIoana Radulescu if (priv->rx_tstamp) { 47434ff6846SIoana Radulescu struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb); 47534ff6846SIoana Radulescu __le64 *ts = dpaa2_get_ts(vaddr, false); 47634ff6846SIoana Radulescu u64 ns; 47734ff6846SIoana Radulescu 47834ff6846SIoana Radulescu memset(shhwtstamps, 0, sizeof(*shhwtstamps)); 47934ff6846SIoana Radulescu 48034ff6846SIoana Radulescu ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts); 48134ff6846SIoana Radulescu shhwtstamps->hwtstamp = ns_to_ktime(ns); 48234ff6846SIoana Radulescu } 48334ff6846SIoana Radulescu 48434ff6846SIoana Radulescu /* Check if we need to validate the L4 csum */ 48534ff6846SIoana Radulescu if (likely(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV)) { 48634ff6846SIoana Radulescu status = le32_to_cpu(fas->status); 48734ff6846SIoana Radulescu validate_rx_csum(priv, status, skb); 48834ff6846SIoana Radulescu } 48934ff6846SIoana Radulescu 49034ff6846SIoana Radulescu skb->protocol = eth_type_trans(skb, priv->net_dev); 491dbcdf728SIoana Ciocoi Radulescu skb_record_rx_queue(skb, fq->flowid); 49234ff6846SIoana Radulescu 49334ff6846SIoana Radulescu percpu_stats->rx_packets++; 49434ff6846SIoana Radulescu percpu_stats->rx_bytes += dpaa2_fd_get_len(fd); 49534ff6846SIoana Radulescu 4960a25d92cSIoana Ciornei list_add_tail(&skb->list, ch->rx_list); 49734ff6846SIoana Radulescu 49834ff6846SIoana Radulescu return; 49934ff6846SIoana Radulescu 50034ff6846SIoana Radulescu err_build_skb: 50134ff6846SIoana Radulescu 
free_rx_fd(priv, fd, vaddr); 50234ff6846SIoana Radulescu err_frame_format: 50334ff6846SIoana Radulescu percpu_stats->rx_dropped++; 50434ff6846SIoana Radulescu } 50534ff6846SIoana Radulescu 50634ff6846SIoana Radulescu /* Consume all frames pull-dequeued into the store. This is the simplest way to 50734ff6846SIoana Radulescu * make sure we don't accidentally issue another volatile dequeue which would 50834ff6846SIoana Radulescu * overwrite (leak) frames already in the store. 50934ff6846SIoana Radulescu * 51034ff6846SIoana Radulescu * Observance of NAPI budget is not our concern, leaving that to the caller. 51134ff6846SIoana Radulescu */ 51268049a5fSIoana Ciocoi Radulescu static int consume_frames(struct dpaa2_eth_channel *ch, 513569dac6aSIoana Ciocoi Radulescu struct dpaa2_eth_fq **src) 51434ff6846SIoana Radulescu { 51534ff6846SIoana Radulescu struct dpaa2_eth_priv *priv = ch->priv; 51668049a5fSIoana Ciocoi Radulescu struct dpaa2_eth_fq *fq = NULL; 51734ff6846SIoana Radulescu struct dpaa2_dq *dq; 51834ff6846SIoana Radulescu const struct dpaa2_fd *fd; 519ef17bd7cSIoana Radulescu int cleaned = 0, retries = 0; 52034ff6846SIoana Radulescu int is_last; 52134ff6846SIoana Radulescu 52234ff6846SIoana Radulescu do { 52334ff6846SIoana Radulescu dq = dpaa2_io_store_next(ch->store, &is_last); 52434ff6846SIoana Radulescu if (unlikely(!dq)) { 52534ff6846SIoana Radulescu /* If we're here, we *must* have placed a 52634ff6846SIoana Radulescu * volatile dequeue comnmand, so keep reading through 52734ff6846SIoana Radulescu * the store until we get some sort of valid response 52834ff6846SIoana Radulescu * token (either a valid frame or an "empty dequeue") 52934ff6846SIoana Radulescu */ 530ef17bd7cSIoana Radulescu if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES) { 531ef17bd7cSIoana Radulescu netdev_err_once(priv->net_dev, 532ef17bd7cSIoana Radulescu "Unable to read a valid dequeue response\n"); 533ef17bd7cSIoana Radulescu return -ETIMEDOUT; 534ef17bd7cSIoana Radulescu } 53534ff6846SIoana 
Radulescu continue; 53634ff6846SIoana Radulescu } 53734ff6846SIoana Radulescu 53834ff6846SIoana Radulescu fd = dpaa2_dq_fd(dq); 53934ff6846SIoana Radulescu fq = (struct dpaa2_eth_fq *)(uintptr_t)dpaa2_dq_fqd_ctx(dq); 54034ff6846SIoana Radulescu 541dbcdf728SIoana Ciocoi Radulescu fq->consume(priv, ch, fd, fq); 54234ff6846SIoana Radulescu cleaned++; 543ef17bd7cSIoana Radulescu retries = 0; 54434ff6846SIoana Radulescu } while (!is_last); 54534ff6846SIoana Radulescu 54668049a5fSIoana Ciocoi Radulescu if (!cleaned) 54768049a5fSIoana Ciocoi Radulescu return 0; 54868049a5fSIoana Ciocoi Radulescu 54968049a5fSIoana Ciocoi Radulescu fq->stats.frames += cleaned; 550460fd830SIoana Ciornei ch->stats.frames += cleaned; 55168049a5fSIoana Ciocoi Radulescu 55268049a5fSIoana Ciocoi Radulescu /* A dequeue operation only pulls frames from a single queue 553569dac6aSIoana Ciocoi Radulescu * into the store. Return the frame queue as an out param. 55468049a5fSIoana Ciocoi Radulescu */ 555569dac6aSIoana Ciocoi Radulescu if (src) 556569dac6aSIoana Ciocoi Radulescu *src = fq; 55768049a5fSIoana Ciocoi Radulescu 55834ff6846SIoana Radulescu return cleaned; 55934ff6846SIoana Radulescu } 56034ff6846SIoana Radulescu 56134ff6846SIoana Radulescu /* Configure the egress frame annotation for timestamp update */ 56234ff6846SIoana Radulescu static void enable_tx_tstamp(struct dpaa2_fd *fd, void *buf_start) 56334ff6846SIoana Radulescu { 56434ff6846SIoana Radulescu struct dpaa2_faead *faead; 56534ff6846SIoana Radulescu u32 ctrl, frc; 56634ff6846SIoana Radulescu 56734ff6846SIoana Radulescu /* Mark the egress frame annotation area as valid */ 56834ff6846SIoana Radulescu frc = dpaa2_fd_get_frc(fd); 56934ff6846SIoana Radulescu dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV); 57034ff6846SIoana Radulescu 57134ff6846SIoana Radulescu /* Set hardware annotation size */ 57234ff6846SIoana Radulescu ctrl = dpaa2_fd_get_ctrl(fd); 57334ff6846SIoana Radulescu dpaa2_fd_set_ctrl(fd, ctrl | DPAA2_FD_CTRL_ASAL); 
/* Create a frame descriptor based on a fragmented skb.
 *
 * Builds a hardware scatter-gather table (SGT) describing the skb's linear
 * part plus all fragments, maps everything for DMA and fills in @fd.
 * Returns 0 on success or a negative errno; on failure all intermediate
 * allocations/mappings are undone via the goto unwind chain at the bottom.
 */
static int build_sg_fd(struct dpaa2_eth_priv *priv,
		       struct sk_buff *skb,
		       struct dpaa2_fd *fd)
{
	struct device *dev = priv->net_dev->dev.parent;
	void *sgt_buf = NULL;
	dma_addr_t addr;
	int nr_frags = skb_shinfo(skb)->nr_frags;
	struct dpaa2_sg_entry *sgt;
	int i, err;
	int sgt_buf_size;
	struct scatterlist *scl, *crt_scl;
	int num_sg;
	int num_dma_bufs;
	struct dpaa2_eth_swa *swa;

	/* Create and map scatterlist.
	 * We don't advertise NETIF_F_FRAGLIST, so skb_to_sgvec() will not have
	 * to go beyond nr_frags+1.
	 * Note: We don't support chained scatterlists
	 */
	if (unlikely(PAGE_SIZE / sizeof(struct scatterlist) < nr_frags + 1))
		return -EINVAL;

	scl = kcalloc(nr_frags + 1, sizeof(struct scatterlist), GFP_ATOMIC);
	if (unlikely(!scl))
		return -ENOMEM;

	sg_init_table(scl, nr_frags + 1);
	num_sg = skb_to_sgvec(skb, scl, 0, skb->len);
	if (unlikely(num_sg < 0)) {
		err = -ENOMEM;
		goto dma_map_sg_failed;
	}
	num_dma_bufs = dma_map_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL);
	if (unlikely(!num_dma_bufs)) {
		err = -ENOMEM;
		goto dma_map_sg_failed;
	}

	/* Prepare the HW SGT structure: the SGT entries are placed after the
	 * software annotation area (tx_data_offset bytes into the buffer).
	 */
	sgt_buf_size = priv->tx_data_offset +
		       sizeof(struct dpaa2_sg_entry) * num_dma_bufs;
	sgt_buf = napi_alloc_frag(sgt_buf_size + DPAA2_ETH_TX_BUF_ALIGN);
	if (unlikely(!sgt_buf)) {
		err = -ENOMEM;
		goto sgt_buf_alloc_failed;
	}
	/* Over-allocated above by one alignment unit so this can't shrink
	 * below sgt_buf_size
	 */
	sgt_buf = PTR_ALIGN(sgt_buf, DPAA2_ETH_TX_BUF_ALIGN);
	memset(sgt_buf, 0, sgt_buf_size);

	sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);

	/* Fill in the HW SGT structure.
	 *
	 * sgt_buf is zeroed out, so the following fields are implicit
	 * in all sgt entries:
	 * - offset is 0
	 * - format is 'dpaa2_sg_single'
	 */
	for_each_sg(scl, crt_scl, num_dma_bufs, i) {
		dpaa2_sg_set_addr(&sgt[i], sg_dma_address(crt_scl));
		dpaa2_sg_set_len(&sgt[i], sg_dma_len(crt_scl));
	}
	dpaa2_sg_set_final(&sgt[i - 1], true);

	/* Store the skb backpointer in the SGT buffer.
	 * Fit the scatterlist and the number of buffers alongside the
	 * skb backpointer in the software annotation area. We'll need
	 * all of them on Tx Conf.
	 */
	swa = (struct dpaa2_eth_swa *)sgt_buf;
	swa->type = DPAA2_ETH_SWA_SG;
	swa->sg.skb = skb;
	swa->sg.scl = scl;
	swa->sg.num_sg = num_sg;
	swa->sg.sgt_size = sgt_buf_size;

	/* Separately map the SGT buffer; the SWA must be fully written
	 * before this point since the mapping covers it.
	 */
	addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(dev, addr))) {
		err = -ENOMEM;
		goto dma_map_single_failed;
	}
	dpaa2_fd_set_offset(fd, priv->tx_data_offset);
	dpaa2_fd_set_format(fd, dpaa2_fd_sg);
	dpaa2_fd_set_addr(fd, addr);
	dpaa2_fd_set_len(fd, skb->len);
	dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);

	if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
		enable_tx_tstamp(fd, sgt_buf);

	return 0;

dma_map_single_failed:
	skb_free_frag(sgt_buf);
sgt_buf_alloc_failed:
	dma_unmap_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL);
dma_map_sg_failed:
	kfree(scl);
	return err;
}
/* Create a frame descriptor based on a linear skb.
 *
 * Maps the skb data (including the driver-reserved headroom, where the
 * software annotation lives) for DMA and fills in @fd.
 * Returns 0 on success or -ENOMEM if the DMA mapping fails.
 */
static int build_single_fd(struct dpaa2_eth_priv *priv,
			   struct sk_buff *skb,
			   struct dpaa2_fd *fd)
{
	struct device *dev = priv->net_dev->dev.parent;
	u8 *buffer_start, *aligned_start;
	struct dpaa2_eth_swa *swa;
	dma_addr_t addr;

	buffer_start = skb->data - dpaa2_eth_needed_headroom(priv, skb);

	/* If there's enough room to align the FD address, do it.
	 * It will help hardware optimize accesses.
	 */
	aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN,
				  DPAA2_ETH_TX_BUF_ALIGN);
	if (aligned_start >= skb->head)
		buffer_start = aligned_start;

	/* Store a backpointer to the skb at the beginning of the buffer
	 * (in the private data area) such that we can release it
	 * on Tx confirm. Must be written before the DMA mapping below,
	 * which covers this area.
	 */
	swa = (struct dpaa2_eth_swa *)buffer_start;
	swa->type = DPAA2_ETH_SWA_SINGLE;
	swa->single.skb = skb;

	addr = dma_map_single(dev, buffer_start,
			      skb_tail_pointer(skb) - buffer_start,
			      DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(dev, addr)))
		return -ENOMEM;

	/* FD offset points at the actual frame data, past the annotation */
	dpaa2_fd_set_addr(fd, addr);
	dpaa2_fd_set_offset(fd, (u16)(skb->data - buffer_start));
	dpaa2_fd_set_len(fd, skb->len);
	dpaa2_fd_set_format(fd, dpaa2_fd_single);
	dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);

	if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
		enable_tx_tstamp(fd, buffer_start);

	return 0;
}
/* FD freeing routine on the Tx path
 *
 * DMA-unmap and free FD and possibly SGT buffer allocated on Tx. The skb
 * back-pointed to is also freed.
 * This can be called either from dpaa2_eth_tx_conf() or on the error path of
 * dpaa2_eth_tx().
 */
static void free_tx_fd(const struct dpaa2_eth_priv *priv,
		       struct dpaa2_eth_fq *fq,
		       const struct dpaa2_fd *fd, bool in_napi)
{
	struct device *dev = priv->net_dev->dev.parent;
	dma_addr_t fd_addr;
	struct sk_buff *skb = NULL;
	unsigned char *buffer_start;
	struct dpaa2_eth_swa *swa;
	u8 fd_format = dpaa2_fd_get_format(fd);
	u32 fd_len = dpaa2_fd_get_len(fd);

	fd_addr = dpaa2_fd_get_addr(fd);
	/* The software annotation area sits at the start of the buffer */
	buffer_start = dpaa2_iova_to_virt(priv->iommu_domain, fd_addr);
	swa = (struct dpaa2_eth_swa *)buffer_start;

	if (fd_format == dpaa2_fd_single) {
		if (swa->type == DPAA2_ETH_SWA_SINGLE) {
			skb = swa->single.skb;
			/* Accessing the skb buffer is safe before dma unmap,
			 * because we didn't map the actual skb shell.
			 */
			dma_unmap_single(dev, fd_addr,
					 skb_tail_pointer(skb) - buffer_start,
					 DMA_BIDIRECTIONAL);
		} else {
			/* Single FDs that are not skb-backed come from XDP */
			WARN_ONCE(swa->type != DPAA2_ETH_SWA_XDP, "Wrong SWA type");
			dma_unmap_single(dev, fd_addr, swa->xdp.dma_size,
					 DMA_BIDIRECTIONAL);
		}
	} else if (fd_format == dpaa2_fd_sg) {
		skb = swa->sg.skb;

		/* Unmap the scatterlist */
		dma_unmap_sg(dev, swa->sg.scl, swa->sg.num_sg,
			     DMA_BIDIRECTIONAL);
		kfree(swa->sg.scl);

		/* Unmap the SGT buffer */
		dma_unmap_single(dev, fd_addr, swa->sg.sgt_size,
				 DMA_BIDIRECTIONAL);
	} else {
		netdev_dbg(priv->net_dev, "Invalid FD format\n");
		return;
	}

	/* Per-FQ dequeue accounting for confirmed skb frames; XDP frames
	 * and non-NAPI (error-path) calls are not counted here.
	 */
	if (swa->type != DPAA2_ETH_SWA_XDP && in_napi) {
		fq->dq_frames++;
		fq->dq_bytes += fd_len;
	}

	/* XDP frames have no skb to release; hand the frame back to the
	 * XDP memory allocator and stop here.
	 */
	if (swa->type == DPAA2_ETH_SWA_XDP) {
		xdp_return_frame(swa->xdp.xdpf);
		return;
	}

	/* Get the timestamp value */
	if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
		struct skb_shared_hwtstamps shhwtstamps;
		__le64 *ts = dpaa2_get_ts(buffer_start, true);
		u64 ns;

		memset(&shhwtstamps, 0, sizeof(shhwtstamps));

		ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts);
		shhwtstamps.hwtstamp = ns_to_ktime(ns);
		skb_tstamp_tx(skb, &shhwtstamps);
	}

	/* Free SGT buffer allocated on tx */
	if (fd_format != dpaa2_fd_single)
		skb_free_frag(buffer_start);

	/* Move on with skb release */
	napi_consume_skb(skb, in_napi);
}
/* Hardware transmit routine (.ndo_start_xmit).
 *
 * Builds a frame descriptor for @skb (single or scatter-gather), selects the
 * Tx FQ based on the stack's queue mapping, and enqueues it to hardware.
 * Always returns NETDEV_TX_OK; on failure the skb is freed and the drop/error
 * counters are bumped instead of requeueing.
 */
static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	struct dpaa2_fd fd;
	struct rtnl_link_stats64 *percpu_stats;
	struct dpaa2_eth_drv_stats *percpu_extras;
	struct dpaa2_eth_fq *fq;
	struct netdev_queue *nq;
	u16 queue_mapping;
	unsigned int needed_headroom;
	u32 fd_len;
	u8 prio = 0;
	int err, i;

	percpu_stats = this_cpu_ptr(priv->percpu_stats);
	percpu_extras = this_cpu_ptr(priv->percpu_extras);

	/* The driver needs headroom in front of the data for the software
	 * annotation; reallocate the skb if the stack didn't leave enough.
	 */
	needed_headroom = dpaa2_eth_needed_headroom(priv, skb);
	if (skb_headroom(skb) < needed_headroom) {
		struct sk_buff *ns;

		ns = skb_realloc_headroom(skb, needed_headroom);
		if (unlikely(!ns)) {
			percpu_stats->tx_dropped++;
			goto err_alloc_headroom;
		}
		percpu_extras->tx_reallocs++;

		/* Keep socket accounting attached to the new skb */
		if (skb->sk)
			skb_set_owner_w(ns, skb->sk);

		dev_kfree_skb(skb);
		skb = ns;
	}

	/* We'll be holding a back-reference to the skb until Tx Confirmation;
	 * we don't want that overwritten by a concurrent Tx with a cloned skb.
	 */
	skb = skb_unshare(skb, GFP_ATOMIC);
	if (unlikely(!skb)) {
		/* skb_unshare() has already freed the skb */
		percpu_stats->tx_dropped++;
		return NETDEV_TX_OK;
	}

	/* Setup the FD fields */
	memset(&fd, 0, sizeof(fd));

	if (skb_is_nonlinear(skb)) {
		err = build_sg_fd(priv, skb, &fd);
		percpu_extras->tx_sg_frames++;
		percpu_extras->tx_sg_bytes += skb->len;
	} else {
		err = build_single_fd(priv, skb, &fd);
	}

	if (unlikely(err)) {
		percpu_stats->tx_dropped++;
		goto err_build_fd;
	}

	/* Tracing point */
	trace_dpaa2_tx_fd(net_dev, &fd);

	/* TxConf FQ selection relies on queue id from the stack.
	 * In case of a forwarded frame from another DPNI interface, we choose
	 * a queue affined to the same core that processed the Rx frame
	 */
	queue_mapping = skb_get_queue_mapping(skb);

	if (net_dev->num_tc) {
		prio = netdev_txq_to_tc(net_dev, queue_mapping);
		/* Hardware interprets priority level 0 as being the highest,
		 * so we need to do a reverse mapping to the netdev tc index
		 */
		prio = net_dev->num_tc - prio - 1;
		/* We have only one FQ array entry for all Tx hardware queues
		 * with the same flow id (but different priority levels)
		 */
		queue_mapping %= dpaa2_eth_queue_count(priv);
	}
	fq = &priv->fq[queue_mapping];

	fd_len = dpaa2_fd_get_len(&fd);
	nq = netdev_get_tx_queue(net_dev, queue_mapping);
	netdev_tx_sent_queue(nq, fd_len);

	/* Everything that happens after this enqueues might race with
	 * the Tx confirmation callback for this frame
	 */
	for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
		err = priv->enqueue(priv, fq, &fd, prio, 1, NULL);
		if (err != -EBUSY)
			break;
	}
	percpu_extras->tx_portal_busy += i;
	if (unlikely(err < 0)) {
		percpu_stats->tx_errors++;
		/* Clean up everything, including freeing the skb */
		free_tx_fd(priv, fq, &fd, false);
		/* Balance the netdev_tx_sent_queue() above, since no Tx conf
		 * will arrive for this frame
		 */
		netdev_tx_completed_queue(nq, 1, fd_len);
	} else {
		percpu_stats->tx_packets++;
		percpu_stats->tx_bytes += fd_len;
	}

	return NETDEV_TX_OK;

err_build_fd:
err_alloc_headroom:
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}
Radulescu break; 91234ff6846SIoana Radulescu } 91334ff6846SIoana Radulescu percpu_extras->tx_portal_busy += i; 91434ff6846SIoana Radulescu if (unlikely(err < 0)) { 91534ff6846SIoana Radulescu percpu_stats->tx_errors++; 91634ff6846SIoana Radulescu /* Clean up everything, including freeing the skb */ 917d678be1dSIoana Radulescu free_tx_fd(priv, fq, &fd, false); 9188c838f53SIoana Ciornei netdev_tx_completed_queue(nq, 1, fd_len); 91934ff6846SIoana Radulescu } else { 92034ff6846SIoana Radulescu percpu_stats->tx_packets++; 921569dac6aSIoana Ciocoi Radulescu percpu_stats->tx_bytes += fd_len; 92234ff6846SIoana Radulescu } 92334ff6846SIoana Radulescu 92434ff6846SIoana Radulescu return NETDEV_TX_OK; 92534ff6846SIoana Radulescu 92634ff6846SIoana Radulescu err_build_fd: 92734ff6846SIoana Radulescu err_alloc_headroom: 92834ff6846SIoana Radulescu dev_kfree_skb(skb); 92934ff6846SIoana Radulescu 93034ff6846SIoana Radulescu return NETDEV_TX_OK; 93134ff6846SIoana Radulescu } 93234ff6846SIoana Radulescu 93334ff6846SIoana Radulescu /* Tx confirmation frame processing routine */ 93434ff6846SIoana Radulescu static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv, 935b00c898cSIoana Ciornei struct dpaa2_eth_channel *ch __always_unused, 93634ff6846SIoana Radulescu const struct dpaa2_fd *fd, 937569dac6aSIoana Ciocoi Radulescu struct dpaa2_eth_fq *fq) 93834ff6846SIoana Radulescu { 93934ff6846SIoana Radulescu struct rtnl_link_stats64 *percpu_stats; 94034ff6846SIoana Radulescu struct dpaa2_eth_drv_stats *percpu_extras; 941569dac6aSIoana Ciocoi Radulescu u32 fd_len = dpaa2_fd_get_len(fd); 94234ff6846SIoana Radulescu u32 fd_errors; 94334ff6846SIoana Radulescu 94434ff6846SIoana Radulescu /* Tracing point */ 94534ff6846SIoana Radulescu trace_dpaa2_tx_conf_fd(priv->net_dev, fd); 94634ff6846SIoana Radulescu 94734ff6846SIoana Radulescu percpu_extras = this_cpu_ptr(priv->percpu_extras); 94834ff6846SIoana Radulescu percpu_extras->tx_conf_frames++; 949569dac6aSIoana Ciocoi Radulescu 
percpu_extras->tx_conf_bytes += fd_len; 950569dac6aSIoana Ciocoi Radulescu 95134ff6846SIoana Radulescu /* Check frame errors in the FD field */ 95234ff6846SIoana Radulescu fd_errors = dpaa2_fd_get_ctrl(fd) & DPAA2_FD_TX_ERR_MASK; 953d678be1dSIoana Radulescu free_tx_fd(priv, fq, fd, true); 95434ff6846SIoana Radulescu 95534ff6846SIoana Radulescu if (likely(!fd_errors)) 95634ff6846SIoana Radulescu return; 95734ff6846SIoana Radulescu 95834ff6846SIoana Radulescu if (net_ratelimit()) 95934ff6846SIoana Radulescu netdev_dbg(priv->net_dev, "TX frame FD error: 0x%08x\n", 96034ff6846SIoana Radulescu fd_errors); 96134ff6846SIoana Radulescu 96234ff6846SIoana Radulescu percpu_stats = this_cpu_ptr(priv->percpu_stats); 96334ff6846SIoana Radulescu /* Tx-conf logically pertains to the egress path. */ 96434ff6846SIoana Radulescu percpu_stats->tx_errors++; 96534ff6846SIoana Radulescu } 96634ff6846SIoana Radulescu 96734ff6846SIoana Radulescu static int set_rx_csum(struct dpaa2_eth_priv *priv, bool enable) 96834ff6846SIoana Radulescu { 96934ff6846SIoana Radulescu int err; 97034ff6846SIoana Radulescu 97134ff6846SIoana Radulescu err = dpni_set_offload(priv->mc_io, 0, priv->mc_token, 97234ff6846SIoana Radulescu DPNI_OFF_RX_L3_CSUM, enable); 97334ff6846SIoana Radulescu if (err) { 97434ff6846SIoana Radulescu netdev_err(priv->net_dev, 97534ff6846SIoana Radulescu "dpni_set_offload(RX_L3_CSUM) failed\n"); 97634ff6846SIoana Radulescu return err; 97734ff6846SIoana Radulescu } 97834ff6846SIoana Radulescu 97934ff6846SIoana Radulescu err = dpni_set_offload(priv->mc_io, 0, priv->mc_token, 98034ff6846SIoana Radulescu DPNI_OFF_RX_L4_CSUM, enable); 98134ff6846SIoana Radulescu if (err) { 98234ff6846SIoana Radulescu netdev_err(priv->net_dev, 98334ff6846SIoana Radulescu "dpni_set_offload(RX_L4_CSUM) failed\n"); 98434ff6846SIoana Radulescu return err; 98534ff6846SIoana Radulescu } 98634ff6846SIoana Radulescu 98734ff6846SIoana Radulescu return 0; 98834ff6846SIoana Radulescu } 98934ff6846SIoana Radulescu 
99034ff6846SIoana Radulescu static int set_tx_csum(struct dpaa2_eth_priv *priv, bool enable) 99134ff6846SIoana Radulescu { 99234ff6846SIoana Radulescu int err; 99334ff6846SIoana Radulescu 99434ff6846SIoana Radulescu err = dpni_set_offload(priv->mc_io, 0, priv->mc_token, 99534ff6846SIoana Radulescu DPNI_OFF_TX_L3_CSUM, enable); 99634ff6846SIoana Radulescu if (err) { 99734ff6846SIoana Radulescu netdev_err(priv->net_dev, "dpni_set_offload(TX_L3_CSUM) failed\n"); 99834ff6846SIoana Radulescu return err; 99934ff6846SIoana Radulescu } 100034ff6846SIoana Radulescu 100134ff6846SIoana Radulescu err = dpni_set_offload(priv->mc_io, 0, priv->mc_token, 100234ff6846SIoana Radulescu DPNI_OFF_TX_L4_CSUM, enable); 100334ff6846SIoana Radulescu if (err) { 100434ff6846SIoana Radulescu netdev_err(priv->net_dev, "dpni_set_offload(TX_L4_CSUM) failed\n"); 100534ff6846SIoana Radulescu return err; 100634ff6846SIoana Radulescu } 100734ff6846SIoana Radulescu 100834ff6846SIoana Radulescu return 0; 100934ff6846SIoana Radulescu } 101034ff6846SIoana Radulescu 101134ff6846SIoana Radulescu /* Perform a single release command to add buffers 101234ff6846SIoana Radulescu * to the specified buffer pool 101334ff6846SIoana Radulescu */ 101434ff6846SIoana Radulescu static int add_bufs(struct dpaa2_eth_priv *priv, 101534ff6846SIoana Radulescu struct dpaa2_eth_channel *ch, u16 bpid) 101634ff6846SIoana Radulescu { 101734ff6846SIoana Radulescu struct device *dev = priv->net_dev->dev.parent; 101834ff6846SIoana Radulescu u64 buf_array[DPAA2_ETH_BUFS_PER_CMD]; 101927c87486SIoana Ciocoi Radulescu struct page *page; 102034ff6846SIoana Radulescu dma_addr_t addr; 1021ef17bd7cSIoana Radulescu int retries = 0; 102234ff6846SIoana Radulescu int i, err; 102334ff6846SIoana Radulescu 102434ff6846SIoana Radulescu for (i = 0; i < DPAA2_ETH_BUFS_PER_CMD; i++) { 102534ff6846SIoana Radulescu /* Allocate buffer visible to WRIOP + skb shared info + 102634ff6846SIoana Radulescu * alignment padding 102734ff6846SIoana Radulescu */ 
/* Perform a single release command to add buffers
 * to the specified buffer pool.
 *
 * Returns the number of buffers actually handed to hardware
 * (0..DPAA2_ETH_BUFS_PER_CMD); 0 on allocation/mapping/release failure.
 */
static int add_bufs(struct dpaa2_eth_priv *priv,
		    struct dpaa2_eth_channel *ch, u16 bpid)
{
	struct device *dev = priv->net_dev->dev.parent;
	u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
	struct page *page;
	dma_addr_t addr;
	int retries = 0;
	int i, err;

	for (i = 0; i < DPAA2_ETH_BUFS_PER_CMD; i++) {
		/* Allocate buffer visible to WRIOP + skb shared info +
		 * alignment padding
		 */
		/* allocate one page for each Rx buffer. WRIOP sees
		 * the entire page except for a tailroom reserved for
		 * skb shared info
		 */
		page = dev_alloc_pages(0);
		if (!page)
			goto err_alloc;

		addr = dma_map_page(dev, page, 0, priv->rx_buf_size,
				    DMA_BIDIRECTIONAL);
		if (unlikely(dma_mapping_error(dev, addr)))
			goto err_map;

		buf_array[i] = addr;

		/* tracing point */
		trace_dpaa2_eth_buf_seed(priv->net_dev,
					 page, DPAA2_ETH_RX_BUF_RAW_SIZE,
					 addr, priv->rx_buf_size,
					 bpid);
	}

release_bufs:
	/* In case the portal is busy, retry until successful
	 * (up to DPAA2_ETH_SWP_BUSY_RETRIES attempts)
	 */
	while ((err = dpaa2_io_service_release(ch->dpio, bpid,
					       buf_array, i)) == -EBUSY) {
		if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES)
			break;
		cpu_relax();
	}

	/* If release command failed, clean up and bail out;
	 * not much else we can do about it
	 */
	if (err) {
		free_bufs(priv, buf_array, i);
		return 0;
	}

	return i;

err_map:
	__free_pages(page, 0);
err_alloc:
	/* If we managed to allocate at least some buffers,
	 * release them to hardware
	 */
	if (i)
		goto release_bufs;

	return 0;
}
Radulescu 106734ff6846SIoana Radulescu return i; 106834ff6846SIoana Radulescu 106934ff6846SIoana Radulescu err_map: 107027c87486SIoana Ciocoi Radulescu __free_pages(page, 0); 107134ff6846SIoana Radulescu err_alloc: 107234ff6846SIoana Radulescu /* If we managed to allocate at least some buffers, 107334ff6846SIoana Radulescu * release them to hardware 107434ff6846SIoana Radulescu */ 107534ff6846SIoana Radulescu if (i) 107634ff6846SIoana Radulescu goto release_bufs; 107734ff6846SIoana Radulescu 107834ff6846SIoana Radulescu return 0; 107934ff6846SIoana Radulescu } 108034ff6846SIoana Radulescu 108134ff6846SIoana Radulescu static int seed_pool(struct dpaa2_eth_priv *priv, u16 bpid) 108234ff6846SIoana Radulescu { 108334ff6846SIoana Radulescu int i, j; 108434ff6846SIoana Radulescu int new_count; 108534ff6846SIoana Radulescu 108634ff6846SIoana Radulescu for (j = 0; j < priv->num_channels; j++) { 108734ff6846SIoana Radulescu for (i = 0; i < DPAA2_ETH_NUM_BUFS; 108834ff6846SIoana Radulescu i += DPAA2_ETH_BUFS_PER_CMD) { 108934ff6846SIoana Radulescu new_count = add_bufs(priv, priv->channel[j], bpid); 109034ff6846SIoana Radulescu priv->channel[j]->buf_count += new_count; 109134ff6846SIoana Radulescu 109234ff6846SIoana Radulescu if (new_count < DPAA2_ETH_BUFS_PER_CMD) { 109334ff6846SIoana Radulescu return -ENOMEM; 109434ff6846SIoana Radulescu } 109534ff6846SIoana Radulescu } 109634ff6846SIoana Radulescu } 109734ff6846SIoana Radulescu 109834ff6846SIoana Radulescu return 0; 109934ff6846SIoana Radulescu } 110034ff6846SIoana Radulescu 110134ff6846SIoana Radulescu /** 110234ff6846SIoana Radulescu * Drain the specified number of buffers from the DPNI's private buffer pool. 
110334ff6846SIoana Radulescu * @count must not exceeed DPAA2_ETH_BUFS_PER_CMD 110434ff6846SIoana Radulescu */ 110534ff6846SIoana Radulescu static void drain_bufs(struct dpaa2_eth_priv *priv, int count) 110634ff6846SIoana Radulescu { 110734ff6846SIoana Radulescu u64 buf_array[DPAA2_ETH_BUFS_PER_CMD]; 1108ef17bd7cSIoana Radulescu int retries = 0; 110934ff6846SIoana Radulescu int ret; 111034ff6846SIoana Radulescu 111134ff6846SIoana Radulescu do { 111234ff6846SIoana Radulescu ret = dpaa2_io_service_acquire(NULL, priv->bpid, 111334ff6846SIoana Radulescu buf_array, count); 111434ff6846SIoana Radulescu if (ret < 0) { 1115ef17bd7cSIoana Radulescu if (ret == -EBUSY && 1116ef17bd7cSIoana Radulescu retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES) 1117ef17bd7cSIoana Radulescu continue; 111834ff6846SIoana Radulescu netdev_err(priv->net_dev, "dpaa2_io_service_acquire() failed\n"); 111934ff6846SIoana Radulescu return; 112034ff6846SIoana Radulescu } 112134ff6846SIoana Radulescu free_bufs(priv, buf_array, ret); 1122ef17bd7cSIoana Radulescu retries = 0; 112334ff6846SIoana Radulescu } while (ret); 112434ff6846SIoana Radulescu } 112534ff6846SIoana Radulescu 112634ff6846SIoana Radulescu static void drain_pool(struct dpaa2_eth_priv *priv) 112734ff6846SIoana Radulescu { 112834ff6846SIoana Radulescu int i; 112934ff6846SIoana Radulescu 113034ff6846SIoana Radulescu drain_bufs(priv, DPAA2_ETH_BUFS_PER_CMD); 113134ff6846SIoana Radulescu drain_bufs(priv, 1); 113234ff6846SIoana Radulescu 113334ff6846SIoana Radulescu for (i = 0; i < priv->num_channels; i++) 113434ff6846SIoana Radulescu priv->channel[i]->buf_count = 0; 113534ff6846SIoana Radulescu } 113634ff6846SIoana Radulescu 113734ff6846SIoana Radulescu /* Function is called from softirq context only, so we don't need to guard 113834ff6846SIoana Radulescu * the access to percpu count 113934ff6846SIoana Radulescu */ 114034ff6846SIoana Radulescu static int refill_pool(struct dpaa2_eth_priv *priv, 114134ff6846SIoana Radulescu struct dpaa2_eth_channel 
*ch, 114234ff6846SIoana Radulescu u16 bpid) 114334ff6846SIoana Radulescu { 114434ff6846SIoana Radulescu int new_count; 114534ff6846SIoana Radulescu 114634ff6846SIoana Radulescu if (likely(ch->buf_count >= DPAA2_ETH_REFILL_THRESH)) 114734ff6846SIoana Radulescu return 0; 114834ff6846SIoana Radulescu 114934ff6846SIoana Radulescu do { 115034ff6846SIoana Radulescu new_count = add_bufs(priv, ch, bpid); 115134ff6846SIoana Radulescu if (unlikely(!new_count)) { 115234ff6846SIoana Radulescu /* Out of memory; abort for now, we'll try later on */ 115334ff6846SIoana Radulescu break; 115434ff6846SIoana Radulescu } 115534ff6846SIoana Radulescu ch->buf_count += new_count; 115634ff6846SIoana Radulescu } while (ch->buf_count < DPAA2_ETH_NUM_BUFS); 115734ff6846SIoana Radulescu 115834ff6846SIoana Radulescu if (unlikely(ch->buf_count < DPAA2_ETH_NUM_BUFS)) 115934ff6846SIoana Radulescu return -ENOMEM; 116034ff6846SIoana Radulescu 116134ff6846SIoana Radulescu return 0; 116234ff6846SIoana Radulescu } 116334ff6846SIoana Radulescu 116434ff6846SIoana Radulescu static int pull_channel(struct dpaa2_eth_channel *ch) 116534ff6846SIoana Radulescu { 116634ff6846SIoana Radulescu int err; 116734ff6846SIoana Radulescu int dequeues = -1; 116834ff6846SIoana Radulescu 116934ff6846SIoana Radulescu /* Retry while portal is busy */ 117034ff6846SIoana Radulescu do { 117134ff6846SIoana Radulescu err = dpaa2_io_service_pull_channel(ch->dpio, ch->ch_id, 117234ff6846SIoana Radulescu ch->store); 117334ff6846SIoana Radulescu dequeues++; 117434ff6846SIoana Radulescu cpu_relax(); 1175ef17bd7cSIoana Radulescu } while (err == -EBUSY && dequeues < DPAA2_ETH_SWP_BUSY_RETRIES); 117634ff6846SIoana Radulescu 117734ff6846SIoana Radulescu ch->stats.dequeue_portal_busy += dequeues; 117834ff6846SIoana Radulescu if (unlikely(err)) 117934ff6846SIoana Radulescu ch->stats.pull_err++; 118034ff6846SIoana Radulescu 118134ff6846SIoana Radulescu return err; 118234ff6846SIoana Radulescu } 118334ff6846SIoana Radulescu 118434ff6846SIoana 
Radulescu /* NAPI poll routine 118534ff6846SIoana Radulescu * 118634ff6846SIoana Radulescu * Frames are dequeued from the QMan channel associated with this NAPI context. 118734ff6846SIoana Radulescu * Rx, Tx confirmation and (if configured) Rx error frames all count 118834ff6846SIoana Radulescu * towards the NAPI budget. 118934ff6846SIoana Radulescu */ 119034ff6846SIoana Radulescu static int dpaa2_eth_poll(struct napi_struct *napi, int budget) 119134ff6846SIoana Radulescu { 119234ff6846SIoana Radulescu struct dpaa2_eth_channel *ch; 119334ff6846SIoana Radulescu struct dpaa2_eth_priv *priv; 119468049a5fSIoana Ciocoi Radulescu int rx_cleaned = 0, txconf_cleaned = 0; 1195569dac6aSIoana Ciocoi Radulescu struct dpaa2_eth_fq *fq, *txc_fq = NULL; 1196569dac6aSIoana Ciocoi Radulescu struct netdev_queue *nq; 1197569dac6aSIoana Ciocoi Radulescu int store_cleaned, work_done; 11980a25d92cSIoana Ciornei struct list_head rx_list; 1199ef17bd7cSIoana Radulescu int retries = 0; 120074a1c059SIoana Ciornei u16 flowid; 120134ff6846SIoana Radulescu int err; 120234ff6846SIoana Radulescu 120334ff6846SIoana Radulescu ch = container_of(napi, struct dpaa2_eth_channel, napi); 1204d678be1dSIoana Radulescu ch->xdp.res = 0; 120534ff6846SIoana Radulescu priv = ch->priv; 120634ff6846SIoana Radulescu 12070a25d92cSIoana Ciornei INIT_LIST_HEAD(&rx_list); 12080a25d92cSIoana Ciornei ch->rx_list = &rx_list; 12090a25d92cSIoana Ciornei 121068049a5fSIoana Ciocoi Radulescu do { 121134ff6846SIoana Radulescu err = pull_channel(ch); 121234ff6846SIoana Radulescu if (unlikely(err)) 121334ff6846SIoana Radulescu break; 121434ff6846SIoana Radulescu 121534ff6846SIoana Radulescu /* Refill pool if appropriate */ 121634ff6846SIoana Radulescu refill_pool(priv, ch, priv->bpid); 121734ff6846SIoana Radulescu 1218569dac6aSIoana Ciocoi Radulescu store_cleaned = consume_frames(ch, &fq); 1219ef17bd7cSIoana Radulescu if (store_cleaned <= 0) 1220569dac6aSIoana Ciocoi Radulescu break; 1221569dac6aSIoana Ciocoi Radulescu if 
(fq->type == DPAA2_RX_FQ) { 122268049a5fSIoana Ciocoi Radulescu rx_cleaned += store_cleaned; 122374a1c059SIoana Ciornei flowid = fq->flowid; 1224569dac6aSIoana Ciocoi Radulescu } else { 122568049a5fSIoana Ciocoi Radulescu txconf_cleaned += store_cleaned; 1226569dac6aSIoana Ciocoi Radulescu /* We have a single Tx conf FQ on this channel */ 1227569dac6aSIoana Ciocoi Radulescu txc_fq = fq; 1228569dac6aSIoana Ciocoi Radulescu } 122934ff6846SIoana Radulescu 123068049a5fSIoana Ciocoi Radulescu /* If we either consumed the whole NAPI budget with Rx frames 123168049a5fSIoana Ciocoi Radulescu * or we reached the Tx confirmations threshold, we're done. 123234ff6846SIoana Radulescu */ 123368049a5fSIoana Ciocoi Radulescu if (rx_cleaned >= budget || 1234569dac6aSIoana Ciocoi Radulescu txconf_cleaned >= DPAA2_ETH_TXCONF_PER_NAPI) { 1235569dac6aSIoana Ciocoi Radulescu work_done = budget; 1236569dac6aSIoana Ciocoi Radulescu goto out; 1237569dac6aSIoana Ciocoi Radulescu } 123868049a5fSIoana Ciocoi Radulescu } while (store_cleaned); 123934ff6846SIoana Radulescu 124068049a5fSIoana Ciocoi Radulescu /* We didn't consume the entire budget, so finish napi and 124168049a5fSIoana Ciocoi Radulescu * re-enable data availability notifications 124268049a5fSIoana Ciocoi Radulescu */ 124368049a5fSIoana Ciocoi Radulescu napi_complete_done(napi, rx_cleaned); 124434ff6846SIoana Radulescu do { 124534ff6846SIoana Radulescu err = dpaa2_io_service_rearm(ch->dpio, &ch->nctx); 124634ff6846SIoana Radulescu cpu_relax(); 1247ef17bd7cSIoana Radulescu } while (err == -EBUSY && retries++ < DPAA2_ETH_SWP_BUSY_RETRIES); 124834ff6846SIoana Radulescu WARN_ONCE(err, "CDAN notifications rearm failed on core %d", 124934ff6846SIoana Radulescu ch->nctx.desired_cpu); 125034ff6846SIoana Radulescu 1251569dac6aSIoana Ciocoi Radulescu work_done = max(rx_cleaned, 1); 1252569dac6aSIoana Ciocoi Radulescu 1253569dac6aSIoana Ciocoi Radulescu out: 12540a25d92cSIoana Ciornei netif_receive_skb_list(ch->rx_list); 12550a25d92cSIoana 
Ciornei 1256d678be1dSIoana Radulescu if (txc_fq && txc_fq->dq_frames) { 1257569dac6aSIoana Ciocoi Radulescu nq = netdev_get_tx_queue(priv->net_dev, txc_fq->flowid); 1258569dac6aSIoana Ciocoi Radulescu netdev_tx_completed_queue(nq, txc_fq->dq_frames, 1259569dac6aSIoana Ciocoi Radulescu txc_fq->dq_bytes); 1260569dac6aSIoana Ciocoi Radulescu txc_fq->dq_frames = 0; 1261569dac6aSIoana Ciocoi Radulescu txc_fq->dq_bytes = 0; 1262569dac6aSIoana Ciocoi Radulescu } 1263569dac6aSIoana Ciocoi Radulescu 1264d678be1dSIoana Radulescu if (ch->xdp.res & XDP_REDIRECT) 1265d678be1dSIoana Radulescu xdp_do_flush_map(); 126674a1c059SIoana Ciornei else if (rx_cleaned && ch->xdp.res & XDP_TX) 126774a1c059SIoana Ciornei xdp_tx_flush(priv, ch, &priv->fq[flowid]); 1268d678be1dSIoana Radulescu 1269569dac6aSIoana Ciocoi Radulescu return work_done; 127034ff6846SIoana Radulescu } 127134ff6846SIoana Radulescu 127234ff6846SIoana Radulescu static void enable_ch_napi(struct dpaa2_eth_priv *priv) 127334ff6846SIoana Radulescu { 127434ff6846SIoana Radulescu struct dpaa2_eth_channel *ch; 127534ff6846SIoana Radulescu int i; 127634ff6846SIoana Radulescu 127734ff6846SIoana Radulescu for (i = 0; i < priv->num_channels; i++) { 127834ff6846SIoana Radulescu ch = priv->channel[i]; 127934ff6846SIoana Radulescu napi_enable(&ch->napi); 128034ff6846SIoana Radulescu } 128134ff6846SIoana Radulescu } 128234ff6846SIoana Radulescu 128334ff6846SIoana Radulescu static void disable_ch_napi(struct dpaa2_eth_priv *priv) 128434ff6846SIoana Radulescu { 128534ff6846SIoana Radulescu struct dpaa2_eth_channel *ch; 128634ff6846SIoana Radulescu int i; 128734ff6846SIoana Radulescu 128834ff6846SIoana Radulescu for (i = 0; i < priv->num_channels; i++) { 128934ff6846SIoana Radulescu ch = priv->channel[i]; 129034ff6846SIoana Radulescu napi_disable(&ch->napi); 129134ff6846SIoana Radulescu } 129234ff6846SIoana Radulescu } 129334ff6846SIoana Radulescu 129407beb165SIoana Ciornei void dpaa2_eth_set_rx_taildrop(struct dpaa2_eth_priv *priv, 
129507beb165SIoana Ciornei bool tx_pause, bool pfc) 12968eb3cef8SIoana Radulescu { 12978eb3cef8SIoana Radulescu struct dpni_taildrop td = {0}; 1298685e39eaSIoana Radulescu struct dpaa2_eth_fq *fq; 12998eb3cef8SIoana Radulescu int i, err; 13008eb3cef8SIoana Radulescu 130107beb165SIoana Ciornei /* FQ taildrop: threshold is in bytes, per frame queue. Enabled if 130207beb165SIoana Ciornei * flow control is disabled (as it might interfere with either the 130307beb165SIoana Ciornei * buffer pool depletion trigger for pause frames or with the group 130407beb165SIoana Ciornei * congestion trigger for PFC frames) 130507beb165SIoana Ciornei */ 13062c8d1c8dSIoana Radulescu td.enable = !tx_pause; 130707beb165SIoana Ciornei if (priv->rx_fqtd_enabled == td.enable) 130807beb165SIoana Ciornei goto set_cgtd; 13098eb3cef8SIoana Radulescu 13102c8d1c8dSIoana Radulescu td.threshold = DPAA2_ETH_FQ_TAILDROP_THRESH; 13112c8d1c8dSIoana Radulescu td.units = DPNI_CONGESTION_UNIT_BYTES; 13128eb3cef8SIoana Radulescu 13138eb3cef8SIoana Radulescu for (i = 0; i < priv->num_fqs; i++) { 1314685e39eaSIoana Radulescu fq = &priv->fq[i]; 1315685e39eaSIoana Radulescu if (fq->type != DPAA2_RX_FQ) 13168eb3cef8SIoana Radulescu continue; 13178eb3cef8SIoana Radulescu err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token, 1318685e39eaSIoana Radulescu DPNI_CP_QUEUE, DPNI_QUEUE_RX, 1319685e39eaSIoana Radulescu fq->tc, fq->flowid, &td); 13208eb3cef8SIoana Radulescu if (err) { 13218eb3cef8SIoana Radulescu netdev_err(priv->net_dev, 13222c8d1c8dSIoana Radulescu "dpni_set_taildrop(FQ) failed\n"); 13232c8d1c8dSIoana Radulescu return; 13248eb3cef8SIoana Radulescu } 13258eb3cef8SIoana Radulescu } 13268eb3cef8SIoana Radulescu 132707beb165SIoana Ciornei priv->rx_fqtd_enabled = td.enable; 132807beb165SIoana Ciornei 132907beb165SIoana Ciornei set_cgtd: 13302c8d1c8dSIoana Radulescu /* Congestion group taildrop: threshold is in frames, per group 13312c8d1c8dSIoana Radulescu * of FQs belonging to the same traffic class 
133207beb165SIoana Ciornei * Enabled if general Tx pause disabled or if PFCs are enabled 133307beb165SIoana Ciornei * (congestion group threhsold for PFC generation is lower than the 133407beb165SIoana Ciornei * CG taildrop threshold, so it won't interfere with it; we also 133507beb165SIoana Ciornei * want frames in non-PFC enabled traffic classes to be kept in check) 13362c8d1c8dSIoana Radulescu */ 133707beb165SIoana Ciornei td.enable = !tx_pause || (tx_pause && pfc); 133807beb165SIoana Ciornei if (priv->rx_cgtd_enabled == td.enable) 133907beb165SIoana Ciornei return; 134007beb165SIoana Ciornei 13412c8d1c8dSIoana Radulescu td.threshold = DPAA2_ETH_CG_TAILDROP_THRESH(priv); 13422c8d1c8dSIoana Radulescu td.units = DPNI_CONGESTION_UNIT_FRAMES; 13432c8d1c8dSIoana Radulescu for (i = 0; i < dpaa2_eth_tc_count(priv); i++) { 13442c8d1c8dSIoana Radulescu err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token, 13452c8d1c8dSIoana Radulescu DPNI_CP_GROUP, DPNI_QUEUE_RX, 13462c8d1c8dSIoana Radulescu i, 0, &td); 13472c8d1c8dSIoana Radulescu if (err) { 13482c8d1c8dSIoana Radulescu netdev_err(priv->net_dev, 13492c8d1c8dSIoana Radulescu "dpni_set_taildrop(CG) failed\n"); 13502c8d1c8dSIoana Radulescu return; 13512c8d1c8dSIoana Radulescu } 13522c8d1c8dSIoana Radulescu } 13532c8d1c8dSIoana Radulescu 135407beb165SIoana Ciornei priv->rx_cgtd_enabled = td.enable; 13558eb3cef8SIoana Radulescu } 13568eb3cef8SIoana Radulescu 135734ff6846SIoana Radulescu static int link_state_update(struct dpaa2_eth_priv *priv) 135834ff6846SIoana Radulescu { 135985b7a342SIoana Ciornei struct dpni_link_state state = {0}; 13608eb3cef8SIoana Radulescu bool tx_pause; 136134ff6846SIoana Radulescu int err; 136234ff6846SIoana Radulescu 136334ff6846SIoana Radulescu err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state); 136434ff6846SIoana Radulescu if (unlikely(err)) { 136534ff6846SIoana Radulescu netdev_err(priv->net_dev, 136634ff6846SIoana Radulescu "dpni_get_link_state() failed\n"); 136734ff6846SIoana 
Radulescu return err; 136834ff6846SIoana Radulescu } 136934ff6846SIoana Radulescu 13708eb3cef8SIoana Radulescu /* If Tx pause frame settings have changed, we need to update 13718eb3cef8SIoana Radulescu * Rx FQ taildrop configuration as well. We configure taildrop 13728eb3cef8SIoana Radulescu * only when pause frame generation is disabled. 13738eb3cef8SIoana Radulescu */ 1374ad054f26SIoana Radulescu tx_pause = dpaa2_eth_tx_pause_enabled(state.options); 137507beb165SIoana Ciornei dpaa2_eth_set_rx_taildrop(priv, tx_pause, priv->pfc_enabled); 13768eb3cef8SIoana Radulescu 137771947923SIoana Ciornei /* When we manage the MAC/PHY using phylink there is no need 137871947923SIoana Ciornei * to manually update the netif_carrier. 137971947923SIoana Ciornei */ 138071947923SIoana Ciornei if (priv->mac) 138171947923SIoana Ciornei goto out; 138271947923SIoana Ciornei 138334ff6846SIoana Radulescu /* Chech link state; speed / duplex changes are not treated yet */ 138434ff6846SIoana Radulescu if (priv->link_state.up == state.up) 1385cce62943SIoana Radulescu goto out; 138634ff6846SIoana Radulescu 138734ff6846SIoana Radulescu if (state.up) { 138834ff6846SIoana Radulescu netif_carrier_on(priv->net_dev); 138934ff6846SIoana Radulescu netif_tx_start_all_queues(priv->net_dev); 139034ff6846SIoana Radulescu } else { 139134ff6846SIoana Radulescu netif_tx_stop_all_queues(priv->net_dev); 139234ff6846SIoana Radulescu netif_carrier_off(priv->net_dev); 139334ff6846SIoana Radulescu } 139434ff6846SIoana Radulescu 139534ff6846SIoana Radulescu netdev_info(priv->net_dev, "Link Event: state %s\n", 139634ff6846SIoana Radulescu state.up ? 
"up" : "down"); 139734ff6846SIoana Radulescu 1398cce62943SIoana Radulescu out: 1399cce62943SIoana Radulescu priv->link_state = state; 1400cce62943SIoana Radulescu 140134ff6846SIoana Radulescu return 0; 140234ff6846SIoana Radulescu } 140334ff6846SIoana Radulescu 140434ff6846SIoana Radulescu static int dpaa2_eth_open(struct net_device *net_dev) 140534ff6846SIoana Radulescu { 140634ff6846SIoana Radulescu struct dpaa2_eth_priv *priv = netdev_priv(net_dev); 140734ff6846SIoana Radulescu int err; 140834ff6846SIoana Radulescu 140934ff6846SIoana Radulescu err = seed_pool(priv, priv->bpid); 141034ff6846SIoana Radulescu if (err) { 141134ff6846SIoana Radulescu /* Not much to do; the buffer pool, though not filled up, 141234ff6846SIoana Radulescu * may still contain some buffers which would enable us 141334ff6846SIoana Radulescu * to limp on. 141434ff6846SIoana Radulescu */ 141534ff6846SIoana Radulescu netdev_err(net_dev, "Buffer seeding failed for DPBP %d (bpid=%d)\n", 141634ff6846SIoana Radulescu priv->dpbp_dev->obj_desc.id, priv->bpid); 141734ff6846SIoana Radulescu } 141834ff6846SIoana Radulescu 141971947923SIoana Ciornei if (!priv->mac) { 142071947923SIoana Ciornei /* We'll only start the txqs when the link is actually ready; 142171947923SIoana Ciornei * make sure we don't race against the link up notification, 142271947923SIoana Ciornei * which may come immediately after dpni_enable(); 142334ff6846SIoana Radulescu */ 142434ff6846SIoana Radulescu netif_tx_stop_all_queues(net_dev); 142571947923SIoana Ciornei 142671947923SIoana Ciornei /* Also, explicitly set carrier off, otherwise 142771947923SIoana Ciornei * netif_carrier_ok() will return true and cause 'ip link show' 142871947923SIoana Ciornei * to report the LOWER_UP flag, even though the link 142971947923SIoana Ciornei * notification wasn't even received. 
143034ff6846SIoana Radulescu */ 143134ff6846SIoana Radulescu netif_carrier_off(net_dev); 143271947923SIoana Ciornei } 143371947923SIoana Ciornei enable_ch_napi(priv); 143434ff6846SIoana Radulescu 143534ff6846SIoana Radulescu err = dpni_enable(priv->mc_io, 0, priv->mc_token); 143634ff6846SIoana Radulescu if (err < 0) { 143734ff6846SIoana Radulescu netdev_err(net_dev, "dpni_enable() failed\n"); 143834ff6846SIoana Radulescu goto enable_err; 143934ff6846SIoana Radulescu } 144034ff6846SIoana Radulescu 144171947923SIoana Ciornei if (!priv->mac) { 144271947923SIoana Ciornei /* If the DPMAC object has already processed the link up 144371947923SIoana Ciornei * interrupt, we have to learn the link state ourselves. 144434ff6846SIoana Radulescu */ 144534ff6846SIoana Radulescu err = link_state_update(priv); 144634ff6846SIoana Radulescu if (err < 0) { 144734ff6846SIoana Radulescu netdev_err(net_dev, "Can't update link state\n"); 144834ff6846SIoana Radulescu goto link_state_err; 144934ff6846SIoana Radulescu } 145071947923SIoana Ciornei } else { 145171947923SIoana Ciornei phylink_start(priv->mac->phylink); 145271947923SIoana Ciornei } 145334ff6846SIoana Radulescu 145434ff6846SIoana Radulescu return 0; 145534ff6846SIoana Radulescu 145634ff6846SIoana Radulescu link_state_err: 145734ff6846SIoana Radulescu enable_err: 145834ff6846SIoana Radulescu disable_ch_napi(priv); 145934ff6846SIoana Radulescu drain_pool(priv); 146034ff6846SIoana Radulescu return err; 146134ff6846SIoana Radulescu } 146234ff6846SIoana Radulescu 146368d74315SIoana Ciocoi Radulescu /* Total number of in-flight frames on ingress queues */ 146468d74315SIoana Ciocoi Radulescu static u32 ingress_fq_count(struct dpaa2_eth_priv *priv) 146534ff6846SIoana Radulescu { 146668d74315SIoana Ciocoi Radulescu struct dpaa2_eth_fq *fq; 146768d74315SIoana Ciocoi Radulescu u32 fcnt = 0, bcnt = 0, total = 0; 146868d74315SIoana Ciocoi Radulescu int i, err; 146934ff6846SIoana Radulescu 147068d74315SIoana Ciocoi Radulescu for (i = 0; i < 
priv->num_fqs; i++) { 147168d74315SIoana Ciocoi Radulescu fq = &priv->fq[i]; 147268d74315SIoana Ciocoi Radulescu err = dpaa2_io_query_fq_count(NULL, fq->fqid, &fcnt, &bcnt); 147368d74315SIoana Ciocoi Radulescu if (err) { 147468d74315SIoana Ciocoi Radulescu netdev_warn(priv->net_dev, "query_fq_count failed"); 147568d74315SIoana Ciocoi Radulescu break; 147668d74315SIoana Ciocoi Radulescu } 147768d74315SIoana Ciocoi Radulescu total += fcnt; 147868d74315SIoana Ciocoi Radulescu } 147934ff6846SIoana Radulescu 148034ff6846SIoana Radulescu return total; 148134ff6846SIoana Radulescu } 148234ff6846SIoana Radulescu 148352b6a4ffSIoana Radulescu static void wait_for_ingress_fq_empty(struct dpaa2_eth_priv *priv) 148434ff6846SIoana Radulescu { 148568d74315SIoana Ciocoi Radulescu int retries = 10; 148668d74315SIoana Ciocoi Radulescu u32 pending; 148734ff6846SIoana Radulescu 148868d74315SIoana Ciocoi Radulescu do { 148968d74315SIoana Ciocoi Radulescu pending = ingress_fq_count(priv); 149068d74315SIoana Ciocoi Radulescu if (pending) 149168d74315SIoana Ciocoi Radulescu msleep(100); 149268d74315SIoana Ciocoi Radulescu } while (pending && --retries); 149334ff6846SIoana Radulescu } 149434ff6846SIoana Radulescu 149552b6a4ffSIoana Radulescu #define DPNI_TX_PENDING_VER_MAJOR 7 149652b6a4ffSIoana Radulescu #define DPNI_TX_PENDING_VER_MINOR 13 149752b6a4ffSIoana Radulescu static void wait_for_egress_fq_empty(struct dpaa2_eth_priv *priv) 149852b6a4ffSIoana Radulescu { 149952b6a4ffSIoana Radulescu union dpni_statistics stats; 150052b6a4ffSIoana Radulescu int retries = 10; 150152b6a4ffSIoana Radulescu int err; 150252b6a4ffSIoana Radulescu 150352b6a4ffSIoana Radulescu if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_TX_PENDING_VER_MAJOR, 150452b6a4ffSIoana Radulescu DPNI_TX_PENDING_VER_MINOR) < 0) 150552b6a4ffSIoana Radulescu goto out; 150652b6a4ffSIoana Radulescu 150752b6a4ffSIoana Radulescu do { 150852b6a4ffSIoana Radulescu err = dpni_get_statistics(priv->mc_io, 0, priv->mc_token, 6, 150952b6a4ffSIoana 
Radulescu &stats); 151052b6a4ffSIoana Radulescu if (err) 151152b6a4ffSIoana Radulescu goto out; 151252b6a4ffSIoana Radulescu if (stats.page_6.tx_pending_frames == 0) 151352b6a4ffSIoana Radulescu return; 151452b6a4ffSIoana Radulescu } while (--retries); 151552b6a4ffSIoana Radulescu 151652b6a4ffSIoana Radulescu out: 151752b6a4ffSIoana Radulescu msleep(500); 151852b6a4ffSIoana Radulescu } 151952b6a4ffSIoana Radulescu 152034ff6846SIoana Radulescu static int dpaa2_eth_stop(struct net_device *net_dev) 152134ff6846SIoana Radulescu { 152234ff6846SIoana Radulescu struct dpaa2_eth_priv *priv = netdev_priv(net_dev); 152385b7a342SIoana Ciornei int dpni_enabled = 0; 152434ff6846SIoana Radulescu int retries = 10; 152534ff6846SIoana Radulescu 152671947923SIoana Ciornei if (!priv->mac) { 152734ff6846SIoana Radulescu netif_tx_stop_all_queues(net_dev); 152834ff6846SIoana Radulescu netif_carrier_off(net_dev); 152971947923SIoana Ciornei } else { 153071947923SIoana Ciornei phylink_stop(priv->mac->phylink); 153171947923SIoana Ciornei } 153234ff6846SIoana Radulescu 153368d74315SIoana Ciocoi Radulescu /* On dpni_disable(), the MC firmware will: 153468d74315SIoana Ciocoi Radulescu * - stop MAC Rx and wait for all Rx frames to be enqueued to software 153568d74315SIoana Ciocoi Radulescu * - cut off WRIOP dequeues from egress FQs and wait until transmission 153668d74315SIoana Ciocoi Radulescu * of all in flight Tx frames is finished (and corresponding Tx conf 153768d74315SIoana Ciocoi Radulescu * frames are enqueued back to software) 153868d74315SIoana Ciocoi Radulescu * 153968d74315SIoana Ciocoi Radulescu * Before calling dpni_disable(), we wait for all Tx frames to arrive 154068d74315SIoana Ciocoi Radulescu * on WRIOP. After it finishes, wait until all remaining frames on Rx 154168d74315SIoana Ciocoi Radulescu * and Tx conf queues are consumed on NAPI poll. 
154234ff6846SIoana Radulescu */ 154352b6a4ffSIoana Radulescu wait_for_egress_fq_empty(priv); 154468d74315SIoana Ciocoi Radulescu 154534ff6846SIoana Radulescu do { 154634ff6846SIoana Radulescu dpni_disable(priv->mc_io, 0, priv->mc_token); 154734ff6846SIoana Radulescu dpni_is_enabled(priv->mc_io, 0, priv->mc_token, &dpni_enabled); 154834ff6846SIoana Radulescu if (dpni_enabled) 154934ff6846SIoana Radulescu /* Allow the hardware some slack */ 155034ff6846SIoana Radulescu msleep(100); 155134ff6846SIoana Radulescu } while (dpni_enabled && --retries); 155234ff6846SIoana Radulescu if (!retries) { 155334ff6846SIoana Radulescu netdev_warn(net_dev, "Retry count exceeded disabling DPNI\n"); 155434ff6846SIoana Radulescu /* Must go on and disable NAPI nonetheless, so we don't crash at 155534ff6846SIoana Radulescu * the next "ifconfig up" 155634ff6846SIoana Radulescu */ 155734ff6846SIoana Radulescu } 155834ff6846SIoana Radulescu 155952b6a4ffSIoana Radulescu wait_for_ingress_fq_empty(priv); 156034ff6846SIoana Radulescu disable_ch_napi(priv); 156134ff6846SIoana Radulescu 156234ff6846SIoana Radulescu /* Empty the buffer pool */ 156334ff6846SIoana Radulescu drain_pool(priv); 156434ff6846SIoana Radulescu 156534ff6846SIoana Radulescu return 0; 156634ff6846SIoana Radulescu } 156734ff6846SIoana Radulescu 156834ff6846SIoana Radulescu static int dpaa2_eth_set_addr(struct net_device *net_dev, void *addr) 156934ff6846SIoana Radulescu { 157034ff6846SIoana Radulescu struct dpaa2_eth_priv *priv = netdev_priv(net_dev); 157134ff6846SIoana Radulescu struct device *dev = net_dev->dev.parent; 157234ff6846SIoana Radulescu int err; 157334ff6846SIoana Radulescu 157434ff6846SIoana Radulescu err = eth_mac_addr(net_dev, addr); 157534ff6846SIoana Radulescu if (err < 0) { 157634ff6846SIoana Radulescu dev_err(dev, "eth_mac_addr() failed (%d)\n", err); 157734ff6846SIoana Radulescu return err; 157834ff6846SIoana Radulescu } 157934ff6846SIoana Radulescu 158034ff6846SIoana Radulescu err = 
dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token, 158134ff6846SIoana Radulescu net_dev->dev_addr); 158234ff6846SIoana Radulescu if (err) { 158334ff6846SIoana Radulescu dev_err(dev, "dpni_set_primary_mac_addr() failed (%d)\n", err); 158434ff6846SIoana Radulescu return err; 158534ff6846SIoana Radulescu } 158634ff6846SIoana Radulescu 158734ff6846SIoana Radulescu return 0; 158834ff6846SIoana Radulescu } 158934ff6846SIoana Radulescu 159034ff6846SIoana Radulescu /** Fill in counters maintained by the GPP driver. These may be different from 159134ff6846SIoana Radulescu * the hardware counters obtained by ethtool. 159234ff6846SIoana Radulescu */ 159334ff6846SIoana Radulescu static void dpaa2_eth_get_stats(struct net_device *net_dev, 159434ff6846SIoana Radulescu struct rtnl_link_stats64 *stats) 159534ff6846SIoana Radulescu { 159634ff6846SIoana Radulescu struct dpaa2_eth_priv *priv = netdev_priv(net_dev); 159734ff6846SIoana Radulescu struct rtnl_link_stats64 *percpu_stats; 159834ff6846SIoana Radulescu u64 *cpustats; 159934ff6846SIoana Radulescu u64 *netstats = (u64 *)stats; 160034ff6846SIoana Radulescu int i, j; 160134ff6846SIoana Radulescu int num = sizeof(struct rtnl_link_stats64) / sizeof(u64); 160234ff6846SIoana Radulescu 160334ff6846SIoana Radulescu for_each_possible_cpu(i) { 160434ff6846SIoana Radulescu percpu_stats = per_cpu_ptr(priv->percpu_stats, i); 160534ff6846SIoana Radulescu cpustats = (u64 *)percpu_stats; 160634ff6846SIoana Radulescu for (j = 0; j < num; j++) 160734ff6846SIoana Radulescu netstats[j] += cpustats[j]; 160834ff6846SIoana Radulescu } 160934ff6846SIoana Radulescu } 161034ff6846SIoana Radulescu 161134ff6846SIoana Radulescu /* Copy mac unicast addresses from @net_dev to @priv. 161234ff6846SIoana Radulescu * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable. 
161334ff6846SIoana Radulescu */ 161434ff6846SIoana Radulescu static void add_uc_hw_addr(const struct net_device *net_dev, 161534ff6846SIoana Radulescu struct dpaa2_eth_priv *priv) 161634ff6846SIoana Radulescu { 161734ff6846SIoana Radulescu struct netdev_hw_addr *ha; 161834ff6846SIoana Radulescu int err; 161934ff6846SIoana Radulescu 162034ff6846SIoana Radulescu netdev_for_each_uc_addr(ha, net_dev) { 162134ff6846SIoana Radulescu err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token, 162234ff6846SIoana Radulescu ha->addr); 162334ff6846SIoana Radulescu if (err) 162434ff6846SIoana Radulescu netdev_warn(priv->net_dev, 162534ff6846SIoana Radulescu "Could not add ucast MAC %pM to the filtering table (err %d)\n", 162634ff6846SIoana Radulescu ha->addr, err); 162734ff6846SIoana Radulescu } 162834ff6846SIoana Radulescu } 162934ff6846SIoana Radulescu 163034ff6846SIoana Radulescu /* Copy mac multicast addresses from @net_dev to @priv 163134ff6846SIoana Radulescu * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable. 
163234ff6846SIoana Radulescu */ 163334ff6846SIoana Radulescu static void add_mc_hw_addr(const struct net_device *net_dev, 163434ff6846SIoana Radulescu struct dpaa2_eth_priv *priv) 163534ff6846SIoana Radulescu { 163634ff6846SIoana Radulescu struct netdev_hw_addr *ha; 163734ff6846SIoana Radulescu int err; 163834ff6846SIoana Radulescu 163934ff6846SIoana Radulescu netdev_for_each_mc_addr(ha, net_dev) { 164034ff6846SIoana Radulescu err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token, 164134ff6846SIoana Radulescu ha->addr); 164234ff6846SIoana Radulescu if (err) 164334ff6846SIoana Radulescu netdev_warn(priv->net_dev, 164434ff6846SIoana Radulescu "Could not add mcast MAC %pM to the filtering table (err %d)\n", 164534ff6846SIoana Radulescu ha->addr, err); 164634ff6846SIoana Radulescu } 164734ff6846SIoana Radulescu } 164834ff6846SIoana Radulescu 164934ff6846SIoana Radulescu static void dpaa2_eth_set_rx_mode(struct net_device *net_dev) 165034ff6846SIoana Radulescu { 165134ff6846SIoana Radulescu struct dpaa2_eth_priv *priv = netdev_priv(net_dev); 165234ff6846SIoana Radulescu int uc_count = netdev_uc_count(net_dev); 165334ff6846SIoana Radulescu int mc_count = netdev_mc_count(net_dev); 165434ff6846SIoana Radulescu u8 max_mac = priv->dpni_attrs.mac_filter_entries; 165534ff6846SIoana Radulescu u32 options = priv->dpni_attrs.options; 165634ff6846SIoana Radulescu u16 mc_token = priv->mc_token; 165734ff6846SIoana Radulescu struct fsl_mc_io *mc_io = priv->mc_io; 165834ff6846SIoana Radulescu int err; 165934ff6846SIoana Radulescu 166034ff6846SIoana Radulescu /* Basic sanity checks; these probably indicate a misconfiguration */ 166134ff6846SIoana Radulescu if (options & DPNI_OPT_NO_MAC_FILTER && max_mac != 0) 166234ff6846SIoana Radulescu netdev_info(net_dev, 166334ff6846SIoana Radulescu "mac_filter_entries=%d, DPNI_OPT_NO_MAC_FILTER option must be disabled\n", 166434ff6846SIoana Radulescu max_mac); 166534ff6846SIoana Radulescu 166634ff6846SIoana Radulescu /* Force promiscuous if the 
uc or mc counts exceed our capabilities. */ 166734ff6846SIoana Radulescu if (uc_count > max_mac) { 166834ff6846SIoana Radulescu netdev_info(net_dev, 166934ff6846SIoana Radulescu "Unicast addr count reached %d, max allowed is %d; forcing promisc\n", 167034ff6846SIoana Radulescu uc_count, max_mac); 167134ff6846SIoana Radulescu goto force_promisc; 167234ff6846SIoana Radulescu } 167334ff6846SIoana Radulescu if (mc_count + uc_count > max_mac) { 167434ff6846SIoana Radulescu netdev_info(net_dev, 167534ff6846SIoana Radulescu "Unicast + multicast addr count reached %d, max allowed is %d; forcing promisc\n", 167634ff6846SIoana Radulescu uc_count + mc_count, max_mac); 167734ff6846SIoana Radulescu goto force_mc_promisc; 167834ff6846SIoana Radulescu } 167934ff6846SIoana Radulescu 168034ff6846SIoana Radulescu /* Adjust promisc settings due to flag combinations */ 168134ff6846SIoana Radulescu if (net_dev->flags & IFF_PROMISC) 168234ff6846SIoana Radulescu goto force_promisc; 168334ff6846SIoana Radulescu if (net_dev->flags & IFF_ALLMULTI) { 168434ff6846SIoana Radulescu /* First, rebuild unicast filtering table. This should be done 168534ff6846SIoana Radulescu * in promisc mode, in order to avoid frame loss while we 168634ff6846SIoana Radulescu * progressively add entries to the table. 168734ff6846SIoana Radulescu * We don't know whether we had been in promisc already, and 168834ff6846SIoana Radulescu * making an MC call to find out is expensive; so set uc promisc 168934ff6846SIoana Radulescu * nonetheless. 169034ff6846SIoana Radulescu */ 169134ff6846SIoana Radulescu err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1); 169234ff6846SIoana Radulescu if (err) 169334ff6846SIoana Radulescu netdev_warn(net_dev, "Can't set uc promisc\n"); 169434ff6846SIoana Radulescu 169534ff6846SIoana Radulescu /* Actual uc table reconstruction. 
*/ 169634ff6846SIoana Radulescu err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 0); 169734ff6846SIoana Radulescu if (err) 169834ff6846SIoana Radulescu netdev_warn(net_dev, "Can't clear uc filters\n"); 169934ff6846SIoana Radulescu add_uc_hw_addr(net_dev, priv); 170034ff6846SIoana Radulescu 170134ff6846SIoana Radulescu /* Finally, clear uc promisc and set mc promisc as requested. */ 170234ff6846SIoana Radulescu err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0); 170334ff6846SIoana Radulescu if (err) 170434ff6846SIoana Radulescu netdev_warn(net_dev, "Can't clear uc promisc\n"); 170534ff6846SIoana Radulescu goto force_mc_promisc; 170634ff6846SIoana Radulescu } 170734ff6846SIoana Radulescu 170834ff6846SIoana Radulescu /* Neither unicast, nor multicast promisc will be on... eventually. 170934ff6846SIoana Radulescu * For now, rebuild mac filtering tables while forcing both of them on. 171034ff6846SIoana Radulescu */ 171134ff6846SIoana Radulescu err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1); 171234ff6846SIoana Radulescu if (err) 171334ff6846SIoana Radulescu netdev_warn(net_dev, "Can't set uc promisc (%d)\n", err); 171434ff6846SIoana Radulescu err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1); 171534ff6846SIoana Radulescu if (err) 171634ff6846SIoana Radulescu netdev_warn(net_dev, "Can't set mc promisc (%d)\n", err); 171734ff6846SIoana Radulescu 171834ff6846SIoana Radulescu /* Actual mac filtering tables reconstruction */ 171934ff6846SIoana Radulescu err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 1); 172034ff6846SIoana Radulescu if (err) 172134ff6846SIoana Radulescu netdev_warn(net_dev, "Can't clear mac filters\n"); 172234ff6846SIoana Radulescu add_mc_hw_addr(net_dev, priv); 172334ff6846SIoana Radulescu add_uc_hw_addr(net_dev, priv); 172434ff6846SIoana Radulescu 172534ff6846SIoana Radulescu /* Now we can clear both ucast and mcast promisc, without risking 172634ff6846SIoana Radulescu * to drop legitimate frames anymore. 
172734ff6846SIoana Radulescu */ 172834ff6846SIoana Radulescu err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0); 172934ff6846SIoana Radulescu if (err) 173034ff6846SIoana Radulescu netdev_warn(net_dev, "Can't clear ucast promisc\n"); 173134ff6846SIoana Radulescu err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 0); 173234ff6846SIoana Radulescu if (err) 173334ff6846SIoana Radulescu netdev_warn(net_dev, "Can't clear mcast promisc\n"); 173434ff6846SIoana Radulescu 173534ff6846SIoana Radulescu return; 173634ff6846SIoana Radulescu 173734ff6846SIoana Radulescu force_promisc: 173834ff6846SIoana Radulescu err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1); 173934ff6846SIoana Radulescu if (err) 174034ff6846SIoana Radulescu netdev_warn(net_dev, "Can't set ucast promisc\n"); 174134ff6846SIoana Radulescu force_mc_promisc: 174234ff6846SIoana Radulescu err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1); 174334ff6846SIoana Radulescu if (err) 174434ff6846SIoana Radulescu netdev_warn(net_dev, "Can't set mcast promisc\n"); 174534ff6846SIoana Radulescu } 174634ff6846SIoana Radulescu 174734ff6846SIoana Radulescu static int dpaa2_eth_set_features(struct net_device *net_dev, 174834ff6846SIoana Radulescu netdev_features_t features) 174934ff6846SIoana Radulescu { 175034ff6846SIoana Radulescu struct dpaa2_eth_priv *priv = netdev_priv(net_dev); 175134ff6846SIoana Radulescu netdev_features_t changed = features ^ net_dev->features; 175234ff6846SIoana Radulescu bool enable; 175334ff6846SIoana Radulescu int err; 175434ff6846SIoana Radulescu 175534ff6846SIoana Radulescu if (changed & NETIF_F_RXCSUM) { 175634ff6846SIoana Radulescu enable = !!(features & NETIF_F_RXCSUM); 175734ff6846SIoana Radulescu err = set_rx_csum(priv, enable); 175834ff6846SIoana Radulescu if (err) 175934ff6846SIoana Radulescu return err; 176034ff6846SIoana Radulescu } 176134ff6846SIoana Radulescu 176234ff6846SIoana Radulescu if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) { 176334ff6846SIoana Radulescu 
enable = !!(features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)); 176434ff6846SIoana Radulescu err = set_tx_csum(priv, enable); 176534ff6846SIoana Radulescu if (err) 176634ff6846SIoana Radulescu return err; 176734ff6846SIoana Radulescu } 176834ff6846SIoana Radulescu 176934ff6846SIoana Radulescu return 0; 177034ff6846SIoana Radulescu } 177134ff6846SIoana Radulescu 177234ff6846SIoana Radulescu static int dpaa2_eth_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 177334ff6846SIoana Radulescu { 177434ff6846SIoana Radulescu struct dpaa2_eth_priv *priv = netdev_priv(dev); 177534ff6846SIoana Radulescu struct hwtstamp_config config; 177634ff6846SIoana Radulescu 177734ff6846SIoana Radulescu if (copy_from_user(&config, rq->ifr_data, sizeof(config))) 177834ff6846SIoana Radulescu return -EFAULT; 177934ff6846SIoana Radulescu 178034ff6846SIoana Radulescu switch (config.tx_type) { 178134ff6846SIoana Radulescu case HWTSTAMP_TX_OFF: 178234ff6846SIoana Radulescu priv->tx_tstamp = false; 178334ff6846SIoana Radulescu break; 178434ff6846SIoana Radulescu case HWTSTAMP_TX_ON: 178534ff6846SIoana Radulescu priv->tx_tstamp = true; 178634ff6846SIoana Radulescu break; 178734ff6846SIoana Radulescu default: 178834ff6846SIoana Radulescu return -ERANGE; 178934ff6846SIoana Radulescu } 179034ff6846SIoana Radulescu 179134ff6846SIoana Radulescu if (config.rx_filter == HWTSTAMP_FILTER_NONE) { 179234ff6846SIoana Radulescu priv->rx_tstamp = false; 179334ff6846SIoana Radulescu } else { 179434ff6846SIoana Radulescu priv->rx_tstamp = true; 179534ff6846SIoana Radulescu /* TS is set for all frame types, not only those requested */ 179634ff6846SIoana Radulescu config.rx_filter = HWTSTAMP_FILTER_ALL; 179734ff6846SIoana Radulescu } 179834ff6846SIoana Radulescu 179934ff6846SIoana Radulescu return copy_to_user(rq->ifr_data, &config, sizeof(config)) ? 
180034ff6846SIoana Radulescu -EFAULT : 0; 180134ff6846SIoana Radulescu } 180234ff6846SIoana Radulescu 180334ff6846SIoana Radulescu static int dpaa2_eth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 180434ff6846SIoana Radulescu { 18054a84182aSRussell King struct dpaa2_eth_priv *priv = netdev_priv(dev); 18064a84182aSRussell King 180734ff6846SIoana Radulescu if (cmd == SIOCSHWTSTAMP) 180834ff6846SIoana Radulescu return dpaa2_eth_ts_ioctl(dev, rq, cmd); 180934ff6846SIoana Radulescu 18104a84182aSRussell King if (priv->mac) 18114a84182aSRussell King return phylink_mii_ioctl(priv->mac->phylink, rq, cmd); 18124a84182aSRussell King 18134a84182aSRussell King return -EOPNOTSUPP; 181434ff6846SIoana Radulescu } 181534ff6846SIoana Radulescu 18167e273a8eSIoana Ciocoi Radulescu static bool xdp_mtu_valid(struct dpaa2_eth_priv *priv, int mtu) 18177e273a8eSIoana Ciocoi Radulescu { 18187e273a8eSIoana Ciocoi Radulescu int mfl, linear_mfl; 18197e273a8eSIoana Ciocoi Radulescu 18207e273a8eSIoana Ciocoi Radulescu mfl = DPAA2_ETH_L2_MAX_FRM(mtu); 1821efa6a7d0SIoana Ciornei linear_mfl = priv->rx_buf_size - DPAA2_ETH_RX_HWA_SIZE - 18227b1eea1aSIoana Ciocoi Radulescu dpaa2_eth_rx_head_room(priv) - XDP_PACKET_HEADROOM; 18237e273a8eSIoana Ciocoi Radulescu 18247e273a8eSIoana Ciocoi Radulescu if (mfl > linear_mfl) { 18257e273a8eSIoana Ciocoi Radulescu netdev_warn(priv->net_dev, "Maximum MTU for XDP is %d\n", 18267e273a8eSIoana Ciocoi Radulescu linear_mfl - VLAN_ETH_HLEN); 18277e273a8eSIoana Ciocoi Radulescu return false; 18287e273a8eSIoana Ciocoi Radulescu } 18297e273a8eSIoana Ciocoi Radulescu 18307e273a8eSIoana Ciocoi Radulescu return true; 18317e273a8eSIoana Ciocoi Radulescu } 18327e273a8eSIoana Ciocoi Radulescu 18337e273a8eSIoana Ciocoi Radulescu static int set_rx_mfl(struct dpaa2_eth_priv *priv, int mtu, bool has_xdp) 18347e273a8eSIoana Ciocoi Radulescu { 18357e273a8eSIoana Ciocoi Radulescu int mfl, err; 18367e273a8eSIoana Ciocoi Radulescu 18377e273a8eSIoana Ciocoi Radulescu /* We 
enforce a maximum Rx frame length based on MTU only if we have
	 * an XDP program attached (in order to avoid Rx S/G frames).
	 * Otherwise, we accept all incoming frames as long as they are not
	 * larger than maximum size supported in hardware
	 */
	if (has_xdp)
		mfl = DPAA2_ETH_L2_MAX_FRM(mtu);
	else
		mfl = DPAA2_ETH_MFL;

	err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token, mfl);
	if (err) {
		netdev_err(priv->net_dev, "dpni_set_max_frame_length failed\n");
		return err;
	}

	return 0;
}

/* ndo_change_mtu: without an XDP program the hardware already accepts
 * frames up to its maximum supported size, so only dev->mtu needs
 * updating; with XDP attached the new MTU must fit a single Rx buffer
 * and the max Rx frame length is reprogrammed accordingly.
 */
static int dpaa2_eth_change_mtu(struct net_device *dev, int new_mtu)
{
	struct dpaa2_eth_priv *priv = netdev_priv(dev);
	int err;

	if (!priv->xdp_prog)
		goto out;

	if (!xdp_mtu_valid(priv, new_mtu))
		return -EINVAL;

	err = set_rx_mfl(priv, new_mtu, true);
	if (err)
		return err;

out:
	dev->mtu = new_mtu;
	return 0;
}

/* Read-modify-write of the DPNI Rx buffer layout through MC firmware:
 * add (or drop) XDP_PACKET_HEADROOM worth of data headroom depending on
 * whether an XDP program is attached.
 */
static int update_rx_buffer_headroom(struct dpaa2_eth_priv *priv, bool has_xdp)
{
	struct dpni_buffer_layout buf_layout = {0};
	int err;

	err = dpni_get_buffer_layout(priv->mc_io, 0, priv->mc_token,
				     DPNI_QUEUE_RX, &buf_layout);
	if (err) {
		netdev_err(priv->net_dev, "dpni_get_buffer_layout failed\n");
		return err;
	}

	/* Reserve extra headroom for XDP header size changes */
	buf_layout.data_head_room = dpaa2_eth_rx_head_room(priv) +
				    (has_xdp ?
XDP_PACKET_HEADROOM : 0);
	buf_layout.options = DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM;
	err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
				     DPNI_QUEUE_RX, &buf_layout);
	if (err) {
		netdev_err(priv->net_dev, "dpni_set_buffer_layout failed\n");
		return err;
	}

	return 0;
}

/* Install (or, for prog == NULL, remove) an XDP program on the interface.
 * The interface is stopped while MC firmware is reconfigured, then the
 * program pointer is published to the per-channel contexts; each channel
 * holds its own reference (taken via bpf_prog_add() below), and the
 * previously installed program is released once per holder.
 */
static int setup_xdp(struct net_device *dev, struct bpf_prog *prog)
{
	struct dpaa2_eth_priv *priv = netdev_priv(dev);
	struct dpaa2_eth_channel *ch;
	struct bpf_prog *old;
	bool up, need_update;
	int i, err;

	if (prog && !xdp_mtu_valid(priv, dev->mtu))
		return -EINVAL;

	/* One reference per channel, dropped via bpf_prog_put() on the old
	 * program when it is replaced
	 */
	if (prog)
		bpf_prog_add(prog, priv->num_channels);

	up = netif_running(dev);
	need_update = (!!priv->xdp_prog != !!prog);

	if (up)
		dpaa2_eth_stop(dev);

	/* While in xdp mode, enforce a maximum Rx frame size based on MTU.
	 * Also, when switching between xdp/non-xdp modes we need to reconfigure
	 * our Rx buffer layout. Buffer pool was drained on dpaa2_eth_stop,
	 * so we are sure no old format buffers will be used from now on.
	 */
	if (need_update) {
		err = set_rx_mfl(priv, dev->mtu, !!prog);
		if (err)
			goto out_err;
		err = update_rx_buffer_headroom(priv, !!prog);
		if (err)
			goto out_err;
	}

	old = xchg(&priv->xdp_prog, prog);
	if (old)
		bpf_prog_put(old);

	for (i = 0; i < priv->num_channels; i++) {
		ch = priv->channel[i];
		old = xchg(&ch->xdp.prog, prog);
		if (old)
			bpf_prog_put(old);
	}

	if (up) {
		err = dpaa2_eth_open(dev);
		if (err)
			return err;
	}

	return 0;

out_err:
	/* Drop the references taken above; best-effort restart of the
	 * interface if it was up when we began
	 */
	if (prog)
		bpf_prog_sub(prog, priv->num_channels);
	if (up)
		dpaa2_eth_open(dev);

	return err;
}

/* ndo_bpf entry point: dispatch XDP setup/query commands */
static int dpaa2_eth_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
	struct dpaa2_eth_priv *priv = netdev_priv(dev);

	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return setup_xdp(dev, xdp->prog);
	case XDP_QUERY_PROG:
		xdp->prog_id = priv->xdp_prog ? priv->xdp_prog->aux->id : 0;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/* Build a Tx frame descriptor for an xdp_frame, reusing the frame's own
 * headroom for the hardware annotation and software annotation areas.
 */
static int dpaa2_eth_xdp_create_fd(struct net_device *net_dev,
				   struct xdp_frame *xdpf,
				   struct dpaa2_fd *fd)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	struct device *dev = net_dev->dev.parent;
	unsigned int needed_headroom;
	struct dpaa2_eth_swa *swa;
	void *buffer_start, *aligned_start;
	dma_addr_t addr;

	/* We require a minimum headroom to be able to transmit the frame.
	 * Otherwise return an error and let the original net_device handle it
	 */
	needed_headroom = dpaa2_eth_needed_headroom(priv, NULL);
	if (xdpf->headroom < needed_headroom)
		return -EINVAL;

	/* Setup the FD fields */
	memset(fd, 0, sizeof(*fd));

	/* Align FD address, if possible */
	buffer_start = xdpf->data - needed_headroom;
	aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN,
				  DPAA2_ETH_TX_BUF_ALIGN);
	if (aligned_start >= xdpf->data - xdpf->headroom)
		buffer_start = aligned_start;

	/* The software annotation is written into the frame's headroom,
	 * right at the start of the DMA-mapped region; Tx confirmation
	 * presumably reads it back to identify and free the buffer —
	 * confirm against the Tx conf path.
	 */
	swa = (struct dpaa2_eth_swa *)buffer_start;
	/* fill in necessary fields here */
	swa->type = DPAA2_ETH_SWA_XDP;
	swa->xdp.dma_size = xdpf->data + xdpf->len - buffer_start;
	swa->xdp.xdpf = xdpf;

	addr = dma_map_single(dev, buffer_start,
			      swa->xdp.dma_size,
			      DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(dev, addr)))
		return -ENOMEM;

	dpaa2_fd_set_addr(fd, addr);
	dpaa2_fd_set_offset(fd, xdpf->data - buffer_start);
	dpaa2_fd_set_len(fd, xdpf->len);
	dpaa2_fd_set_format(fd, dpaa2_fd_single);
	dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);

	return 0;
}

/* ndo_xdp_xmit: transmit a batch of XDP_REDIRECT'ed frames.
 * Builds one FD per frame in the per-queue scratch array, enqueues them
 * with a single dpaa2_eth_xdp_flush() call and returns the number the
 * hardware accepted; frames that were not enqueued are handed back to
 * the XDP memory allocator.
 */
static int dpaa2_eth_xdp_xmit(struct net_device *net_dev, int n,
			      struct xdp_frame **frames, u32 flags)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	struct dpaa2_eth_xdp_fds *xdp_redirect_fds;
	struct rtnl_link_stats64 *percpu_stats;
	struct dpaa2_eth_fq *fq;
	struct dpaa2_fd *fds;
	int enqueued, i, err;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	if (!netif_running(net_dev))
		return -ENETDOWN;

	/* NOTE(review): indexing priv->fq by smp_processor_id() assumes at
	 * least one Tx FQ per possible CPU — confirm against queue setup.
	 */
	fq = &priv->fq[smp_processor_id()];
	xdp_redirect_fds = &fq->xdp_redirect_fds;
	fds = xdp_redirect_fds->fds;

	percpu_stats = this_cpu_ptr(priv->percpu_stats);

	/* create a FD for each xdp_frame in the list received */
	for (i = 0; i < n; i++) {
		err = dpaa2_eth_xdp_create_fd(net_dev, frames[i], &fds[i]);
		if (err)
			break;
	}
	xdp_redirect_fds->num = i;

	/* enqueue all the frame descriptors */
	enqueued = dpaa2_eth_xdp_flush(priv, fq, xdp_redirect_fds);

	/* update statistics */
	percpu_stats->tx_packets += enqueued;
	for (i = 0; i < enqueued; i++)
		percpu_stats->tx_bytes += dpaa2_fd_get_len(&fds[i]);
	/* Frames the hardware did not accept go back to the allocator */
	for (i = enqueued; i < n; i++)
		xdp_return_frame_rx_napi(frames[i]);

	return enqueued;
}

/* Point every netdev Tx queue's XPS mask at the CPU affine to the
 * corresponding Tx conf FQ, so frames are sent from the same core that
 * will process their confirmations.
 */
static int update_xps(struct dpaa2_eth_priv *priv)
{
	struct net_device *net_dev = priv->net_dev;
	struct cpumask xps_mask;
	struct dpaa2_eth_fq *fq;
	int i, num_queues, netdev_queues;
	int err = 0;

	num_queues = dpaa2_eth_queue_count(priv);
	/* One set of Tx queues per traffic class (at least one) */
	netdev_queues = (net_dev->num_tc ? : 1) * num_queues;

	/* The first <num_queues> entries in priv->fq array are Tx/Tx conf
	 * queues, so only process those
	 */
	for (i = 0; i < netdev_queues; i++) {
		/* Queues of different TCs at the same offset share an FQ */
		fq = &priv->fq[i % num_queues];

		cpumask_clear(&xps_mask);
		cpumask_set_cpu(fq->target_cpu, &xps_mask);

		err = netif_set_xps_queue(net_dev, &xps_mask, i);
		if (err) {
			netdev_warn_once(net_dev, "Error setting XPS queue\n");
			break;
		}
	}

	return err;
}

/* ndo_setup_tc: mqprio offload. Maps each traffic class onto its own set
 * of <num_queues> hardware Tx queues; num_tc == 0 tears the mapping down.
 */
static int dpaa2_eth_setup_tc(struct net_device *net_dev,
			      enum tc_setup_type type, void *type_data)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	struct tc_mqprio_qopt *mqprio = type_data;
	u8 num_tc, num_queues;
	int i;

	if (type != TC_SETUP_QDISC_MQPRIO)
		return -EOPNOTSUPP;

	mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
	num_queues = dpaa2_eth_queue_count(priv);
	num_tc = mqprio->num_tc;

	if (num_tc == net_dev->num_tc)
		return 0;

	if (num_tc > dpaa2_eth_tc_count(priv)) {
		netdev_err(net_dev, "Max %d traffic classes supported\n",
			   dpaa2_eth_tc_count(priv));
		return -EOPNOTSUPP;
	}

	if (!num_tc) {
		netdev_reset_tc(net_dev);
		netif_set_real_num_tx_queues(net_dev, num_queues);
		goto out;
	}

	netdev_set_num_tc(net_dev, num_tc);
	netif_set_real_num_tx_queues(net_dev, num_tc * num_queues);

	for (i = 0; i < num_tc; i++)
		netdev_set_tc_queue(net_dev, i, num_queues, i * num_queues);

out:
	/* Refresh XPS for the new queue count */
	update_xps(priv);
2142ab1e6de2SIoana Radulescu return 0; 2143ab1e6de2SIoana Radulescu } 2144ab1e6de2SIoana Radulescu 214534ff6846SIoana Radulescu static const struct net_device_ops dpaa2_eth_ops = { 214634ff6846SIoana Radulescu .ndo_open = dpaa2_eth_open, 214734ff6846SIoana Radulescu .ndo_start_xmit = dpaa2_eth_tx, 214834ff6846SIoana Radulescu .ndo_stop = dpaa2_eth_stop, 214934ff6846SIoana Radulescu .ndo_set_mac_address = dpaa2_eth_set_addr, 215034ff6846SIoana Radulescu .ndo_get_stats64 = dpaa2_eth_get_stats, 215134ff6846SIoana Radulescu .ndo_set_rx_mode = dpaa2_eth_set_rx_mode, 215234ff6846SIoana Radulescu .ndo_set_features = dpaa2_eth_set_features, 215334ff6846SIoana Radulescu .ndo_do_ioctl = dpaa2_eth_ioctl, 21547e273a8eSIoana Ciocoi Radulescu .ndo_change_mtu = dpaa2_eth_change_mtu, 21557e273a8eSIoana Ciocoi Radulescu .ndo_bpf = dpaa2_eth_xdp, 2156d678be1dSIoana Radulescu .ndo_xdp_xmit = dpaa2_eth_xdp_xmit, 2157ab1e6de2SIoana Radulescu .ndo_setup_tc = dpaa2_eth_setup_tc, 215834ff6846SIoana Radulescu }; 215934ff6846SIoana Radulescu 216034ff6846SIoana Radulescu static void cdan_cb(struct dpaa2_io_notification_ctx *ctx) 216134ff6846SIoana Radulescu { 216234ff6846SIoana Radulescu struct dpaa2_eth_channel *ch; 216334ff6846SIoana Radulescu 216434ff6846SIoana Radulescu ch = container_of(ctx, struct dpaa2_eth_channel, nctx); 216534ff6846SIoana Radulescu 216634ff6846SIoana Radulescu /* Update NAPI statistics */ 216734ff6846SIoana Radulescu ch->stats.cdan++; 216834ff6846SIoana Radulescu 216934ff6846SIoana Radulescu napi_schedule_irqoff(&ch->napi); 217034ff6846SIoana Radulescu } 217134ff6846SIoana Radulescu 217234ff6846SIoana Radulescu /* Allocate and configure a DPCON object */ 217334ff6846SIoana Radulescu static struct fsl_mc_device *setup_dpcon(struct dpaa2_eth_priv *priv) 217434ff6846SIoana Radulescu { 217534ff6846SIoana Radulescu struct fsl_mc_device *dpcon; 217634ff6846SIoana Radulescu struct device *dev = priv->net_dev->dev.parent; 217734ff6846SIoana Radulescu int err; 
217834ff6846SIoana Radulescu 217934ff6846SIoana Radulescu err = fsl_mc_object_allocate(to_fsl_mc_device(dev), 218034ff6846SIoana Radulescu FSL_MC_POOL_DPCON, &dpcon); 218134ff6846SIoana Radulescu if (err) { 2182d7f5a9d8SIoana Ciornei if (err == -ENXIO) 2183d7f5a9d8SIoana Ciornei err = -EPROBE_DEFER; 2184d7f5a9d8SIoana Ciornei else 218534ff6846SIoana Radulescu dev_info(dev, "Not enough DPCONs, will go on as-is\n"); 2186d7f5a9d8SIoana Ciornei return ERR_PTR(err); 218734ff6846SIoana Radulescu } 218834ff6846SIoana Radulescu 218934ff6846SIoana Radulescu err = dpcon_open(priv->mc_io, 0, dpcon->obj_desc.id, &dpcon->mc_handle); 219034ff6846SIoana Radulescu if (err) { 219134ff6846SIoana Radulescu dev_err(dev, "dpcon_open() failed\n"); 219234ff6846SIoana Radulescu goto free; 219334ff6846SIoana Radulescu } 219434ff6846SIoana Radulescu 219534ff6846SIoana Radulescu err = dpcon_reset(priv->mc_io, 0, dpcon->mc_handle); 219634ff6846SIoana Radulescu if (err) { 219734ff6846SIoana Radulescu dev_err(dev, "dpcon_reset() failed\n"); 219834ff6846SIoana Radulescu goto close; 219934ff6846SIoana Radulescu } 220034ff6846SIoana Radulescu 220134ff6846SIoana Radulescu err = dpcon_enable(priv->mc_io, 0, dpcon->mc_handle); 220234ff6846SIoana Radulescu if (err) { 220334ff6846SIoana Radulescu dev_err(dev, "dpcon_enable() failed\n"); 220434ff6846SIoana Radulescu goto close; 220534ff6846SIoana Radulescu } 220634ff6846SIoana Radulescu 220734ff6846SIoana Radulescu return dpcon; 220834ff6846SIoana Radulescu 220934ff6846SIoana Radulescu close: 221034ff6846SIoana Radulescu dpcon_close(priv->mc_io, 0, dpcon->mc_handle); 221134ff6846SIoana Radulescu free: 221234ff6846SIoana Radulescu fsl_mc_object_free(dpcon); 221334ff6846SIoana Radulescu 221434ff6846SIoana Radulescu return NULL; 221534ff6846SIoana Radulescu } 221634ff6846SIoana Radulescu 221734ff6846SIoana Radulescu static void free_dpcon(struct dpaa2_eth_priv *priv, 221834ff6846SIoana Radulescu struct fsl_mc_device *dpcon) 221934ff6846SIoana Radulescu { 
	dpcon_disable(priv->mc_io, 0, dpcon->mc_handle);
	dpcon_close(priv->mc_io, 0, dpcon->mc_handle);
	fsl_mc_object_free(dpcon);
}

/* Allocate a software channel object and its backing DPCON, then cache
 * the DPCON attributes (id, QBMan channel id) in it.
 * Returns the channel, or ERR_PTR()/NULL on failure (NULL when
 * setup_dpcon() yielded NULL, since PTR_ERR_OR_ZERO(NULL) is 0 and
 * ERR_PTR(0) is NULL).
 */
static struct dpaa2_eth_channel *
alloc_channel(struct dpaa2_eth_priv *priv)
{
	struct dpaa2_eth_channel *channel;
	struct dpcon_attr attr;
	struct device *dev = priv->net_dev->dev.parent;
	int err;

	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
	if (!channel)
		return NULL;

	channel->dpcon = setup_dpcon(priv);
	if (IS_ERR_OR_NULL(channel->dpcon)) {
		err = PTR_ERR_OR_ZERO(channel->dpcon);
		goto err_setup;
	}

	err = dpcon_get_attributes(priv->mc_io, 0, channel->dpcon->mc_handle,
				   &attr);
	if (err) {
		dev_err(dev, "dpcon_get_attributes() failed\n");
		goto err_get_attr;
	}

	channel->dpcon_id = attr.id;
	channel->ch_id = attr.qbman_ch_id;
	channel->priv = priv;

	return channel;

err_get_attr:
	free_dpcon(priv, channel->dpcon);
err_setup:
	kfree(channel);
	return ERR_PTR(err);
}

/* Release a channel and its DPCON (reverse of alloc_channel) */
static void free_channel(struct dpaa2_eth_priv *priv,
			 struct dpaa2_eth_channel *channel)
{
	free_dpcon(priv, channel->dpcon);
	kfree(channel);
}

/* DPIO setup: allocate and configure QBMan channels, setup core affinity
 * and register data availability notifications
 */
static int setup_dpio(struct dpaa2_eth_priv *priv)
{
	struct dpaa2_io_notification_ctx *nctx;
	struct dpaa2_eth_channel *channel;
	struct dpcon_notification_cfg dpcon_notif_cfg;
	struct device *dev = priv->net_dev->dev.parent;
	int i, err;

	/* We want the ability to spread ingress traffic (RX, TX conf) to as
	 * many cores as possible, so we need one channel for each core
	 * (unless there's fewer queues than cores, in which case the extra
	 * channels would be wasted).
	 * Allocate one channel per core and register it to the core's
	 * affine DPIO. If not enough channels are available for all cores
	 * or if some cores don't have an affine DPIO, there will be no
	 * ingress frame processing on those cores.
	 */
	cpumask_clear(&priv->dpio_cpumask);
	for_each_online_cpu(i) {
		/* Try to allocate a channel */
		channel = alloc_channel(priv);
		if (IS_ERR_OR_NULL(channel)) {
			err = PTR_ERR_OR_ZERO(channel);
			if (err != -EPROBE_DEFER)
				dev_info(dev,
					 "No affine channel for cpu %d and above\n", i);
			goto err_alloc_ch;
		}

		priv->channel[priv->num_channels] = channel;

		/* Fill in the notification context that QBMan will use to
		 * signal data availability for this channel
		 */
		nctx = &channel->nctx;
		nctx->is_cdan = 1;
		nctx->cb = cdan_cb;
		nctx->id = channel->ch_id;
		nctx->desired_cpu = i;

		/* Register the new context */
		channel->dpio = dpaa2_io_service_select(i);
		err = dpaa2_io_service_register(channel->dpio, nctx, dev);
		if (err) {
			dev_dbg(dev, "No affine DPIO for cpu %d\n", i);
			/* If no affine DPIO for this core, there's probably
			 * none available for next cores either. Signal we want
			 * to retry later, in case the DPIO devices weren't
			 * probed yet.
			 */
			err = -EPROBE_DEFER;
			goto err_service_reg;
		}

		/* Register DPCON notification with MC */
		dpcon_notif_cfg.dpio_id = nctx->dpio_id;
		dpcon_notif_cfg.priority = 0;
		dpcon_notif_cfg.user_ctx = nctx->qman64;
		err = dpcon_set_notification(priv->mc_io, 0,
					     channel->dpcon->mc_handle,
					     &dpcon_notif_cfg);
		if (err) {
			dev_err(dev, "dpcon_set_notification failed()\n");
			goto err_set_cdan;
		}

		/* If we managed to allocate a channel and also found an affine
		 * DPIO for this core, add it to the final mask
		 */
		cpumask_set_cpu(i, &priv->dpio_cpumask);
		priv->num_channels++;

		/* Stop if we already have enough channels to accommodate all
		 * RX and TX conf queues
		 */
		if (priv->num_channels == priv->dpni_attrs.num_queues)
			break;
	}

	return 0;

err_set_cdan:
	dpaa2_io_service_deregister(channel->dpio, nctx, dev);
err_service_reg:
	free_channel(priv, channel);
err_alloc_ch:
	/* On probe deferral, undo everything; the unwind continues below */
	if (err == -EPROBE_DEFER) {
		for (i = 0; i < priv->num_channels; i++) {
			channel = priv->channel[i];
			nctx = &channel->nctx;
			dpaa2_io_service_deregister(channel->dpio, nctx, dev);
			free_channel(priv, channel);
		}
		priv->num_channels = 0;
		return err;
	}

	/* For any other error: go on with however many channels we got,
	 * unless we ended up with none at all
	 */
	if (cpumask_empty(&priv->dpio_cpumask)) {
		dev_err(dev, "No cpu with an affine DPIO/DPCON\n");
		return -ENODEV;
	}

	dev_info(dev, "Cores %*pbl available for processing ingress traffic\n",
		 cpumask_pr_args(&priv->dpio_cpumask));

	return 0;
}

/* Undo setup_dpio(): deregister notifications and free all channels */
static void free_dpio(struct dpaa2_eth_priv *priv)
{
	struct device *dev = priv->net_dev->dev.parent;
	struct dpaa2_eth_channel *ch;
	int i;

	/* deregister CDAN notifications and free channels */
	for (i = 0; i < priv->num_channels; i++) {
		ch = priv->channel[i];
		dpaa2_io_service_deregister(ch->dpio, &ch->nctx, dev);
		free_channel(priv, ch);
	}
}

/* Find the channel whose notification context was registered for @cpu;
 * falls back (with a warning) to the first channel if none matches.
 */
static struct dpaa2_eth_channel *get_affine_channel(struct dpaa2_eth_priv *priv,
						    int cpu)
{
	struct device *dev = priv->net_dev->dev.parent;
	int i;

	for (i = 0; i < priv->num_channels; i++)
		if (priv->channel[i]->nctx.desired_cpu == cpu)
			return priv->channel[i];

	/* We should never get here. Issue a warning and return
	 * the first channel, because it's still better than nothing
	 */
	dev_warn(dev, "No affine channel found for cpu %d\n", cpu);

	return priv->channel[0];
}

/* Spread Rx and Tx conf FQs round-robin over the CPUs that own a
 * channel/DPIO pair, and bind each FQ to its CPU's affine channel.
 */
static void set_fq_affinity(struct dpaa2_eth_priv *priv)
{
	struct device *dev = priv->net_dev->dev.parent;
	struct dpaa2_eth_fq *fq;
	int rx_cpu, txc_cpu;
	int i;

	/* For each FQ, pick one channel/CPU to deliver frames to.
	 * This may well change at runtime, either through irqbalance or
	 * through direct user intervention.
	 */
	rx_cpu = txc_cpu = cpumask_first(&priv->dpio_cpumask);

	for (i = 0; i < priv->num_fqs; i++) {
		fq = &priv->fq[i];
		switch (fq->type) {
		case DPAA2_RX_FQ:
			fq->target_cpu = rx_cpu;
			rx_cpu = cpumask_next(rx_cpu, &priv->dpio_cpumask);
			if (rx_cpu >= nr_cpu_ids)
				rx_cpu = cpumask_first(&priv->dpio_cpumask);
			break;
		case DPAA2_TX_CONF_FQ:
			fq->target_cpu = txc_cpu;
			txc_cpu = cpumask_next(txc_cpu, &priv->dpio_cpumask);
			if (txc_cpu >= nr_cpu_ids)
				txc_cpu = cpumask_first(&priv->dpio_cpumask);
			break;
		default:
			dev_err(dev, "Unknown FQ type: %d\n", fq->type);
		}
		fq->channel = get_affine_channel(priv, fq->target_cpu);
	}

	update_xps(priv);
}

/* Populate priv->fq: one Tx conf FQ per Tx flow first, then one Rx FQ
 * per (traffic class, flow) pair, and assign CPU/channel affinity.
 */
static void setup_fqs(struct dpaa2_eth_priv *priv)
{
	int i, j;

	/* We have one TxConf FQ per Tx flow.
	 * The number of Tx and Rx queues is the same.
	 * Tx queues come first in the fq array.
	 */
	for (i = 0; i < dpaa2_eth_queue_count(priv); i++) {
		priv->fq[priv->num_fqs].type = DPAA2_TX_CONF_FQ;
		priv->fq[priv->num_fqs].consume = dpaa2_eth_tx_conf;
		priv->fq[priv->num_fqs++].flowid = (u16)i;
	}

	for (j = 0; j < dpaa2_eth_tc_count(priv); j++) {
		for (i = 0; i < dpaa2_eth_queue_count(priv); i++) {
			priv->fq[priv->num_fqs].type = DPAA2_RX_FQ;
			priv->fq[priv->num_fqs].consume = dpaa2_eth_rx;
			priv->fq[priv->num_fqs].tc = (u8)j;
			priv->fq[priv->num_fqs++].flowid = (u16)i;
		}
	}

	/* For each FQ, decide on which core to process incoming frames */
	set_fq_affinity(priv);
}

/* Allocate and configure one buffer pool for each interface */
static int setup_dpbp(struct dpaa2_eth_priv *priv)
{
	int err;
	struct fsl_mc_device *dpbp_dev;
	struct device *dev = priv->net_dev->dev.parent;
	struct dpbp_attr dpbp_attrs;

	err = fsl_mc_object_allocate(to_fsl_mc_device(dev), FSL_MC_POOL_DPBP,
				     &dpbp_dev);
	if (err) {
		if (err == -ENXIO)
			err = -EPROBE_DEFER;
		else
			dev_err(dev, "DPBP device allocation failed\n");
		return err;
	}

	priv->dpbp_dev = dpbp_dev;

	err = dpbp_open(priv->mc_io, 0, priv->dpbp_dev->obj_desc.id,
			&dpbp_dev->mc_handle);
	if (err) {
		dev_err(dev, "dpbp_open() failed\n");
		goto err_open;
	}

	err = dpbp_reset(priv->mc_io, 0, dpbp_dev->mc_handle);
	if (err) {
		dev_err(dev, "dpbp_reset() failed\n");
		goto err_reset;
	}

	err = dpbp_enable(priv->mc_io, 0, dpbp_dev->mc_handle);
	if (err) {
		dev_err(dev, "dpbp_enable() failed\n");
		goto err_enable;
	}

	/* Cache the buffer pool id used when seeding/draining buffers */
	err = dpbp_get_attributes(priv->mc_io, 0, dpbp_dev->mc_handle,
				  &dpbp_attrs);
	if (err) {
		dev_err(dev, "dpbp_get_attributes() failed\n");
		goto err_get_attr;
	}
	priv->bpid = dpbp_attrs.bpid;

	return 0;

err_get_attr:
	dpbp_disable(priv->mc_io, 0, dpbp_dev->mc_handle);
err_enable:
err_reset:
	dpbp_close(priv->mc_io, 0, dpbp_dev->mc_handle);
err_open:
fsl_mc_object_free(dpbp_dev); 253034ff6846SIoana Radulescu 253134ff6846SIoana Radulescu return err; 253234ff6846SIoana Radulescu } 253334ff6846SIoana Radulescu 253434ff6846SIoana Radulescu static void free_dpbp(struct dpaa2_eth_priv *priv) 253534ff6846SIoana Radulescu { 253634ff6846SIoana Radulescu drain_pool(priv); 253734ff6846SIoana Radulescu dpbp_disable(priv->mc_io, 0, priv->dpbp_dev->mc_handle); 253834ff6846SIoana Radulescu dpbp_close(priv->mc_io, 0, priv->dpbp_dev->mc_handle); 253934ff6846SIoana Radulescu fsl_mc_object_free(priv->dpbp_dev); 254034ff6846SIoana Radulescu } 254134ff6846SIoana Radulescu 254234ff6846SIoana Radulescu static int set_buffer_layout(struct dpaa2_eth_priv *priv) 254334ff6846SIoana Radulescu { 254434ff6846SIoana Radulescu struct device *dev = priv->net_dev->dev.parent; 254534ff6846SIoana Radulescu struct dpni_buffer_layout buf_layout = {0}; 254627c87486SIoana Ciocoi Radulescu u16 rx_buf_align; 254734ff6846SIoana Radulescu int err; 254834ff6846SIoana Radulescu 254934ff6846SIoana Radulescu /* We need to check for WRIOP version 1.0.0, but depending on the MC 255034ff6846SIoana Radulescu * version, this number is not always provided correctly on rev1. 255134ff6846SIoana Radulescu * We need to check for both alternatives in this situation. 255234ff6846SIoana Radulescu */ 255334ff6846SIoana Radulescu if (priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(0, 0, 0) || 255434ff6846SIoana Radulescu priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(1, 0, 0)) 255527c87486SIoana Ciocoi Radulescu rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN_REV1; 255634ff6846SIoana Radulescu else 255727c87486SIoana Ciocoi Radulescu rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN; 255834ff6846SIoana Radulescu 2559efa6a7d0SIoana Ciornei /* We need to ensure that the buffer size seen by WRIOP is a multiple 2560efa6a7d0SIoana Ciornei * of 64 or 256 bytes depending on the WRIOP version. 
2561efa6a7d0SIoana Ciornei */ 2562efa6a7d0SIoana Ciornei priv->rx_buf_size = ALIGN_DOWN(DPAA2_ETH_RX_BUF_SIZE, rx_buf_align); 2563efa6a7d0SIoana Ciornei 256434ff6846SIoana Radulescu /* tx buffer */ 256534ff6846SIoana Radulescu buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE; 256634ff6846SIoana Radulescu buf_layout.pass_timestamp = true; 256734ff6846SIoana Radulescu buf_layout.options = DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE | 256834ff6846SIoana Radulescu DPNI_BUF_LAYOUT_OPT_TIMESTAMP; 256934ff6846SIoana Radulescu err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token, 257034ff6846SIoana Radulescu DPNI_QUEUE_TX, &buf_layout); 257134ff6846SIoana Radulescu if (err) { 257234ff6846SIoana Radulescu dev_err(dev, "dpni_set_buffer_layout(TX) failed\n"); 257334ff6846SIoana Radulescu return err; 257434ff6846SIoana Radulescu } 257534ff6846SIoana Radulescu 257634ff6846SIoana Radulescu /* tx-confirm buffer */ 257734ff6846SIoana Radulescu buf_layout.options = DPNI_BUF_LAYOUT_OPT_TIMESTAMP; 257834ff6846SIoana Radulescu err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token, 257934ff6846SIoana Radulescu DPNI_QUEUE_TX_CONFIRM, &buf_layout); 258034ff6846SIoana Radulescu if (err) { 258134ff6846SIoana Radulescu dev_err(dev, "dpni_set_buffer_layout(TX_CONF) failed\n"); 258234ff6846SIoana Radulescu return err; 258334ff6846SIoana Radulescu } 258434ff6846SIoana Radulescu 258534ff6846SIoana Radulescu /* Now that we've set our tx buffer layout, retrieve the minimum 258634ff6846SIoana Radulescu * required tx data offset. 
258734ff6846SIoana Radulescu */ 258834ff6846SIoana Radulescu err = dpni_get_tx_data_offset(priv->mc_io, 0, priv->mc_token, 258934ff6846SIoana Radulescu &priv->tx_data_offset); 259034ff6846SIoana Radulescu if (err) { 259134ff6846SIoana Radulescu dev_err(dev, "dpni_get_tx_data_offset() failed\n"); 259234ff6846SIoana Radulescu return err; 259334ff6846SIoana Radulescu } 259434ff6846SIoana Radulescu 259534ff6846SIoana Radulescu if ((priv->tx_data_offset % 64) != 0) 259634ff6846SIoana Radulescu dev_warn(dev, "Tx data offset (%d) not a multiple of 64B\n", 259734ff6846SIoana Radulescu priv->tx_data_offset); 259834ff6846SIoana Radulescu 259934ff6846SIoana Radulescu /* rx buffer */ 260034ff6846SIoana Radulescu buf_layout.pass_frame_status = true; 260134ff6846SIoana Radulescu buf_layout.pass_parser_result = true; 260227c87486SIoana Ciocoi Radulescu buf_layout.data_align = rx_buf_align; 260334ff6846SIoana Radulescu buf_layout.data_head_room = dpaa2_eth_rx_head_room(priv); 260434ff6846SIoana Radulescu buf_layout.private_data_size = 0; 260534ff6846SIoana Radulescu buf_layout.options = DPNI_BUF_LAYOUT_OPT_PARSER_RESULT | 260634ff6846SIoana Radulescu DPNI_BUF_LAYOUT_OPT_FRAME_STATUS | 260734ff6846SIoana Radulescu DPNI_BUF_LAYOUT_OPT_DATA_ALIGN | 260834ff6846SIoana Radulescu DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM | 260934ff6846SIoana Radulescu DPNI_BUF_LAYOUT_OPT_TIMESTAMP; 261034ff6846SIoana Radulescu err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token, 261134ff6846SIoana Radulescu DPNI_QUEUE_RX, &buf_layout); 261234ff6846SIoana Radulescu if (err) { 261334ff6846SIoana Radulescu dev_err(dev, "dpni_set_buffer_layout(RX) failed\n"); 261434ff6846SIoana Radulescu return err; 261534ff6846SIoana Radulescu } 261634ff6846SIoana Radulescu 261734ff6846SIoana Radulescu return 0; 261834ff6846SIoana Radulescu } 261934ff6846SIoana Radulescu 26201fa0f68cSIoana Ciocoi Radulescu #define DPNI_ENQUEUE_FQID_VER_MAJOR 7 26211fa0f68cSIoana Ciocoi Radulescu #define DPNI_ENQUEUE_FQID_VER_MINOR 9 
26221fa0f68cSIoana Ciocoi Radulescu 26231fa0f68cSIoana Ciocoi Radulescu static inline int dpaa2_eth_enqueue_qd(struct dpaa2_eth_priv *priv, 26241fa0f68cSIoana Ciocoi Radulescu struct dpaa2_eth_fq *fq, 262548c0481eSIoana Ciornei struct dpaa2_fd *fd, u8 prio, 26266ff80447SIoana Ciornei u32 num_frames __always_unused, 262748c0481eSIoana Ciornei int *frames_enqueued) 26281fa0f68cSIoana Ciocoi Radulescu { 262948c0481eSIoana Ciornei int err; 263048c0481eSIoana Ciornei 263148c0481eSIoana Ciornei err = dpaa2_io_service_enqueue_qd(fq->channel->dpio, 26321fa0f68cSIoana Ciocoi Radulescu priv->tx_qdid, prio, 26331fa0f68cSIoana Ciocoi Radulescu fq->tx_qdbin, fd); 263448c0481eSIoana Ciornei if (!err && frames_enqueued) 263548c0481eSIoana Ciornei *frames_enqueued = 1; 263648c0481eSIoana Ciornei return err; 26371fa0f68cSIoana Ciocoi Radulescu } 26381fa0f68cSIoana Ciocoi Radulescu 26396ff80447SIoana Ciornei static inline int dpaa2_eth_enqueue_fq_multiple(struct dpaa2_eth_priv *priv, 26401fa0f68cSIoana Ciocoi Radulescu struct dpaa2_eth_fq *fq, 26416ff80447SIoana Ciornei struct dpaa2_fd *fd, 26426ff80447SIoana Ciornei u8 prio, u32 num_frames, 264348c0481eSIoana Ciornei int *frames_enqueued) 26441fa0f68cSIoana Ciocoi Radulescu { 264548c0481eSIoana Ciornei int err; 264648c0481eSIoana Ciornei 26476ff80447SIoana Ciornei err = dpaa2_io_service_enqueue_multiple_fq(fq->channel->dpio, 26486ff80447SIoana Ciornei fq->tx_fqid[prio], 26496ff80447SIoana Ciornei fd, num_frames); 26506ff80447SIoana Ciornei 26516ff80447SIoana Ciornei if (err == 0) 26526ff80447SIoana Ciornei return -EBUSY; 26536ff80447SIoana Ciornei 26546ff80447SIoana Ciornei if (frames_enqueued) 26556ff80447SIoana Ciornei *frames_enqueued = err; 26566ff80447SIoana Ciornei return 0; 26571fa0f68cSIoana Ciocoi Radulescu } 26581fa0f68cSIoana Ciocoi Radulescu 26591fa0f68cSIoana Ciocoi Radulescu static void set_enqueue_mode(struct dpaa2_eth_priv *priv) 26601fa0f68cSIoana Ciocoi Radulescu { 26611fa0f68cSIoana Ciocoi Radulescu if 
(dpaa2_eth_cmp_dpni_ver(priv, DPNI_ENQUEUE_FQID_VER_MAJOR, 26621fa0f68cSIoana Ciocoi Radulescu DPNI_ENQUEUE_FQID_VER_MINOR) < 0) 26631fa0f68cSIoana Ciocoi Radulescu priv->enqueue = dpaa2_eth_enqueue_qd; 26641fa0f68cSIoana Ciocoi Radulescu else 26656ff80447SIoana Ciornei priv->enqueue = dpaa2_eth_enqueue_fq_multiple; 26661fa0f68cSIoana Ciocoi Radulescu } 26671fa0f68cSIoana Ciocoi Radulescu 26688eb3cef8SIoana Radulescu static int set_pause(struct dpaa2_eth_priv *priv) 26698eb3cef8SIoana Radulescu { 26708eb3cef8SIoana Radulescu struct device *dev = priv->net_dev->dev.parent; 26718eb3cef8SIoana Radulescu struct dpni_link_cfg link_cfg = {0}; 26728eb3cef8SIoana Radulescu int err; 26738eb3cef8SIoana Radulescu 26748eb3cef8SIoana Radulescu /* Get the default link options so we don't override other flags */ 26758eb3cef8SIoana Radulescu err = dpni_get_link_cfg(priv->mc_io, 0, priv->mc_token, &link_cfg); 26768eb3cef8SIoana Radulescu if (err) { 26778eb3cef8SIoana Radulescu dev_err(dev, "dpni_get_link_cfg() failed\n"); 26788eb3cef8SIoana Radulescu return err; 26798eb3cef8SIoana Radulescu } 26808eb3cef8SIoana Radulescu 26818eb3cef8SIoana Radulescu /* By default, enable both Rx and Tx pause frames */ 26828eb3cef8SIoana Radulescu link_cfg.options |= DPNI_LINK_OPT_PAUSE; 26838eb3cef8SIoana Radulescu link_cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE; 26848eb3cef8SIoana Radulescu err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &link_cfg); 26858eb3cef8SIoana Radulescu if (err) { 26868eb3cef8SIoana Radulescu dev_err(dev, "dpni_set_link_cfg() failed\n"); 26878eb3cef8SIoana Radulescu return err; 26888eb3cef8SIoana Radulescu } 26898eb3cef8SIoana Radulescu 26908eb3cef8SIoana Radulescu priv->link_state.options = link_cfg.options; 26918eb3cef8SIoana Radulescu 26928eb3cef8SIoana Radulescu return 0; 26938eb3cef8SIoana Radulescu } 26948eb3cef8SIoana Radulescu 2695a690af4fSIoana Radulescu static void update_tx_fqids(struct dpaa2_eth_priv *priv) 2696a690af4fSIoana Radulescu { 
2697a690af4fSIoana Radulescu struct dpni_queue_id qid = {0}; 2698a690af4fSIoana Radulescu struct dpaa2_eth_fq *fq; 2699a690af4fSIoana Radulescu struct dpni_queue queue; 2700a690af4fSIoana Radulescu int i, j, err; 2701a690af4fSIoana Radulescu 2702a690af4fSIoana Radulescu /* We only use Tx FQIDs for FQID-based enqueue, so check 2703a690af4fSIoana Radulescu * if DPNI version supports it before updating FQIDs 2704a690af4fSIoana Radulescu */ 2705a690af4fSIoana Radulescu if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_ENQUEUE_FQID_VER_MAJOR, 2706a690af4fSIoana Radulescu DPNI_ENQUEUE_FQID_VER_MINOR) < 0) 2707a690af4fSIoana Radulescu return; 2708a690af4fSIoana Radulescu 2709a690af4fSIoana Radulescu for (i = 0; i < priv->num_fqs; i++) { 2710a690af4fSIoana Radulescu fq = &priv->fq[i]; 2711a690af4fSIoana Radulescu if (fq->type != DPAA2_TX_CONF_FQ) 2712a690af4fSIoana Radulescu continue; 2713a690af4fSIoana Radulescu for (j = 0; j < dpaa2_eth_tc_count(priv); j++) { 2714a690af4fSIoana Radulescu err = dpni_get_queue(priv->mc_io, 0, priv->mc_token, 2715a690af4fSIoana Radulescu DPNI_QUEUE_TX, j, fq->flowid, 2716a690af4fSIoana Radulescu &queue, &qid); 2717a690af4fSIoana Radulescu if (err) 2718a690af4fSIoana Radulescu goto out_err; 2719a690af4fSIoana Radulescu 2720a690af4fSIoana Radulescu fq->tx_fqid[j] = qid.fqid; 2721a690af4fSIoana Radulescu if (fq->tx_fqid[j] == 0) 2722a690af4fSIoana Radulescu goto out_err; 2723a690af4fSIoana Radulescu } 2724a690af4fSIoana Radulescu } 2725a690af4fSIoana Radulescu 27266ff80447SIoana Ciornei priv->enqueue = dpaa2_eth_enqueue_fq_multiple; 2727a690af4fSIoana Radulescu 2728a690af4fSIoana Radulescu return; 2729a690af4fSIoana Radulescu 2730a690af4fSIoana Radulescu out_err: 2731a690af4fSIoana Radulescu netdev_info(priv->net_dev, 2732a690af4fSIoana Radulescu "Error reading Tx FQID, fallback to QDID-based enqueue\n"); 2733a690af4fSIoana Radulescu priv->enqueue = dpaa2_eth_enqueue_qd; 2734a690af4fSIoana Radulescu } 2735a690af4fSIoana Radulescu 27366aa90fe2SIoana 
Radulescu /* Configure ingress classification based on VLAN PCP */ 27376aa90fe2SIoana Radulescu static int set_vlan_qos(struct dpaa2_eth_priv *priv) 27386aa90fe2SIoana Radulescu { 27396aa90fe2SIoana Radulescu struct device *dev = priv->net_dev->dev.parent; 27406aa90fe2SIoana Radulescu struct dpkg_profile_cfg kg_cfg = {0}; 27416aa90fe2SIoana Radulescu struct dpni_qos_tbl_cfg qos_cfg = {0}; 27426aa90fe2SIoana Radulescu struct dpni_rule_cfg key_params; 27436aa90fe2SIoana Radulescu void *dma_mem, *key, *mask; 27446aa90fe2SIoana Radulescu u8 key_size = 2; /* VLAN TCI field */ 27456aa90fe2SIoana Radulescu int i, pcp, err; 27466aa90fe2SIoana Radulescu 27476aa90fe2SIoana Radulescu /* VLAN-based classification only makes sense if we have multiple 27486aa90fe2SIoana Radulescu * traffic classes. 27496aa90fe2SIoana Radulescu * Also, we need to extract just the 3-bit PCP field from the VLAN 27506aa90fe2SIoana Radulescu * header and we can only do that by using a mask 27516aa90fe2SIoana Radulescu */ 27526aa90fe2SIoana Radulescu if (dpaa2_eth_tc_count(priv) == 1 || !dpaa2_eth_fs_mask_enabled(priv)) { 27536aa90fe2SIoana Radulescu dev_dbg(dev, "VLAN-based QoS classification not supported\n"); 27546aa90fe2SIoana Radulescu return -EOPNOTSUPP; 27556aa90fe2SIoana Radulescu } 27566aa90fe2SIoana Radulescu 27576aa90fe2SIoana Radulescu dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL); 27586aa90fe2SIoana Radulescu if (!dma_mem) 27596aa90fe2SIoana Radulescu return -ENOMEM; 27606aa90fe2SIoana Radulescu 27616aa90fe2SIoana Radulescu kg_cfg.num_extracts = 1; 27626aa90fe2SIoana Radulescu kg_cfg.extracts[0].type = DPKG_EXTRACT_FROM_HDR; 27636aa90fe2SIoana Radulescu kg_cfg.extracts[0].extract.from_hdr.prot = NET_PROT_VLAN; 27646aa90fe2SIoana Radulescu kg_cfg.extracts[0].extract.from_hdr.type = DPKG_FULL_FIELD; 27656aa90fe2SIoana Radulescu kg_cfg.extracts[0].extract.from_hdr.field = NH_FLD_VLAN_TCI; 27666aa90fe2SIoana Radulescu 27676aa90fe2SIoana Radulescu err = dpni_prepare_key_cfg(&kg_cfg, 
dma_mem); 27686aa90fe2SIoana Radulescu if (err) { 27696aa90fe2SIoana Radulescu dev_err(dev, "dpni_prepare_key_cfg failed\n"); 27706aa90fe2SIoana Radulescu goto out_free_tbl; 27716aa90fe2SIoana Radulescu } 27726aa90fe2SIoana Radulescu 27736aa90fe2SIoana Radulescu /* set QoS table */ 27746aa90fe2SIoana Radulescu qos_cfg.default_tc = 0; 27756aa90fe2SIoana Radulescu qos_cfg.discard_on_miss = 0; 27766aa90fe2SIoana Radulescu qos_cfg.key_cfg_iova = dma_map_single(dev, dma_mem, 27776aa90fe2SIoana Radulescu DPAA2_CLASSIFIER_DMA_SIZE, 27786aa90fe2SIoana Radulescu DMA_TO_DEVICE); 27796aa90fe2SIoana Radulescu if (dma_mapping_error(dev, qos_cfg.key_cfg_iova)) { 27806aa90fe2SIoana Radulescu dev_err(dev, "QoS table DMA mapping failed\n"); 27816aa90fe2SIoana Radulescu err = -ENOMEM; 27826aa90fe2SIoana Radulescu goto out_free_tbl; 27836aa90fe2SIoana Radulescu } 27846aa90fe2SIoana Radulescu 27856aa90fe2SIoana Radulescu err = dpni_set_qos_table(priv->mc_io, 0, priv->mc_token, &qos_cfg); 27866aa90fe2SIoana Radulescu if (err) { 27876aa90fe2SIoana Radulescu dev_err(dev, "dpni_set_qos_table failed\n"); 27886aa90fe2SIoana Radulescu goto out_unmap_tbl; 27896aa90fe2SIoana Radulescu } 27906aa90fe2SIoana Radulescu 27916aa90fe2SIoana Radulescu /* Add QoS table entries */ 27926aa90fe2SIoana Radulescu key = kzalloc(key_size * 2, GFP_KERNEL); 27936aa90fe2SIoana Radulescu if (!key) { 27946aa90fe2SIoana Radulescu err = -ENOMEM; 27956aa90fe2SIoana Radulescu goto out_unmap_tbl; 27966aa90fe2SIoana Radulescu } 27976aa90fe2SIoana Radulescu mask = key + key_size; 27986aa90fe2SIoana Radulescu *(__be16 *)mask = cpu_to_be16(VLAN_PRIO_MASK); 27996aa90fe2SIoana Radulescu 28006aa90fe2SIoana Radulescu key_params.key_iova = dma_map_single(dev, key, key_size * 2, 28016aa90fe2SIoana Radulescu DMA_TO_DEVICE); 28026aa90fe2SIoana Radulescu if (dma_mapping_error(dev, key_params.key_iova)) { 28036aa90fe2SIoana Radulescu dev_err(dev, "Qos table entry DMA mapping failed\n"); 28046aa90fe2SIoana Radulescu err = -ENOMEM; 
28056aa90fe2SIoana Radulescu goto out_free_key; 28066aa90fe2SIoana Radulescu } 28076aa90fe2SIoana Radulescu 28086aa90fe2SIoana Radulescu key_params.mask_iova = key_params.key_iova + key_size; 28096aa90fe2SIoana Radulescu key_params.key_size = key_size; 28106aa90fe2SIoana Radulescu 28116aa90fe2SIoana Radulescu /* We add rules for PCP-based distribution starting with highest 28126aa90fe2SIoana Radulescu * priority (VLAN PCP = 7). If this DPNI doesn't have enough traffic 28136aa90fe2SIoana Radulescu * classes to accommodate all priority levels, the lowest ones end up 28146aa90fe2SIoana Radulescu * on TC 0 which was configured as default 28156aa90fe2SIoana Radulescu */ 28166aa90fe2SIoana Radulescu for (i = dpaa2_eth_tc_count(priv) - 1, pcp = 7; i >= 0; i--, pcp--) { 28176aa90fe2SIoana Radulescu *(__be16 *)key = cpu_to_be16(pcp << VLAN_PRIO_SHIFT); 28186aa90fe2SIoana Radulescu dma_sync_single_for_device(dev, key_params.key_iova, 28196aa90fe2SIoana Radulescu key_size * 2, DMA_TO_DEVICE); 28206aa90fe2SIoana Radulescu 28216aa90fe2SIoana Radulescu err = dpni_add_qos_entry(priv->mc_io, 0, priv->mc_token, 28226aa90fe2SIoana Radulescu &key_params, i, i); 28236aa90fe2SIoana Radulescu if (err) { 28246aa90fe2SIoana Radulescu dev_err(dev, "dpni_add_qos_entry failed\n"); 28256aa90fe2SIoana Radulescu dpni_clear_qos_table(priv->mc_io, 0, priv->mc_token); 28266aa90fe2SIoana Radulescu goto out_unmap_key; 28276aa90fe2SIoana Radulescu } 28286aa90fe2SIoana Radulescu } 28296aa90fe2SIoana Radulescu 28306aa90fe2SIoana Radulescu priv->vlan_cls_enabled = true; 28316aa90fe2SIoana Radulescu 28326aa90fe2SIoana Radulescu /* Table and key memory is not persistent, clean everything up after 28336aa90fe2SIoana Radulescu * configuration is finished 28346aa90fe2SIoana Radulescu */ 28356aa90fe2SIoana Radulescu out_unmap_key: 28366aa90fe2SIoana Radulescu dma_unmap_single(dev, key_params.key_iova, key_size * 2, DMA_TO_DEVICE); 28376aa90fe2SIoana Radulescu out_free_key: 28386aa90fe2SIoana Radulescu 
kfree(key); 28396aa90fe2SIoana Radulescu out_unmap_tbl: 28406aa90fe2SIoana Radulescu dma_unmap_single(dev, qos_cfg.key_cfg_iova, DPAA2_CLASSIFIER_DMA_SIZE, 28416aa90fe2SIoana Radulescu DMA_TO_DEVICE); 28426aa90fe2SIoana Radulescu out_free_tbl: 28436aa90fe2SIoana Radulescu kfree(dma_mem); 28446aa90fe2SIoana Radulescu 28456aa90fe2SIoana Radulescu return err; 28466aa90fe2SIoana Radulescu } 28476aa90fe2SIoana Radulescu 284834ff6846SIoana Radulescu /* Configure the DPNI object this interface is associated with */ 284934ff6846SIoana Radulescu static int setup_dpni(struct fsl_mc_device *ls_dev) 285034ff6846SIoana Radulescu { 285134ff6846SIoana Radulescu struct device *dev = &ls_dev->dev; 285234ff6846SIoana Radulescu struct dpaa2_eth_priv *priv; 285334ff6846SIoana Radulescu struct net_device *net_dev; 285434ff6846SIoana Radulescu int err; 285534ff6846SIoana Radulescu 285634ff6846SIoana Radulescu net_dev = dev_get_drvdata(dev); 285734ff6846SIoana Radulescu priv = netdev_priv(net_dev); 285834ff6846SIoana Radulescu 285934ff6846SIoana Radulescu /* get a handle for the DPNI object */ 286034ff6846SIoana Radulescu err = dpni_open(priv->mc_io, 0, ls_dev->obj_desc.id, &priv->mc_token); 286134ff6846SIoana Radulescu if (err) { 286234ff6846SIoana Radulescu dev_err(dev, "dpni_open() failed\n"); 286334ff6846SIoana Radulescu return err; 286434ff6846SIoana Radulescu } 286534ff6846SIoana Radulescu 286634ff6846SIoana Radulescu /* Check if we can work with this DPNI object */ 286734ff6846SIoana Radulescu err = dpni_get_api_version(priv->mc_io, 0, &priv->dpni_ver_major, 286834ff6846SIoana Radulescu &priv->dpni_ver_minor); 286934ff6846SIoana Radulescu if (err) { 287034ff6846SIoana Radulescu dev_err(dev, "dpni_get_api_version() failed\n"); 287134ff6846SIoana Radulescu goto close; 287234ff6846SIoana Radulescu } 287334ff6846SIoana Radulescu if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_VER_MAJOR, DPNI_VER_MINOR) < 0) { 287434ff6846SIoana Radulescu dev_err(dev, "DPNI version %u.%u not supported, need >= 
%u.%u\n", 287534ff6846SIoana Radulescu priv->dpni_ver_major, priv->dpni_ver_minor, 287634ff6846SIoana Radulescu DPNI_VER_MAJOR, DPNI_VER_MINOR); 287734ff6846SIoana Radulescu err = -ENOTSUPP; 287834ff6846SIoana Radulescu goto close; 287934ff6846SIoana Radulescu } 288034ff6846SIoana Radulescu 288134ff6846SIoana Radulescu ls_dev->mc_io = priv->mc_io; 288234ff6846SIoana Radulescu ls_dev->mc_handle = priv->mc_token; 288334ff6846SIoana Radulescu 288434ff6846SIoana Radulescu err = dpni_reset(priv->mc_io, 0, priv->mc_token); 288534ff6846SIoana Radulescu if (err) { 288634ff6846SIoana Radulescu dev_err(dev, "dpni_reset() failed\n"); 288734ff6846SIoana Radulescu goto close; 288834ff6846SIoana Radulescu } 288934ff6846SIoana Radulescu 289034ff6846SIoana Radulescu err = dpni_get_attributes(priv->mc_io, 0, priv->mc_token, 289134ff6846SIoana Radulescu &priv->dpni_attrs); 289234ff6846SIoana Radulescu if (err) { 289334ff6846SIoana Radulescu dev_err(dev, "dpni_get_attributes() failed (err=%d)\n", err); 289434ff6846SIoana Radulescu goto close; 289534ff6846SIoana Radulescu } 289634ff6846SIoana Radulescu 289734ff6846SIoana Radulescu err = set_buffer_layout(priv); 289834ff6846SIoana Radulescu if (err) 289934ff6846SIoana Radulescu goto close; 290034ff6846SIoana Radulescu 29011fa0f68cSIoana Ciocoi Radulescu set_enqueue_mode(priv); 29021fa0f68cSIoana Ciocoi Radulescu 29038eb3cef8SIoana Radulescu /* Enable pause frame support */ 29048eb3cef8SIoana Radulescu if (dpaa2_eth_has_pause_support(priv)) { 29058eb3cef8SIoana Radulescu err = set_pause(priv); 29068eb3cef8SIoana Radulescu if (err) 29078eb3cef8SIoana Radulescu goto close; 29088eb3cef8SIoana Radulescu } 29098eb3cef8SIoana Radulescu 29106aa90fe2SIoana Radulescu err = set_vlan_qos(priv); 29116aa90fe2SIoana Radulescu if (err && err != -EOPNOTSUPP) 29126aa90fe2SIoana Radulescu goto close; 29136aa90fe2SIoana Radulescu 29149334d5baSXu Wang priv->cls_rules = devm_kcalloc(dev, dpaa2_eth_fs_count(priv), 29159334d5baSXu Wang sizeof(struct 
dpaa2_eth_cls_rule), 29169334d5baSXu Wang GFP_KERNEL); 291797fff7c8SWei Yongjun if (!priv->cls_rules) { 291897fff7c8SWei Yongjun err = -ENOMEM; 2919afb90dbbSIoana Radulescu goto close; 292097fff7c8SWei Yongjun } 2921afb90dbbSIoana Radulescu 292234ff6846SIoana Radulescu return 0; 292334ff6846SIoana Radulescu 292434ff6846SIoana Radulescu close: 292534ff6846SIoana Radulescu dpni_close(priv->mc_io, 0, priv->mc_token); 292634ff6846SIoana Radulescu 292734ff6846SIoana Radulescu return err; 292834ff6846SIoana Radulescu } 292934ff6846SIoana Radulescu 293034ff6846SIoana Radulescu static void free_dpni(struct dpaa2_eth_priv *priv) 293134ff6846SIoana Radulescu { 293234ff6846SIoana Radulescu int err; 293334ff6846SIoana Radulescu 293434ff6846SIoana Radulescu err = dpni_reset(priv->mc_io, 0, priv->mc_token); 293534ff6846SIoana Radulescu if (err) 293634ff6846SIoana Radulescu netdev_warn(priv->net_dev, "dpni_reset() failed (err %d)\n", 293734ff6846SIoana Radulescu err); 293834ff6846SIoana Radulescu 293934ff6846SIoana Radulescu dpni_close(priv->mc_io, 0, priv->mc_token); 294034ff6846SIoana Radulescu } 294134ff6846SIoana Radulescu 294234ff6846SIoana Radulescu static int setup_rx_flow(struct dpaa2_eth_priv *priv, 294334ff6846SIoana Radulescu struct dpaa2_eth_fq *fq) 294434ff6846SIoana Radulescu { 294534ff6846SIoana Radulescu struct device *dev = priv->net_dev->dev.parent; 294634ff6846SIoana Radulescu struct dpni_queue queue; 294734ff6846SIoana Radulescu struct dpni_queue_id qid; 294834ff6846SIoana Radulescu int err; 294934ff6846SIoana Radulescu 295034ff6846SIoana Radulescu err = dpni_get_queue(priv->mc_io, 0, priv->mc_token, 2951685e39eaSIoana Radulescu DPNI_QUEUE_RX, fq->tc, fq->flowid, &queue, &qid); 295234ff6846SIoana Radulescu if (err) { 295334ff6846SIoana Radulescu dev_err(dev, "dpni_get_queue(RX) failed\n"); 295434ff6846SIoana Radulescu return err; 295534ff6846SIoana Radulescu } 295634ff6846SIoana Radulescu 295734ff6846SIoana Radulescu fq->fqid = qid.fqid; 295834ff6846SIoana 
Radulescu 295934ff6846SIoana Radulescu queue.destination.id = fq->channel->dpcon_id; 296034ff6846SIoana Radulescu queue.destination.type = DPNI_DEST_DPCON; 296134ff6846SIoana Radulescu queue.destination.priority = 1; 296234ff6846SIoana Radulescu queue.user_context = (u64)(uintptr_t)fq; 296334ff6846SIoana Radulescu err = dpni_set_queue(priv->mc_io, 0, priv->mc_token, 2964685e39eaSIoana Radulescu DPNI_QUEUE_RX, fq->tc, fq->flowid, 296516fa1cf1SIoana Radulescu DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST, 296634ff6846SIoana Radulescu &queue); 296734ff6846SIoana Radulescu if (err) { 296834ff6846SIoana Radulescu dev_err(dev, "dpni_set_queue(RX) failed\n"); 296934ff6846SIoana Radulescu return err; 297034ff6846SIoana Radulescu } 297134ff6846SIoana Radulescu 2972d678be1dSIoana Radulescu /* xdp_rxq setup */ 2973685e39eaSIoana Radulescu /* only once for each channel */ 2974685e39eaSIoana Radulescu if (fq->tc > 0) 2975685e39eaSIoana Radulescu return 0; 2976685e39eaSIoana Radulescu 2977d678be1dSIoana Radulescu err = xdp_rxq_info_reg(&fq->channel->xdp_rxq, priv->net_dev, 2978d678be1dSIoana Radulescu fq->flowid); 2979d678be1dSIoana Radulescu if (err) { 2980d678be1dSIoana Radulescu dev_err(dev, "xdp_rxq_info_reg failed\n"); 2981d678be1dSIoana Radulescu return err; 2982d678be1dSIoana Radulescu } 2983d678be1dSIoana Radulescu 2984d678be1dSIoana Radulescu err = xdp_rxq_info_reg_mem_model(&fq->channel->xdp_rxq, 2985d678be1dSIoana Radulescu MEM_TYPE_PAGE_ORDER0, NULL); 2986d678be1dSIoana Radulescu if (err) { 2987d678be1dSIoana Radulescu dev_err(dev, "xdp_rxq_info_reg_mem_model failed\n"); 2988d678be1dSIoana Radulescu return err; 2989d678be1dSIoana Radulescu } 2990d678be1dSIoana Radulescu 299134ff6846SIoana Radulescu return 0; 299234ff6846SIoana Radulescu } 299334ff6846SIoana Radulescu 299434ff6846SIoana Radulescu static int setup_tx_flow(struct dpaa2_eth_priv *priv, 299534ff6846SIoana Radulescu struct dpaa2_eth_fq *fq) 299634ff6846SIoana Radulescu { 299734ff6846SIoana Radulescu struct 
device *dev = priv->net_dev->dev.parent; 299834ff6846SIoana Radulescu struct dpni_queue queue; 299934ff6846SIoana Radulescu struct dpni_queue_id qid; 300015c87f6bSIoana Radulescu int i, err; 300134ff6846SIoana Radulescu 300215c87f6bSIoana Radulescu for (i = 0; i < dpaa2_eth_tc_count(priv); i++) { 300334ff6846SIoana Radulescu err = dpni_get_queue(priv->mc_io, 0, priv->mc_token, 300415c87f6bSIoana Radulescu DPNI_QUEUE_TX, i, fq->flowid, 300515c87f6bSIoana Radulescu &queue, &qid); 300634ff6846SIoana Radulescu if (err) { 300734ff6846SIoana Radulescu dev_err(dev, "dpni_get_queue(TX) failed\n"); 300834ff6846SIoana Radulescu return err; 300934ff6846SIoana Radulescu } 301015c87f6bSIoana Radulescu fq->tx_fqid[i] = qid.fqid; 301115c87f6bSIoana Radulescu } 301234ff6846SIoana Radulescu 301315c87f6bSIoana Radulescu /* All Tx queues belonging to the same flowid have the same qdbin */ 301434ff6846SIoana Radulescu fq->tx_qdbin = qid.qdbin; 301534ff6846SIoana Radulescu 301634ff6846SIoana Radulescu err = dpni_get_queue(priv->mc_io, 0, priv->mc_token, 301734ff6846SIoana Radulescu DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid, 301834ff6846SIoana Radulescu &queue, &qid); 301934ff6846SIoana Radulescu if (err) { 302034ff6846SIoana Radulescu dev_err(dev, "dpni_get_queue(TX_CONF) failed\n"); 302134ff6846SIoana Radulescu return err; 302234ff6846SIoana Radulescu } 302334ff6846SIoana Radulescu 302434ff6846SIoana Radulescu fq->fqid = qid.fqid; 302534ff6846SIoana Radulescu 302634ff6846SIoana Radulescu queue.destination.id = fq->channel->dpcon_id; 302734ff6846SIoana Radulescu queue.destination.type = DPNI_DEST_DPCON; 302834ff6846SIoana Radulescu queue.destination.priority = 0; 302934ff6846SIoana Radulescu queue.user_context = (u64)(uintptr_t)fq; 303034ff6846SIoana Radulescu err = dpni_set_queue(priv->mc_io, 0, priv->mc_token, 303134ff6846SIoana Radulescu DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid, 303234ff6846SIoana Radulescu DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST, 303334ff6846SIoana Radulescu &queue); 
	if (err) {
		dev_err(dev, "dpni_set_queue(TX_CONF) failed\n");
		return err;
	}

	return 0;
}

/* Supported header fields for Rx hash distribution key.
 * Each entry maps an ethtool RXH_* flag (where one exists) to the MC
 * firmware's protocol/field pair, the driver-internal DPAA2_ETH_DIST_*
 * id used in key bitmasks, and the field's size in bytes within the key.
 * NOTE: entry order defines the byte layout of the extracted key, so
 * dpaa2_eth_cls_fld_off()/dpaa2_eth_cls_trim_rule() depend on it.
 */
static const struct dpaa2_eth_dist_fields dist_fields[] = {
	{
		/* L2 header */
		.rxnfc_field = RXH_L2DA,
		.cls_prot = NET_PROT_ETH,
		.cls_field = NH_FLD_ETH_DA,
		.id = DPAA2_ETH_DIST_ETHDST,
		.size = 6,
	}, {
		/* Source MAC: classification only, no RXH_* equivalent */
		.cls_prot = NET_PROT_ETH,
		.cls_field = NH_FLD_ETH_SA,
		.id = DPAA2_ETH_DIST_ETHSRC,
		.size = 6,
	}, {
		/* This is the last ethertype field parsed:
		 * depending on frame format, it can be the MAC ethertype
		 * or the VLAN etype.
		 */
		.cls_prot = NET_PROT_ETH,
		.cls_field = NH_FLD_ETH_TYPE,
		.id = DPAA2_ETH_DIST_ETHTYPE,
		.size = 2,
	}, {
		/* VLAN header */
		.rxnfc_field = RXH_VLAN,
		.cls_prot = NET_PROT_VLAN,
		.cls_field = NH_FLD_VLAN_TCI,
		.id = DPAA2_ETH_DIST_VLAN,
		.size = 2,
	}, {
		/* IP header */
		.rxnfc_field = RXH_IP_SRC,
		.cls_prot = NET_PROT_IP,
		.cls_field = NH_FLD_IP_SRC,
		.id = DPAA2_ETH_DIST_IPSRC,
		.size = 4,
	}, {
		.rxnfc_field = RXH_IP_DST,
		.cls_prot = NET_PROT_IP,
		.cls_field = NH_FLD_IP_DST,
		.id = DPAA2_ETH_DIST_IPDST,
		.size = 4,
	}, {
		.rxnfc_field = RXH_L3_PROTO,
		.cls_prot = NET_PROT_IP,
		.cls_field = NH_FLD_IP_PROTO,
		.id = DPAA2_ETH_DIST_IPPROTO,
		.size = 1,
	}, {
		/* Using UDP ports, this is functionally equivalent to raw
		 * byte pairs from L4 header.
		 */
		.rxnfc_field = RXH_L4_B_0_1,
		.cls_prot = NET_PROT_UDP,
		.cls_field = NH_FLD_UDP_PORT_SRC,
		.id = DPAA2_ETH_DIST_L4SRC,
		.size = 2,
	}, {
		.rxnfc_field = RXH_L4_B_2_3,
		.cls_prot = NET_PROT_UDP,
		.cls_field = NH_FLD_UDP_PORT_DST,
		.id = DPAA2_ETH_DIST_L4DST,
		.size = 2,
	},
};

/* Configure the Rx hash key using the legacy API.
 * @key: IOVA of the DMA-mapped, firmware-formatted key configuration.
 * The same hash key is applied to every traffic class; stops at the
 * first failing class and returns that error (0 on full success).
 */
static int config_legacy_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
{
	struct device *dev = priv->net_dev->dev.parent;
	struct dpni_rx_tc_dist_cfg dist_cfg;
	int i, err = 0;

	memset(&dist_cfg, 0, sizeof(dist_cfg));

	dist_cfg.key_cfg_iova = key;
	dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
	dist_cfg.dist_mode = DPNI_DIST_MODE_HASH;

	for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
		err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token,
					  i, &dist_cfg);
		if (err) {
			dev_err(dev, "dpni_set_rx_tc_dist failed\n");
			break;
		}
	}

	return err;
}

/* Configure the Rx hash key using the new API.
 * @key: IOVA of the DMA-mapped, firmware-formatted key configuration.
 * Applies the same key to every traffic class; stops at the first
 * failing class and returns that error (0 on full success).
 */
static int config_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
{
	struct device *dev = priv->net_dev->dev.parent;
	struct dpni_rx_dist_cfg dist_cfg;
	int i, err = 0;

	memset(&dist_cfg, 0, sizeof(dist_cfg));

	dist_cfg.key_cfg_iova = key;
	dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
	dist_cfg.enable = 1;

	for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
		dist_cfg.tc = i;
		err = dpni_set_rx_hash_dist(priv->mc_io, 0, priv->mc_token,
					    &dist_cfg);
		if (err) {
			dev_err(dev, "dpni_set_rx_hash_dist failed\n");
			break;
		}
	}

	return err;
}

/* Configure the Rx flow classification key.
 * Same shape as config_hash_key() but programs the flow-steering (FS)
 * table key instead of the hash key.
 */
static int config_cls_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
{
	struct device *dev = priv->net_dev->dev.parent;
	struct dpni_rx_dist_cfg dist_cfg;
	int i, err = 0;

	memset(&dist_cfg, 0, sizeof(dist_cfg));

	dist_cfg.key_cfg_iova = key;
	dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
	dist_cfg.enable = 1;

	for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
		dist_cfg.tc = i;
		err = dpni_set_rx_fs_dist(priv->mc_io, 0, priv->mc_token,
					  &dist_cfg);
		if (err) {
			dev_err(dev, "dpni_set_rx_fs_dist failed\n");
			break;
		}
	}

	return err;
}

/* Size of the Rx flow classification key: sum of the sizes of all
 * dist_fields entries selected by the @fields bitmask of
 * DPAA2_ETH_DIST_* ids.
 */
int dpaa2_eth_cls_key_size(u64 fields)
{
	int i, size = 0;

	for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
		if (!(fields & dist_fields[i].id))
			continue;
		size += dist_fields[i].size;
	}

	return size;
}

/* Offset of header field in Rx classification key.
 * Offsets follow dist_fields[] order; an unknown @prot/@field pair is a
 * driver bug, reported via WARN_ONCE and mapped to offset 0.
 */
int dpaa2_eth_cls_fld_off(int prot, int field)
{
	int i, off = 0;

	for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
		if (dist_fields[i].cls_prot == prot &&
		    dist_fields[i].cls_field == field)
			return off;
		off += dist_fields[i].size;
	}

	WARN_ONCE(1, "Unsupported header field used for Rx flow cls\n");
	return 0;
}

/* Prune unused fields from the classification rule.
 * Used when masking is not supported.
 * Compacts @key_mem in place: fields selected in @fields are copied
 * towards the front in dist_fields[] order; the copies never overlap
 * since new_off <= off at every step.
 */
void dpaa2_eth_cls_trim_rule(void *key_mem, u64 fields)
{
	int off = 0, new_off = 0;
	int i, size;

	for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
		size = dist_fields[i].size;
		if (dist_fields[i].id & fields) {
			memcpy(key_mem + new_off, key_mem + off, size);
			new_off += size;
		}
		off += size;
	}
}

/* Set Rx distribution (hash or flow classification) key
 * flags is a combination of RXH_ bits
 */
static int dpaa2_eth_set_dist_key(struct net_device *net_dev,
				  enum dpaa2_eth_rx_dist type, u64 flags)
{
	struct device *dev = net_dev->dev.parent;
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	struct dpkg_profile_cfg cls_cfg;
	u32 rx_hash_fields = 0;
	dma_addr_t key_iova;
	u8 *dma_mem;
	int i;
	int err = 0;

	memset(&cls_cfg, 0, sizeof(cls_cfg));

	/* Build the key-extraction profile from the selected dist_fields */
	for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
		struct dpkg_extract *key =
			&cls_cfg.extracts[cls_cfg.num_extracts];

		/* For both Rx hashing and classification keys
		 * we set only the selected fields.
		 */
		if (!(flags & dist_fields[i].id))
			continue;
		if (type == DPAA2_ETH_RX_DIST_HASH)
			rx_hash_fields |= dist_fields[i].rxnfc_field;

		if (cls_cfg.num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) {
			dev_err(dev, "error adding key extraction rule, too many rules?\n");
			return -E2BIG;
		}

		key->type = DPKG_EXTRACT_FROM_HDR;
		key->extract.from_hdr.prot = dist_fields[i].cls_prot;
		key->extract.from_hdr.type = DPKG_FULL_FIELD;
		key->extract.from_hdr.field = dist_fields[i].cls_field;
		cls_cfg.num_extracts++;
	}

	dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL);
	if (!dma_mem)
		return -ENOMEM;

	/* Serialize the profile into the firmware's binary format */
	err = dpni_prepare_key_cfg(&cls_cfg, dma_mem);
	if (err) {
		dev_err(dev, "dpni_prepare_key_cfg error %d\n", err);
		goto free_key;
	}

	/* Prepare for setting the rx dist */
	key_iova = dma_map_single(dev, dma_mem, DPAA2_CLASSIFIER_DMA_SIZE,
				  DMA_TO_DEVICE);
	if (dma_mapping_error(dev, key_iova)) {
		dev_err(dev, "DMA mapping failed\n");
		err = -ENOMEM;
		goto free_key;
	}

	if (type == DPAA2_ETH_RX_DIST_HASH) {
		/* Older MC firmware uses the legacy per-TC hash API */
		if (dpaa2_eth_has_legacy_dist(priv))
			err = config_legacy_hash_key(priv, key_iova);
		else
			err = config_hash_key(priv, key_iova);
	} else {
		err = config_cls_key(priv, key_iova);
	}

	dma_unmap_single(dev, key_iova, DPAA2_CLASSIFIER_DMA_SIZE,
			 DMA_TO_DEVICE);
	/* Cache the active RXH_* set only after the hardware accepted it */
	if (!err && type == DPAA2_ETH_RX_DIST_HASH)
		priv->rx_hash_fields = rx_hash_fields;

free_key:
	kfree(dma_mem);
	return err;
}

/* Set the Rx hash key from a combination of ethtool RXH_* flags.
 * Translates RXH_* bits into DPAA2_ETH_DIST_* ids before programming.
 * Returns -EOPNOTSUPP when hashing is not available on this DPNI.
 */
int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	u64 key = 0;
	int i;

	if (!dpaa2_eth_hash_enabled(priv))
		return -EOPNOTSUPP;

	for (i = 0; i < ARRAY_SIZE(dist_fields); i++)
		if (dist_fields[i].rxnfc_field & flags)
			key |= dist_fields[i].id;

	return dpaa2_eth_set_dist_key(net_dev, DPAA2_ETH_RX_DIST_HASH, key);
}

/* Set the Rx classification key; @flags is a DPAA2_ETH_DIST_* bitmask */
int dpaa2_eth_set_cls(struct net_device *net_dev, u64 flags)
{
	return dpaa2_eth_set_dist_key(net_dev, DPAA2_ETH_RX_DIST_CLS, flags);
}

/* Install the default Rx classification key at probe time, if the
 * firmware and DPNI configuration support flow classification.
 */
static int dpaa2_eth_set_default_cls(struct dpaa2_eth_priv *priv)
{
	struct device *dev = priv->net_dev->dev.parent;
	int err;

	/* Check if we actually support Rx flow classification */
	if (dpaa2_eth_has_legacy_dist(priv)) {
		dev_dbg(dev, "Rx cls not supported by current MC version\n");
		return -EOPNOTSUPP;
	}

	if (!dpaa2_eth_fs_enabled(priv)) {
		dev_dbg(dev, "Rx cls disabled in DPNI options\n");
		return -EOPNOTSUPP;
	}

	if (!dpaa2_eth_hash_enabled(priv)) {
		dev_dbg(dev, "Rx cls disabled for single queue DPNIs\n");
		return -EOPNOTSUPP;
	}
Radulescu 33552d680237SIoana Ciocoi Radulescu /* If there is no support for masking in the classification table, 33562d680237SIoana Ciocoi Radulescu * we don't set a default key, as it will depend on the rules 33572d680237SIoana Ciocoi Radulescu * added by the user at runtime. 33582d680237SIoana Ciocoi Radulescu */ 33592d680237SIoana Ciocoi Radulescu if (!dpaa2_eth_fs_mask_enabled(priv)) 33602d680237SIoana Ciocoi Radulescu goto out; 33612d680237SIoana Ciocoi Radulescu 33622d680237SIoana Ciocoi Radulescu err = dpaa2_eth_set_cls(priv->net_dev, DPAA2_ETH_DIST_ALL); 3363df8e249bSIoana Ciocoi Radulescu if (err) 3364df8e249bSIoana Ciocoi Radulescu return err; 3365df8e249bSIoana Ciocoi Radulescu 33662d680237SIoana Ciocoi Radulescu out: 33674aaaf9b9SIoana Radulescu priv->rx_cls_enabled = 1; 33684aaaf9b9SIoana Radulescu 3369df8e249bSIoana Ciocoi Radulescu return 0; 33704aaaf9b9SIoana Radulescu } 33714aaaf9b9SIoana Radulescu 337234ff6846SIoana Radulescu /* Bind the DPNI to its needed objects and resources: buffer pool, DPIOs, 337334ff6846SIoana Radulescu * frame queues and channels 337434ff6846SIoana Radulescu */ 337534ff6846SIoana Radulescu static int bind_dpni(struct dpaa2_eth_priv *priv) 337634ff6846SIoana Radulescu { 337734ff6846SIoana Radulescu struct net_device *net_dev = priv->net_dev; 337834ff6846SIoana Radulescu struct device *dev = net_dev->dev.parent; 337934ff6846SIoana Radulescu struct dpni_pools_cfg pools_params; 338034ff6846SIoana Radulescu struct dpni_error_cfg err_cfg; 338134ff6846SIoana Radulescu int err = 0; 338234ff6846SIoana Radulescu int i; 338334ff6846SIoana Radulescu 338434ff6846SIoana Radulescu pools_params.num_dpbp = 1; 338534ff6846SIoana Radulescu pools_params.pools[0].dpbp_id = priv->dpbp_dev->obj_desc.id; 338634ff6846SIoana Radulescu pools_params.pools[0].backup_pool = 0; 3387efa6a7d0SIoana Ciornei pools_params.pools[0].buffer_size = priv->rx_buf_size; 338834ff6846SIoana Radulescu err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, 
&pools_params); 338934ff6846SIoana Radulescu if (err) { 339034ff6846SIoana Radulescu dev_err(dev, "dpni_set_pools() failed\n"); 339134ff6846SIoana Radulescu return err; 339234ff6846SIoana Radulescu } 339334ff6846SIoana Radulescu 339434ff6846SIoana Radulescu /* have the interface implicitly distribute traffic based on 339534ff6846SIoana Radulescu * the default hash key 339634ff6846SIoana Radulescu */ 339734ff6846SIoana Radulescu err = dpaa2_eth_set_hash(net_dev, DPAA2_RXH_DEFAULT); 3398edad8d26SIoana Ciocoi Radulescu if (err && err != -EOPNOTSUPP) 339934ff6846SIoana Radulescu dev_err(dev, "Failed to configure hashing\n"); 340034ff6846SIoana Radulescu 34014aaaf9b9SIoana Radulescu /* Configure the flow classification key; it includes all 34024aaaf9b9SIoana Radulescu * supported header fields and cannot be modified at runtime 34034aaaf9b9SIoana Radulescu */ 34042d680237SIoana Ciocoi Radulescu err = dpaa2_eth_set_default_cls(priv); 34054aaaf9b9SIoana Radulescu if (err && err != -EOPNOTSUPP) 34064aaaf9b9SIoana Radulescu dev_err(dev, "Failed to configure Rx classification key\n"); 34074aaaf9b9SIoana Radulescu 340834ff6846SIoana Radulescu /* Configure handling of error frames */ 340934ff6846SIoana Radulescu err_cfg.errors = DPAA2_FAS_RX_ERR_MASK; 341034ff6846SIoana Radulescu err_cfg.set_frame_annotation = 1; 341134ff6846SIoana Radulescu err_cfg.error_action = DPNI_ERROR_ACTION_DISCARD; 341234ff6846SIoana Radulescu err = dpni_set_errors_behavior(priv->mc_io, 0, priv->mc_token, 341334ff6846SIoana Radulescu &err_cfg); 341434ff6846SIoana Radulescu if (err) { 341534ff6846SIoana Radulescu dev_err(dev, "dpni_set_errors_behavior failed\n"); 341634ff6846SIoana Radulescu return err; 341734ff6846SIoana Radulescu } 341834ff6846SIoana Radulescu 341934ff6846SIoana Radulescu /* Configure Rx and Tx conf queues to generate CDANs */ 342034ff6846SIoana Radulescu for (i = 0; i < priv->num_fqs; i++) { 342134ff6846SIoana Radulescu switch (priv->fq[i].type) { 342234ff6846SIoana Radulescu case 
DPAA2_RX_FQ: 342334ff6846SIoana Radulescu err = setup_rx_flow(priv, &priv->fq[i]); 342434ff6846SIoana Radulescu break; 342534ff6846SIoana Radulescu case DPAA2_TX_CONF_FQ: 342634ff6846SIoana Radulescu err = setup_tx_flow(priv, &priv->fq[i]); 342734ff6846SIoana Radulescu break; 342834ff6846SIoana Radulescu default: 342934ff6846SIoana Radulescu dev_err(dev, "Invalid FQ type %d\n", priv->fq[i].type); 343034ff6846SIoana Radulescu return -EINVAL; 343134ff6846SIoana Radulescu } 343234ff6846SIoana Radulescu if (err) 343334ff6846SIoana Radulescu return err; 343434ff6846SIoana Radulescu } 343534ff6846SIoana Radulescu 343634ff6846SIoana Radulescu err = dpni_get_qdid(priv->mc_io, 0, priv->mc_token, 343734ff6846SIoana Radulescu DPNI_QUEUE_TX, &priv->tx_qdid); 343834ff6846SIoana Radulescu if (err) { 343934ff6846SIoana Radulescu dev_err(dev, "dpni_get_qdid() failed\n"); 344034ff6846SIoana Radulescu return err; 344134ff6846SIoana Radulescu } 344234ff6846SIoana Radulescu 344334ff6846SIoana Radulescu return 0; 344434ff6846SIoana Radulescu } 344534ff6846SIoana Radulescu 344634ff6846SIoana Radulescu /* Allocate rings for storing incoming frame descriptors */ 344734ff6846SIoana Radulescu static int alloc_rings(struct dpaa2_eth_priv *priv) 344834ff6846SIoana Radulescu { 344934ff6846SIoana Radulescu struct net_device *net_dev = priv->net_dev; 345034ff6846SIoana Radulescu struct device *dev = net_dev->dev.parent; 345134ff6846SIoana Radulescu int i; 345234ff6846SIoana Radulescu 345334ff6846SIoana Radulescu for (i = 0; i < priv->num_channels; i++) { 345434ff6846SIoana Radulescu priv->channel[i]->store = 345534ff6846SIoana Radulescu dpaa2_io_store_create(DPAA2_ETH_STORE_SIZE, dev); 345634ff6846SIoana Radulescu if (!priv->channel[i]->store) { 345734ff6846SIoana Radulescu netdev_err(net_dev, "dpaa2_io_store_create() failed\n"); 345834ff6846SIoana Radulescu goto err_ring; 345934ff6846SIoana Radulescu } 346034ff6846SIoana Radulescu } 346134ff6846SIoana Radulescu 346234ff6846SIoana Radulescu 
	return 0;

err_ring:
	/* Unwind partial allocation: the first NULL store marks where
	 * dpaa2_io_store_create() stopped succeeding.
	 */
	for (i = 0; i < priv->num_channels; i++) {
		if (!priv->channel[i]->store)
			break;
		dpaa2_io_store_destroy(priv->channel[i]->store);
	}

	return -ENOMEM;
}

/* Free the per-channel frame descriptor stores created by alloc_rings() */
static void free_rings(struct dpaa2_eth_priv *priv)
{
	int i;

	for (i = 0; i < priv->num_channels; i++)
		dpaa2_io_store_destroy(priv->channel[i]->store);
}

/* Choose and program the interface MAC address.
 * Precedence: bootloader-provided DPMAC (port) address, then the DPNI
 * attributes address, then a randomly generated one (which is also
 * written back to the DPNI and marked NET_ADDR_PERM).
 */
static int set_mac_addr(struct dpaa2_eth_priv *priv)
{
	struct net_device *net_dev = priv->net_dev;
	struct device *dev = net_dev->dev.parent;
	u8 mac_addr[ETH_ALEN], dpni_mac_addr[ETH_ALEN];
	int err;

	/* Get firmware address, if any */
	err = dpni_get_port_mac_addr(priv->mc_io, 0, priv->mc_token, mac_addr);
	if (err) {
		dev_err(dev, "dpni_get_port_mac_addr() failed\n");
		return err;
	}

	/* Get DPNI attributes address, if any */
	err = dpni_get_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
					dpni_mac_addr);
	if (err) {
		dev_err(dev, "dpni_get_primary_mac_addr() failed\n");
		return err;
	}

	/* First check if firmware has any address configured by bootloader */
	if (!is_zero_ether_addr(mac_addr)) {
		/* If the DPMAC addr != DPNI addr, update it */
		if (!ether_addr_equal(mac_addr, dpni_mac_addr)) {
			err = dpni_set_primary_mac_addr(priv->mc_io, 0,
							priv->mc_token,
							mac_addr);
			if (err) {
				dev_err(dev, "dpni_set_primary_mac_addr() failed\n");
				return err;
			}
		}
		memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len);
	} else if (is_zero_ether_addr(dpni_mac_addr)) {
		/* No MAC address configured, fill in net_dev->dev_addr
		 * with a random one
		 */
		eth_hw_addr_random(net_dev);
		dev_dbg_once(dev, "device(s) have all-zero hwaddr, replaced with random\n");

		err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
						net_dev->dev_addr);
		if (err) {
			dev_err(dev, "dpni_set_primary_mac_addr() failed\n");
			return err;
		}

		/* Override NET_ADDR_RANDOM set by eth_hw_addr_random(); for all
		 * practical purposes, this will be our "permanent" mac address,
		 * at least until the next reboot. This move will also permit
		 * register_netdevice() to properly fill up net_dev->perm_addr.
		 */
		net_dev->addr_assign_type = NET_ADDR_PERM;
	} else {
		/* NET_ADDR_PERM is default, all we have to do is
		 * fill in the device addr.
		 */
		memcpy(net_dev->dev_addr, dpni_mac_addr, net_dev->addr_len);
	}

	return 0;
}

/* One-time net_device setup: ops, MAC address, MTU limits, queue counts
 * and feature flags. Called before register_netdevice().
 */
static int netdev_init(struct net_device *net_dev)
{
	struct device *dev = net_dev->dev.parent;
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	u32 options = priv->dpni_attrs.options;
	u64 supported = 0, not_supported = 0;
	u8 bcast_addr[ETH_ALEN];
	u8 num_queues;
	int err;

	net_dev->netdev_ops = &dpaa2_eth_ops;
	net_dev->ethtool_ops = &dpaa2_ethtool_ops;

	err = set_mac_addr(priv);
	if (err)
		return err;

	/* Explicitly add the broadcast address to the MAC filtering table */
	eth_broadcast_addr(bcast_addr);
	err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token, bcast_addr);
	if (err) {
		dev_err(dev, "dpni_add_mac_addr() failed\n");
		return err;
	}

	/* Set MTU upper limit; lower limit is 68B (default value) */
	net_dev->max_mtu = DPAA2_ETH_MAX_MTU;
	err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token,
					DPAA2_ETH_MFL);
	if (err) {
		dev_err(dev, "dpni_set_max_frame_length() failed\n");
		return err;
	}

	/* Set actual number of queues in the net device */
	num_queues = dpaa2_eth_queue_count(priv);
	err = netif_set_real_num_tx_queues(net_dev, num_queues);
	if (err) {
		dev_err(dev, "netif_set_real_num_tx_queues() failed\n");
		return err;
	}
	err = netif_set_real_num_rx_queues(net_dev, num_queues);
	if (err) {
		dev_err(dev, "netif_set_real_num_rx_queues() failed\n");
		return err;
	}

	/* Capabilities listing */
	supported |= IFF_LIVE_ADDR_CHANGE;

	if (options & DPNI_OPT_NO_MAC_FILTER)
		not_supported |= IFF_UNICAST_FLT;
	else
		supported |= IFF_UNICAST_FLT;

	net_dev->priv_flags |= supported;
	net_dev->priv_flags &= ~not_supported;

	/* Features */
	net_dev->features = NETIF_F_RXCSUM |
			    NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			    NETIF_F_SG | NETIF_F_HIGHDMA |
			    NETIF_F_LLTX;
	net_dev->hw_features = net_dev->features;

	return 0;
}

/* Kthread body used when the DPNI has no link-change interrupt:
 * periodically refreshes link state until asked to stop.
 * Returns 0 on normal stop, or the first link_state_update() error.
 */
static int poll_link_state(void *arg)
{
	struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)arg;
	int err;

	while (!kthread_should_stop()) {
		err = link_state_update(priv);
		if (unlikely(err))
			return err;

		msleep(DPAA2_ETH_LINK_STATE_REFRESH);
	}

	return 0;
}

/* Attach to the DPMAC endpoint connected to this DPNI, if one exists
 * and is not of the "fixed link" type. On success priv->mac owns the
 * allocated dpaa2_mac; freed in dpaa2_eth_disconnect_mac().
 * Not finding a suitable endpoint is not an error (returns 0).
 */
static int dpaa2_eth_connect_mac(struct dpaa2_eth_priv *priv)
{
	struct fsl_mc_device *dpni_dev, *dpmac_dev;
	struct dpaa2_mac *mac;
	int err;

	dpni_dev = to_fsl_mc_device(priv->net_dev->dev.parent);
	dpmac_dev = fsl_mc_get_endpoint(dpni_dev);
	if (IS_ERR(dpmac_dev) || dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type)
		return 0;

	if (dpaa2_mac_is_type_fixed(dpmac_dev, priv->mc_io))
		return 0;

	mac = kzalloc(sizeof(struct dpaa2_mac), GFP_KERNEL);
	if (!mac)
		return -ENOMEM;

	mac->mc_dev = dpmac_dev;
	mac->mc_io = priv->mc_io;
	mac->net_dev = priv->net_dev;

	err = dpaa2_mac_connect(mac);
	if (err) {
		netdev_err(priv->net_dev, "Error connecting to the MAC endpoint\n");
		kfree(mac);
		return err;
	}
	priv->mac = mac;

	return 0;
}

/* Undo dpaa2_eth_connect_mac(); safe to call when no MAC is attached */
static void dpaa2_eth_disconnect_mac(struct dpaa2_eth_priv *priv)
{
	if (!priv->mac)
		return;

	dpaa2_mac_disconnect(priv->mac);
	kfree(priv->mac);
	priv->mac = NULL;
}

/* Threaded IRQ handler for DPNI events: link changes and endpoint
 * (DPMAC) connect/disconnect. Always returns IRQ_HANDLED, even when
 * reading the status fails, since the line is ours alone.
 */
static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg)
{
	u32 status = ~0;
	struct device *dev = (struct device *)arg;
	struct fsl_mc_device *dpni_dev = to_fsl_mc_device(dev);
	struct net_device *net_dev = dev_get_drvdata(dev);
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	int err;

	err = dpni_get_irq_status(dpni_dev->mc_io, 0, dpni_dev->mc_handle,
				  DPNI_IRQ_INDEX, &status);
	if (unlikely(err)) {
		netdev_err(net_dev, "Can't get irq status (err %d)\n", err);
		return IRQ_HANDLED;
	}

	if (status & DPNI_IRQ_EVENT_LINK_CHANGED)
		link_state_update(netdev_priv(net_dev));

	if (status & DPNI_IRQ_EVENT_ENDPOINT_CHANGED) {
		set_mac_addr(netdev_priv(net_dev));
		update_tx_fqids(priv);

		/* rtnl_lock serializes against ndo callbacks touching
		 * priv->mac; a set mac means the endpoint went away,
		 * an unset one means a new endpoint appeared.
		 */
		rtnl_lock();
		if (priv->mac)
			dpaa2_eth_disconnect_mac(priv);
		else
			dpaa2_eth_connect_mac(priv);
		rtnl_unlock();
	}

	return IRQ_HANDLED;
}

/* Allocate the MC interrupt and install dpni_irq0_handler_thread on it */
static int setup_irqs(struct fsl_mc_device *ls_dev)
{
	int err = 0;
	struct fsl_mc_device_irq *irq;

	err = fsl_mc_allocate_irqs(ls_dev);
	if (err) {
		dev_err(&ls_dev->dev, "MC irqs allocation failed\n");
		return err;
	}

	irq = ls_dev->irqs[0];
	err = devm_request_threaded_irq(&ls_dev->dev, irq->msi_desc->irq,
					NULL, dpni_irq0_handler_thread,
					IRQF_NO_SUSPEND | IRQF_ONESHOT,
					dev_name(&ls_dev->dev), &ls_dev->dev);
	if (err < 0) {
		dev_err(&ls_dev->dev, "devm_request_threaded_irq(): %d\n", err);
		goto free_mc_irq;
} 372834ff6846SIoana Radulescu 372934ff6846SIoana Radulescu err = dpni_set_irq_mask(ls_dev->mc_io, 0, ls_dev->mc_handle, 37308398b375SFlorin Chiculita DPNI_IRQ_INDEX, DPNI_IRQ_EVENT_LINK_CHANGED | 37318398b375SFlorin Chiculita DPNI_IRQ_EVENT_ENDPOINT_CHANGED); 373234ff6846SIoana Radulescu if (err < 0) { 373334ff6846SIoana Radulescu dev_err(&ls_dev->dev, "dpni_set_irq_mask(): %d\n", err); 373434ff6846SIoana Radulescu goto free_irq; 373534ff6846SIoana Radulescu } 373634ff6846SIoana Radulescu 373734ff6846SIoana Radulescu err = dpni_set_irq_enable(ls_dev->mc_io, 0, ls_dev->mc_handle, 373834ff6846SIoana Radulescu DPNI_IRQ_INDEX, 1); 373934ff6846SIoana Radulescu if (err < 0) { 374034ff6846SIoana Radulescu dev_err(&ls_dev->dev, "dpni_set_irq_enable(): %d\n", err); 374134ff6846SIoana Radulescu goto free_irq; 374234ff6846SIoana Radulescu } 374334ff6846SIoana Radulescu 374434ff6846SIoana Radulescu return 0; 374534ff6846SIoana Radulescu 374634ff6846SIoana Radulescu free_irq: 374734ff6846SIoana Radulescu devm_free_irq(&ls_dev->dev, irq->msi_desc->irq, &ls_dev->dev); 374834ff6846SIoana Radulescu free_mc_irq: 374934ff6846SIoana Radulescu fsl_mc_free_irqs(ls_dev); 375034ff6846SIoana Radulescu 375134ff6846SIoana Radulescu return err; 375234ff6846SIoana Radulescu } 375334ff6846SIoana Radulescu 375434ff6846SIoana Radulescu static void add_ch_napi(struct dpaa2_eth_priv *priv) 375534ff6846SIoana Radulescu { 375634ff6846SIoana Radulescu int i; 375734ff6846SIoana Radulescu struct dpaa2_eth_channel *ch; 375834ff6846SIoana Radulescu 375934ff6846SIoana Radulescu for (i = 0; i < priv->num_channels; i++) { 376034ff6846SIoana Radulescu ch = priv->channel[i]; 376134ff6846SIoana Radulescu /* NAPI weight *MUST* be a multiple of DPAA2_ETH_STORE_SIZE */ 376234ff6846SIoana Radulescu netif_napi_add(priv->net_dev, &ch->napi, dpaa2_eth_poll, 376334ff6846SIoana Radulescu NAPI_POLL_WEIGHT); 376434ff6846SIoana Radulescu } 376534ff6846SIoana Radulescu } 376634ff6846SIoana Radulescu 376734ff6846SIoana 
/* Unregister the per-channel NAPI contexts added by add_ch_napi(). */
static void del_ch_napi(struct dpaa2_eth_priv *priv)
{
	int i;
	struct dpaa2_eth_channel *ch;

	for (i = 0; i < priv->num_channels; i++) {
		ch = priv->channel[i];
		netif_napi_del(&ch->napi);
	}
}

/* Probe one DPNI object on the fsl-mc bus and bring up a net_device for
 * it. Resource acquisition order (mirrored in reverse by the goto-based
 * error unwind and by dpaa2_eth_remove()):
 *   netdev alloc -> MC portal -> DPNI -> DPIO -> FQs -> DPBP -> bind ->
 *   NAPI -> percpu stats -> netdev init -> csum config -> rings ->
 *   (optional DCB) -> IRQs (or polling fallback) -> MAC -> register.
 * Returns 0 on success or a negative errno (-EPROBE_DEFER when the MC
 * portal is not available yet).
 */
static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
{
	struct device *dev;
	struct net_device *net_dev = NULL;
	struct dpaa2_eth_priv *priv = NULL;
	int err = 0;

	dev = &dpni_dev->dev;

	/* Net device */
	net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA2_ETH_MAX_NETDEV_QUEUES);
	if (!net_dev) {
		dev_err(dev, "alloc_etherdev_mq() failed\n");
		return -ENOMEM;
	}

	SET_NETDEV_DEV(net_dev, dev);
	dev_set_drvdata(dev, net_dev);

	priv = netdev_priv(net_dev);
	priv->net_dev = net_dev;

	/* May be NULL; used for IOVA-to-virtual translation on datapath */
	priv->iommu_domain = iommu_get_domain_for_dev(dev);

	/* Obtain a MC portal */
	err = fsl_mc_portal_allocate(dpni_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
				     &priv->mc_io);
	if (err) {
		/* -ENXIO: no portal available yet, retry probing later */
		if (err == -ENXIO)
			err = -EPROBE_DEFER;
		else
			dev_err(dev, "MC portal allocation failed\n");
		goto err_portal_alloc;
	}

	/* MC objects initialization and configuration */
	err = setup_dpni(dpni_dev);
	if (err)
		goto err_dpni_setup;

	err = setup_dpio(priv);
	if (err)
		goto err_dpio_setup;

	setup_fqs(priv);

	err = setup_dpbp(priv);
	if (err)
		goto err_dpbp_setup;

	err = bind_dpni(priv);
	if (err)
		goto err_bind;

	/* Add a NAPI context for each channel */
	add_ch_napi(priv);

	/* Percpu statistics */
	priv->percpu_stats = alloc_percpu(*priv->percpu_stats);
	if (!priv->percpu_stats) {
		dev_err(dev, "alloc_percpu(percpu_stats) failed\n");
		err = -ENOMEM;
		goto err_alloc_percpu_stats;
	}
	priv->percpu_extras = alloc_percpu(*priv->percpu_extras);
	if (!priv->percpu_extras) {
		dev_err(dev, "alloc_percpu(percpu_extras) failed\n");
		err = -ENOMEM;
		goto err_alloc_percpu_extras;
	}

	err = netdev_init(net_dev);
	if (err)
		goto err_netdev_init;

	/* Configure checksum offload based on current interface flags */
	err = set_rx_csum(priv, !!(net_dev->features & NETIF_F_RXCSUM));
	if (err)
		goto err_csum;

	err = set_tx_csum(priv, !!(net_dev->features &
				   (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)));
	if (err)
		goto err_csum;

	err = alloc_rings(priv);
	if (err)
		goto err_alloc_rings;

#ifdef CONFIG_FSL_DPAA2_ETH_DCB
	if (dpaa2_eth_has_pause_support(priv) && priv->vlan_cls_enabled) {
		priv->dcbx_mode = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE;
		net_dev->dcbnl_ops = &dpaa2_eth_dcbnl_ops;
	} else {
		dev_dbg(dev, "PFC not supported\n");
	}
#endif

	err = setup_irqs(dpni_dev);
	if (err) {
		/* No interrupt: fall back to polling the link state from a
		 * kthread. If that fails too, err still holds the (nonzero)
		 * setup_irqs() error when we bail out.
		 */
		netdev_warn(net_dev, "Failed to set link interrupt, fall back to polling\n");
		priv->poll_thread = kthread_run(poll_link_state, priv,
						"%s_poll_link", net_dev->name);
		if (IS_ERR(priv->poll_thread)) {
			dev_err(dev, "Error starting polling thread\n");
			goto err_poll_thread;
		}
		priv->do_link_poll = true;
	}

	err = dpaa2_eth_connect_mac(priv);
	if (err)
		goto err_connect_mac;

	err = register_netdev(net_dev);
	if (err < 0) {
		dev_err(dev, "register_netdev() failed\n");
		goto err_netdev_reg;
	}

#ifdef CONFIG_DEBUG_FS
	dpaa2_dbg_add(priv);
#endif

	dev_info(dev, "Probed interface %s\n", net_dev->name);
	return 0;

err_netdev_reg:
	dpaa2_eth_disconnect_mac(priv);
err_connect_mac:
	if (priv->do_link_poll)
		kthread_stop(priv->poll_thread);
	else
		fsl_mc_free_irqs(dpni_dev);
err_poll_thread:
	free_rings(priv);
err_alloc_rings:
err_csum:
err_netdev_init:
	free_percpu(priv->percpu_extras);
err_alloc_percpu_extras:
	free_percpu(priv->percpu_stats);
err_alloc_percpu_stats:
	del_ch_napi(priv);
err_bind:
	free_dpbp(priv);
err_dpbp_setup:
	free_dpio(priv);
err_dpio_setup:
	free_dpni(priv);
err_dpni_setup:
	fsl_mc_portal_free(priv->mc_io);
err_portal_alloc:
	dev_set_drvdata(dev, NULL);
	free_netdev(net_dev);

	return err;
}

/* Tear down everything dpaa2_eth_probe() set up, in reverse order.
 * The MAC is disconnected under rtnl before the netdev is unregistered,
 * matching the locking used in dpni_irq0_handler_thread().
 */
static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
{
	struct device *dev;
	struct net_device *net_dev;
	struct dpaa2_eth_priv *priv;

	dev = &ls_dev->dev;
	net_dev = dev_get_drvdata(dev);
	priv = netdev_priv(net_dev);

#ifdef CONFIG_DEBUG_FS
	dpaa2_dbg_remove(priv);
#endif
	rtnl_lock();
	dpaa2_eth_disconnect_mac(priv);
	rtnl_unlock();

	unregister_netdev(net_dev);

	if (priv->do_link_poll)
		kthread_stop(priv->poll_thread);
	else
		fsl_mc_free_irqs(ls_dev);

	free_rings(priv);
	free_percpu(priv->percpu_stats);
	free_percpu(priv->percpu_extras);

	del_ch_napi(priv);
	free_dpbp(priv);
	free_dpio(priv);
	free_dpni(priv);

	fsl_mc_portal_free(priv->mc_io);

	free_netdev(net_dev);

	dev_dbg(net_dev->dev.parent, "Removed interface %s\n", net_dev->name);

	return 0;
}

/* Match any Freescale "dpni" object on the fsl-mc bus */
static const struct fsl_mc_device_id dpaa2_eth_match_id_table[] = {
	{
		.vendor = FSL_MC_VENDOR_FREESCALE,
		.obj_type = "dpni",
	},
	{ .vendor = 0x0 }
};
MODULE_DEVICE_TABLE(fslmc, dpaa2_eth_match_id_table);

static struct fsl_mc_driver dpaa2_eth_driver = {
	.driver = {
		.name = KBUILD_MODNAME,
		.owner = THIS_MODULE,
	},
	.probe = dpaa2_eth_probe,
	.remove = dpaa2_eth_remove,
	.match_id_table = dpaa2_eth_match_id_table
};

/* Module init: bring up the debugfs root first so per-interface entries
 * added at probe time have a parent; roll it back if driver registration
 * fails.
 */
static int __init dpaa2_eth_driver_init(void)
{
	int err;

	dpaa2_eth_dbg_init();
	err = fsl_mc_driver_register(&dpaa2_eth_driver);
	if (err) {
		dpaa2_eth_dbg_exit();
		return err;
	}

	return 0;
}
/* Module exit: remove the debugfs root and unregister the bus driver,
 * undoing dpaa2_eth_driver_init().
 */
static void __exit dpaa2_eth_driver_exit(void)
{
	dpaa2_eth_dbg_exit();
	fsl_mc_driver_unregister(&dpaa2_eth_driver);
}

module_init(dpaa2_eth_driver_init);
module_exit(dpaa2_eth_driver_exit);