134ff6846SIoana Radulescu // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) 234ff6846SIoana Radulescu /* Copyright 2014-2016 Freescale Semiconductor Inc. 334ff6846SIoana Radulescu * Copyright 2016-2017 NXP 434ff6846SIoana Radulescu */ 534ff6846SIoana Radulescu #include <linux/init.h> 634ff6846SIoana Radulescu #include <linux/module.h> 734ff6846SIoana Radulescu #include <linux/platform_device.h> 834ff6846SIoana Radulescu #include <linux/etherdevice.h> 934ff6846SIoana Radulescu #include <linux/of_net.h> 1034ff6846SIoana Radulescu #include <linux/interrupt.h> 1134ff6846SIoana Radulescu #include <linux/msi.h> 1234ff6846SIoana Radulescu #include <linux/kthread.h> 1334ff6846SIoana Radulescu #include <linux/iommu.h> 1434ff6846SIoana Radulescu #include <linux/net_tstamp.h> 1534ff6846SIoana Radulescu #include <linux/fsl/mc.h> 167e273a8eSIoana Ciocoi Radulescu #include <linux/bpf.h> 177e273a8eSIoana Ciocoi Radulescu #include <linux/bpf_trace.h> 1834ff6846SIoana Radulescu #include <net/sock.h> 1934ff6846SIoana Radulescu 2034ff6846SIoana Radulescu #include "dpaa2-eth.h" 2134ff6846SIoana Radulescu 2234ff6846SIoana Radulescu /* CREATE_TRACE_POINTS only needs to be defined once. Other dpa files 2334ff6846SIoana Radulescu * using trace events only need to #include <trace/events/sched.h> 2434ff6846SIoana Radulescu */ 2534ff6846SIoana Radulescu #define CREATE_TRACE_POINTS 2634ff6846SIoana Radulescu #include "dpaa2-eth-trace.h" 2734ff6846SIoana Radulescu 2834ff6846SIoana Radulescu MODULE_LICENSE("Dual BSD/GPL"); 2934ff6846SIoana Radulescu MODULE_AUTHOR("Freescale Semiconductor, Inc"); 3034ff6846SIoana Radulescu MODULE_DESCRIPTION("Freescale DPAA2 Ethernet Driver"); 3134ff6846SIoana Radulescu 3234ff6846SIoana Radulescu static void *dpaa2_iova_to_virt(struct iommu_domain *domain, 3334ff6846SIoana Radulescu dma_addr_t iova_addr) 3434ff6846SIoana Radulescu { 3534ff6846SIoana Radulescu phys_addr_t phys_addr; 3634ff6846SIoana Radulescu 3734ff6846SIoana Radulescu phys_addr = domain ? 
iommu_iova_to_phys(domain, iova_addr) : iova_addr; 3834ff6846SIoana Radulescu 3934ff6846SIoana Radulescu return phys_to_virt(phys_addr); 4034ff6846SIoana Radulescu } 4134ff6846SIoana Radulescu 4234ff6846SIoana Radulescu static void validate_rx_csum(struct dpaa2_eth_priv *priv, 4334ff6846SIoana Radulescu u32 fd_status, 4434ff6846SIoana Radulescu struct sk_buff *skb) 4534ff6846SIoana Radulescu { 4634ff6846SIoana Radulescu skb_checksum_none_assert(skb); 4734ff6846SIoana Radulescu 4834ff6846SIoana Radulescu /* HW checksum validation is disabled, nothing to do here */ 4934ff6846SIoana Radulescu if (!(priv->net_dev->features & NETIF_F_RXCSUM)) 5034ff6846SIoana Radulescu return; 5134ff6846SIoana Radulescu 5234ff6846SIoana Radulescu /* Read checksum validation bits */ 5334ff6846SIoana Radulescu if (!((fd_status & DPAA2_FAS_L3CV) && 5434ff6846SIoana Radulescu (fd_status & DPAA2_FAS_L4CV))) 5534ff6846SIoana Radulescu return; 5634ff6846SIoana Radulescu 5734ff6846SIoana Radulescu /* Inform the stack there's no need to compute L3/L4 csum anymore */ 5834ff6846SIoana Radulescu skb->ip_summed = CHECKSUM_UNNECESSARY; 5934ff6846SIoana Radulescu } 6034ff6846SIoana Radulescu 6134ff6846SIoana Radulescu /* Free a received FD. 6234ff6846SIoana Radulescu * Not to be used for Tx conf FDs or on any other paths. 
6334ff6846SIoana Radulescu */ 6434ff6846SIoana Radulescu static void free_rx_fd(struct dpaa2_eth_priv *priv, 6534ff6846SIoana Radulescu const struct dpaa2_fd *fd, 6634ff6846SIoana Radulescu void *vaddr) 6734ff6846SIoana Radulescu { 6834ff6846SIoana Radulescu struct device *dev = priv->net_dev->dev.parent; 6934ff6846SIoana Radulescu dma_addr_t addr = dpaa2_fd_get_addr(fd); 7034ff6846SIoana Radulescu u8 fd_format = dpaa2_fd_get_format(fd); 7134ff6846SIoana Radulescu struct dpaa2_sg_entry *sgt; 7234ff6846SIoana Radulescu void *sg_vaddr; 7334ff6846SIoana Radulescu int i; 7434ff6846SIoana Radulescu 7534ff6846SIoana Radulescu /* If single buffer frame, just free the data buffer */ 7634ff6846SIoana Radulescu if (fd_format == dpaa2_fd_single) 7734ff6846SIoana Radulescu goto free_buf; 7834ff6846SIoana Radulescu else if (fd_format != dpaa2_fd_sg) 7934ff6846SIoana Radulescu /* We don't support any other format */ 8034ff6846SIoana Radulescu return; 8134ff6846SIoana Radulescu 8234ff6846SIoana Radulescu /* For S/G frames, we first need to free all SG entries 8334ff6846SIoana Radulescu * except the first one, which was taken care of already 8434ff6846SIoana Radulescu */ 8534ff6846SIoana Radulescu sgt = vaddr + dpaa2_fd_get_offset(fd); 8634ff6846SIoana Radulescu for (i = 1; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) { 8734ff6846SIoana Radulescu addr = dpaa2_sg_get_addr(&sgt[i]); 8834ff6846SIoana Radulescu sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr); 8927c87486SIoana Ciocoi Radulescu dma_unmap_page(dev, addr, DPAA2_ETH_RX_BUF_SIZE, 9018c2e770SIoana Ciocoi Radulescu DMA_BIDIRECTIONAL); 9134ff6846SIoana Radulescu 9227c87486SIoana Ciocoi Radulescu free_pages((unsigned long)sg_vaddr, 0); 9334ff6846SIoana Radulescu if (dpaa2_sg_is_final(&sgt[i])) 9434ff6846SIoana Radulescu break; 9534ff6846SIoana Radulescu } 9634ff6846SIoana Radulescu 9734ff6846SIoana Radulescu free_buf: 9827c87486SIoana Ciocoi Radulescu free_pages((unsigned long)vaddr, 0); 9934ff6846SIoana Radulescu } 
10034ff6846SIoana Radulescu 10134ff6846SIoana Radulescu /* Build a linear skb based on a single-buffer frame descriptor */ 102fdb6ca9eSIoana Ciornei static struct sk_buff *build_linear_skb(struct dpaa2_eth_channel *ch, 10334ff6846SIoana Radulescu const struct dpaa2_fd *fd, 10434ff6846SIoana Radulescu void *fd_vaddr) 10534ff6846SIoana Radulescu { 10634ff6846SIoana Radulescu struct sk_buff *skb = NULL; 10734ff6846SIoana Radulescu u16 fd_offset = dpaa2_fd_get_offset(fd); 10834ff6846SIoana Radulescu u32 fd_length = dpaa2_fd_get_len(fd); 10934ff6846SIoana Radulescu 11034ff6846SIoana Radulescu ch->buf_count--; 11134ff6846SIoana Radulescu 11227c87486SIoana Ciocoi Radulescu skb = build_skb(fd_vaddr, DPAA2_ETH_RX_BUF_RAW_SIZE); 11334ff6846SIoana Radulescu if (unlikely(!skb)) 11434ff6846SIoana Radulescu return NULL; 11534ff6846SIoana Radulescu 11634ff6846SIoana Radulescu skb_reserve(skb, fd_offset); 11734ff6846SIoana Radulescu skb_put(skb, fd_length); 11834ff6846SIoana Radulescu 11934ff6846SIoana Radulescu return skb; 12034ff6846SIoana Radulescu } 12134ff6846SIoana Radulescu 12234ff6846SIoana Radulescu /* Build a non linear (fragmented) skb based on a S/G table */ 12334ff6846SIoana Radulescu static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv, 12434ff6846SIoana Radulescu struct dpaa2_eth_channel *ch, 12534ff6846SIoana Radulescu struct dpaa2_sg_entry *sgt) 12634ff6846SIoana Radulescu { 12734ff6846SIoana Radulescu struct sk_buff *skb = NULL; 12834ff6846SIoana Radulescu struct device *dev = priv->net_dev->dev.parent; 12934ff6846SIoana Radulescu void *sg_vaddr; 13034ff6846SIoana Radulescu dma_addr_t sg_addr; 13134ff6846SIoana Radulescu u16 sg_offset; 13234ff6846SIoana Radulescu u32 sg_length; 13334ff6846SIoana Radulescu struct page *page, *head_page; 13434ff6846SIoana Radulescu int page_offset; 13534ff6846SIoana Radulescu int i; 13634ff6846SIoana Radulescu 13734ff6846SIoana Radulescu for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) { 13834ff6846SIoana Radulescu 
struct dpaa2_sg_entry *sge = &sgt[i]; 13934ff6846SIoana Radulescu 14034ff6846SIoana Radulescu /* NOTE: We only support SG entries in dpaa2_sg_single format, 14134ff6846SIoana Radulescu * but this is the only format we may receive from HW anyway 14234ff6846SIoana Radulescu */ 14334ff6846SIoana Radulescu 14434ff6846SIoana Radulescu /* Get the address and length from the S/G entry */ 14534ff6846SIoana Radulescu sg_addr = dpaa2_sg_get_addr(sge); 14634ff6846SIoana Radulescu sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, sg_addr); 14727c87486SIoana Ciocoi Radulescu dma_unmap_page(dev, sg_addr, DPAA2_ETH_RX_BUF_SIZE, 14818c2e770SIoana Ciocoi Radulescu DMA_BIDIRECTIONAL); 14934ff6846SIoana Radulescu 15034ff6846SIoana Radulescu sg_length = dpaa2_sg_get_len(sge); 15134ff6846SIoana Radulescu 15234ff6846SIoana Radulescu if (i == 0) { 15334ff6846SIoana Radulescu /* We build the skb around the first data buffer */ 15427c87486SIoana Ciocoi Radulescu skb = build_skb(sg_vaddr, DPAA2_ETH_RX_BUF_RAW_SIZE); 15534ff6846SIoana Radulescu if (unlikely(!skb)) { 15634ff6846SIoana Radulescu /* Free the first SG entry now, since we already 15734ff6846SIoana Radulescu * unmapped it and obtained the virtual address 15834ff6846SIoana Radulescu */ 15927c87486SIoana Ciocoi Radulescu free_pages((unsigned long)sg_vaddr, 0); 16034ff6846SIoana Radulescu 16134ff6846SIoana Radulescu /* We still need to subtract the buffers used 16234ff6846SIoana Radulescu * by this FD from our software counter 16334ff6846SIoana Radulescu */ 16434ff6846SIoana Radulescu while (!dpaa2_sg_is_final(&sgt[i]) && 16534ff6846SIoana Radulescu i < DPAA2_ETH_MAX_SG_ENTRIES) 16634ff6846SIoana Radulescu i++; 16734ff6846SIoana Radulescu break; 16834ff6846SIoana Radulescu } 16934ff6846SIoana Radulescu 17034ff6846SIoana Radulescu sg_offset = dpaa2_sg_get_offset(sge); 17134ff6846SIoana Radulescu skb_reserve(skb, sg_offset); 17234ff6846SIoana Radulescu skb_put(skb, sg_length); 17334ff6846SIoana Radulescu } else { 17434ff6846SIoana 
Radulescu /* Rest of the data buffers are stored as skb frags */ 17534ff6846SIoana Radulescu page = virt_to_page(sg_vaddr); 17634ff6846SIoana Radulescu head_page = virt_to_head_page(sg_vaddr); 17734ff6846SIoana Radulescu 17834ff6846SIoana Radulescu /* Offset in page (which may be compound). 17934ff6846SIoana Radulescu * Data in subsequent SG entries is stored from the 18034ff6846SIoana Radulescu * beginning of the buffer, so we don't need to add the 18134ff6846SIoana Radulescu * sg_offset. 18234ff6846SIoana Radulescu */ 18334ff6846SIoana Radulescu page_offset = ((unsigned long)sg_vaddr & 18434ff6846SIoana Radulescu (PAGE_SIZE - 1)) + 18534ff6846SIoana Radulescu (page_address(page) - page_address(head_page)); 18634ff6846SIoana Radulescu 18734ff6846SIoana Radulescu skb_add_rx_frag(skb, i - 1, head_page, page_offset, 18834ff6846SIoana Radulescu sg_length, DPAA2_ETH_RX_BUF_SIZE); 18934ff6846SIoana Radulescu } 19034ff6846SIoana Radulescu 19134ff6846SIoana Radulescu if (dpaa2_sg_is_final(sge)) 19234ff6846SIoana Radulescu break; 19334ff6846SIoana Radulescu } 19434ff6846SIoana Radulescu 19534ff6846SIoana Radulescu WARN_ONCE(i == DPAA2_ETH_MAX_SG_ENTRIES, "Final bit not set in SGT"); 19634ff6846SIoana Radulescu 19734ff6846SIoana Radulescu /* Count all data buffers + SG table buffer */ 19834ff6846SIoana Radulescu ch->buf_count -= i + 2; 19934ff6846SIoana Radulescu 20034ff6846SIoana Radulescu return skb; 20134ff6846SIoana Radulescu } 20234ff6846SIoana Radulescu 203569375fbSIoana Ciocoi Radulescu /* Free buffers acquired from the buffer pool or which were meant to 204569375fbSIoana Ciocoi Radulescu * be released in the pool 205569375fbSIoana Ciocoi Radulescu */ 206569375fbSIoana Ciocoi Radulescu static void free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array, int count) 207569375fbSIoana Ciocoi Radulescu { 208569375fbSIoana Ciocoi Radulescu struct device *dev = priv->net_dev->dev.parent; 209569375fbSIoana Ciocoi Radulescu void *vaddr; 210569375fbSIoana Ciocoi Radulescu int 
i; 211569375fbSIoana Ciocoi Radulescu 212569375fbSIoana Ciocoi Radulescu for (i = 0; i < count; i++) { 213569375fbSIoana Ciocoi Radulescu vaddr = dpaa2_iova_to_virt(priv->iommu_domain, buf_array[i]); 21427c87486SIoana Ciocoi Radulescu dma_unmap_page(dev, buf_array[i], DPAA2_ETH_RX_BUF_SIZE, 21518c2e770SIoana Ciocoi Radulescu DMA_BIDIRECTIONAL); 21627c87486SIoana Ciocoi Radulescu free_pages((unsigned long)vaddr, 0); 217569375fbSIoana Ciocoi Radulescu } 218569375fbSIoana Ciocoi Radulescu } 219569375fbSIoana Ciocoi Radulescu 2205d39dc21SIoana Ciocoi Radulescu static void xdp_release_buf(struct dpaa2_eth_priv *priv, 2215d39dc21SIoana Ciocoi Radulescu struct dpaa2_eth_channel *ch, 2225d39dc21SIoana Ciocoi Radulescu dma_addr_t addr) 2235d39dc21SIoana Ciocoi Radulescu { 2245d39dc21SIoana Ciocoi Radulescu int err; 2255d39dc21SIoana Ciocoi Radulescu 2265d39dc21SIoana Ciocoi Radulescu ch->xdp.drop_bufs[ch->xdp.drop_cnt++] = addr; 2275d39dc21SIoana Ciocoi Radulescu if (ch->xdp.drop_cnt < DPAA2_ETH_BUFS_PER_CMD) 2285d39dc21SIoana Ciocoi Radulescu return; 2295d39dc21SIoana Ciocoi Radulescu 2305d39dc21SIoana Ciocoi Radulescu while ((err = dpaa2_io_service_release(ch->dpio, priv->bpid, 2315d39dc21SIoana Ciocoi Radulescu ch->xdp.drop_bufs, 2325d39dc21SIoana Ciocoi Radulescu ch->xdp.drop_cnt)) == -EBUSY) 2335d39dc21SIoana Ciocoi Radulescu cpu_relax(); 2345d39dc21SIoana Ciocoi Radulescu 2355d39dc21SIoana Ciocoi Radulescu if (err) { 2365d39dc21SIoana Ciocoi Radulescu free_bufs(priv, ch->xdp.drop_bufs, ch->xdp.drop_cnt); 2375d39dc21SIoana Ciocoi Radulescu ch->buf_count -= ch->xdp.drop_cnt; 2385d39dc21SIoana Ciocoi Radulescu } 2395d39dc21SIoana Ciocoi Radulescu 2405d39dc21SIoana Ciocoi Radulescu ch->xdp.drop_cnt = 0; 2415d39dc21SIoana Ciocoi Radulescu } 2425d39dc21SIoana Ciocoi Radulescu 24399e43521SIoana Ciocoi Radulescu static int xdp_enqueue(struct dpaa2_eth_priv *priv, struct dpaa2_fd *fd, 24499e43521SIoana Ciocoi Radulescu void *buf_start, u16 queue_id) 24599e43521SIoana Ciocoi 
Radulescu { 24699e43521SIoana Ciocoi Radulescu struct dpaa2_eth_fq *fq; 24799e43521SIoana Ciocoi Radulescu struct dpaa2_faead *faead; 24899e43521SIoana Ciocoi Radulescu u32 ctrl, frc; 24999e43521SIoana Ciocoi Radulescu int i, err; 25099e43521SIoana Ciocoi Radulescu 25199e43521SIoana Ciocoi Radulescu /* Mark the egress frame hardware annotation area as valid */ 25299e43521SIoana Ciocoi Radulescu frc = dpaa2_fd_get_frc(fd); 25399e43521SIoana Ciocoi Radulescu dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV); 25499e43521SIoana Ciocoi Radulescu dpaa2_fd_set_ctrl(fd, DPAA2_FD_CTRL_ASAL); 25599e43521SIoana Ciocoi Radulescu 25699e43521SIoana Ciocoi Radulescu /* Instruct hardware to release the FD buffer directly into 25799e43521SIoana Ciocoi Radulescu * the buffer pool once transmission is completed, instead of 25899e43521SIoana Ciocoi Radulescu * sending a Tx confirmation frame to us 25999e43521SIoana Ciocoi Radulescu */ 26099e43521SIoana Ciocoi Radulescu ctrl = DPAA2_FAEAD_A4V | DPAA2_FAEAD_A2V | DPAA2_FAEAD_EBDDV; 26199e43521SIoana Ciocoi Radulescu faead = dpaa2_get_faead(buf_start, false); 26299e43521SIoana Ciocoi Radulescu faead->ctrl = cpu_to_le32(ctrl); 26399e43521SIoana Ciocoi Radulescu faead->conf_fqid = 0; 26499e43521SIoana Ciocoi Radulescu 26599e43521SIoana Ciocoi Radulescu fq = &priv->fq[queue_id]; 26699e43521SIoana Ciocoi Radulescu for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) { 2671fa0f68cSIoana Ciocoi Radulescu err = priv->enqueue(priv, fq, fd, 0); 26899e43521SIoana Ciocoi Radulescu if (err != -EBUSY) 26999e43521SIoana Ciocoi Radulescu break; 27099e43521SIoana Ciocoi Radulescu } 27199e43521SIoana Ciocoi Radulescu 27299e43521SIoana Ciocoi Radulescu return err; 27399e43521SIoana Ciocoi Radulescu } 27499e43521SIoana Ciocoi Radulescu 2757e273a8eSIoana Ciocoi Radulescu static u32 run_xdp(struct dpaa2_eth_priv *priv, 2767e273a8eSIoana Ciocoi Radulescu struct dpaa2_eth_channel *ch, 27799e43521SIoana Ciocoi Radulescu struct dpaa2_eth_fq *rx_fq, 2787e273a8eSIoana 
Ciocoi Radulescu struct dpaa2_fd *fd, void *vaddr) 2797e273a8eSIoana Ciocoi Radulescu { 2805d39dc21SIoana Ciocoi Radulescu dma_addr_t addr = dpaa2_fd_get_addr(fd); 28199e43521SIoana Ciocoi Radulescu struct rtnl_link_stats64 *percpu_stats; 2827e273a8eSIoana Ciocoi Radulescu struct bpf_prog *xdp_prog; 2837e273a8eSIoana Ciocoi Radulescu struct xdp_buff xdp; 2847e273a8eSIoana Ciocoi Radulescu u32 xdp_act = XDP_PASS; 28599e43521SIoana Ciocoi Radulescu int err; 28699e43521SIoana Ciocoi Radulescu 28799e43521SIoana Ciocoi Radulescu percpu_stats = this_cpu_ptr(priv->percpu_stats); 2887e273a8eSIoana Ciocoi Radulescu 2897e273a8eSIoana Ciocoi Radulescu rcu_read_lock(); 2907e273a8eSIoana Ciocoi Radulescu 2917e273a8eSIoana Ciocoi Radulescu xdp_prog = READ_ONCE(ch->xdp.prog); 2927e273a8eSIoana Ciocoi Radulescu if (!xdp_prog) 2937e273a8eSIoana Ciocoi Radulescu goto out; 2947e273a8eSIoana Ciocoi Radulescu 2957e273a8eSIoana Ciocoi Radulescu xdp.data = vaddr + dpaa2_fd_get_offset(fd); 2967e273a8eSIoana Ciocoi Radulescu xdp.data_end = xdp.data + dpaa2_fd_get_len(fd); 2977b1eea1aSIoana Ciocoi Radulescu xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM; 2987e273a8eSIoana Ciocoi Radulescu xdp_set_data_meta_invalid(&xdp); 299d678be1dSIoana Radulescu xdp.rxq = &ch->xdp_rxq; 3007e273a8eSIoana Ciocoi Radulescu 3017e273a8eSIoana Ciocoi Radulescu xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp); 3027e273a8eSIoana Ciocoi Radulescu 3037b1eea1aSIoana Ciocoi Radulescu /* xdp.data pointer may have changed */ 3047b1eea1aSIoana Ciocoi Radulescu dpaa2_fd_set_offset(fd, xdp.data - vaddr); 3057b1eea1aSIoana Ciocoi Radulescu dpaa2_fd_set_len(fd, xdp.data_end - xdp.data); 3067b1eea1aSIoana Ciocoi Radulescu 3077e273a8eSIoana Ciocoi Radulescu switch (xdp_act) { 3087e273a8eSIoana Ciocoi Radulescu case XDP_PASS: 3097e273a8eSIoana Ciocoi Radulescu break; 31099e43521SIoana Ciocoi Radulescu case XDP_TX: 31199e43521SIoana Ciocoi Radulescu err = xdp_enqueue(priv, fd, vaddr, rx_fq->flowid); 31299e43521SIoana Ciocoi 
Radulescu if (err) { 31399e43521SIoana Ciocoi Radulescu xdp_release_buf(priv, ch, addr); 31499e43521SIoana Ciocoi Radulescu percpu_stats->tx_errors++; 315a4a7b762SIoana Ciocoi Radulescu ch->stats.xdp_tx_err++; 31699e43521SIoana Ciocoi Radulescu } else { 31799e43521SIoana Ciocoi Radulescu percpu_stats->tx_packets++; 31899e43521SIoana Ciocoi Radulescu percpu_stats->tx_bytes += dpaa2_fd_get_len(fd); 319a4a7b762SIoana Ciocoi Radulescu ch->stats.xdp_tx++; 32099e43521SIoana Ciocoi Radulescu } 32199e43521SIoana Ciocoi Radulescu break; 3227e273a8eSIoana Ciocoi Radulescu default: 3237e273a8eSIoana Ciocoi Radulescu bpf_warn_invalid_xdp_action(xdp_act); 324c1cb11bcSIoana Ciocoi Radulescu /* fall through */ 3257e273a8eSIoana Ciocoi Radulescu case XDP_ABORTED: 3267e273a8eSIoana Ciocoi Radulescu trace_xdp_exception(priv->net_dev, xdp_prog, xdp_act); 327c1cb11bcSIoana Ciocoi Radulescu /* fall through */ 3287e273a8eSIoana Ciocoi Radulescu case XDP_DROP: 3295d39dc21SIoana Ciocoi Radulescu xdp_release_buf(priv, ch, addr); 330a4a7b762SIoana Ciocoi Radulescu ch->stats.xdp_drop++; 3317e273a8eSIoana Ciocoi Radulescu break; 332d678be1dSIoana Radulescu case XDP_REDIRECT: 333d678be1dSIoana Radulescu dma_unmap_page(priv->net_dev->dev.parent, addr, 334d678be1dSIoana Radulescu DPAA2_ETH_RX_BUF_SIZE, DMA_BIDIRECTIONAL); 335d678be1dSIoana Radulescu ch->buf_count--; 336d678be1dSIoana Radulescu xdp.data_hard_start = vaddr; 337d678be1dSIoana Radulescu err = xdp_do_redirect(priv->net_dev, &xdp, xdp_prog); 338d678be1dSIoana Radulescu if (unlikely(err)) 339d678be1dSIoana Radulescu ch->stats.xdp_drop++; 340d678be1dSIoana Radulescu else 341d678be1dSIoana Radulescu ch->stats.xdp_redirect++; 342d678be1dSIoana Radulescu break; 3437e273a8eSIoana Ciocoi Radulescu } 3447e273a8eSIoana Ciocoi Radulescu 345d678be1dSIoana Radulescu ch->xdp.res |= xdp_act; 3467e273a8eSIoana Ciocoi Radulescu out: 3477e273a8eSIoana Ciocoi Radulescu rcu_read_unlock(); 3487e273a8eSIoana Ciocoi Radulescu return xdp_act; 
3497e273a8eSIoana Ciocoi Radulescu } 3507e273a8eSIoana Ciocoi Radulescu 35134ff6846SIoana Radulescu /* Main Rx frame processing routine */ 35234ff6846SIoana Radulescu static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv, 35334ff6846SIoana Radulescu struct dpaa2_eth_channel *ch, 35434ff6846SIoana Radulescu const struct dpaa2_fd *fd, 355dbcdf728SIoana Ciocoi Radulescu struct dpaa2_eth_fq *fq) 35634ff6846SIoana Radulescu { 35734ff6846SIoana Radulescu dma_addr_t addr = dpaa2_fd_get_addr(fd); 35834ff6846SIoana Radulescu u8 fd_format = dpaa2_fd_get_format(fd); 35934ff6846SIoana Radulescu void *vaddr; 36034ff6846SIoana Radulescu struct sk_buff *skb; 36134ff6846SIoana Radulescu struct rtnl_link_stats64 *percpu_stats; 36234ff6846SIoana Radulescu struct dpaa2_eth_drv_stats *percpu_extras; 36334ff6846SIoana Radulescu struct device *dev = priv->net_dev->dev.parent; 36434ff6846SIoana Radulescu struct dpaa2_fas *fas; 36534ff6846SIoana Radulescu void *buf_data; 36634ff6846SIoana Radulescu u32 status = 0; 3677e273a8eSIoana Ciocoi Radulescu u32 xdp_act; 36834ff6846SIoana Radulescu 36934ff6846SIoana Radulescu /* Tracing point */ 37034ff6846SIoana Radulescu trace_dpaa2_rx_fd(priv->net_dev, fd); 37134ff6846SIoana Radulescu 37234ff6846SIoana Radulescu vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr); 3735d39dc21SIoana Ciocoi Radulescu dma_sync_single_for_cpu(dev, addr, DPAA2_ETH_RX_BUF_SIZE, 37418c2e770SIoana Ciocoi Radulescu DMA_BIDIRECTIONAL); 37534ff6846SIoana Radulescu 37634ff6846SIoana Radulescu fas = dpaa2_get_fas(vaddr, false); 37734ff6846SIoana Radulescu prefetch(fas); 37834ff6846SIoana Radulescu buf_data = vaddr + dpaa2_fd_get_offset(fd); 37934ff6846SIoana Radulescu prefetch(buf_data); 38034ff6846SIoana Radulescu 38134ff6846SIoana Radulescu percpu_stats = this_cpu_ptr(priv->percpu_stats); 38234ff6846SIoana Radulescu percpu_extras = this_cpu_ptr(priv->percpu_extras); 38334ff6846SIoana Radulescu 38434ff6846SIoana Radulescu if (fd_format == dpaa2_fd_single) { 
38599e43521SIoana Ciocoi Radulescu xdp_act = run_xdp(priv, ch, fq, (struct dpaa2_fd *)fd, vaddr); 3867e273a8eSIoana Ciocoi Radulescu if (xdp_act != XDP_PASS) { 3877e273a8eSIoana Ciocoi Radulescu percpu_stats->rx_packets++; 3887e273a8eSIoana Ciocoi Radulescu percpu_stats->rx_bytes += dpaa2_fd_get_len(fd); 3897e273a8eSIoana Ciocoi Radulescu return; 3907e273a8eSIoana Ciocoi Radulescu } 3917e273a8eSIoana Ciocoi Radulescu 39227c87486SIoana Ciocoi Radulescu dma_unmap_page(dev, addr, DPAA2_ETH_RX_BUF_SIZE, 39318c2e770SIoana Ciocoi Radulescu DMA_BIDIRECTIONAL); 394fdb6ca9eSIoana Ciornei skb = build_linear_skb(ch, fd, vaddr); 39534ff6846SIoana Radulescu } else if (fd_format == dpaa2_fd_sg) { 3967e273a8eSIoana Ciocoi Radulescu WARN_ON(priv->xdp_prog); 3977e273a8eSIoana Ciocoi Radulescu 39827c87486SIoana Ciocoi Radulescu dma_unmap_page(dev, addr, DPAA2_ETH_RX_BUF_SIZE, 39918c2e770SIoana Ciocoi Radulescu DMA_BIDIRECTIONAL); 40034ff6846SIoana Radulescu skb = build_frag_skb(priv, ch, buf_data); 40127c87486SIoana Ciocoi Radulescu free_pages((unsigned long)vaddr, 0); 40234ff6846SIoana Radulescu percpu_extras->rx_sg_frames++; 40334ff6846SIoana Radulescu percpu_extras->rx_sg_bytes += dpaa2_fd_get_len(fd); 40434ff6846SIoana Radulescu } else { 40534ff6846SIoana Radulescu /* We don't support any other format */ 40634ff6846SIoana Radulescu goto err_frame_format; 40734ff6846SIoana Radulescu } 40834ff6846SIoana Radulescu 40934ff6846SIoana Radulescu if (unlikely(!skb)) 41034ff6846SIoana Radulescu goto err_build_skb; 41134ff6846SIoana Radulescu 41234ff6846SIoana Radulescu prefetch(skb->data); 41334ff6846SIoana Radulescu 41434ff6846SIoana Radulescu /* Get the timestamp value */ 41534ff6846SIoana Radulescu if (priv->rx_tstamp) { 41634ff6846SIoana Radulescu struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb); 41734ff6846SIoana Radulescu __le64 *ts = dpaa2_get_ts(vaddr, false); 41834ff6846SIoana Radulescu u64 ns; 41934ff6846SIoana Radulescu 42034ff6846SIoana Radulescu 
memset(shhwtstamps, 0, sizeof(*shhwtstamps)); 42134ff6846SIoana Radulescu 42234ff6846SIoana Radulescu ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts); 42334ff6846SIoana Radulescu shhwtstamps->hwtstamp = ns_to_ktime(ns); 42434ff6846SIoana Radulescu } 42534ff6846SIoana Radulescu 42634ff6846SIoana Radulescu /* Check if we need to validate the L4 csum */ 42734ff6846SIoana Radulescu if (likely(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV)) { 42834ff6846SIoana Radulescu status = le32_to_cpu(fas->status); 42934ff6846SIoana Radulescu validate_rx_csum(priv, status, skb); 43034ff6846SIoana Radulescu } 43134ff6846SIoana Radulescu 43234ff6846SIoana Radulescu skb->protocol = eth_type_trans(skb, priv->net_dev); 433dbcdf728SIoana Ciocoi Radulescu skb_record_rx_queue(skb, fq->flowid); 43434ff6846SIoana Radulescu 43534ff6846SIoana Radulescu percpu_stats->rx_packets++; 43634ff6846SIoana Radulescu percpu_stats->rx_bytes += dpaa2_fd_get_len(fd); 43734ff6846SIoana Radulescu 4380a25d92cSIoana Ciornei list_add_tail(&skb->list, ch->rx_list); 43934ff6846SIoana Radulescu 44034ff6846SIoana Radulescu return; 44134ff6846SIoana Radulescu 44234ff6846SIoana Radulescu err_build_skb: 44334ff6846SIoana Radulescu free_rx_fd(priv, fd, vaddr); 44434ff6846SIoana Radulescu err_frame_format: 44534ff6846SIoana Radulescu percpu_stats->rx_dropped++; 44634ff6846SIoana Radulescu } 44734ff6846SIoana Radulescu 44834ff6846SIoana Radulescu /* Consume all frames pull-dequeued into the store. This is the simplest way to 44934ff6846SIoana Radulescu * make sure we don't accidentally issue another volatile dequeue which would 45034ff6846SIoana Radulescu * overwrite (leak) frames already in the store. 45134ff6846SIoana Radulescu * 45234ff6846SIoana Radulescu * Observance of NAPI budget is not our concern, leaving that to the caller. 
45334ff6846SIoana Radulescu */ 45468049a5fSIoana Ciocoi Radulescu static int consume_frames(struct dpaa2_eth_channel *ch, 455569dac6aSIoana Ciocoi Radulescu struct dpaa2_eth_fq **src) 45634ff6846SIoana Radulescu { 45734ff6846SIoana Radulescu struct dpaa2_eth_priv *priv = ch->priv; 45868049a5fSIoana Ciocoi Radulescu struct dpaa2_eth_fq *fq = NULL; 45934ff6846SIoana Radulescu struct dpaa2_dq *dq; 46034ff6846SIoana Radulescu const struct dpaa2_fd *fd; 46134ff6846SIoana Radulescu int cleaned = 0; 46234ff6846SIoana Radulescu int is_last; 46334ff6846SIoana Radulescu 46434ff6846SIoana Radulescu do { 46534ff6846SIoana Radulescu dq = dpaa2_io_store_next(ch->store, &is_last); 46634ff6846SIoana Radulescu if (unlikely(!dq)) { 46734ff6846SIoana Radulescu /* If we're here, we *must* have placed a 46834ff6846SIoana Radulescu * volatile dequeue comnmand, so keep reading through 46934ff6846SIoana Radulescu * the store until we get some sort of valid response 47034ff6846SIoana Radulescu * token (either a valid frame or an "empty dequeue") 47134ff6846SIoana Radulescu */ 47234ff6846SIoana Radulescu continue; 47334ff6846SIoana Radulescu } 47434ff6846SIoana Radulescu 47534ff6846SIoana Radulescu fd = dpaa2_dq_fd(dq); 47634ff6846SIoana Radulescu fq = (struct dpaa2_eth_fq *)(uintptr_t)dpaa2_dq_fqd_ctx(dq); 47734ff6846SIoana Radulescu 478dbcdf728SIoana Ciocoi Radulescu fq->consume(priv, ch, fd, fq); 47934ff6846SIoana Radulescu cleaned++; 48034ff6846SIoana Radulescu } while (!is_last); 48134ff6846SIoana Radulescu 48268049a5fSIoana Ciocoi Radulescu if (!cleaned) 48368049a5fSIoana Ciocoi Radulescu return 0; 48468049a5fSIoana Ciocoi Radulescu 48568049a5fSIoana Ciocoi Radulescu fq->stats.frames += cleaned; 48668049a5fSIoana Ciocoi Radulescu 48768049a5fSIoana Ciocoi Radulescu /* A dequeue operation only pulls frames from a single queue 488569dac6aSIoana Ciocoi Radulescu * into the store. Return the frame queue as an out param. 
48968049a5fSIoana Ciocoi Radulescu */ 490569dac6aSIoana Ciocoi Radulescu if (src) 491569dac6aSIoana Ciocoi Radulescu *src = fq; 49268049a5fSIoana Ciocoi Radulescu 49334ff6846SIoana Radulescu return cleaned; 49434ff6846SIoana Radulescu } 49534ff6846SIoana Radulescu 49634ff6846SIoana Radulescu /* Configure the egress frame annotation for timestamp update */ 49734ff6846SIoana Radulescu static void enable_tx_tstamp(struct dpaa2_fd *fd, void *buf_start) 49834ff6846SIoana Radulescu { 49934ff6846SIoana Radulescu struct dpaa2_faead *faead; 50034ff6846SIoana Radulescu u32 ctrl, frc; 50134ff6846SIoana Radulescu 50234ff6846SIoana Radulescu /* Mark the egress frame annotation area as valid */ 50334ff6846SIoana Radulescu frc = dpaa2_fd_get_frc(fd); 50434ff6846SIoana Radulescu dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV); 50534ff6846SIoana Radulescu 50634ff6846SIoana Radulescu /* Set hardware annotation size */ 50734ff6846SIoana Radulescu ctrl = dpaa2_fd_get_ctrl(fd); 50834ff6846SIoana Radulescu dpaa2_fd_set_ctrl(fd, ctrl | DPAA2_FD_CTRL_ASAL); 50934ff6846SIoana Radulescu 51034ff6846SIoana Radulescu /* enable UPD (update prepanded data) bit in FAEAD field of 51134ff6846SIoana Radulescu * hardware frame annotation area 51234ff6846SIoana Radulescu */ 51334ff6846SIoana Radulescu ctrl = DPAA2_FAEAD_A2V | DPAA2_FAEAD_UPDV | DPAA2_FAEAD_UPD; 51434ff6846SIoana Radulescu faead = dpaa2_get_faead(buf_start, true); 51534ff6846SIoana Radulescu faead->ctrl = cpu_to_le32(ctrl); 51634ff6846SIoana Radulescu } 51734ff6846SIoana Radulescu 51834ff6846SIoana Radulescu /* Create a frame descriptor based on a fragmented skb */ 51934ff6846SIoana Radulescu static int build_sg_fd(struct dpaa2_eth_priv *priv, 52034ff6846SIoana Radulescu struct sk_buff *skb, 52134ff6846SIoana Radulescu struct dpaa2_fd *fd) 52234ff6846SIoana Radulescu { 52334ff6846SIoana Radulescu struct device *dev = priv->net_dev->dev.parent; 52434ff6846SIoana Radulescu void *sgt_buf = NULL; 52534ff6846SIoana Radulescu dma_addr_t 
addr; 52634ff6846SIoana Radulescu int nr_frags = skb_shinfo(skb)->nr_frags; 52734ff6846SIoana Radulescu struct dpaa2_sg_entry *sgt; 52834ff6846SIoana Radulescu int i, err; 52934ff6846SIoana Radulescu int sgt_buf_size; 53034ff6846SIoana Radulescu struct scatterlist *scl, *crt_scl; 53134ff6846SIoana Radulescu int num_sg; 53234ff6846SIoana Radulescu int num_dma_bufs; 53334ff6846SIoana Radulescu struct dpaa2_eth_swa *swa; 53434ff6846SIoana Radulescu 53534ff6846SIoana Radulescu /* Create and map scatterlist. 53634ff6846SIoana Radulescu * We don't advertise NETIF_F_FRAGLIST, so skb_to_sgvec() will not have 53734ff6846SIoana Radulescu * to go beyond nr_frags+1. 53834ff6846SIoana Radulescu * Note: We don't support chained scatterlists 53934ff6846SIoana Radulescu */ 54034ff6846SIoana Radulescu if (unlikely(PAGE_SIZE / sizeof(struct scatterlist) < nr_frags + 1)) 54134ff6846SIoana Radulescu return -EINVAL; 54234ff6846SIoana Radulescu 54334ff6846SIoana Radulescu scl = kcalloc(nr_frags + 1, sizeof(struct scatterlist), GFP_ATOMIC); 54434ff6846SIoana Radulescu if (unlikely(!scl)) 54534ff6846SIoana Radulescu return -ENOMEM; 54634ff6846SIoana Radulescu 54734ff6846SIoana Radulescu sg_init_table(scl, nr_frags + 1); 54834ff6846SIoana Radulescu num_sg = skb_to_sgvec(skb, scl, 0, skb->len); 54934ff6846SIoana Radulescu num_dma_bufs = dma_map_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL); 55034ff6846SIoana Radulescu if (unlikely(!num_dma_bufs)) { 55134ff6846SIoana Radulescu err = -ENOMEM; 55234ff6846SIoana Radulescu goto dma_map_sg_failed; 55334ff6846SIoana Radulescu } 55434ff6846SIoana Radulescu 55534ff6846SIoana Radulescu /* Prepare the HW SGT structure */ 55634ff6846SIoana Radulescu sgt_buf_size = priv->tx_data_offset + 55734ff6846SIoana Radulescu sizeof(struct dpaa2_sg_entry) * num_dma_bufs; 55890bc6d4bSSebastian Andrzej Siewior sgt_buf = napi_alloc_frag(sgt_buf_size + DPAA2_ETH_TX_BUF_ALIGN); 55934ff6846SIoana Radulescu if (unlikely(!sgt_buf)) { 56034ff6846SIoana Radulescu err = -ENOMEM; 
56134ff6846SIoana Radulescu goto sgt_buf_alloc_failed; 56234ff6846SIoana Radulescu } 56334ff6846SIoana Radulescu sgt_buf = PTR_ALIGN(sgt_buf, DPAA2_ETH_TX_BUF_ALIGN); 56434ff6846SIoana Radulescu memset(sgt_buf, 0, sgt_buf_size); 56534ff6846SIoana Radulescu 56634ff6846SIoana Radulescu sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset); 56734ff6846SIoana Radulescu 56834ff6846SIoana Radulescu /* Fill in the HW SGT structure. 56934ff6846SIoana Radulescu * 57034ff6846SIoana Radulescu * sgt_buf is zeroed out, so the following fields are implicit 57134ff6846SIoana Radulescu * in all sgt entries: 57234ff6846SIoana Radulescu * - offset is 0 57334ff6846SIoana Radulescu * - format is 'dpaa2_sg_single' 57434ff6846SIoana Radulescu */ 57534ff6846SIoana Radulescu for_each_sg(scl, crt_scl, num_dma_bufs, i) { 57634ff6846SIoana Radulescu dpaa2_sg_set_addr(&sgt[i], sg_dma_address(crt_scl)); 57734ff6846SIoana Radulescu dpaa2_sg_set_len(&sgt[i], sg_dma_len(crt_scl)); 57834ff6846SIoana Radulescu } 57934ff6846SIoana Radulescu dpaa2_sg_set_final(&sgt[i - 1], true); 58034ff6846SIoana Radulescu 58134ff6846SIoana Radulescu /* Store the skb backpointer in the SGT buffer. 58234ff6846SIoana Radulescu * Fit the scatterlist and the number of buffers alongside the 58334ff6846SIoana Radulescu * skb backpointer in the software annotation area. We'll need 58434ff6846SIoana Radulescu * all of them on Tx Conf. 
58534ff6846SIoana Radulescu */ 58634ff6846SIoana Radulescu swa = (struct dpaa2_eth_swa *)sgt_buf; 587e3fdf6baSIoana Radulescu swa->type = DPAA2_ETH_SWA_SG; 588e3fdf6baSIoana Radulescu swa->sg.skb = skb; 589e3fdf6baSIoana Radulescu swa->sg.scl = scl; 590e3fdf6baSIoana Radulescu swa->sg.num_sg = num_sg; 591e3fdf6baSIoana Radulescu swa->sg.sgt_size = sgt_buf_size; 59234ff6846SIoana Radulescu 59334ff6846SIoana Radulescu /* Separately map the SGT buffer */ 59434ff6846SIoana Radulescu addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL); 59534ff6846SIoana Radulescu if (unlikely(dma_mapping_error(dev, addr))) { 59634ff6846SIoana Radulescu err = -ENOMEM; 59734ff6846SIoana Radulescu goto dma_map_single_failed; 59834ff6846SIoana Radulescu } 59934ff6846SIoana Radulescu dpaa2_fd_set_offset(fd, priv->tx_data_offset); 60034ff6846SIoana Radulescu dpaa2_fd_set_format(fd, dpaa2_fd_sg); 60134ff6846SIoana Radulescu dpaa2_fd_set_addr(fd, addr); 60234ff6846SIoana Radulescu dpaa2_fd_set_len(fd, skb->len); 603b948c8c6SIoana Radulescu dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA); 60434ff6846SIoana Radulescu 60534ff6846SIoana Radulescu if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) 60634ff6846SIoana Radulescu enable_tx_tstamp(fd, sgt_buf); 60734ff6846SIoana Radulescu 60834ff6846SIoana Radulescu return 0; 60934ff6846SIoana Radulescu 61034ff6846SIoana Radulescu dma_map_single_failed: 61134ff6846SIoana Radulescu skb_free_frag(sgt_buf); 61234ff6846SIoana Radulescu sgt_buf_alloc_failed: 61334ff6846SIoana Radulescu dma_unmap_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL); 61434ff6846SIoana Radulescu dma_map_sg_failed: 61534ff6846SIoana Radulescu kfree(scl); 61634ff6846SIoana Radulescu return err; 61734ff6846SIoana Radulescu } 61834ff6846SIoana Radulescu 61934ff6846SIoana Radulescu /* Create a frame descriptor based on a linear skb */ 62034ff6846SIoana Radulescu static int build_single_fd(struct dpaa2_eth_priv *priv, 62134ff6846SIoana Radulescu struct sk_buff *skb, 
62234ff6846SIoana Radulescu struct dpaa2_fd *fd) 62334ff6846SIoana Radulescu { 62434ff6846SIoana Radulescu struct device *dev = priv->net_dev->dev.parent; 62534ff6846SIoana Radulescu u8 *buffer_start, *aligned_start; 626e3fdf6baSIoana Radulescu struct dpaa2_eth_swa *swa; 62734ff6846SIoana Radulescu dma_addr_t addr; 62834ff6846SIoana Radulescu 62934ff6846SIoana Radulescu buffer_start = skb->data - dpaa2_eth_needed_headroom(priv, skb); 63034ff6846SIoana Radulescu 63134ff6846SIoana Radulescu /* If there's enough room to align the FD address, do it. 63234ff6846SIoana Radulescu * It will help hardware optimize accesses. 63334ff6846SIoana Radulescu */ 63434ff6846SIoana Radulescu aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN, 63534ff6846SIoana Radulescu DPAA2_ETH_TX_BUF_ALIGN); 63634ff6846SIoana Radulescu if (aligned_start >= skb->head) 63734ff6846SIoana Radulescu buffer_start = aligned_start; 63834ff6846SIoana Radulescu 63934ff6846SIoana Radulescu /* Store a backpointer to the skb at the beginning of the buffer 64034ff6846SIoana Radulescu * (in the private data area) such that we can release it 64134ff6846SIoana Radulescu * on Tx confirm 64234ff6846SIoana Radulescu */ 643e3fdf6baSIoana Radulescu swa = (struct dpaa2_eth_swa *)buffer_start; 644e3fdf6baSIoana Radulescu swa->type = DPAA2_ETH_SWA_SINGLE; 645e3fdf6baSIoana Radulescu swa->single.skb = skb; 64634ff6846SIoana Radulescu 64734ff6846SIoana Radulescu addr = dma_map_single(dev, buffer_start, 64834ff6846SIoana Radulescu skb_tail_pointer(skb) - buffer_start, 64934ff6846SIoana Radulescu DMA_BIDIRECTIONAL); 65034ff6846SIoana Radulescu if (unlikely(dma_mapping_error(dev, addr))) 65134ff6846SIoana Radulescu return -ENOMEM; 65234ff6846SIoana Radulescu 65334ff6846SIoana Radulescu dpaa2_fd_set_addr(fd, addr); 65434ff6846SIoana Radulescu dpaa2_fd_set_offset(fd, (u16)(skb->data - buffer_start)); 65534ff6846SIoana Radulescu dpaa2_fd_set_len(fd, skb->len); 65634ff6846SIoana Radulescu dpaa2_fd_set_format(fd, 
dpaa2_fd_single); 657b948c8c6SIoana Radulescu dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA); 65834ff6846SIoana Radulescu 65934ff6846SIoana Radulescu if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) 66034ff6846SIoana Radulescu enable_tx_tstamp(fd, buffer_start); 66134ff6846SIoana Radulescu 66234ff6846SIoana Radulescu return 0; 66334ff6846SIoana Radulescu } 66434ff6846SIoana Radulescu 66534ff6846SIoana Radulescu /* FD freeing routine on the Tx path 66634ff6846SIoana Radulescu * 66734ff6846SIoana Radulescu * DMA-unmap and free FD and possibly SGT buffer allocated on Tx. The skb 66834ff6846SIoana Radulescu * back-pointed to is also freed. 66934ff6846SIoana Radulescu * This can be called either from dpaa2_eth_tx_conf() or on the error path of 67034ff6846SIoana Radulescu * dpaa2_eth_tx(). 67134ff6846SIoana Radulescu */ 67234ff6846SIoana Radulescu static void free_tx_fd(const struct dpaa2_eth_priv *priv, 673d678be1dSIoana Radulescu struct dpaa2_eth_fq *fq, 6740723a3aeSIoana Ciocoi Radulescu const struct dpaa2_fd *fd, bool in_napi) 67534ff6846SIoana Radulescu { 67634ff6846SIoana Radulescu struct device *dev = priv->net_dev->dev.parent; 67734ff6846SIoana Radulescu dma_addr_t fd_addr; 678d678be1dSIoana Radulescu struct sk_buff *skb = NULL; 67934ff6846SIoana Radulescu unsigned char *buffer_start; 68034ff6846SIoana Radulescu struct dpaa2_eth_swa *swa; 68134ff6846SIoana Radulescu u8 fd_format = dpaa2_fd_get_format(fd); 682d678be1dSIoana Radulescu u32 fd_len = dpaa2_fd_get_len(fd); 68334ff6846SIoana Radulescu 68434ff6846SIoana Radulescu fd_addr = dpaa2_fd_get_addr(fd); 685e3fdf6baSIoana Radulescu buffer_start = dpaa2_iova_to_virt(priv->iommu_domain, fd_addr); 686e3fdf6baSIoana Radulescu swa = (struct dpaa2_eth_swa *)buffer_start; 68734ff6846SIoana Radulescu 68834ff6846SIoana Radulescu if (fd_format == dpaa2_fd_single) { 689d678be1dSIoana Radulescu if (swa->type == DPAA2_ETH_SWA_SINGLE) { 690e3fdf6baSIoana Radulescu skb = swa->single.skb; 691d678be1dSIoana Radulescu /* 
Accessing the skb buffer is safe before dma unmap, 692d678be1dSIoana Radulescu * because we didn't map the actual skb shell. 69334ff6846SIoana Radulescu */ 69434ff6846SIoana Radulescu dma_unmap_single(dev, fd_addr, 69534ff6846SIoana Radulescu skb_tail_pointer(skb) - buffer_start, 69634ff6846SIoana Radulescu DMA_BIDIRECTIONAL); 697d678be1dSIoana Radulescu } else { 698d678be1dSIoana Radulescu WARN_ONCE(swa->type != DPAA2_ETH_SWA_XDP, "Wrong SWA type"); 699d678be1dSIoana Radulescu dma_unmap_single(dev, fd_addr, swa->xdp.dma_size, 700d678be1dSIoana Radulescu DMA_BIDIRECTIONAL); 701d678be1dSIoana Radulescu } 70234ff6846SIoana Radulescu } else if (fd_format == dpaa2_fd_sg) { 703e3fdf6baSIoana Radulescu skb = swa->sg.skb; 70434ff6846SIoana Radulescu 70534ff6846SIoana Radulescu /* Unmap the scatterlist */ 706e3fdf6baSIoana Radulescu dma_unmap_sg(dev, swa->sg.scl, swa->sg.num_sg, 707e3fdf6baSIoana Radulescu DMA_BIDIRECTIONAL); 708e3fdf6baSIoana Radulescu kfree(swa->sg.scl); 70934ff6846SIoana Radulescu 71034ff6846SIoana Radulescu /* Unmap the SGT buffer */ 711e3fdf6baSIoana Radulescu dma_unmap_single(dev, fd_addr, swa->sg.sgt_size, 71234ff6846SIoana Radulescu DMA_BIDIRECTIONAL); 71334ff6846SIoana Radulescu } else { 71434ff6846SIoana Radulescu netdev_dbg(priv->net_dev, "Invalid FD format\n"); 71534ff6846SIoana Radulescu return; 71634ff6846SIoana Radulescu } 71734ff6846SIoana Radulescu 718d678be1dSIoana Radulescu if (swa->type != DPAA2_ETH_SWA_XDP && in_napi) { 719d678be1dSIoana Radulescu fq->dq_frames++; 720d678be1dSIoana Radulescu fq->dq_bytes += fd_len; 721d678be1dSIoana Radulescu } 722d678be1dSIoana Radulescu 723d678be1dSIoana Radulescu if (swa->type == DPAA2_ETH_SWA_XDP) { 724d678be1dSIoana Radulescu xdp_return_frame(swa->xdp.xdpf); 725d678be1dSIoana Radulescu return; 726d678be1dSIoana Radulescu } 727d678be1dSIoana Radulescu 72834ff6846SIoana Radulescu /* Get the timestamp value */ 72934ff6846SIoana Radulescu if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & 
SKBTX_HW_TSTAMP) { 73034ff6846SIoana Radulescu struct skb_shared_hwtstamps shhwtstamps; 731e3fdf6baSIoana Radulescu __le64 *ts = dpaa2_get_ts(buffer_start, true); 73234ff6846SIoana Radulescu u64 ns; 73334ff6846SIoana Radulescu 73434ff6846SIoana Radulescu memset(&shhwtstamps, 0, sizeof(shhwtstamps)); 73534ff6846SIoana Radulescu 73634ff6846SIoana Radulescu ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts); 73734ff6846SIoana Radulescu shhwtstamps.hwtstamp = ns_to_ktime(ns); 73834ff6846SIoana Radulescu skb_tstamp_tx(skb, &shhwtstamps); 73934ff6846SIoana Radulescu } 74034ff6846SIoana Radulescu 74134ff6846SIoana Radulescu /* Free SGT buffer allocated on tx */ 74234ff6846SIoana Radulescu if (fd_format != dpaa2_fd_single) 743e3fdf6baSIoana Radulescu skb_free_frag(buffer_start); 74434ff6846SIoana Radulescu 74534ff6846SIoana Radulescu /* Move on with skb release */ 7460723a3aeSIoana Ciocoi Radulescu napi_consume_skb(skb, in_napi); 74734ff6846SIoana Radulescu } 74834ff6846SIoana Radulescu 74934ff6846SIoana Radulescu static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev) 75034ff6846SIoana Radulescu { 75134ff6846SIoana Radulescu struct dpaa2_eth_priv *priv = netdev_priv(net_dev); 75234ff6846SIoana Radulescu struct dpaa2_fd fd; 75334ff6846SIoana Radulescu struct rtnl_link_stats64 *percpu_stats; 75434ff6846SIoana Radulescu struct dpaa2_eth_drv_stats *percpu_extras; 75534ff6846SIoana Radulescu struct dpaa2_eth_fq *fq; 756569dac6aSIoana Ciocoi Radulescu struct netdev_queue *nq; 75734ff6846SIoana Radulescu u16 queue_mapping; 75834ff6846SIoana Radulescu unsigned int needed_headroom; 759569dac6aSIoana Ciocoi Radulescu u32 fd_len; 76034ff6846SIoana Radulescu int err, i; 76134ff6846SIoana Radulescu 76234ff6846SIoana Radulescu percpu_stats = this_cpu_ptr(priv->percpu_stats); 76334ff6846SIoana Radulescu percpu_extras = this_cpu_ptr(priv->percpu_extras); 76434ff6846SIoana Radulescu 76534ff6846SIoana Radulescu needed_headroom = dpaa2_eth_needed_headroom(priv, skb); 
76634ff6846SIoana Radulescu if (skb_headroom(skb) < needed_headroom) { 76734ff6846SIoana Radulescu struct sk_buff *ns; 76834ff6846SIoana Radulescu 76934ff6846SIoana Radulescu ns = skb_realloc_headroom(skb, needed_headroom); 77034ff6846SIoana Radulescu if (unlikely(!ns)) { 77134ff6846SIoana Radulescu percpu_stats->tx_dropped++; 77234ff6846SIoana Radulescu goto err_alloc_headroom; 77334ff6846SIoana Radulescu } 77434ff6846SIoana Radulescu percpu_extras->tx_reallocs++; 77534ff6846SIoana Radulescu 77634ff6846SIoana Radulescu if (skb->sk) 77734ff6846SIoana Radulescu skb_set_owner_w(ns, skb->sk); 77834ff6846SIoana Radulescu 77934ff6846SIoana Radulescu dev_kfree_skb(skb); 78034ff6846SIoana Radulescu skb = ns; 78134ff6846SIoana Radulescu } 78234ff6846SIoana Radulescu 78334ff6846SIoana Radulescu /* We'll be holding a back-reference to the skb until Tx Confirmation; 78434ff6846SIoana Radulescu * we don't want that overwritten by a concurrent Tx with a cloned skb. 78534ff6846SIoana Radulescu */ 78634ff6846SIoana Radulescu skb = skb_unshare(skb, GFP_ATOMIC); 78734ff6846SIoana Radulescu if (unlikely(!skb)) { 78834ff6846SIoana Radulescu /* skb_unshare() has already freed the skb */ 78934ff6846SIoana Radulescu percpu_stats->tx_dropped++; 79034ff6846SIoana Radulescu return NETDEV_TX_OK; 79134ff6846SIoana Radulescu } 79234ff6846SIoana Radulescu 79334ff6846SIoana Radulescu /* Setup the FD fields */ 79434ff6846SIoana Radulescu memset(&fd, 0, sizeof(fd)); 79534ff6846SIoana Radulescu 79634ff6846SIoana Radulescu if (skb_is_nonlinear(skb)) { 79734ff6846SIoana Radulescu err = build_sg_fd(priv, skb, &fd); 79834ff6846SIoana Radulescu percpu_extras->tx_sg_frames++; 79934ff6846SIoana Radulescu percpu_extras->tx_sg_bytes += skb->len; 80034ff6846SIoana Radulescu } else { 80134ff6846SIoana Radulescu err = build_single_fd(priv, skb, &fd); 80234ff6846SIoana Radulescu } 80334ff6846SIoana Radulescu 80434ff6846SIoana Radulescu if (unlikely(err)) { 80534ff6846SIoana Radulescu 
percpu_stats->tx_dropped++; 80634ff6846SIoana Radulescu goto err_build_fd; 80734ff6846SIoana Radulescu } 80834ff6846SIoana Radulescu 80934ff6846SIoana Radulescu /* Tracing point */ 81034ff6846SIoana Radulescu trace_dpaa2_tx_fd(net_dev, &fd); 81134ff6846SIoana Radulescu 81234ff6846SIoana Radulescu /* TxConf FQ selection relies on queue id from the stack. 81334ff6846SIoana Radulescu * In case of a forwarded frame from another DPNI interface, we choose 81434ff6846SIoana Radulescu * a queue affined to the same core that processed the Rx frame 81534ff6846SIoana Radulescu */ 81634ff6846SIoana Radulescu queue_mapping = skb_get_queue_mapping(skb); 81734ff6846SIoana Radulescu fq = &priv->fq[queue_mapping]; 8188c838f53SIoana Ciornei 8198c838f53SIoana Ciornei fd_len = dpaa2_fd_get_len(&fd); 8208c838f53SIoana Ciornei nq = netdev_get_tx_queue(net_dev, queue_mapping); 8218c838f53SIoana Ciornei netdev_tx_sent_queue(nq, fd_len); 8228c838f53SIoana Ciornei 8238c838f53SIoana Ciornei /* Everything that happens after this enqueues might race with 8248c838f53SIoana Ciornei * the Tx confirmation callback for this frame 8258c838f53SIoana Ciornei */ 82634ff6846SIoana Radulescu for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) { 8271fa0f68cSIoana Ciocoi Radulescu err = priv->enqueue(priv, fq, &fd, 0); 82834ff6846SIoana Radulescu if (err != -EBUSY) 82934ff6846SIoana Radulescu break; 83034ff6846SIoana Radulescu } 83134ff6846SIoana Radulescu percpu_extras->tx_portal_busy += i; 83234ff6846SIoana Radulescu if (unlikely(err < 0)) { 83334ff6846SIoana Radulescu percpu_stats->tx_errors++; 83434ff6846SIoana Radulescu /* Clean up everything, including freeing the skb */ 835d678be1dSIoana Radulescu free_tx_fd(priv, fq, &fd, false); 8368c838f53SIoana Ciornei netdev_tx_completed_queue(nq, 1, fd_len); 83734ff6846SIoana Radulescu } else { 83834ff6846SIoana Radulescu percpu_stats->tx_packets++; 839569dac6aSIoana Ciocoi Radulescu percpu_stats->tx_bytes += fd_len; 84034ff6846SIoana Radulescu } 84134ff6846SIoana 
Radulescu 84234ff6846SIoana Radulescu return NETDEV_TX_OK; 84334ff6846SIoana Radulescu 84434ff6846SIoana Radulescu err_build_fd: 84534ff6846SIoana Radulescu err_alloc_headroom: 84634ff6846SIoana Radulescu dev_kfree_skb(skb); 84734ff6846SIoana Radulescu 84834ff6846SIoana Radulescu return NETDEV_TX_OK; 84934ff6846SIoana Radulescu } 85034ff6846SIoana Radulescu 85134ff6846SIoana Radulescu /* Tx confirmation frame processing routine */ 85234ff6846SIoana Radulescu static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv, 853b00c898cSIoana Ciornei struct dpaa2_eth_channel *ch __always_unused, 85434ff6846SIoana Radulescu const struct dpaa2_fd *fd, 855569dac6aSIoana Ciocoi Radulescu struct dpaa2_eth_fq *fq) 85634ff6846SIoana Radulescu { 85734ff6846SIoana Radulescu struct rtnl_link_stats64 *percpu_stats; 85834ff6846SIoana Radulescu struct dpaa2_eth_drv_stats *percpu_extras; 859569dac6aSIoana Ciocoi Radulescu u32 fd_len = dpaa2_fd_get_len(fd); 86034ff6846SIoana Radulescu u32 fd_errors; 86134ff6846SIoana Radulescu 86234ff6846SIoana Radulescu /* Tracing point */ 86334ff6846SIoana Radulescu trace_dpaa2_tx_conf_fd(priv->net_dev, fd); 86434ff6846SIoana Radulescu 86534ff6846SIoana Radulescu percpu_extras = this_cpu_ptr(priv->percpu_extras); 86634ff6846SIoana Radulescu percpu_extras->tx_conf_frames++; 867569dac6aSIoana Ciocoi Radulescu percpu_extras->tx_conf_bytes += fd_len; 868569dac6aSIoana Ciocoi Radulescu 86934ff6846SIoana Radulescu /* Check frame errors in the FD field */ 87034ff6846SIoana Radulescu fd_errors = dpaa2_fd_get_ctrl(fd) & DPAA2_FD_TX_ERR_MASK; 871d678be1dSIoana Radulescu free_tx_fd(priv, fq, fd, true); 87234ff6846SIoana Radulescu 87334ff6846SIoana Radulescu if (likely(!fd_errors)) 87434ff6846SIoana Radulescu return; 87534ff6846SIoana Radulescu 87634ff6846SIoana Radulescu if (net_ratelimit()) 87734ff6846SIoana Radulescu netdev_dbg(priv->net_dev, "TX frame FD error: 0x%08x\n", 87834ff6846SIoana Radulescu fd_errors); 87934ff6846SIoana Radulescu 88034ff6846SIoana 
Radulescu percpu_stats = this_cpu_ptr(priv->percpu_stats); 88134ff6846SIoana Radulescu /* Tx-conf logically pertains to the egress path. */ 88234ff6846SIoana Radulescu percpu_stats->tx_errors++; 88334ff6846SIoana Radulescu } 88434ff6846SIoana Radulescu 88534ff6846SIoana Radulescu static int set_rx_csum(struct dpaa2_eth_priv *priv, bool enable) 88634ff6846SIoana Radulescu { 88734ff6846SIoana Radulescu int err; 88834ff6846SIoana Radulescu 88934ff6846SIoana Radulescu err = dpni_set_offload(priv->mc_io, 0, priv->mc_token, 89034ff6846SIoana Radulescu DPNI_OFF_RX_L3_CSUM, enable); 89134ff6846SIoana Radulescu if (err) { 89234ff6846SIoana Radulescu netdev_err(priv->net_dev, 89334ff6846SIoana Radulescu "dpni_set_offload(RX_L3_CSUM) failed\n"); 89434ff6846SIoana Radulescu return err; 89534ff6846SIoana Radulescu } 89634ff6846SIoana Radulescu 89734ff6846SIoana Radulescu err = dpni_set_offload(priv->mc_io, 0, priv->mc_token, 89834ff6846SIoana Radulescu DPNI_OFF_RX_L4_CSUM, enable); 89934ff6846SIoana Radulescu if (err) { 90034ff6846SIoana Radulescu netdev_err(priv->net_dev, 90134ff6846SIoana Radulescu "dpni_set_offload(RX_L4_CSUM) failed\n"); 90234ff6846SIoana Radulescu return err; 90334ff6846SIoana Radulescu } 90434ff6846SIoana Radulescu 90534ff6846SIoana Radulescu return 0; 90634ff6846SIoana Radulescu } 90734ff6846SIoana Radulescu 90834ff6846SIoana Radulescu static int set_tx_csum(struct dpaa2_eth_priv *priv, bool enable) 90934ff6846SIoana Radulescu { 91034ff6846SIoana Radulescu int err; 91134ff6846SIoana Radulescu 91234ff6846SIoana Radulescu err = dpni_set_offload(priv->mc_io, 0, priv->mc_token, 91334ff6846SIoana Radulescu DPNI_OFF_TX_L3_CSUM, enable); 91434ff6846SIoana Radulescu if (err) { 91534ff6846SIoana Radulescu netdev_err(priv->net_dev, "dpni_set_offload(TX_L3_CSUM) failed\n"); 91634ff6846SIoana Radulescu return err; 91734ff6846SIoana Radulescu } 91834ff6846SIoana Radulescu 91934ff6846SIoana Radulescu err = dpni_set_offload(priv->mc_io, 0, priv->mc_token, 
92034ff6846SIoana Radulescu DPNI_OFF_TX_L4_CSUM, enable); 92134ff6846SIoana Radulescu if (err) { 92234ff6846SIoana Radulescu netdev_err(priv->net_dev, "dpni_set_offload(TX_L4_CSUM) failed\n"); 92334ff6846SIoana Radulescu return err; 92434ff6846SIoana Radulescu } 92534ff6846SIoana Radulescu 92634ff6846SIoana Radulescu return 0; 92734ff6846SIoana Radulescu } 92834ff6846SIoana Radulescu 92934ff6846SIoana Radulescu /* Perform a single release command to add buffers 93034ff6846SIoana Radulescu * to the specified buffer pool 93134ff6846SIoana Radulescu */ 93234ff6846SIoana Radulescu static int add_bufs(struct dpaa2_eth_priv *priv, 93334ff6846SIoana Radulescu struct dpaa2_eth_channel *ch, u16 bpid) 93434ff6846SIoana Radulescu { 93534ff6846SIoana Radulescu struct device *dev = priv->net_dev->dev.parent; 93634ff6846SIoana Radulescu u64 buf_array[DPAA2_ETH_BUFS_PER_CMD]; 93727c87486SIoana Ciocoi Radulescu struct page *page; 93834ff6846SIoana Radulescu dma_addr_t addr; 93934ff6846SIoana Radulescu int i, err; 94034ff6846SIoana Radulescu 94134ff6846SIoana Radulescu for (i = 0; i < DPAA2_ETH_BUFS_PER_CMD; i++) { 94234ff6846SIoana Radulescu /* Allocate buffer visible to WRIOP + skb shared info + 94334ff6846SIoana Radulescu * alignment padding 94434ff6846SIoana Radulescu */ 94527c87486SIoana Ciocoi Radulescu /* allocate one page for each Rx buffer. 
WRIOP sees 94627c87486SIoana Ciocoi Radulescu * the entire page except for a tailroom reserved for 94727c87486SIoana Ciocoi Radulescu * skb shared info 94827c87486SIoana Ciocoi Radulescu */ 94927c87486SIoana Ciocoi Radulescu page = dev_alloc_pages(0); 95027c87486SIoana Ciocoi Radulescu if (!page) 95134ff6846SIoana Radulescu goto err_alloc; 95234ff6846SIoana Radulescu 95327c87486SIoana Ciocoi Radulescu addr = dma_map_page(dev, page, 0, DPAA2_ETH_RX_BUF_SIZE, 95418c2e770SIoana Ciocoi Radulescu DMA_BIDIRECTIONAL); 95534ff6846SIoana Radulescu if (unlikely(dma_mapping_error(dev, addr))) 95634ff6846SIoana Radulescu goto err_map; 95734ff6846SIoana Radulescu 95834ff6846SIoana Radulescu buf_array[i] = addr; 95934ff6846SIoana Radulescu 96034ff6846SIoana Radulescu /* tracing point */ 96134ff6846SIoana Radulescu trace_dpaa2_eth_buf_seed(priv->net_dev, 96227c87486SIoana Ciocoi Radulescu page, DPAA2_ETH_RX_BUF_RAW_SIZE, 96334ff6846SIoana Radulescu addr, DPAA2_ETH_RX_BUF_SIZE, 96434ff6846SIoana Radulescu bpid); 96534ff6846SIoana Radulescu } 96634ff6846SIoana Radulescu 96734ff6846SIoana Radulescu release_bufs: 96834ff6846SIoana Radulescu /* In case the portal is busy, retry until successful */ 96934ff6846SIoana Radulescu while ((err = dpaa2_io_service_release(ch->dpio, bpid, 97034ff6846SIoana Radulescu buf_array, i)) == -EBUSY) 97134ff6846SIoana Radulescu cpu_relax(); 97234ff6846SIoana Radulescu 97334ff6846SIoana Radulescu /* If release command failed, clean up and bail out; 97434ff6846SIoana Radulescu * not much else we can do about it 97534ff6846SIoana Radulescu */ 97634ff6846SIoana Radulescu if (err) { 97734ff6846SIoana Radulescu free_bufs(priv, buf_array, i); 97834ff6846SIoana Radulescu return 0; 97934ff6846SIoana Radulescu } 98034ff6846SIoana Radulescu 98134ff6846SIoana Radulescu return i; 98234ff6846SIoana Radulescu 98334ff6846SIoana Radulescu err_map: 98427c87486SIoana Ciocoi Radulescu __free_pages(page, 0); 98534ff6846SIoana Radulescu err_alloc: 98634ff6846SIoana Radulescu 
/* If we managed to allocate at least some buffers, 98734ff6846SIoana Radulescu * release them to hardware 98834ff6846SIoana Radulescu */ 98934ff6846SIoana Radulescu if (i) 99034ff6846SIoana Radulescu goto release_bufs; 99134ff6846SIoana Radulescu 99234ff6846SIoana Radulescu return 0; 99334ff6846SIoana Radulescu } 99434ff6846SIoana Radulescu 99534ff6846SIoana Radulescu static int seed_pool(struct dpaa2_eth_priv *priv, u16 bpid) 99634ff6846SIoana Radulescu { 99734ff6846SIoana Radulescu int i, j; 99834ff6846SIoana Radulescu int new_count; 99934ff6846SIoana Radulescu 100034ff6846SIoana Radulescu for (j = 0; j < priv->num_channels; j++) { 100134ff6846SIoana Radulescu for (i = 0; i < DPAA2_ETH_NUM_BUFS; 100234ff6846SIoana Radulescu i += DPAA2_ETH_BUFS_PER_CMD) { 100334ff6846SIoana Radulescu new_count = add_bufs(priv, priv->channel[j], bpid); 100434ff6846SIoana Radulescu priv->channel[j]->buf_count += new_count; 100534ff6846SIoana Radulescu 100634ff6846SIoana Radulescu if (new_count < DPAA2_ETH_BUFS_PER_CMD) { 100734ff6846SIoana Radulescu return -ENOMEM; 100834ff6846SIoana Radulescu } 100934ff6846SIoana Radulescu } 101034ff6846SIoana Radulescu } 101134ff6846SIoana Radulescu 101234ff6846SIoana Radulescu return 0; 101334ff6846SIoana Radulescu } 101434ff6846SIoana Radulescu 101534ff6846SIoana Radulescu /** 101634ff6846SIoana Radulescu * Drain the specified number of buffers from the DPNI's private buffer pool. 
101734ff6846SIoana Radulescu * @count must not exceed DPAA2_ETH_BUFS_PER_CMD 101834ff6846SIoana Radulescu */ 101934ff6846SIoana Radulescu static void drain_bufs(struct dpaa2_eth_priv *priv, int count) 102034ff6846SIoana Radulescu { 102134ff6846SIoana Radulescu u64 buf_array[DPAA2_ETH_BUFS_PER_CMD]; 102234ff6846SIoana Radulescu int ret; 102334ff6846SIoana Radulescu 102434ff6846SIoana Radulescu do { 102534ff6846SIoana Radulescu ret = dpaa2_io_service_acquire(NULL, priv->bpid, 102634ff6846SIoana Radulescu buf_array, count); 102734ff6846SIoana Radulescu if (ret < 0) { 102834ff6846SIoana Radulescu netdev_err(priv->net_dev, "dpaa2_io_service_acquire() failed\n"); 102934ff6846SIoana Radulescu return; 103034ff6846SIoana Radulescu } 103134ff6846SIoana Radulescu free_bufs(priv, buf_array, ret); 103234ff6846SIoana Radulescu } while (ret); 103334ff6846SIoana Radulescu } 103434ff6846SIoana Radulescu 103534ff6846SIoana Radulescu static void drain_pool(struct dpaa2_eth_priv *priv) 103634ff6846SIoana Radulescu { 103734ff6846SIoana Radulescu int i; 103834ff6846SIoana Radulescu 103934ff6846SIoana Radulescu drain_bufs(priv, DPAA2_ETH_BUFS_PER_CMD); 104034ff6846SIoana Radulescu drain_bufs(priv, 1); 104134ff6846SIoana Radulescu 104234ff6846SIoana Radulescu for (i = 0; i < priv->num_channels; i++) 104334ff6846SIoana Radulescu priv->channel[i]->buf_count = 0; 104434ff6846SIoana Radulescu } 104534ff6846SIoana Radulescu 104634ff6846SIoana Radulescu /* Function is called from softirq context only, so we don't need to guard 104734ff6846SIoana Radulescu * the access to percpu count 104834ff6846SIoana Radulescu */ 104934ff6846SIoana Radulescu static int refill_pool(struct dpaa2_eth_priv *priv, 105034ff6846SIoana Radulescu struct dpaa2_eth_channel *ch, 105134ff6846SIoana Radulescu u16 bpid) 105234ff6846SIoana Radulescu { 105334ff6846SIoana Radulescu int new_count; 105434ff6846SIoana Radulescu 105534ff6846SIoana Radulescu if (likely(ch->buf_count >= DPAA2_ETH_REFILL_THRESH)) 105634ff6846SIoana
Radulescu return 0; 105734ff6846SIoana Radulescu 105834ff6846SIoana Radulescu do { 105934ff6846SIoana Radulescu new_count = add_bufs(priv, ch, bpid); 106034ff6846SIoana Radulescu if (unlikely(!new_count)) { 106134ff6846SIoana Radulescu /* Out of memory; abort for now, we'll try later on */ 106234ff6846SIoana Radulescu break; 106334ff6846SIoana Radulescu } 106434ff6846SIoana Radulescu ch->buf_count += new_count; 106534ff6846SIoana Radulescu } while (ch->buf_count < DPAA2_ETH_NUM_BUFS); 106634ff6846SIoana Radulescu 106734ff6846SIoana Radulescu if (unlikely(ch->buf_count < DPAA2_ETH_NUM_BUFS)) 106834ff6846SIoana Radulescu return -ENOMEM; 106934ff6846SIoana Radulescu 107034ff6846SIoana Radulescu return 0; 107134ff6846SIoana Radulescu } 107234ff6846SIoana Radulescu 107334ff6846SIoana Radulescu static int pull_channel(struct dpaa2_eth_channel *ch) 107434ff6846SIoana Radulescu { 107534ff6846SIoana Radulescu int err; 107634ff6846SIoana Radulescu int dequeues = -1; 107734ff6846SIoana Radulescu 107834ff6846SIoana Radulescu /* Retry while portal is busy */ 107934ff6846SIoana Radulescu do { 108034ff6846SIoana Radulescu err = dpaa2_io_service_pull_channel(ch->dpio, ch->ch_id, 108134ff6846SIoana Radulescu ch->store); 108234ff6846SIoana Radulescu dequeues++; 108334ff6846SIoana Radulescu cpu_relax(); 108434ff6846SIoana Radulescu } while (err == -EBUSY); 108534ff6846SIoana Radulescu 108634ff6846SIoana Radulescu ch->stats.dequeue_portal_busy += dequeues; 108734ff6846SIoana Radulescu if (unlikely(err)) 108834ff6846SIoana Radulescu ch->stats.pull_err++; 108934ff6846SIoana Radulescu 109034ff6846SIoana Radulescu return err; 109134ff6846SIoana Radulescu } 109234ff6846SIoana Radulescu 109334ff6846SIoana Radulescu /* NAPI poll routine 109434ff6846SIoana Radulescu * 109534ff6846SIoana Radulescu * Frames are dequeued from the QMan channel associated with this NAPI context. 
109634ff6846SIoana Radulescu * Rx, Tx confirmation and (if configured) Rx error frames all count 109734ff6846SIoana Radulescu * towards the NAPI budget. 109834ff6846SIoana Radulescu */ 109934ff6846SIoana Radulescu static int dpaa2_eth_poll(struct napi_struct *napi, int budget) 110034ff6846SIoana Radulescu { 110134ff6846SIoana Radulescu struct dpaa2_eth_channel *ch; 110234ff6846SIoana Radulescu struct dpaa2_eth_priv *priv; 110368049a5fSIoana Ciocoi Radulescu int rx_cleaned = 0, txconf_cleaned = 0; 1104569dac6aSIoana Ciocoi Radulescu struct dpaa2_eth_fq *fq, *txc_fq = NULL; 1105569dac6aSIoana Ciocoi Radulescu struct netdev_queue *nq; 1106569dac6aSIoana Ciocoi Radulescu int store_cleaned, work_done; 11070a25d92cSIoana Ciornei struct list_head rx_list; 110834ff6846SIoana Radulescu int err; 110934ff6846SIoana Radulescu 111034ff6846SIoana Radulescu ch = container_of(napi, struct dpaa2_eth_channel, napi); 1111d678be1dSIoana Radulescu ch->xdp.res = 0; 111234ff6846SIoana Radulescu priv = ch->priv; 111334ff6846SIoana Radulescu 11140a25d92cSIoana Ciornei INIT_LIST_HEAD(&rx_list); 11150a25d92cSIoana Ciornei ch->rx_list = &rx_list; 11160a25d92cSIoana Ciornei 111768049a5fSIoana Ciocoi Radulescu do { 111834ff6846SIoana Radulescu err = pull_channel(ch); 111934ff6846SIoana Radulescu if (unlikely(err)) 112034ff6846SIoana Radulescu break; 112134ff6846SIoana Radulescu 112234ff6846SIoana Radulescu /* Refill pool if appropriate */ 112334ff6846SIoana Radulescu refill_pool(priv, ch, priv->bpid); 112434ff6846SIoana Radulescu 1125569dac6aSIoana Ciocoi Radulescu store_cleaned = consume_frames(ch, &fq); 1126569dac6aSIoana Ciocoi Radulescu if (!store_cleaned) 1127569dac6aSIoana Ciocoi Radulescu break; 1128569dac6aSIoana Ciocoi Radulescu if (fq->type == DPAA2_RX_FQ) { 112968049a5fSIoana Ciocoi Radulescu rx_cleaned += store_cleaned; 1130569dac6aSIoana Ciocoi Radulescu } else { 113168049a5fSIoana Ciocoi Radulescu txconf_cleaned += store_cleaned; 1132569dac6aSIoana Ciocoi Radulescu /* We have a 
single Tx conf FQ on this channel */ 1133569dac6aSIoana Ciocoi Radulescu txc_fq = fq; 1134569dac6aSIoana Ciocoi Radulescu } 113534ff6846SIoana Radulescu 113668049a5fSIoana Ciocoi Radulescu /* If we either consumed the whole NAPI budget with Rx frames 113768049a5fSIoana Ciocoi Radulescu * or we reached the Tx confirmations threshold, we're done. 113834ff6846SIoana Radulescu */ 113968049a5fSIoana Ciocoi Radulescu if (rx_cleaned >= budget || 1140569dac6aSIoana Ciocoi Radulescu txconf_cleaned >= DPAA2_ETH_TXCONF_PER_NAPI) { 1141569dac6aSIoana Ciocoi Radulescu work_done = budget; 1142569dac6aSIoana Ciocoi Radulescu goto out; 1143569dac6aSIoana Ciocoi Radulescu } 114468049a5fSIoana Ciocoi Radulescu } while (store_cleaned); 114534ff6846SIoana Radulescu 114668049a5fSIoana Ciocoi Radulescu /* We didn't consume the entire budget, so finish napi and 114768049a5fSIoana Ciocoi Radulescu * re-enable data availability notifications 114868049a5fSIoana Ciocoi Radulescu */ 114968049a5fSIoana Ciocoi Radulescu napi_complete_done(napi, rx_cleaned); 115034ff6846SIoana Radulescu do { 115134ff6846SIoana Radulescu err = dpaa2_io_service_rearm(ch->dpio, &ch->nctx); 115234ff6846SIoana Radulescu cpu_relax(); 115334ff6846SIoana Radulescu } while (err == -EBUSY); 115434ff6846SIoana Radulescu WARN_ONCE(err, "CDAN notifications rearm failed on core %d", 115534ff6846SIoana Radulescu ch->nctx.desired_cpu); 115634ff6846SIoana Radulescu 1157569dac6aSIoana Ciocoi Radulescu work_done = max(rx_cleaned, 1); 1158569dac6aSIoana Ciocoi Radulescu 1159569dac6aSIoana Ciocoi Radulescu out: 11600a25d92cSIoana Ciornei netif_receive_skb_list(ch->rx_list); 11610a25d92cSIoana Ciornei 1162d678be1dSIoana Radulescu if (txc_fq && txc_fq->dq_frames) { 1163569dac6aSIoana Ciocoi Radulescu nq = netdev_get_tx_queue(priv->net_dev, txc_fq->flowid); 1164569dac6aSIoana Ciocoi Radulescu netdev_tx_completed_queue(nq, txc_fq->dq_frames, 1165569dac6aSIoana Ciocoi Radulescu txc_fq->dq_bytes); 1166569dac6aSIoana Ciocoi Radulescu 
txc_fq->dq_frames = 0; 1167569dac6aSIoana Ciocoi Radulescu txc_fq->dq_bytes = 0; 1168569dac6aSIoana Ciocoi Radulescu } 1169569dac6aSIoana Ciocoi Radulescu 1170d678be1dSIoana Radulescu if (ch->xdp.res & XDP_REDIRECT) 1171d678be1dSIoana Radulescu xdp_do_flush_map(); 1172d678be1dSIoana Radulescu 1173569dac6aSIoana Ciocoi Radulescu return work_done; 117434ff6846SIoana Radulescu } 117534ff6846SIoana Radulescu 117634ff6846SIoana Radulescu static void enable_ch_napi(struct dpaa2_eth_priv *priv) 117734ff6846SIoana Radulescu { 117834ff6846SIoana Radulescu struct dpaa2_eth_channel *ch; 117934ff6846SIoana Radulescu int i; 118034ff6846SIoana Radulescu 118134ff6846SIoana Radulescu for (i = 0; i < priv->num_channels; i++) { 118234ff6846SIoana Radulescu ch = priv->channel[i]; 118334ff6846SIoana Radulescu napi_enable(&ch->napi); 118434ff6846SIoana Radulescu } 118534ff6846SIoana Radulescu } 118634ff6846SIoana Radulescu 118734ff6846SIoana Radulescu static void disable_ch_napi(struct dpaa2_eth_priv *priv) 118834ff6846SIoana Radulescu { 118934ff6846SIoana Radulescu struct dpaa2_eth_channel *ch; 119034ff6846SIoana Radulescu int i; 119134ff6846SIoana Radulescu 119234ff6846SIoana Radulescu for (i = 0; i < priv->num_channels; i++) { 119334ff6846SIoana Radulescu ch = priv->channel[i]; 119434ff6846SIoana Radulescu napi_disable(&ch->napi); 119534ff6846SIoana Radulescu } 119634ff6846SIoana Radulescu } 119734ff6846SIoana Radulescu 119834ff6846SIoana Radulescu static int link_state_update(struct dpaa2_eth_priv *priv) 119934ff6846SIoana Radulescu { 120085b7a342SIoana Ciornei struct dpni_link_state state = {0}; 120134ff6846SIoana Radulescu int err; 120234ff6846SIoana Radulescu 120334ff6846SIoana Radulescu err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state); 120434ff6846SIoana Radulescu if (unlikely(err)) { 120534ff6846SIoana Radulescu netdev_err(priv->net_dev, 120634ff6846SIoana Radulescu "dpni_get_link_state() failed\n"); 120734ff6846SIoana Radulescu return err; 120834ff6846SIoana 
Radulescu } 120934ff6846SIoana Radulescu 121034ff6846SIoana Radulescu /* Chech link state; speed / duplex changes are not treated yet */ 121134ff6846SIoana Radulescu if (priv->link_state.up == state.up) 121234ff6846SIoana Radulescu return 0; 121334ff6846SIoana Radulescu 121434ff6846SIoana Radulescu priv->link_state = state; 121534ff6846SIoana Radulescu if (state.up) { 121634ff6846SIoana Radulescu netif_carrier_on(priv->net_dev); 121734ff6846SIoana Radulescu netif_tx_start_all_queues(priv->net_dev); 121834ff6846SIoana Radulescu } else { 121934ff6846SIoana Radulescu netif_tx_stop_all_queues(priv->net_dev); 122034ff6846SIoana Radulescu netif_carrier_off(priv->net_dev); 122134ff6846SIoana Radulescu } 122234ff6846SIoana Radulescu 122334ff6846SIoana Radulescu netdev_info(priv->net_dev, "Link Event: state %s\n", 122434ff6846SIoana Radulescu state.up ? "up" : "down"); 122534ff6846SIoana Radulescu 122634ff6846SIoana Radulescu return 0; 122734ff6846SIoana Radulescu } 122834ff6846SIoana Radulescu 122934ff6846SIoana Radulescu static int dpaa2_eth_open(struct net_device *net_dev) 123034ff6846SIoana Radulescu { 123134ff6846SIoana Radulescu struct dpaa2_eth_priv *priv = netdev_priv(net_dev); 123234ff6846SIoana Radulescu int err; 123334ff6846SIoana Radulescu 123434ff6846SIoana Radulescu err = seed_pool(priv, priv->bpid); 123534ff6846SIoana Radulescu if (err) { 123634ff6846SIoana Radulescu /* Not much to do; the buffer pool, though not filled up, 123734ff6846SIoana Radulescu * may still contain some buffers which would enable us 123834ff6846SIoana Radulescu * to limp on. 
123934ff6846SIoana Radulescu */ 124034ff6846SIoana Radulescu netdev_err(net_dev, "Buffer seeding failed for DPBP %d (bpid=%d)\n", 124134ff6846SIoana Radulescu priv->dpbp_dev->obj_desc.id, priv->bpid); 124234ff6846SIoana Radulescu } 124334ff6846SIoana Radulescu 124434ff6846SIoana Radulescu /* We'll only start the txqs when the link is actually ready; make sure 124534ff6846SIoana Radulescu * we don't race against the link up notification, which may come 124634ff6846SIoana Radulescu * immediately after dpni_enable(); 124734ff6846SIoana Radulescu */ 124834ff6846SIoana Radulescu netif_tx_stop_all_queues(net_dev); 124934ff6846SIoana Radulescu enable_ch_napi(priv); 125034ff6846SIoana Radulescu /* Also, explicitly set carrier off, otherwise netif_carrier_ok() will 125134ff6846SIoana Radulescu * return true and cause 'ip link show' to report the LOWER_UP flag, 125234ff6846SIoana Radulescu * even though the link notification wasn't even received. 125334ff6846SIoana Radulescu */ 125434ff6846SIoana Radulescu netif_carrier_off(net_dev); 125534ff6846SIoana Radulescu 125634ff6846SIoana Radulescu err = dpni_enable(priv->mc_io, 0, priv->mc_token); 125734ff6846SIoana Radulescu if (err < 0) { 125834ff6846SIoana Radulescu netdev_err(net_dev, "dpni_enable() failed\n"); 125934ff6846SIoana Radulescu goto enable_err; 126034ff6846SIoana Radulescu } 126134ff6846SIoana Radulescu 126234ff6846SIoana Radulescu /* If the DPMAC object has already processed the link up interrupt, 126334ff6846SIoana Radulescu * we have to learn the link state ourselves. 
126434ff6846SIoana Radulescu */ 126534ff6846SIoana Radulescu err = link_state_update(priv); 126634ff6846SIoana Radulescu if (err < 0) { 126734ff6846SIoana Radulescu netdev_err(net_dev, "Can't update link state\n"); 126834ff6846SIoana Radulescu goto link_state_err; 126934ff6846SIoana Radulescu } 127034ff6846SIoana Radulescu 127134ff6846SIoana Radulescu return 0; 127234ff6846SIoana Radulescu 127334ff6846SIoana Radulescu link_state_err: 127434ff6846SIoana Radulescu enable_err: 127534ff6846SIoana Radulescu disable_ch_napi(priv); 127634ff6846SIoana Radulescu drain_pool(priv); 127734ff6846SIoana Radulescu return err; 127834ff6846SIoana Radulescu } 127934ff6846SIoana Radulescu 128068d74315SIoana Ciocoi Radulescu /* Total number of in-flight frames on ingress queues */ 128168d74315SIoana Ciocoi Radulescu static u32 ingress_fq_count(struct dpaa2_eth_priv *priv) 128234ff6846SIoana Radulescu { 128368d74315SIoana Ciocoi Radulescu struct dpaa2_eth_fq *fq; 128468d74315SIoana Ciocoi Radulescu u32 fcnt = 0, bcnt = 0, total = 0; 128568d74315SIoana Ciocoi Radulescu int i, err; 128634ff6846SIoana Radulescu 128768d74315SIoana Ciocoi Radulescu for (i = 0; i < priv->num_fqs; i++) { 128868d74315SIoana Ciocoi Radulescu fq = &priv->fq[i]; 128968d74315SIoana Ciocoi Radulescu err = dpaa2_io_query_fq_count(NULL, fq->fqid, &fcnt, &bcnt); 129068d74315SIoana Ciocoi Radulescu if (err) { 129168d74315SIoana Ciocoi Radulescu netdev_warn(priv->net_dev, "query_fq_count failed"); 129268d74315SIoana Ciocoi Radulescu break; 129368d74315SIoana Ciocoi Radulescu } 129468d74315SIoana Ciocoi Radulescu total += fcnt; 129568d74315SIoana Ciocoi Radulescu } 129634ff6846SIoana Radulescu 129734ff6846SIoana Radulescu return total; 129834ff6846SIoana Radulescu } 129934ff6846SIoana Radulescu 130068d74315SIoana Ciocoi Radulescu static void wait_for_fq_empty(struct dpaa2_eth_priv *priv) 130134ff6846SIoana Radulescu { 130268d74315SIoana Ciocoi Radulescu int retries = 10; 130368d74315SIoana Ciocoi Radulescu u32 pending; 
130434ff6846SIoana Radulescu 130568d74315SIoana Ciocoi Radulescu do { 130668d74315SIoana Ciocoi Radulescu pending = ingress_fq_count(priv); 130768d74315SIoana Ciocoi Radulescu if (pending) 130868d74315SIoana Ciocoi Radulescu msleep(100); 130968d74315SIoana Ciocoi Radulescu } while (pending && --retries); 131034ff6846SIoana Radulescu } 131134ff6846SIoana Radulescu 131234ff6846SIoana Radulescu static int dpaa2_eth_stop(struct net_device *net_dev) 131334ff6846SIoana Radulescu { 131434ff6846SIoana Radulescu struct dpaa2_eth_priv *priv = netdev_priv(net_dev); 131585b7a342SIoana Ciornei int dpni_enabled = 0; 131634ff6846SIoana Radulescu int retries = 10; 131734ff6846SIoana Radulescu 131834ff6846SIoana Radulescu netif_tx_stop_all_queues(net_dev); 131934ff6846SIoana Radulescu netif_carrier_off(net_dev); 132034ff6846SIoana Radulescu 132168d74315SIoana Ciocoi Radulescu /* On dpni_disable(), the MC firmware will: 132268d74315SIoana Ciocoi Radulescu * - stop MAC Rx and wait for all Rx frames to be enqueued to software 132368d74315SIoana Ciocoi Radulescu * - cut off WRIOP dequeues from egress FQs and wait until transmission 132468d74315SIoana Ciocoi Radulescu * of all in flight Tx frames is finished (and corresponding Tx conf 132568d74315SIoana Ciocoi Radulescu * frames are enqueued back to software) 132668d74315SIoana Ciocoi Radulescu * 132768d74315SIoana Ciocoi Radulescu * Before calling dpni_disable(), we wait for all Tx frames to arrive 132868d74315SIoana Ciocoi Radulescu * on WRIOP. After it finishes, wait until all remaining frames on Rx 132968d74315SIoana Ciocoi Radulescu * and Tx conf queues are consumed on NAPI poll. 
133034ff6846SIoana Radulescu */ 133168d74315SIoana Ciocoi Radulescu msleep(500); 133268d74315SIoana Ciocoi Radulescu 133334ff6846SIoana Radulescu do { 133434ff6846SIoana Radulescu dpni_disable(priv->mc_io, 0, priv->mc_token); 133534ff6846SIoana Radulescu dpni_is_enabled(priv->mc_io, 0, priv->mc_token, &dpni_enabled); 133634ff6846SIoana Radulescu if (dpni_enabled) 133734ff6846SIoana Radulescu /* Allow the hardware some slack */ 133834ff6846SIoana Radulescu msleep(100); 133934ff6846SIoana Radulescu } while (dpni_enabled && --retries); 134034ff6846SIoana Radulescu if (!retries) { 134134ff6846SIoana Radulescu netdev_warn(net_dev, "Retry count exceeded disabling DPNI\n"); 134234ff6846SIoana Radulescu /* Must go on and disable NAPI nonetheless, so we don't crash at 134334ff6846SIoana Radulescu * the next "ifconfig up" 134434ff6846SIoana Radulescu */ 134534ff6846SIoana Radulescu } 134634ff6846SIoana Radulescu 134768d74315SIoana Ciocoi Radulescu wait_for_fq_empty(priv); 134834ff6846SIoana Radulescu disable_ch_napi(priv); 134934ff6846SIoana Radulescu 135034ff6846SIoana Radulescu /* Empty the buffer pool */ 135134ff6846SIoana Radulescu drain_pool(priv); 135234ff6846SIoana Radulescu 135334ff6846SIoana Radulescu return 0; 135434ff6846SIoana Radulescu } 135534ff6846SIoana Radulescu 135634ff6846SIoana Radulescu static int dpaa2_eth_set_addr(struct net_device *net_dev, void *addr) 135734ff6846SIoana Radulescu { 135834ff6846SIoana Radulescu struct dpaa2_eth_priv *priv = netdev_priv(net_dev); 135934ff6846SIoana Radulescu struct device *dev = net_dev->dev.parent; 136034ff6846SIoana Radulescu int err; 136134ff6846SIoana Radulescu 136234ff6846SIoana Radulescu err = eth_mac_addr(net_dev, addr); 136334ff6846SIoana Radulescu if (err < 0) { 136434ff6846SIoana Radulescu dev_err(dev, "eth_mac_addr() failed (%d)\n", err); 136534ff6846SIoana Radulescu return err; 136634ff6846SIoana Radulescu } 136734ff6846SIoana Radulescu 136834ff6846SIoana Radulescu err = 
dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token, 136934ff6846SIoana Radulescu net_dev->dev_addr); 137034ff6846SIoana Radulescu if (err) { 137134ff6846SIoana Radulescu dev_err(dev, "dpni_set_primary_mac_addr() failed (%d)\n", err); 137234ff6846SIoana Radulescu return err; 137334ff6846SIoana Radulescu } 137434ff6846SIoana Radulescu 137534ff6846SIoana Radulescu return 0; 137634ff6846SIoana Radulescu } 137734ff6846SIoana Radulescu 137834ff6846SIoana Radulescu /** Fill in counters maintained by the GPP driver. These may be different from 137934ff6846SIoana Radulescu * the hardware counters obtained by ethtool. 138034ff6846SIoana Radulescu */ 138134ff6846SIoana Radulescu static void dpaa2_eth_get_stats(struct net_device *net_dev, 138234ff6846SIoana Radulescu struct rtnl_link_stats64 *stats) 138334ff6846SIoana Radulescu { 138434ff6846SIoana Radulescu struct dpaa2_eth_priv *priv = netdev_priv(net_dev); 138534ff6846SIoana Radulescu struct rtnl_link_stats64 *percpu_stats; 138634ff6846SIoana Radulescu u64 *cpustats; 138734ff6846SIoana Radulescu u64 *netstats = (u64 *)stats; 138834ff6846SIoana Radulescu int i, j; 138934ff6846SIoana Radulescu int num = sizeof(struct rtnl_link_stats64) / sizeof(u64); 139034ff6846SIoana Radulescu 139134ff6846SIoana Radulescu for_each_possible_cpu(i) { 139234ff6846SIoana Radulescu percpu_stats = per_cpu_ptr(priv->percpu_stats, i); 139334ff6846SIoana Radulescu cpustats = (u64 *)percpu_stats; 139434ff6846SIoana Radulescu for (j = 0; j < num; j++) 139534ff6846SIoana Radulescu netstats[j] += cpustats[j]; 139634ff6846SIoana Radulescu } 139734ff6846SIoana Radulescu } 139834ff6846SIoana Radulescu 139934ff6846SIoana Radulescu /* Copy mac unicast addresses from @net_dev to @priv. 140034ff6846SIoana Radulescu * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable. 
140134ff6846SIoana Radulescu */ 140234ff6846SIoana Radulescu static void add_uc_hw_addr(const struct net_device *net_dev, 140334ff6846SIoana Radulescu struct dpaa2_eth_priv *priv) 140434ff6846SIoana Radulescu { 140534ff6846SIoana Radulescu struct netdev_hw_addr *ha; 140634ff6846SIoana Radulescu int err; 140734ff6846SIoana Radulescu 140834ff6846SIoana Radulescu netdev_for_each_uc_addr(ha, net_dev) { 140934ff6846SIoana Radulescu err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token, 141034ff6846SIoana Radulescu ha->addr); 141134ff6846SIoana Radulescu if (err) 141234ff6846SIoana Radulescu netdev_warn(priv->net_dev, 141334ff6846SIoana Radulescu "Could not add ucast MAC %pM to the filtering table (err %d)\n", 141434ff6846SIoana Radulescu ha->addr, err); 141534ff6846SIoana Radulescu } 141634ff6846SIoana Radulescu } 141734ff6846SIoana Radulescu 141834ff6846SIoana Radulescu /* Copy mac multicast addresses from @net_dev to @priv 141934ff6846SIoana Radulescu * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable. 
142034ff6846SIoana Radulescu */ 142134ff6846SIoana Radulescu static void add_mc_hw_addr(const struct net_device *net_dev, 142234ff6846SIoana Radulescu struct dpaa2_eth_priv *priv) 142334ff6846SIoana Radulescu { 142434ff6846SIoana Radulescu struct netdev_hw_addr *ha; 142534ff6846SIoana Radulescu int err; 142634ff6846SIoana Radulescu 142734ff6846SIoana Radulescu netdev_for_each_mc_addr(ha, net_dev) { 142834ff6846SIoana Radulescu err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token, 142934ff6846SIoana Radulescu ha->addr); 143034ff6846SIoana Radulescu if (err) 143134ff6846SIoana Radulescu netdev_warn(priv->net_dev, 143234ff6846SIoana Radulescu "Could not add mcast MAC %pM to the filtering table (err %d)\n", 143334ff6846SIoana Radulescu ha->addr, err); 143434ff6846SIoana Radulescu } 143534ff6846SIoana Radulescu } 143634ff6846SIoana Radulescu 143734ff6846SIoana Radulescu static void dpaa2_eth_set_rx_mode(struct net_device *net_dev) 143834ff6846SIoana Radulescu { 143934ff6846SIoana Radulescu struct dpaa2_eth_priv *priv = netdev_priv(net_dev); 144034ff6846SIoana Radulescu int uc_count = netdev_uc_count(net_dev); 144134ff6846SIoana Radulescu int mc_count = netdev_mc_count(net_dev); 144234ff6846SIoana Radulescu u8 max_mac = priv->dpni_attrs.mac_filter_entries; 144334ff6846SIoana Radulescu u32 options = priv->dpni_attrs.options; 144434ff6846SIoana Radulescu u16 mc_token = priv->mc_token; 144534ff6846SIoana Radulescu struct fsl_mc_io *mc_io = priv->mc_io; 144634ff6846SIoana Radulescu int err; 144734ff6846SIoana Radulescu 144834ff6846SIoana Radulescu /* Basic sanity checks; these probably indicate a misconfiguration */ 144934ff6846SIoana Radulescu if (options & DPNI_OPT_NO_MAC_FILTER && max_mac != 0) 145034ff6846SIoana Radulescu netdev_info(net_dev, 145134ff6846SIoana Radulescu "mac_filter_entries=%d, DPNI_OPT_NO_MAC_FILTER option must be disabled\n", 145234ff6846SIoana Radulescu max_mac); 145334ff6846SIoana Radulescu 145434ff6846SIoana Radulescu /* Force promiscuous if the 
uc or mc counts exceed our capabilities. */ 145534ff6846SIoana Radulescu if (uc_count > max_mac) { 145634ff6846SIoana Radulescu netdev_info(net_dev, 145734ff6846SIoana Radulescu "Unicast addr count reached %d, max allowed is %d; forcing promisc\n", 145834ff6846SIoana Radulescu uc_count, max_mac); 145934ff6846SIoana Radulescu goto force_promisc; 146034ff6846SIoana Radulescu } 146134ff6846SIoana Radulescu if (mc_count + uc_count > max_mac) { 146234ff6846SIoana Radulescu netdev_info(net_dev, 146334ff6846SIoana Radulescu "Unicast + multicast addr count reached %d, max allowed is %d; forcing promisc\n", 146434ff6846SIoana Radulescu uc_count + mc_count, max_mac); 146534ff6846SIoana Radulescu goto force_mc_promisc; 146634ff6846SIoana Radulescu } 146734ff6846SIoana Radulescu 146834ff6846SIoana Radulescu /* Adjust promisc settings due to flag combinations */ 146934ff6846SIoana Radulescu if (net_dev->flags & IFF_PROMISC) 147034ff6846SIoana Radulescu goto force_promisc; 147134ff6846SIoana Radulescu if (net_dev->flags & IFF_ALLMULTI) { 147234ff6846SIoana Radulescu /* First, rebuild unicast filtering table. This should be done 147334ff6846SIoana Radulescu * in promisc mode, in order to avoid frame loss while we 147434ff6846SIoana Radulescu * progressively add entries to the table. 147534ff6846SIoana Radulescu * We don't know whether we had been in promisc already, and 147634ff6846SIoana Radulescu * making an MC call to find out is expensive; so set uc promisc 147734ff6846SIoana Radulescu * nonetheless. 147834ff6846SIoana Radulescu */ 147934ff6846SIoana Radulescu err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1); 148034ff6846SIoana Radulescu if (err) 148134ff6846SIoana Radulescu netdev_warn(net_dev, "Can't set uc promisc\n"); 148234ff6846SIoana Radulescu 148334ff6846SIoana Radulescu /* Actual uc table reconstruction. 
*/ 148434ff6846SIoana Radulescu err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 0); 148534ff6846SIoana Radulescu if (err) 148634ff6846SIoana Radulescu netdev_warn(net_dev, "Can't clear uc filters\n"); 148734ff6846SIoana Radulescu add_uc_hw_addr(net_dev, priv); 148834ff6846SIoana Radulescu 148934ff6846SIoana Radulescu /* Finally, clear uc promisc and set mc promisc as requested. */ 149034ff6846SIoana Radulescu err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0); 149134ff6846SIoana Radulescu if (err) 149234ff6846SIoana Radulescu netdev_warn(net_dev, "Can't clear uc promisc\n"); 149334ff6846SIoana Radulescu goto force_mc_promisc; 149434ff6846SIoana Radulescu } 149534ff6846SIoana Radulescu 149634ff6846SIoana Radulescu /* Neither unicast, nor multicast promisc will be on... eventually. 149734ff6846SIoana Radulescu * For now, rebuild mac filtering tables while forcing both of them on. 149834ff6846SIoana Radulescu */ 149934ff6846SIoana Radulescu err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1); 150034ff6846SIoana Radulescu if (err) 150134ff6846SIoana Radulescu netdev_warn(net_dev, "Can't set uc promisc (%d)\n", err); 150234ff6846SIoana Radulescu err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1); 150334ff6846SIoana Radulescu if (err) 150434ff6846SIoana Radulescu netdev_warn(net_dev, "Can't set mc promisc (%d)\n", err); 150534ff6846SIoana Radulescu 150634ff6846SIoana Radulescu /* Actual mac filtering tables reconstruction */ 150734ff6846SIoana Radulescu err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 1); 150834ff6846SIoana Radulescu if (err) 150934ff6846SIoana Radulescu netdev_warn(net_dev, "Can't clear mac filters\n"); 151034ff6846SIoana Radulescu add_mc_hw_addr(net_dev, priv); 151134ff6846SIoana Radulescu add_uc_hw_addr(net_dev, priv); 151234ff6846SIoana Radulescu 151334ff6846SIoana Radulescu /* Now we can clear both ucast and mcast promisc, without risking 151434ff6846SIoana Radulescu * to drop legitimate frames anymore. 
151534ff6846SIoana Radulescu */ 151634ff6846SIoana Radulescu err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0); 151734ff6846SIoana Radulescu if (err) 151834ff6846SIoana Radulescu netdev_warn(net_dev, "Can't clear ucast promisc\n"); 151934ff6846SIoana Radulescu err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 0); 152034ff6846SIoana Radulescu if (err) 152134ff6846SIoana Radulescu netdev_warn(net_dev, "Can't clear mcast promisc\n"); 152234ff6846SIoana Radulescu 152334ff6846SIoana Radulescu return; 152434ff6846SIoana Radulescu 152534ff6846SIoana Radulescu force_promisc: 152634ff6846SIoana Radulescu err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1); 152734ff6846SIoana Radulescu if (err) 152834ff6846SIoana Radulescu netdev_warn(net_dev, "Can't set ucast promisc\n"); 152934ff6846SIoana Radulescu force_mc_promisc: 153034ff6846SIoana Radulescu err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1); 153134ff6846SIoana Radulescu if (err) 153234ff6846SIoana Radulescu netdev_warn(net_dev, "Can't set mcast promisc\n"); 153334ff6846SIoana Radulescu } 153434ff6846SIoana Radulescu 153534ff6846SIoana Radulescu static int dpaa2_eth_set_features(struct net_device *net_dev, 153634ff6846SIoana Radulescu netdev_features_t features) 153734ff6846SIoana Radulescu { 153834ff6846SIoana Radulescu struct dpaa2_eth_priv *priv = netdev_priv(net_dev); 153934ff6846SIoana Radulescu netdev_features_t changed = features ^ net_dev->features; 154034ff6846SIoana Radulescu bool enable; 154134ff6846SIoana Radulescu int err; 154234ff6846SIoana Radulescu 154334ff6846SIoana Radulescu if (changed & NETIF_F_RXCSUM) { 154434ff6846SIoana Radulescu enable = !!(features & NETIF_F_RXCSUM); 154534ff6846SIoana Radulescu err = set_rx_csum(priv, enable); 154634ff6846SIoana Radulescu if (err) 154734ff6846SIoana Radulescu return err; 154834ff6846SIoana Radulescu } 154934ff6846SIoana Radulescu 155034ff6846SIoana Radulescu if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) { 155134ff6846SIoana Radulescu 
enable = !!(features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)); 155234ff6846SIoana Radulescu err = set_tx_csum(priv, enable); 155334ff6846SIoana Radulescu if (err) 155434ff6846SIoana Radulescu return err; 155534ff6846SIoana Radulescu } 155634ff6846SIoana Radulescu 155734ff6846SIoana Radulescu return 0; 155834ff6846SIoana Radulescu } 155934ff6846SIoana Radulescu 156034ff6846SIoana Radulescu static int dpaa2_eth_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 156134ff6846SIoana Radulescu { 156234ff6846SIoana Radulescu struct dpaa2_eth_priv *priv = netdev_priv(dev); 156334ff6846SIoana Radulescu struct hwtstamp_config config; 156434ff6846SIoana Radulescu 156534ff6846SIoana Radulescu if (copy_from_user(&config, rq->ifr_data, sizeof(config))) 156634ff6846SIoana Radulescu return -EFAULT; 156734ff6846SIoana Radulescu 156834ff6846SIoana Radulescu switch (config.tx_type) { 156934ff6846SIoana Radulescu case HWTSTAMP_TX_OFF: 157034ff6846SIoana Radulescu priv->tx_tstamp = false; 157134ff6846SIoana Radulescu break; 157234ff6846SIoana Radulescu case HWTSTAMP_TX_ON: 157334ff6846SIoana Radulescu priv->tx_tstamp = true; 157434ff6846SIoana Radulescu break; 157534ff6846SIoana Radulescu default: 157634ff6846SIoana Radulescu return -ERANGE; 157734ff6846SIoana Radulescu } 157834ff6846SIoana Radulescu 157934ff6846SIoana Radulescu if (config.rx_filter == HWTSTAMP_FILTER_NONE) { 158034ff6846SIoana Radulescu priv->rx_tstamp = false; 158134ff6846SIoana Radulescu } else { 158234ff6846SIoana Radulescu priv->rx_tstamp = true; 158334ff6846SIoana Radulescu /* TS is set for all frame types, not only those requested */ 158434ff6846SIoana Radulescu config.rx_filter = HWTSTAMP_FILTER_ALL; 158534ff6846SIoana Radulescu } 158634ff6846SIoana Radulescu 158734ff6846SIoana Radulescu return copy_to_user(rq->ifr_data, &config, sizeof(config)) ? 
158834ff6846SIoana Radulescu -EFAULT : 0; 158934ff6846SIoana Radulescu } 159034ff6846SIoana Radulescu 159134ff6846SIoana Radulescu static int dpaa2_eth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 159234ff6846SIoana Radulescu { 159334ff6846SIoana Radulescu if (cmd == SIOCSHWTSTAMP) 159434ff6846SIoana Radulescu return dpaa2_eth_ts_ioctl(dev, rq, cmd); 159534ff6846SIoana Radulescu 159634ff6846SIoana Radulescu return -EINVAL; 159734ff6846SIoana Radulescu } 159834ff6846SIoana Radulescu 15997e273a8eSIoana Ciocoi Radulescu static bool xdp_mtu_valid(struct dpaa2_eth_priv *priv, int mtu) 16007e273a8eSIoana Ciocoi Radulescu { 16017e273a8eSIoana Ciocoi Radulescu int mfl, linear_mfl; 16027e273a8eSIoana Ciocoi Radulescu 16037e273a8eSIoana Ciocoi Radulescu mfl = DPAA2_ETH_L2_MAX_FRM(mtu); 16047e273a8eSIoana Ciocoi Radulescu linear_mfl = DPAA2_ETH_RX_BUF_SIZE - DPAA2_ETH_RX_HWA_SIZE - 16057b1eea1aSIoana Ciocoi Radulescu dpaa2_eth_rx_head_room(priv) - XDP_PACKET_HEADROOM; 16067e273a8eSIoana Ciocoi Radulescu 16077e273a8eSIoana Ciocoi Radulescu if (mfl > linear_mfl) { 16087e273a8eSIoana Ciocoi Radulescu netdev_warn(priv->net_dev, "Maximum MTU for XDP is %d\n", 16097e273a8eSIoana Ciocoi Radulescu linear_mfl - VLAN_ETH_HLEN); 16107e273a8eSIoana Ciocoi Radulescu return false; 16117e273a8eSIoana Ciocoi Radulescu } 16127e273a8eSIoana Ciocoi Radulescu 16137e273a8eSIoana Ciocoi Radulescu return true; 16147e273a8eSIoana Ciocoi Radulescu } 16157e273a8eSIoana Ciocoi Radulescu 16167e273a8eSIoana Ciocoi Radulescu static int set_rx_mfl(struct dpaa2_eth_priv *priv, int mtu, bool has_xdp) 16177e273a8eSIoana Ciocoi Radulescu { 16187e273a8eSIoana Ciocoi Radulescu int mfl, err; 16197e273a8eSIoana Ciocoi Radulescu 16207e273a8eSIoana Ciocoi Radulescu /* We enforce a maximum Rx frame length based on MTU only if we have 16217e273a8eSIoana Ciocoi Radulescu * an XDP program attached (in order to avoid Rx S/G frames). 
16227e273a8eSIoana Ciocoi Radulescu * Otherwise, we accept all incoming frames as long as they are not 16237e273a8eSIoana Ciocoi Radulescu * larger than maximum size supported in hardware 16247e273a8eSIoana Ciocoi Radulescu */ 16257e273a8eSIoana Ciocoi Radulescu if (has_xdp) 16267e273a8eSIoana Ciocoi Radulescu mfl = DPAA2_ETH_L2_MAX_FRM(mtu); 16277e273a8eSIoana Ciocoi Radulescu else 16287e273a8eSIoana Ciocoi Radulescu mfl = DPAA2_ETH_MFL; 16297e273a8eSIoana Ciocoi Radulescu 16307e273a8eSIoana Ciocoi Radulescu err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token, mfl); 16317e273a8eSIoana Ciocoi Radulescu if (err) { 16327e273a8eSIoana Ciocoi Radulescu netdev_err(priv->net_dev, "dpni_set_max_frame_length failed\n"); 16337e273a8eSIoana Ciocoi Radulescu return err; 16347e273a8eSIoana Ciocoi Radulescu } 16357e273a8eSIoana Ciocoi Radulescu 16367e273a8eSIoana Ciocoi Radulescu return 0; 16377e273a8eSIoana Ciocoi Radulescu } 16387e273a8eSIoana Ciocoi Radulescu 16397e273a8eSIoana Ciocoi Radulescu static int dpaa2_eth_change_mtu(struct net_device *dev, int new_mtu) 16407e273a8eSIoana Ciocoi Radulescu { 16417e273a8eSIoana Ciocoi Radulescu struct dpaa2_eth_priv *priv = netdev_priv(dev); 16427e273a8eSIoana Ciocoi Radulescu int err; 16437e273a8eSIoana Ciocoi Radulescu 16447e273a8eSIoana Ciocoi Radulescu if (!priv->xdp_prog) 16457e273a8eSIoana Ciocoi Radulescu goto out; 16467e273a8eSIoana Ciocoi Radulescu 16477e273a8eSIoana Ciocoi Radulescu if (!xdp_mtu_valid(priv, new_mtu)) 16487e273a8eSIoana Ciocoi Radulescu return -EINVAL; 16497e273a8eSIoana Ciocoi Radulescu 16507e273a8eSIoana Ciocoi Radulescu err = set_rx_mfl(priv, new_mtu, true); 16517e273a8eSIoana Ciocoi Radulescu if (err) 16527e273a8eSIoana Ciocoi Radulescu return err; 16537e273a8eSIoana Ciocoi Radulescu 16547e273a8eSIoana Ciocoi Radulescu out: 16557e273a8eSIoana Ciocoi Radulescu dev->mtu = new_mtu; 16567e273a8eSIoana Ciocoi Radulescu return 0; 16577e273a8eSIoana Ciocoi Radulescu } 16587e273a8eSIoana Ciocoi 
Radulescu 16597b1eea1aSIoana Ciocoi Radulescu static int update_rx_buffer_headroom(struct dpaa2_eth_priv *priv, bool has_xdp) 16607b1eea1aSIoana Ciocoi Radulescu { 16617b1eea1aSIoana Ciocoi Radulescu struct dpni_buffer_layout buf_layout = {0}; 16627b1eea1aSIoana Ciocoi Radulescu int err; 16637b1eea1aSIoana Ciocoi Radulescu 16647b1eea1aSIoana Ciocoi Radulescu err = dpni_get_buffer_layout(priv->mc_io, 0, priv->mc_token, 16657b1eea1aSIoana Ciocoi Radulescu DPNI_QUEUE_RX, &buf_layout); 16667b1eea1aSIoana Ciocoi Radulescu if (err) { 16677b1eea1aSIoana Ciocoi Radulescu netdev_err(priv->net_dev, "dpni_get_buffer_layout failed\n"); 16687b1eea1aSIoana Ciocoi Radulescu return err; 16697b1eea1aSIoana Ciocoi Radulescu } 16707b1eea1aSIoana Ciocoi Radulescu 16717b1eea1aSIoana Ciocoi Radulescu /* Reserve extra headroom for XDP header size changes */ 16727b1eea1aSIoana Ciocoi Radulescu buf_layout.data_head_room = dpaa2_eth_rx_head_room(priv) + 16737b1eea1aSIoana Ciocoi Radulescu (has_xdp ? XDP_PACKET_HEADROOM : 0); 16747b1eea1aSIoana Ciocoi Radulescu buf_layout.options = DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM; 16757b1eea1aSIoana Ciocoi Radulescu err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token, 16767b1eea1aSIoana Ciocoi Radulescu DPNI_QUEUE_RX, &buf_layout); 16777b1eea1aSIoana Ciocoi Radulescu if (err) { 16787b1eea1aSIoana Ciocoi Radulescu netdev_err(priv->net_dev, "dpni_set_buffer_layout failed\n"); 16797b1eea1aSIoana Ciocoi Radulescu return err; 16807b1eea1aSIoana Ciocoi Radulescu } 16817b1eea1aSIoana Ciocoi Radulescu 16827b1eea1aSIoana Ciocoi Radulescu return 0; 16837b1eea1aSIoana Ciocoi Radulescu } 16847b1eea1aSIoana Ciocoi Radulescu 16857e273a8eSIoana Ciocoi Radulescu static int setup_xdp(struct net_device *dev, struct bpf_prog *prog) 16867e273a8eSIoana Ciocoi Radulescu { 16877e273a8eSIoana Ciocoi Radulescu struct dpaa2_eth_priv *priv = netdev_priv(dev); 16887e273a8eSIoana Ciocoi Radulescu struct dpaa2_eth_channel *ch; 16897e273a8eSIoana Ciocoi Radulescu struct 
bpf_prog *old; 16907e273a8eSIoana Ciocoi Radulescu bool up, need_update; 16917e273a8eSIoana Ciocoi Radulescu int i, err; 16927e273a8eSIoana Ciocoi Radulescu 16937e273a8eSIoana Ciocoi Radulescu if (prog && !xdp_mtu_valid(priv, dev->mtu)) 16947e273a8eSIoana Ciocoi Radulescu return -EINVAL; 16957e273a8eSIoana Ciocoi Radulescu 16967e273a8eSIoana Ciocoi Radulescu if (prog) { 16977e273a8eSIoana Ciocoi Radulescu prog = bpf_prog_add(prog, priv->num_channels); 16987e273a8eSIoana Ciocoi Radulescu if (IS_ERR(prog)) 16997e273a8eSIoana Ciocoi Radulescu return PTR_ERR(prog); 17007e273a8eSIoana Ciocoi Radulescu } 17017e273a8eSIoana Ciocoi Radulescu 17027e273a8eSIoana Ciocoi Radulescu up = netif_running(dev); 17037e273a8eSIoana Ciocoi Radulescu need_update = (!!priv->xdp_prog != !!prog); 17047e273a8eSIoana Ciocoi Radulescu 17057e273a8eSIoana Ciocoi Radulescu if (up) 17067e273a8eSIoana Ciocoi Radulescu dpaa2_eth_stop(dev); 17077e273a8eSIoana Ciocoi Radulescu 17087b1eea1aSIoana Ciocoi Radulescu /* While in xdp mode, enforce a maximum Rx frame size based on MTU. 17097b1eea1aSIoana Ciocoi Radulescu * Also, when switching between xdp/non-xdp modes we need to reconfigure 17107b1eea1aSIoana Ciocoi Radulescu * our Rx buffer layout. Buffer pool was drained on dpaa2_eth_stop, 17117b1eea1aSIoana Ciocoi Radulescu * so we are sure no old format buffers will be used from now on. 
17127b1eea1aSIoana Ciocoi Radulescu */ 17137e273a8eSIoana Ciocoi Radulescu if (need_update) { 17147e273a8eSIoana Ciocoi Radulescu err = set_rx_mfl(priv, dev->mtu, !!prog); 17157e273a8eSIoana Ciocoi Radulescu if (err) 17167e273a8eSIoana Ciocoi Radulescu goto out_err; 17177b1eea1aSIoana Ciocoi Radulescu err = update_rx_buffer_headroom(priv, !!prog); 17187b1eea1aSIoana Ciocoi Radulescu if (err) 17197b1eea1aSIoana Ciocoi Radulescu goto out_err; 17207e273a8eSIoana Ciocoi Radulescu } 17217e273a8eSIoana Ciocoi Radulescu 17227e273a8eSIoana Ciocoi Radulescu old = xchg(&priv->xdp_prog, prog); 17237e273a8eSIoana Ciocoi Radulescu if (old) 17247e273a8eSIoana Ciocoi Radulescu bpf_prog_put(old); 17257e273a8eSIoana Ciocoi Radulescu 17267e273a8eSIoana Ciocoi Radulescu for (i = 0; i < priv->num_channels; i++) { 17277e273a8eSIoana Ciocoi Radulescu ch = priv->channel[i]; 17287e273a8eSIoana Ciocoi Radulescu old = xchg(&ch->xdp.prog, prog); 17297e273a8eSIoana Ciocoi Radulescu if (old) 17307e273a8eSIoana Ciocoi Radulescu bpf_prog_put(old); 17317e273a8eSIoana Ciocoi Radulescu } 17327e273a8eSIoana Ciocoi Radulescu 17337e273a8eSIoana Ciocoi Radulescu if (up) { 17347e273a8eSIoana Ciocoi Radulescu err = dpaa2_eth_open(dev); 17357e273a8eSIoana Ciocoi Radulescu if (err) 17367e273a8eSIoana Ciocoi Radulescu return err; 17377e273a8eSIoana Ciocoi Radulescu } 17387e273a8eSIoana Ciocoi Radulescu 17397e273a8eSIoana Ciocoi Radulescu return 0; 17407e273a8eSIoana Ciocoi Radulescu 17417e273a8eSIoana Ciocoi Radulescu out_err: 17427e273a8eSIoana Ciocoi Radulescu if (prog) 17437e273a8eSIoana Ciocoi Radulescu bpf_prog_sub(prog, priv->num_channels); 17447e273a8eSIoana Ciocoi Radulescu if (up) 17457e273a8eSIoana Ciocoi Radulescu dpaa2_eth_open(dev); 17467e273a8eSIoana Ciocoi Radulescu 17477e273a8eSIoana Ciocoi Radulescu return err; 17487e273a8eSIoana Ciocoi Radulescu } 17497e273a8eSIoana Ciocoi Radulescu 17507e273a8eSIoana Ciocoi Radulescu static int dpaa2_eth_xdp(struct net_device *dev, struct netdev_bpf *xdp) 
{
	struct dpaa2_eth_priv *priv = netdev_priv(dev);

	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return setup_xdp(dev, xdp->prog);
	case XDP_QUERY_PROG:
		/* Report the id of the currently attached program (0 = none) */
		xdp->prog_id = priv->xdp_prog ? priv->xdp_prog->aux->id : 0;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/* Build a frame descriptor for one XDP frame and enqueue it on a Tx queue.
 * Returns 0 on success or a negative errno; on failure the caller still owns
 * the xdp_frame and is responsible for freeing it.
 */
static int dpaa2_eth_xdp_xmit_frame(struct net_device *net_dev,
				    struct xdp_frame *xdpf)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	struct device *dev = net_dev->dev.parent;
	struct rtnl_link_stats64 *percpu_stats;
	struct dpaa2_eth_drv_stats *percpu_extras;
	unsigned int needed_headroom;
	struct dpaa2_eth_swa *swa;
	struct dpaa2_eth_fq *fq;
	struct dpaa2_fd fd;
	void *buffer_start, *aligned_start;
	dma_addr_t addr;
	int err, i;

	/* We require a minimum headroom to be able to transmit the frame.
	 * Otherwise return an error and let the original net_device handle it
	 */
	needed_headroom = dpaa2_eth_needed_headroom(priv, NULL);
	if (xdpf->headroom < needed_headroom)
		return -EINVAL;

	percpu_stats = this_cpu_ptr(priv->percpu_stats);
	percpu_extras = this_cpu_ptr(priv->percpu_extras);

	/* Setup the FD fields */
	memset(&fd, 0, sizeof(fd));

	/* Align FD address, if possible; only use the aligned address if it
	 * does not reach past the start of the frame's headroom
	 */
	buffer_start = xdpf->data - needed_headroom;
	aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN,
				  DPAA2_ETH_TX_BUF_ALIGN);
	if (aligned_start >= xdpf->data - xdpf->headroom)
		buffer_start = aligned_start;

	/* Software annotation area lives at the start of the buffer and is
	 * used on Tx confirmation to recover the xdp_frame
	 */
	swa = (struct dpaa2_eth_swa *)buffer_start;
	/* fill in necessary fields here */
	swa->type = DPAA2_ETH_SWA_XDP;
	swa->xdp.dma_size = xdpf->data + xdpf->len - buffer_start;
	swa->xdp.xdpf = xdpf;

	addr = dma_map_single(dev, buffer_start,
			      swa->xdp.dma_size,
			      DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(dev, addr))) {
		percpu_stats->tx_dropped++;
		return -ENOMEM;
	}

	dpaa2_fd_set_addr(&fd, addr);
	dpaa2_fd_set_offset(&fd, xdpf->data - buffer_start);
	dpaa2_fd_set_len(&fd, xdpf->len);
	dpaa2_fd_set_format(&fd, dpaa2_fd_single);
	dpaa2_fd_set_ctrl(&fd, FD_CTRL_PTA);

	/* Pick a Tx queue based on the current CPU */
	fq = &priv->fq[smp_processor_id() % dpaa2_eth_queue_count(priv)];
	for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
		err = priv->enqueue(priv, fq, &fd, 0);
		if (err != -EBUSY)
			break;
	}
	percpu_extras->tx_portal_busy += i;
	if (unlikely(err < 0)) {
		percpu_stats->tx_errors++;
		/* let the Rx device handle the cleanup */
		return err;
	}

	percpu_stats->tx_packets++;
	percpu_stats->tx_bytes += dpaa2_fd_get_len(&fd);

	return 0;
}

/* .ndo_xdp_xmit callback: transmit a batch of XDP frames.
 * Frames that fail to enqueue are returned to the XDP memory allocator;
 * returns the number of frames successfully sent.
 */
static int dpaa2_eth_xdp_xmit(struct net_device *net_dev, int n,
			      struct xdp_frame **frames, u32 flags)
{
	int drops = 0;
	int i, err;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	if (!netif_running(net_dev))
		return -ENETDOWN;

	for (i = 0; i < n; i++) {
		struct xdp_frame *xdpf = frames[i];

		err = dpaa2_eth_xdp_xmit_frame(net_dev, xdpf);
		if (err) {
			xdp_return_frame_rx_napi(xdpf);
			drops++;
		}
	}

	return n - drops;
}

/* Map each Tx queue to the CPU its Tx confirmation queue is processed on,
 * so XPS steers transmits towards that CPU. Stops at the first failure.
 */
static int update_xps(struct dpaa2_eth_priv *priv)
{
	struct net_device *net_dev = priv->net_dev;
	struct cpumask xps_mask;
	struct dpaa2_eth_fq *fq;
	int i, num_queues;
	int err = 0;

	num_queues = dpaa2_eth_queue_count(priv);

	/* The first <num_queues> entries in priv->fq array are Tx/Tx conf
	 * queues, so only process those
	 */
	for (i = 0; i < num_queues; i++) {
		fq = &priv->fq[i];

		cpumask_clear(&xps_mask);
		cpumask_set_cpu(fq->target_cpu, &xps_mask);

		err = netif_set_xps_queue(net_dev, &xps_mask, i);
		if (err) {
			netdev_warn_once(net_dev, "Error setting XPS queue\n");
			break;
		}
	}

	return err;
}

static const struct net_device_ops dpaa2_eth_ops = {
	.ndo_open = dpaa2_eth_open,
189734ff6846SIoana Radulescu .ndo_start_xmit = dpaa2_eth_tx, 189834ff6846SIoana Radulescu .ndo_stop = dpaa2_eth_stop, 189934ff6846SIoana Radulescu .ndo_set_mac_address = dpaa2_eth_set_addr, 190034ff6846SIoana Radulescu .ndo_get_stats64 = dpaa2_eth_get_stats, 190134ff6846SIoana Radulescu .ndo_set_rx_mode = dpaa2_eth_set_rx_mode, 190234ff6846SIoana Radulescu .ndo_set_features = dpaa2_eth_set_features, 190334ff6846SIoana Radulescu .ndo_do_ioctl = dpaa2_eth_ioctl, 19047e273a8eSIoana Ciocoi Radulescu .ndo_change_mtu = dpaa2_eth_change_mtu, 19057e273a8eSIoana Ciocoi Radulescu .ndo_bpf = dpaa2_eth_xdp, 1906d678be1dSIoana Radulescu .ndo_xdp_xmit = dpaa2_eth_xdp_xmit, 190734ff6846SIoana Radulescu }; 190834ff6846SIoana Radulescu 190934ff6846SIoana Radulescu static void cdan_cb(struct dpaa2_io_notification_ctx *ctx) 191034ff6846SIoana Radulescu { 191134ff6846SIoana Radulescu struct dpaa2_eth_channel *ch; 191234ff6846SIoana Radulescu 191334ff6846SIoana Radulescu ch = container_of(ctx, struct dpaa2_eth_channel, nctx); 191434ff6846SIoana Radulescu 191534ff6846SIoana Radulescu /* Update NAPI statistics */ 191634ff6846SIoana Radulescu ch->stats.cdan++; 191734ff6846SIoana Radulescu 191834ff6846SIoana Radulescu napi_schedule_irqoff(&ch->napi); 191934ff6846SIoana Radulescu } 192034ff6846SIoana Radulescu 192134ff6846SIoana Radulescu /* Allocate and configure a DPCON object */ 192234ff6846SIoana Radulescu static struct fsl_mc_device *setup_dpcon(struct dpaa2_eth_priv *priv) 192334ff6846SIoana Radulescu { 192434ff6846SIoana Radulescu struct fsl_mc_device *dpcon; 192534ff6846SIoana Radulescu struct device *dev = priv->net_dev->dev.parent; 192634ff6846SIoana Radulescu struct dpcon_attr attrs; 192734ff6846SIoana Radulescu int err; 192834ff6846SIoana Radulescu 192934ff6846SIoana Radulescu err = fsl_mc_object_allocate(to_fsl_mc_device(dev), 193034ff6846SIoana Radulescu FSL_MC_POOL_DPCON, &dpcon); 193134ff6846SIoana Radulescu if (err) { 1932d7f5a9d8SIoana Ciornei if (err == -ENXIO) 
1933d7f5a9d8SIoana Ciornei err = -EPROBE_DEFER; 1934d7f5a9d8SIoana Ciornei else 193534ff6846SIoana Radulescu dev_info(dev, "Not enough DPCONs, will go on as-is\n"); 1936d7f5a9d8SIoana Ciornei return ERR_PTR(err); 193734ff6846SIoana Radulescu } 193834ff6846SIoana Radulescu 193934ff6846SIoana Radulescu err = dpcon_open(priv->mc_io, 0, dpcon->obj_desc.id, &dpcon->mc_handle); 194034ff6846SIoana Radulescu if (err) { 194134ff6846SIoana Radulescu dev_err(dev, "dpcon_open() failed\n"); 194234ff6846SIoana Radulescu goto free; 194334ff6846SIoana Radulescu } 194434ff6846SIoana Radulescu 194534ff6846SIoana Radulescu err = dpcon_reset(priv->mc_io, 0, dpcon->mc_handle); 194634ff6846SIoana Radulescu if (err) { 194734ff6846SIoana Radulescu dev_err(dev, "dpcon_reset() failed\n"); 194834ff6846SIoana Radulescu goto close; 194934ff6846SIoana Radulescu } 195034ff6846SIoana Radulescu 195134ff6846SIoana Radulescu err = dpcon_get_attributes(priv->mc_io, 0, dpcon->mc_handle, &attrs); 195234ff6846SIoana Radulescu if (err) { 195334ff6846SIoana Radulescu dev_err(dev, "dpcon_get_attributes() failed\n"); 195434ff6846SIoana Radulescu goto close; 195534ff6846SIoana Radulescu } 195634ff6846SIoana Radulescu 195734ff6846SIoana Radulescu err = dpcon_enable(priv->mc_io, 0, dpcon->mc_handle); 195834ff6846SIoana Radulescu if (err) { 195934ff6846SIoana Radulescu dev_err(dev, "dpcon_enable() failed\n"); 196034ff6846SIoana Radulescu goto close; 196134ff6846SIoana Radulescu } 196234ff6846SIoana Radulescu 196334ff6846SIoana Radulescu return dpcon; 196434ff6846SIoana Radulescu 196534ff6846SIoana Radulescu close: 196634ff6846SIoana Radulescu dpcon_close(priv->mc_io, 0, dpcon->mc_handle); 196734ff6846SIoana Radulescu free: 196834ff6846SIoana Radulescu fsl_mc_object_free(dpcon); 196934ff6846SIoana Radulescu 197034ff6846SIoana Radulescu return NULL; 197134ff6846SIoana Radulescu } 197234ff6846SIoana Radulescu 197334ff6846SIoana Radulescu static void free_dpcon(struct dpaa2_eth_priv *priv, 197434ff6846SIoana 
		       struct fsl_mc_device *dpcon)
{
	dpcon_disable(priv->mc_io, 0, dpcon->mc_handle);
	dpcon_close(priv->mc_io, 0, dpcon->mc_handle);
	fsl_mc_object_free(dpcon);
}

/* Allocate one channel and its backing DPCON object.
 * Returns the channel, NULL if kzalloc failed, or ERR_PTR(-errno).
 */
static struct dpaa2_eth_channel *
alloc_channel(struct dpaa2_eth_priv *priv)
{
	struct dpaa2_eth_channel *channel;
	struct dpcon_attr attr;
	struct device *dev = priv->net_dev->dev.parent;
	int err;

	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
	if (!channel)
		return NULL;

	channel->dpcon = setup_dpcon(priv);
	if (IS_ERR_OR_NULL(channel->dpcon)) {
		/* NULL maps to err == 0 here; the caller treats the
		 * resulting NULL/ERR_PTR return the same way
		 */
		err = PTR_ERR_OR_ZERO(channel->dpcon);
		goto err_setup;
	}

	err = dpcon_get_attributes(priv->mc_io, 0, channel->dpcon->mc_handle,
				   &attr);
	if (err) {
		dev_err(dev, "dpcon_get_attributes() failed\n");
		goto err_get_attr;
	}

	channel->dpcon_id = attr.id;
	channel->ch_id = attr.qbman_ch_id;
	channel->priv = priv;

	return channel;

err_get_attr:
	free_dpcon(priv, channel->dpcon);
err_setup:
	kfree(channel);
	return ERR_PTR(err);
}

/* Release a channel and its DPCON object */
static void free_channel(struct dpaa2_eth_priv *priv,
			 struct dpaa2_eth_channel *channel)
{
	free_dpcon(priv, channel->dpcon);
	kfree(channel);
}

/* DPIO setup: allocate and configure QBMan channels, setup core affinity
 * and register data availability notifications
 */
static int setup_dpio(struct dpaa2_eth_priv *priv)
{
	struct dpaa2_io_notification_ctx *nctx;
	struct dpaa2_eth_channel *channel;
	struct dpcon_notification_cfg dpcon_notif_cfg;
	struct device *dev = priv->net_dev->dev.parent;
	int i, err;

	/* We want the ability to spread ingress traffic (RX, TX conf) to as
	 * many cores as possible, so we need one channel for each core
	 * (unless there's fewer queues than cores, in which case the extra
	 * channels would be wasted).
	 * Allocate one channel per core and register it to the core's
	 * affine DPIO. If not enough channels are available for all cores
	 * or if some cores don't have an affine DPIO, there will be no
	 * ingress frame processing on those cores.
	 */
	cpumask_clear(&priv->dpio_cpumask);
	for_each_online_cpu(i) {
		/* Try to allocate a channel */
		channel = alloc_channel(priv);
		if (IS_ERR_OR_NULL(channel)) {
			err = PTR_ERR_OR_ZERO(channel);
			if (err != -EPROBE_DEFER)
				dev_info(dev,
					 "No affine channel for cpu %d and above\n", i);
			goto err_alloc_ch;
		}

		priv->channel[priv->num_channels] = channel;

		nctx = &channel->nctx;
		nctx->is_cdan = 1;
		nctx->cb = cdan_cb;
		nctx->id = channel->ch_id;
		nctx->desired_cpu = i;

		/* Register the new context */
		channel->dpio = dpaa2_io_service_select(i);
		err = dpaa2_io_service_register(channel->dpio, nctx, dev);
		if (err) {
			dev_dbg(dev, "No affine DPIO for cpu %d\n", i);
			/* If no affine DPIO for this core, there's probably
			 * none available for next cores either. Signal we want
			 * to retry later, in case the DPIO devices weren't
			 * probed yet.
			 */
			err = -EPROBE_DEFER;
			goto err_service_reg;
		}

		/* Register DPCON notification with MC */
		dpcon_notif_cfg.dpio_id = nctx->dpio_id;
		dpcon_notif_cfg.priority = 0;
		dpcon_notif_cfg.user_ctx = nctx->qman64;
		err = dpcon_set_notification(priv->mc_io, 0,
					     channel->dpcon->mc_handle,
					     &dpcon_notif_cfg);
		if (err) {
			dev_err(dev, "dpcon_set_notification failed()\n");
			goto err_set_cdan;
		}

		/* If we managed to allocate a channel and also found an affine
		 * DPIO for this core, add it to the final mask
		 */
		cpumask_set_cpu(i, &priv->dpio_cpumask);
		priv->num_channels++;

		/* Stop if we already have enough channels to accommodate all
		 * RX and TX conf queues
		 */
		if (priv->num_channels == priv->dpni_attrs.num_queues)
			break;
	}

	return 0;

err_set_cdan:
	dpaa2_io_service_deregister(channel->dpio, nctx, dev);
err_service_reg:
	free_channel(priv, channel);
err_alloc_ch:
	if (err == -EPROBE_DEFER)
		return err;

	/* Partial success is acceptable: run with however many channels we
	 * managed to set up, as long as there is at least one
	 */
	if (cpumask_empty(&priv->dpio_cpumask)) {
		dev_err(dev, "No cpu with an affine DPIO/DPCON\n");
		return -ENODEV;
	}

	dev_info(dev, "Cores %*pbl available for processing ingress traffic\n",
		 cpumask_pr_args(&priv->dpio_cpumask));

	return 0;
}

/* Tear down everything set up by setup_dpio() */
static void free_dpio(struct dpaa2_eth_priv *priv)
{
	struct device *dev = priv->net_dev->dev.parent;
	struct dpaa2_eth_channel *ch;
	int i;

	/* deregister CDAN notifications and free channels */
	for (i = 0; i < priv->num_channels; i++) {
		ch = priv->channel[i];
		dpaa2_io_service_deregister(ch->dpio, &ch->nctx, dev);
		free_channel(priv, ch);
	}
}

/* Find the channel affine to the given CPU, falling back to channel 0 */
static struct dpaa2_eth_channel *get_affine_channel(struct dpaa2_eth_priv *priv,
						    int cpu)
{
	struct device *dev = priv->net_dev->dev.parent;
	int i;

	for (i = 0; i < priv->num_channels; i++)
		if (priv->channel[i]->nctx.desired_cpu == cpu)
			return priv->channel[i];

	/* We should never get here.
 Issue a warning and return
	 * the first channel, because it's still better than nothing
	 */
	dev_warn(dev, "No affine channel found for cpu %d\n", cpu);

	return priv->channel[0];
}

/* Distribute Rx and Tx-conf frame queues round-robin over the CPUs that
 * have an affine channel, then refresh the XPS mapping to match.
 */
static void set_fq_affinity(struct dpaa2_eth_priv *priv)
{
	struct device *dev = priv->net_dev->dev.parent;
	struct dpaa2_eth_fq *fq;
	int rx_cpu, txc_cpu;
	int i;

	/* For each FQ, pick one channel/CPU to deliver frames to.
	 * This may well change at runtime, either through irqbalance or
	 * through direct user intervention.
	 */
	rx_cpu = txc_cpu = cpumask_first(&priv->dpio_cpumask);

	for (i = 0; i < priv->num_fqs; i++) {
		fq = &priv->fq[i];
		switch (fq->type) {
		case DPAA2_RX_FQ:
			fq->target_cpu = rx_cpu;
			rx_cpu = cpumask_next(rx_cpu, &priv->dpio_cpumask);
			if (rx_cpu >= nr_cpu_ids)
				rx_cpu = cpumask_first(&priv->dpio_cpumask);
			break;
		case DPAA2_TX_CONF_FQ:
			fq->target_cpu = txc_cpu;
			txc_cpu = cpumask_next(txc_cpu, &priv->dpio_cpumask);
			if (txc_cpu >= nr_cpu_ids)
				txc_cpu = cpumask_first(&priv->dpio_cpumask);
			break;
		default:
			dev_err(dev, "Unknown FQ type: %d\n", fq->type);
		}
		fq->channel = get_affine_channel(priv, fq->target_cpu);
	}

	update_xps(priv);
}

/* Populate the frame queue array: Tx-conf queues first, then Rx queues */
static void setup_fqs(struct dpaa2_eth_priv *priv)
{
	int i;

	/* We have one TxConf FQ per Tx flow.
	 * The number of Tx and Rx queues is the same.
	 * Tx queues come first in the fq array.
	 */
	for (i = 0; i < dpaa2_eth_queue_count(priv); i++) {
		priv->fq[priv->num_fqs].type = DPAA2_TX_CONF_FQ;
		priv->fq[priv->num_fqs].consume = dpaa2_eth_tx_conf;
		priv->fq[priv->num_fqs++].flowid = (u16)i;
	}

	for (i = 0; i < dpaa2_eth_queue_count(priv); i++) {
		priv->fq[priv->num_fqs].type = DPAA2_RX_FQ;
		priv->fq[priv->num_fqs].consume = dpaa2_eth_rx;
		priv->fq[priv->num_fqs++].flowid = (u16)i;
	}

	/* For each FQ, decide on which core to process incoming frames */
	set_fq_affinity(priv);
}

/* Allocate and configure one buffer pool for each interface */
static int setup_dpbp(struct dpaa2_eth_priv *priv)
{
	int err;
	struct fsl_mc_device *dpbp_dev;
	struct device *dev = priv->net_dev->dev.parent;
	struct dpbp_attr dpbp_attrs;

	err = fsl_mc_object_allocate(to_fsl_mc_device(dev), FSL_MC_POOL_DPBP,
				     &dpbp_dev);
	if (err) {
		/* -ENXIO means the DPBP objects haven't been probed yet;
		 * ask the driver core to retry later
		 */
		if (err == -ENXIO)
			err = -EPROBE_DEFER;
		else
			dev_err(dev, "DPBP device allocation failed\n");
		return err;
	}

	priv->dpbp_dev = dpbp_dev;

	err = dpbp_open(priv->mc_io, 0, priv->dpbp_dev->obj_desc.id,
			&dpbp_dev->mc_handle);
	if (err) {
		dev_err(dev, "dpbp_open() failed\n");
		goto err_open;
	}

	err = dpbp_reset(priv->mc_io, 0, dpbp_dev->mc_handle);
	if (err) {
		dev_err(dev, "dpbp_reset() failed\n");
		goto err_reset;
	}

	err = dpbp_enable(priv->mc_io, 0, dpbp_dev->mc_handle);
	if (err) {
		dev_err(dev, "dpbp_enable() failed\n");
		goto err_enable;
	}

	err = dpbp_get_attributes(priv->mc_io, 0, dpbp_dev->mc_handle,
				  &dpbp_attrs);
	if (err) {
		dev_err(dev, "dpbp_get_attributes() failed\n");
		goto err_get_attr;
	}
	priv->bpid = dpbp_attrs.bpid;

	return 0;

err_get_attr:
	dpbp_disable(priv->mc_io, 0, dpbp_dev->mc_handle);
err_enable:
err_reset:
	dpbp_close(priv->mc_io, 0, dpbp_dev->mc_handle);
err_open:
	fsl_mc_object_free(dpbp_dev);

	return err;
}

/* Drain and release the interface's buffer pool */
static void free_dpbp(struct dpaa2_eth_priv *priv)
{
	drain_pool(priv);
	dpbp_disable(priv->mc_io, 0, priv->dpbp_dev->mc_handle);
	dpbp_close(priv->mc_io, 0, priv->dpbp_dev->mc_handle);
	fsl_mc_object_free(priv->dpbp_dev);
}

/* Configure Tx, Tx-conf and Rx buffer layouts on the DPNI and retrieve the
 * resulting Tx data offset.
 */
static int set_buffer_layout(struct dpaa2_eth_priv *priv)
{
	struct device *dev = priv->net_dev->dev.parent;
	struct dpni_buffer_layout buf_layout = {0};
	u16 rx_buf_align;
	int err;

	/* We need to check for WRIOP version 1.0.0, but depending on the MC
	 * version, this number is not always provided correctly on rev1.
	 * We need to check for both alternatives in this situation.
	 */
	if (priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(0, 0, 0) ||
	    priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(1, 0, 0))
		rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN_REV1;
	else
		rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN;

	/* tx buffer */
	buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE;
	buf_layout.pass_timestamp = true;
	buf_layout.options = DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE |
			     DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
	err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
				     DPNI_QUEUE_TX, &buf_layout);
	if (err) {
		dev_err(dev, "dpni_set_buffer_layout(TX) failed\n");
		return err;
	}

	/* tx-confirm buffer */
	buf_layout.options = DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
	err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
				     DPNI_QUEUE_TX_CONFIRM, &buf_layout);
	if (err) {
		dev_err(dev, "dpni_set_buffer_layout(TX_CONF) failed\n");
		return err;
	}

	/* Now that we've set our tx buffer layout, retrieve the minimum
	 * required tx data offset.
	 */
	err = dpni_get_tx_data_offset(priv->mc_io, 0, priv->mc_token,
				      &priv->tx_data_offset);
	if (err) {
		dev_err(dev, "dpni_get_tx_data_offset() failed\n");
		return err;
	}

	if ((priv->tx_data_offset % 64) != 0)
		dev_warn(dev, "Tx data offset (%d) not a multiple of 64B\n",
			 priv->tx_data_offset);

	/* rx buffer */
	buf_layout.pass_frame_status = true;
	buf_layout.pass_parser_result = true;
	buf_layout.data_align = rx_buf_align;
	buf_layout.data_head_room = dpaa2_eth_rx_head_room(priv);
	buf_layout.private_data_size = 0;
	buf_layout.options = DPNI_BUF_LAYOUT_OPT_PARSER_RESULT |
			     DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
			     DPNI_BUF_LAYOUT_OPT_DATA_ALIGN |
			     DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM |
			     DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
	err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
				     DPNI_QUEUE_RX, &buf_layout);
	if (err) {
		dev_err(dev, "dpni_set_buffer_layout(RX) failed\n");
		return err;
	}

	return 0;
}

#define DPNI_ENQUEUE_FQID_VER_MAJOR	7
#define DPNI_ENQUEUE_FQID_VER_MINOR	9
23621fa0f68cSIoana Ciocoi Radulescu 23631fa0f68cSIoana Ciocoi Radulescu static inline int dpaa2_eth_enqueue_qd(struct dpaa2_eth_priv *priv, 23641fa0f68cSIoana Ciocoi Radulescu struct dpaa2_eth_fq *fq, 23651fa0f68cSIoana Ciocoi Radulescu struct dpaa2_fd *fd, u8 prio) 23661fa0f68cSIoana Ciocoi Radulescu { 23671fa0f68cSIoana Ciocoi Radulescu return dpaa2_io_service_enqueue_qd(fq->channel->dpio, 23681fa0f68cSIoana Ciocoi Radulescu priv->tx_qdid, prio, 23691fa0f68cSIoana Ciocoi Radulescu fq->tx_qdbin, fd); 23701fa0f68cSIoana Ciocoi Radulescu } 23711fa0f68cSIoana Ciocoi Radulescu 23721fa0f68cSIoana Ciocoi Radulescu static inline int dpaa2_eth_enqueue_fq(struct dpaa2_eth_priv *priv, 23731fa0f68cSIoana Ciocoi Radulescu struct dpaa2_eth_fq *fq, 23741fa0f68cSIoana Ciocoi Radulescu struct dpaa2_fd *fd, 23751fa0f68cSIoana Ciocoi Radulescu u8 prio __always_unused) 23761fa0f68cSIoana Ciocoi Radulescu { 23771fa0f68cSIoana Ciocoi Radulescu return dpaa2_io_service_enqueue_fq(fq->channel->dpio, 23781fa0f68cSIoana Ciocoi Radulescu fq->tx_fqid, fd); 23791fa0f68cSIoana Ciocoi Radulescu } 23801fa0f68cSIoana Ciocoi Radulescu 23811fa0f68cSIoana Ciocoi Radulescu static void set_enqueue_mode(struct dpaa2_eth_priv *priv) 23821fa0f68cSIoana Ciocoi Radulescu { 23831fa0f68cSIoana Ciocoi Radulescu if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_ENQUEUE_FQID_VER_MAJOR, 23841fa0f68cSIoana Ciocoi Radulescu DPNI_ENQUEUE_FQID_VER_MINOR) < 0) 23851fa0f68cSIoana Ciocoi Radulescu priv->enqueue = dpaa2_eth_enqueue_qd; 23861fa0f68cSIoana Ciocoi Radulescu else 23871fa0f68cSIoana Ciocoi Radulescu priv->enqueue = dpaa2_eth_enqueue_fq; 23881fa0f68cSIoana Ciocoi Radulescu } 23891fa0f68cSIoana Ciocoi Radulescu 239034ff6846SIoana Radulescu /* Configure the DPNI object this interface is associated with */ 239134ff6846SIoana Radulescu static int setup_dpni(struct fsl_mc_device *ls_dev) 239234ff6846SIoana Radulescu { 239334ff6846SIoana Radulescu struct device *dev = &ls_dev->dev; 239434ff6846SIoana Radulescu struct 
dpaa2_eth_priv *priv; 239534ff6846SIoana Radulescu struct net_device *net_dev; 239634ff6846SIoana Radulescu int err; 239734ff6846SIoana Radulescu 239834ff6846SIoana Radulescu net_dev = dev_get_drvdata(dev); 239934ff6846SIoana Radulescu priv = netdev_priv(net_dev); 240034ff6846SIoana Radulescu 240134ff6846SIoana Radulescu /* get a handle for the DPNI object */ 240234ff6846SIoana Radulescu err = dpni_open(priv->mc_io, 0, ls_dev->obj_desc.id, &priv->mc_token); 240334ff6846SIoana Radulescu if (err) { 240434ff6846SIoana Radulescu dev_err(dev, "dpni_open() failed\n"); 240534ff6846SIoana Radulescu return err; 240634ff6846SIoana Radulescu } 240734ff6846SIoana Radulescu 240834ff6846SIoana Radulescu /* Check if we can work with this DPNI object */ 240934ff6846SIoana Radulescu err = dpni_get_api_version(priv->mc_io, 0, &priv->dpni_ver_major, 241034ff6846SIoana Radulescu &priv->dpni_ver_minor); 241134ff6846SIoana Radulescu if (err) { 241234ff6846SIoana Radulescu dev_err(dev, "dpni_get_api_version() failed\n"); 241334ff6846SIoana Radulescu goto close; 241434ff6846SIoana Radulescu } 241534ff6846SIoana Radulescu if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_VER_MAJOR, DPNI_VER_MINOR) < 0) { 241634ff6846SIoana Radulescu dev_err(dev, "DPNI version %u.%u not supported, need >= %u.%u\n", 241734ff6846SIoana Radulescu priv->dpni_ver_major, priv->dpni_ver_minor, 241834ff6846SIoana Radulescu DPNI_VER_MAJOR, DPNI_VER_MINOR); 241934ff6846SIoana Radulescu err = -ENOTSUPP; 242034ff6846SIoana Radulescu goto close; 242134ff6846SIoana Radulescu } 242234ff6846SIoana Radulescu 242334ff6846SIoana Radulescu ls_dev->mc_io = priv->mc_io; 242434ff6846SIoana Radulescu ls_dev->mc_handle = priv->mc_token; 242534ff6846SIoana Radulescu 242634ff6846SIoana Radulescu err = dpni_reset(priv->mc_io, 0, priv->mc_token); 242734ff6846SIoana Radulescu if (err) { 242834ff6846SIoana Radulescu dev_err(dev, "dpni_reset() failed\n"); 242934ff6846SIoana Radulescu goto close; 243034ff6846SIoana Radulescu } 243134ff6846SIoana 
Radulescu 243234ff6846SIoana Radulescu err = dpni_get_attributes(priv->mc_io, 0, priv->mc_token, 243334ff6846SIoana Radulescu &priv->dpni_attrs); 243434ff6846SIoana Radulescu if (err) { 243534ff6846SIoana Radulescu dev_err(dev, "dpni_get_attributes() failed (err=%d)\n", err); 243634ff6846SIoana Radulescu goto close; 243734ff6846SIoana Radulescu } 243834ff6846SIoana Radulescu 243934ff6846SIoana Radulescu err = set_buffer_layout(priv); 244034ff6846SIoana Radulescu if (err) 244134ff6846SIoana Radulescu goto close; 244234ff6846SIoana Radulescu 24431fa0f68cSIoana Ciocoi Radulescu set_enqueue_mode(priv); 24441fa0f68cSIoana Ciocoi Radulescu 2445afb90dbbSIoana Radulescu priv->cls_rules = devm_kzalloc(dev, sizeof(struct dpaa2_eth_cls_rule) * 2446afb90dbbSIoana Radulescu dpaa2_eth_fs_count(priv), GFP_KERNEL); 2447afb90dbbSIoana Radulescu if (!priv->cls_rules) 2448afb90dbbSIoana Radulescu goto close; 2449afb90dbbSIoana Radulescu 245034ff6846SIoana Radulescu return 0; 245134ff6846SIoana Radulescu 245234ff6846SIoana Radulescu close: 245334ff6846SIoana Radulescu dpni_close(priv->mc_io, 0, priv->mc_token); 245434ff6846SIoana Radulescu 245534ff6846SIoana Radulescu return err; 245634ff6846SIoana Radulescu } 245734ff6846SIoana Radulescu 245834ff6846SIoana Radulescu static void free_dpni(struct dpaa2_eth_priv *priv) 245934ff6846SIoana Radulescu { 246034ff6846SIoana Radulescu int err; 246134ff6846SIoana Radulescu 246234ff6846SIoana Radulescu err = dpni_reset(priv->mc_io, 0, priv->mc_token); 246334ff6846SIoana Radulescu if (err) 246434ff6846SIoana Radulescu netdev_warn(priv->net_dev, "dpni_reset() failed (err %d)\n", 246534ff6846SIoana Radulescu err); 246634ff6846SIoana Radulescu 246734ff6846SIoana Radulescu dpni_close(priv->mc_io, 0, priv->mc_token); 246834ff6846SIoana Radulescu } 246934ff6846SIoana Radulescu 247034ff6846SIoana Radulescu static int setup_rx_flow(struct dpaa2_eth_priv *priv, 247134ff6846SIoana Radulescu struct dpaa2_eth_fq *fq) 247234ff6846SIoana Radulescu { 
247334ff6846SIoana Radulescu struct device *dev = priv->net_dev->dev.parent; 247434ff6846SIoana Radulescu struct dpni_queue queue; 247534ff6846SIoana Radulescu struct dpni_queue_id qid; 247634ff6846SIoana Radulescu struct dpni_taildrop td; 247734ff6846SIoana Radulescu int err; 247834ff6846SIoana Radulescu 247934ff6846SIoana Radulescu err = dpni_get_queue(priv->mc_io, 0, priv->mc_token, 248034ff6846SIoana Radulescu DPNI_QUEUE_RX, 0, fq->flowid, &queue, &qid); 248134ff6846SIoana Radulescu if (err) { 248234ff6846SIoana Radulescu dev_err(dev, "dpni_get_queue(RX) failed\n"); 248334ff6846SIoana Radulescu return err; 248434ff6846SIoana Radulescu } 248534ff6846SIoana Radulescu 248634ff6846SIoana Radulescu fq->fqid = qid.fqid; 248734ff6846SIoana Radulescu 248834ff6846SIoana Radulescu queue.destination.id = fq->channel->dpcon_id; 248934ff6846SIoana Radulescu queue.destination.type = DPNI_DEST_DPCON; 249034ff6846SIoana Radulescu queue.destination.priority = 1; 249134ff6846SIoana Radulescu queue.user_context = (u64)(uintptr_t)fq; 249234ff6846SIoana Radulescu err = dpni_set_queue(priv->mc_io, 0, priv->mc_token, 249334ff6846SIoana Radulescu DPNI_QUEUE_RX, 0, fq->flowid, 249416fa1cf1SIoana Radulescu DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST, 249534ff6846SIoana Radulescu &queue); 249634ff6846SIoana Radulescu if (err) { 249734ff6846SIoana Radulescu dev_err(dev, "dpni_set_queue(RX) failed\n"); 249834ff6846SIoana Radulescu return err; 249934ff6846SIoana Radulescu } 250034ff6846SIoana Radulescu 250134ff6846SIoana Radulescu td.enable = 1; 250234ff6846SIoana Radulescu td.threshold = DPAA2_ETH_TAILDROP_THRESH; 250334ff6846SIoana Radulescu err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token, DPNI_CP_QUEUE, 250434ff6846SIoana Radulescu DPNI_QUEUE_RX, 0, fq->flowid, &td); 250534ff6846SIoana Radulescu if (err) { 250634ff6846SIoana Radulescu dev_err(dev, "dpni_set_threshold() failed\n"); 250734ff6846SIoana Radulescu return err; 250834ff6846SIoana Radulescu } 250934ff6846SIoana 
Radulescu 2510d678be1dSIoana Radulescu /* xdp_rxq setup */ 2511d678be1dSIoana Radulescu err = xdp_rxq_info_reg(&fq->channel->xdp_rxq, priv->net_dev, 2512d678be1dSIoana Radulescu fq->flowid); 2513d678be1dSIoana Radulescu if (err) { 2514d678be1dSIoana Radulescu dev_err(dev, "xdp_rxq_info_reg failed\n"); 2515d678be1dSIoana Radulescu return err; 2516d678be1dSIoana Radulescu } 2517d678be1dSIoana Radulescu 2518d678be1dSIoana Radulescu err = xdp_rxq_info_reg_mem_model(&fq->channel->xdp_rxq, 2519d678be1dSIoana Radulescu MEM_TYPE_PAGE_ORDER0, NULL); 2520d678be1dSIoana Radulescu if (err) { 2521d678be1dSIoana Radulescu dev_err(dev, "xdp_rxq_info_reg_mem_model failed\n"); 2522d678be1dSIoana Radulescu return err; 2523d678be1dSIoana Radulescu } 2524d678be1dSIoana Radulescu 252534ff6846SIoana Radulescu return 0; 252634ff6846SIoana Radulescu } 252734ff6846SIoana Radulescu 252834ff6846SIoana Radulescu static int setup_tx_flow(struct dpaa2_eth_priv *priv, 252934ff6846SIoana Radulescu struct dpaa2_eth_fq *fq) 253034ff6846SIoana Radulescu { 253134ff6846SIoana Radulescu struct device *dev = priv->net_dev->dev.parent; 253234ff6846SIoana Radulescu struct dpni_queue queue; 253334ff6846SIoana Radulescu struct dpni_queue_id qid; 253434ff6846SIoana Radulescu int err; 253534ff6846SIoana Radulescu 253634ff6846SIoana Radulescu err = dpni_get_queue(priv->mc_io, 0, priv->mc_token, 253734ff6846SIoana Radulescu DPNI_QUEUE_TX, 0, fq->flowid, &queue, &qid); 253834ff6846SIoana Radulescu if (err) { 253934ff6846SIoana Radulescu dev_err(dev, "dpni_get_queue(TX) failed\n"); 254034ff6846SIoana Radulescu return err; 254134ff6846SIoana Radulescu } 254234ff6846SIoana Radulescu 254334ff6846SIoana Radulescu fq->tx_qdbin = qid.qdbin; 25441fa0f68cSIoana Ciocoi Radulescu fq->tx_fqid = qid.fqid; 254534ff6846SIoana Radulescu 254634ff6846SIoana Radulescu err = dpni_get_queue(priv->mc_io, 0, priv->mc_token, 254734ff6846SIoana Radulescu DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid, 254834ff6846SIoana Radulescu &queue, &qid); 
254934ff6846SIoana Radulescu if (err) { 255034ff6846SIoana Radulescu dev_err(dev, "dpni_get_queue(TX_CONF) failed\n"); 255134ff6846SIoana Radulescu return err; 255234ff6846SIoana Radulescu } 255334ff6846SIoana Radulescu 255434ff6846SIoana Radulescu fq->fqid = qid.fqid; 255534ff6846SIoana Radulescu 255634ff6846SIoana Radulescu queue.destination.id = fq->channel->dpcon_id; 255734ff6846SIoana Radulescu queue.destination.type = DPNI_DEST_DPCON; 255834ff6846SIoana Radulescu queue.destination.priority = 0; 255934ff6846SIoana Radulescu queue.user_context = (u64)(uintptr_t)fq; 256034ff6846SIoana Radulescu err = dpni_set_queue(priv->mc_io, 0, priv->mc_token, 256134ff6846SIoana Radulescu DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid, 256234ff6846SIoana Radulescu DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST, 256334ff6846SIoana Radulescu &queue); 256434ff6846SIoana Radulescu if (err) { 256534ff6846SIoana Radulescu dev_err(dev, "dpni_set_queue(TX_CONF) failed\n"); 256634ff6846SIoana Radulescu return err; 256734ff6846SIoana Radulescu } 256834ff6846SIoana Radulescu 256934ff6846SIoana Radulescu return 0; 257034ff6846SIoana Radulescu } 257134ff6846SIoana Radulescu 2572edad8d26SIoana Ciocoi Radulescu /* Supported header fields for Rx hash distribution key */ 2573f76c483aSIoana Radulescu static const struct dpaa2_eth_dist_fields dist_fields[] = { 257434ff6846SIoana Radulescu { 2575edad8d26SIoana Ciocoi Radulescu /* L2 header */ 2576edad8d26SIoana Ciocoi Radulescu .rxnfc_field = RXH_L2DA, 2577edad8d26SIoana Ciocoi Radulescu .cls_prot = NET_PROT_ETH, 2578edad8d26SIoana Ciocoi Radulescu .cls_field = NH_FLD_ETH_DA, 25793a1e6b84SIoana Ciocoi Radulescu .id = DPAA2_ETH_DIST_ETHDST, 2580edad8d26SIoana Ciocoi Radulescu .size = 6, 2581edad8d26SIoana Ciocoi Radulescu }, { 2582afb90dbbSIoana Radulescu .cls_prot = NET_PROT_ETH, 2583afb90dbbSIoana Radulescu .cls_field = NH_FLD_ETH_SA, 25843a1e6b84SIoana Ciocoi Radulescu .id = DPAA2_ETH_DIST_ETHSRC, 2585afb90dbbSIoana Radulescu .size = 6, 
2586afb90dbbSIoana Radulescu }, { 2587afb90dbbSIoana Radulescu /* This is the last ethertype field parsed: 2588afb90dbbSIoana Radulescu * depending on frame format, it can be the MAC ethertype 2589afb90dbbSIoana Radulescu * or the VLAN etype. 2590afb90dbbSIoana Radulescu */ 2591afb90dbbSIoana Radulescu .cls_prot = NET_PROT_ETH, 2592afb90dbbSIoana Radulescu .cls_field = NH_FLD_ETH_TYPE, 25933a1e6b84SIoana Ciocoi Radulescu .id = DPAA2_ETH_DIST_ETHTYPE, 2594afb90dbbSIoana Radulescu .size = 2, 2595afb90dbbSIoana Radulescu }, { 2596edad8d26SIoana Ciocoi Radulescu /* VLAN header */ 2597edad8d26SIoana Ciocoi Radulescu .rxnfc_field = RXH_VLAN, 2598edad8d26SIoana Ciocoi Radulescu .cls_prot = NET_PROT_VLAN, 2599edad8d26SIoana Ciocoi Radulescu .cls_field = NH_FLD_VLAN_TCI, 26003a1e6b84SIoana Ciocoi Radulescu .id = DPAA2_ETH_DIST_VLAN, 2601edad8d26SIoana Ciocoi Radulescu .size = 2, 2602edad8d26SIoana Ciocoi Radulescu }, { 260334ff6846SIoana Radulescu /* IP header */ 260434ff6846SIoana Radulescu .rxnfc_field = RXH_IP_SRC, 260534ff6846SIoana Radulescu .cls_prot = NET_PROT_IP, 260634ff6846SIoana Radulescu .cls_field = NH_FLD_IP_SRC, 26073a1e6b84SIoana Ciocoi Radulescu .id = DPAA2_ETH_DIST_IPSRC, 260834ff6846SIoana Radulescu .size = 4, 260934ff6846SIoana Radulescu }, { 261034ff6846SIoana Radulescu .rxnfc_field = RXH_IP_DST, 261134ff6846SIoana Radulescu .cls_prot = NET_PROT_IP, 261234ff6846SIoana Radulescu .cls_field = NH_FLD_IP_DST, 26133a1e6b84SIoana Ciocoi Radulescu .id = DPAA2_ETH_DIST_IPDST, 261434ff6846SIoana Radulescu .size = 4, 261534ff6846SIoana Radulescu }, { 261634ff6846SIoana Radulescu .rxnfc_field = RXH_L3_PROTO, 261734ff6846SIoana Radulescu .cls_prot = NET_PROT_IP, 261834ff6846SIoana Radulescu .cls_field = NH_FLD_IP_PROTO, 26193a1e6b84SIoana Ciocoi Radulescu .id = DPAA2_ETH_DIST_IPPROTO, 262034ff6846SIoana Radulescu .size = 1, 262134ff6846SIoana Radulescu }, { 262234ff6846SIoana Radulescu /* Using UDP ports, this is functionally equivalent to raw 262334ff6846SIoana 
Radulescu * byte pairs from L4 header. 262434ff6846SIoana Radulescu */ 262534ff6846SIoana Radulescu .rxnfc_field = RXH_L4_B_0_1, 262634ff6846SIoana Radulescu .cls_prot = NET_PROT_UDP, 262734ff6846SIoana Radulescu .cls_field = NH_FLD_UDP_PORT_SRC, 26283a1e6b84SIoana Ciocoi Radulescu .id = DPAA2_ETH_DIST_L4SRC, 262934ff6846SIoana Radulescu .size = 2, 263034ff6846SIoana Radulescu }, { 263134ff6846SIoana Radulescu .rxnfc_field = RXH_L4_B_2_3, 263234ff6846SIoana Radulescu .cls_prot = NET_PROT_UDP, 263334ff6846SIoana Radulescu .cls_field = NH_FLD_UDP_PORT_DST, 26343a1e6b84SIoana Ciocoi Radulescu .id = DPAA2_ETH_DIST_L4DST, 263534ff6846SIoana Radulescu .size = 2, 263634ff6846SIoana Radulescu }, 263734ff6846SIoana Radulescu }; 263834ff6846SIoana Radulescu 2639df85aeb9SIoana Radulescu /* Configure the Rx hash key using the legacy API */ 2640df85aeb9SIoana Radulescu static int config_legacy_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key) 2641df85aeb9SIoana Radulescu { 2642df85aeb9SIoana Radulescu struct device *dev = priv->net_dev->dev.parent; 2643df85aeb9SIoana Radulescu struct dpni_rx_tc_dist_cfg dist_cfg; 2644df85aeb9SIoana Radulescu int err; 2645df85aeb9SIoana Radulescu 2646df85aeb9SIoana Radulescu memset(&dist_cfg, 0, sizeof(dist_cfg)); 2647df85aeb9SIoana Radulescu 2648df85aeb9SIoana Radulescu dist_cfg.key_cfg_iova = key; 2649df85aeb9SIoana Radulescu dist_cfg.dist_size = dpaa2_eth_queue_count(priv); 2650df85aeb9SIoana Radulescu dist_cfg.dist_mode = DPNI_DIST_MODE_HASH; 2651df85aeb9SIoana Radulescu 2652df85aeb9SIoana Radulescu err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token, 0, &dist_cfg); 2653df85aeb9SIoana Radulescu if (err) 2654df85aeb9SIoana Radulescu dev_err(dev, "dpni_set_rx_tc_dist failed\n"); 2655df85aeb9SIoana Radulescu 2656df85aeb9SIoana Radulescu return err; 2657df85aeb9SIoana Radulescu } 2658df85aeb9SIoana Radulescu 2659df85aeb9SIoana Radulescu /* Configure the Rx hash key using the new API */ 2660df85aeb9SIoana Radulescu static int 
config_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key) 2661df85aeb9SIoana Radulescu { 2662df85aeb9SIoana Radulescu struct device *dev = priv->net_dev->dev.parent; 2663df85aeb9SIoana Radulescu struct dpni_rx_dist_cfg dist_cfg; 2664df85aeb9SIoana Radulescu int err; 2665df85aeb9SIoana Radulescu 2666df85aeb9SIoana Radulescu memset(&dist_cfg, 0, sizeof(dist_cfg)); 2667df85aeb9SIoana Radulescu 2668df85aeb9SIoana Radulescu dist_cfg.key_cfg_iova = key; 2669df85aeb9SIoana Radulescu dist_cfg.dist_size = dpaa2_eth_queue_count(priv); 2670df85aeb9SIoana Radulescu dist_cfg.enable = 1; 2671df85aeb9SIoana Radulescu 2672df85aeb9SIoana Radulescu err = dpni_set_rx_hash_dist(priv->mc_io, 0, priv->mc_token, &dist_cfg); 2673df85aeb9SIoana Radulescu if (err) 2674df85aeb9SIoana Radulescu dev_err(dev, "dpni_set_rx_hash_dist failed\n"); 2675df85aeb9SIoana Radulescu 2676df85aeb9SIoana Radulescu return err; 2677df85aeb9SIoana Radulescu } 2678df85aeb9SIoana Radulescu 26794aaaf9b9SIoana Radulescu /* Configure the Rx flow classification key */ 26804aaaf9b9SIoana Radulescu static int config_cls_key(struct dpaa2_eth_priv *priv, dma_addr_t key) 26814aaaf9b9SIoana Radulescu { 26824aaaf9b9SIoana Radulescu struct device *dev = priv->net_dev->dev.parent; 26834aaaf9b9SIoana Radulescu struct dpni_rx_dist_cfg dist_cfg; 26844aaaf9b9SIoana Radulescu int err; 26854aaaf9b9SIoana Radulescu 26864aaaf9b9SIoana Radulescu memset(&dist_cfg, 0, sizeof(dist_cfg)); 26874aaaf9b9SIoana Radulescu 26884aaaf9b9SIoana Radulescu dist_cfg.key_cfg_iova = key; 26894aaaf9b9SIoana Radulescu dist_cfg.dist_size = dpaa2_eth_queue_count(priv); 26904aaaf9b9SIoana Radulescu dist_cfg.enable = 1; 26914aaaf9b9SIoana Radulescu 26924aaaf9b9SIoana Radulescu err = dpni_set_rx_fs_dist(priv->mc_io, 0, priv->mc_token, &dist_cfg); 26934aaaf9b9SIoana Radulescu if (err) 26944aaaf9b9SIoana Radulescu dev_err(dev, "dpni_set_rx_fs_dist failed\n"); 26954aaaf9b9SIoana Radulescu 26964aaaf9b9SIoana Radulescu return err; 26974aaaf9b9SIoana Radulescu } 
26984aaaf9b9SIoana Radulescu 2699afb90dbbSIoana Radulescu /* Size of the Rx flow classification key */ 27002d680237SIoana Ciocoi Radulescu int dpaa2_eth_cls_key_size(u64 fields) 2701afb90dbbSIoana Radulescu { 2702afb90dbbSIoana Radulescu int i, size = 0; 2703afb90dbbSIoana Radulescu 27042d680237SIoana Ciocoi Radulescu for (i = 0; i < ARRAY_SIZE(dist_fields); i++) { 27052d680237SIoana Ciocoi Radulescu if (!(fields & dist_fields[i].id)) 27062d680237SIoana Ciocoi Radulescu continue; 2707afb90dbbSIoana Radulescu size += dist_fields[i].size; 27082d680237SIoana Ciocoi Radulescu } 2709afb90dbbSIoana Radulescu 2710afb90dbbSIoana Radulescu return size; 2711afb90dbbSIoana Radulescu } 2712afb90dbbSIoana Radulescu 2713afb90dbbSIoana Radulescu /* Offset of header field in Rx classification key */ 2714afb90dbbSIoana Radulescu int dpaa2_eth_cls_fld_off(int prot, int field) 2715afb90dbbSIoana Radulescu { 2716afb90dbbSIoana Radulescu int i, off = 0; 2717afb90dbbSIoana Radulescu 2718afb90dbbSIoana Radulescu for (i = 0; i < ARRAY_SIZE(dist_fields); i++) { 2719afb90dbbSIoana Radulescu if (dist_fields[i].cls_prot == prot && 2720afb90dbbSIoana Radulescu dist_fields[i].cls_field == field) 2721afb90dbbSIoana Radulescu return off; 2722afb90dbbSIoana Radulescu off += dist_fields[i].size; 2723afb90dbbSIoana Radulescu } 2724afb90dbbSIoana Radulescu 2725afb90dbbSIoana Radulescu WARN_ONCE(1, "Unsupported header field used for Rx flow cls\n"); 2726afb90dbbSIoana Radulescu return 0; 2727afb90dbbSIoana Radulescu } 2728afb90dbbSIoana Radulescu 27292d680237SIoana Ciocoi Radulescu /* Prune unused fields from the classification rule. 
27302d680237SIoana Ciocoi Radulescu * Used when masking is not supported 27312d680237SIoana Ciocoi Radulescu */ 27322d680237SIoana Ciocoi Radulescu void dpaa2_eth_cls_trim_rule(void *key_mem, u64 fields) 27332d680237SIoana Ciocoi Radulescu { 27342d680237SIoana Ciocoi Radulescu int off = 0, new_off = 0; 27352d680237SIoana Ciocoi Radulescu int i, size; 27362d680237SIoana Ciocoi Radulescu 27372d680237SIoana Ciocoi Radulescu for (i = 0; i < ARRAY_SIZE(dist_fields); i++) { 27382d680237SIoana Ciocoi Radulescu size = dist_fields[i].size; 27392d680237SIoana Ciocoi Radulescu if (dist_fields[i].id & fields) { 27402d680237SIoana Ciocoi Radulescu memcpy(key_mem + new_off, key_mem + off, size); 27412d680237SIoana Ciocoi Radulescu new_off += size; 27422d680237SIoana Ciocoi Radulescu } 27432d680237SIoana Ciocoi Radulescu off += size; 27442d680237SIoana Ciocoi Radulescu } 27452d680237SIoana Ciocoi Radulescu } 27462d680237SIoana Ciocoi Radulescu 27474aaaf9b9SIoana Radulescu /* Set Rx distribution (hash or flow classification) key 274834ff6846SIoana Radulescu * flags is a combination of RXH_ bits 274934ff6846SIoana Radulescu */ 27503233c151SIoana Ciornei static int dpaa2_eth_set_dist_key(struct net_device *net_dev, 27514aaaf9b9SIoana Radulescu enum dpaa2_eth_rx_dist type, u64 flags) 275234ff6846SIoana Radulescu { 275334ff6846SIoana Radulescu struct device *dev = net_dev->dev.parent; 275434ff6846SIoana Radulescu struct dpaa2_eth_priv *priv = netdev_priv(net_dev); 275534ff6846SIoana Radulescu struct dpkg_profile_cfg cls_cfg; 2756edad8d26SIoana Ciocoi Radulescu u32 rx_hash_fields = 0; 2757df85aeb9SIoana Radulescu dma_addr_t key_iova; 275834ff6846SIoana Radulescu u8 *dma_mem; 275934ff6846SIoana Radulescu int i; 276034ff6846SIoana Radulescu int err = 0; 276134ff6846SIoana Radulescu 276234ff6846SIoana Radulescu memset(&cls_cfg, 0, sizeof(cls_cfg)); 276334ff6846SIoana Radulescu 2764f76c483aSIoana Radulescu for (i = 0; i < ARRAY_SIZE(dist_fields); i++) { 276534ff6846SIoana Radulescu struct 
dpkg_extract *key = 276634ff6846SIoana Radulescu &cls_cfg.extracts[cls_cfg.num_extracts]; 276734ff6846SIoana Radulescu 27682d680237SIoana Ciocoi Radulescu /* For both Rx hashing and classification keys 27692d680237SIoana Ciocoi Radulescu * we set only the selected fields. 27704aaaf9b9SIoana Radulescu */ 27713a1e6b84SIoana Ciocoi Radulescu if (!(flags & dist_fields[i].id)) 277234ff6846SIoana Radulescu continue; 27732d680237SIoana Ciocoi Radulescu if (type == DPAA2_ETH_RX_DIST_HASH) 27744aaaf9b9SIoana Radulescu rx_hash_fields |= dist_fields[i].rxnfc_field; 277534ff6846SIoana Radulescu 277634ff6846SIoana Radulescu if (cls_cfg.num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) { 277734ff6846SIoana Radulescu dev_err(dev, "error adding key extraction rule, too many rules?\n"); 277834ff6846SIoana Radulescu return -E2BIG; 277934ff6846SIoana Radulescu } 278034ff6846SIoana Radulescu 278134ff6846SIoana Radulescu key->type = DPKG_EXTRACT_FROM_HDR; 2782f76c483aSIoana Radulescu key->extract.from_hdr.prot = dist_fields[i].cls_prot; 278334ff6846SIoana Radulescu key->extract.from_hdr.type = DPKG_FULL_FIELD; 2784f76c483aSIoana Radulescu key->extract.from_hdr.field = dist_fields[i].cls_field; 278534ff6846SIoana Radulescu cls_cfg.num_extracts++; 278634ff6846SIoana Radulescu } 278734ff6846SIoana Radulescu 278834ff6846SIoana Radulescu dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL); 278934ff6846SIoana Radulescu if (!dma_mem) 279034ff6846SIoana Radulescu return -ENOMEM; 279134ff6846SIoana Radulescu 279234ff6846SIoana Radulescu err = dpni_prepare_key_cfg(&cls_cfg, dma_mem); 279334ff6846SIoana Radulescu if (err) { 279434ff6846SIoana Radulescu dev_err(dev, "dpni_prepare_key_cfg error %d\n", err); 2795df85aeb9SIoana Radulescu goto free_key; 279634ff6846SIoana Radulescu } 279734ff6846SIoana Radulescu 279834ff6846SIoana Radulescu /* Prepare for setting the rx dist */ 2799df85aeb9SIoana Radulescu key_iova = dma_map_single(dev, dma_mem, DPAA2_CLASSIFIER_DMA_SIZE, 280034ff6846SIoana Radulescu 
DMA_TO_DEVICE); 2801df85aeb9SIoana Radulescu if (dma_mapping_error(dev, key_iova)) { 280234ff6846SIoana Radulescu dev_err(dev, "DMA mapping failed\n"); 280334ff6846SIoana Radulescu err = -ENOMEM; 2804df85aeb9SIoana Radulescu goto free_key; 280534ff6846SIoana Radulescu } 280634ff6846SIoana Radulescu 28074aaaf9b9SIoana Radulescu if (type == DPAA2_ETH_RX_DIST_HASH) { 2808df85aeb9SIoana Radulescu if (dpaa2_eth_has_legacy_dist(priv)) 2809df85aeb9SIoana Radulescu err = config_legacy_hash_key(priv, key_iova); 2810edad8d26SIoana Ciocoi Radulescu else 2811df85aeb9SIoana Radulescu err = config_hash_key(priv, key_iova); 28124aaaf9b9SIoana Radulescu } else { 28134aaaf9b9SIoana Radulescu err = config_cls_key(priv, key_iova); 28144aaaf9b9SIoana Radulescu } 2815df85aeb9SIoana Radulescu 2816df85aeb9SIoana Radulescu dma_unmap_single(dev, key_iova, DPAA2_CLASSIFIER_DMA_SIZE, 2817df85aeb9SIoana Radulescu DMA_TO_DEVICE); 28184aaaf9b9SIoana Radulescu if (!err && type == DPAA2_ETH_RX_DIST_HASH) 2819edad8d26SIoana Ciocoi Radulescu priv->rx_hash_fields = rx_hash_fields; 282034ff6846SIoana Radulescu 2821df85aeb9SIoana Radulescu free_key: 282234ff6846SIoana Radulescu kfree(dma_mem); 282334ff6846SIoana Radulescu return err; 282434ff6846SIoana Radulescu } 282534ff6846SIoana Radulescu 28264aaaf9b9SIoana Radulescu int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags) 28274aaaf9b9SIoana Radulescu { 28284aaaf9b9SIoana Radulescu struct dpaa2_eth_priv *priv = netdev_priv(net_dev); 28293a1e6b84SIoana Ciocoi Radulescu u64 key = 0; 28303a1e6b84SIoana Ciocoi Radulescu int i; 28314aaaf9b9SIoana Radulescu 28324aaaf9b9SIoana Radulescu if (!dpaa2_eth_hash_enabled(priv)) 28334aaaf9b9SIoana Radulescu return -EOPNOTSUPP; 28344aaaf9b9SIoana Radulescu 28353a1e6b84SIoana Ciocoi Radulescu for (i = 0; i < ARRAY_SIZE(dist_fields); i++) 28363a1e6b84SIoana Ciocoi Radulescu if (dist_fields[i].rxnfc_field & flags) 28373a1e6b84SIoana Ciocoi Radulescu key |= dist_fields[i].id; 28383a1e6b84SIoana Ciocoi Radulescu 
28393a1e6b84SIoana Ciocoi Radulescu return dpaa2_eth_set_dist_key(net_dev, DPAA2_ETH_RX_DIST_HASH, key); 28404aaaf9b9SIoana Radulescu } 28414aaaf9b9SIoana Radulescu 28422d680237SIoana Ciocoi Radulescu int dpaa2_eth_set_cls(struct net_device *net_dev, u64 flags) 28432d680237SIoana Ciocoi Radulescu { 28442d680237SIoana Ciocoi Radulescu return dpaa2_eth_set_dist_key(net_dev, DPAA2_ETH_RX_DIST_CLS, flags); 28452d680237SIoana Ciocoi Radulescu } 28462d680237SIoana Ciocoi Radulescu 28472d680237SIoana Ciocoi Radulescu static int dpaa2_eth_set_default_cls(struct dpaa2_eth_priv *priv) 28484aaaf9b9SIoana Radulescu { 28494aaaf9b9SIoana Radulescu struct device *dev = priv->net_dev->dev.parent; 2850df8e249bSIoana Ciocoi Radulescu int err; 28514aaaf9b9SIoana Radulescu 28524aaaf9b9SIoana Radulescu /* Check if we actually support Rx flow classification */ 28534aaaf9b9SIoana Radulescu if (dpaa2_eth_has_legacy_dist(priv)) { 28544aaaf9b9SIoana Radulescu dev_dbg(dev, "Rx cls not supported by current MC version\n"); 28554aaaf9b9SIoana Radulescu return -EOPNOTSUPP; 28564aaaf9b9SIoana Radulescu } 28574aaaf9b9SIoana Radulescu 28582d680237SIoana Ciocoi Radulescu if (!dpaa2_eth_fs_enabled(priv)) { 28594aaaf9b9SIoana Radulescu dev_dbg(dev, "Rx cls disabled in DPNI options\n"); 28604aaaf9b9SIoana Radulescu return -EOPNOTSUPP; 28614aaaf9b9SIoana Radulescu } 28624aaaf9b9SIoana Radulescu 28634aaaf9b9SIoana Radulescu if (!dpaa2_eth_hash_enabled(priv)) { 28644aaaf9b9SIoana Radulescu dev_dbg(dev, "Rx cls disabled for single queue DPNIs\n"); 28654aaaf9b9SIoana Radulescu return -EOPNOTSUPP; 28664aaaf9b9SIoana Radulescu } 28674aaaf9b9SIoana Radulescu 28682d680237SIoana Ciocoi Radulescu /* If there is no support for masking in the classification table, 28692d680237SIoana Ciocoi Radulescu * we don't set a default key, as it will depend on the rules 28702d680237SIoana Ciocoi Radulescu * added by the user at runtime. 
28712d680237SIoana Ciocoi Radulescu */ 28722d680237SIoana Ciocoi Radulescu if (!dpaa2_eth_fs_mask_enabled(priv)) 28732d680237SIoana Ciocoi Radulescu goto out; 28742d680237SIoana Ciocoi Radulescu 28752d680237SIoana Ciocoi Radulescu err = dpaa2_eth_set_cls(priv->net_dev, DPAA2_ETH_DIST_ALL); 2876df8e249bSIoana Ciocoi Radulescu if (err) 2877df8e249bSIoana Ciocoi Radulescu return err; 2878df8e249bSIoana Ciocoi Radulescu 28792d680237SIoana Ciocoi Radulescu out: 28804aaaf9b9SIoana Radulescu priv->rx_cls_enabled = 1; 28814aaaf9b9SIoana Radulescu 2882df8e249bSIoana Ciocoi Radulescu return 0; 28834aaaf9b9SIoana Radulescu } 28844aaaf9b9SIoana Radulescu 288534ff6846SIoana Radulescu /* Bind the DPNI to its needed objects and resources: buffer pool, DPIOs, 288634ff6846SIoana Radulescu * frame queues and channels 288734ff6846SIoana Radulescu */ 288834ff6846SIoana Radulescu static int bind_dpni(struct dpaa2_eth_priv *priv) 288934ff6846SIoana Radulescu { 289034ff6846SIoana Radulescu struct net_device *net_dev = priv->net_dev; 289134ff6846SIoana Radulescu struct device *dev = net_dev->dev.parent; 289234ff6846SIoana Radulescu struct dpni_pools_cfg pools_params; 289334ff6846SIoana Radulescu struct dpni_error_cfg err_cfg; 289434ff6846SIoana Radulescu int err = 0; 289534ff6846SIoana Radulescu int i; 289634ff6846SIoana Radulescu 289734ff6846SIoana Radulescu pools_params.num_dpbp = 1; 289834ff6846SIoana Radulescu pools_params.pools[0].dpbp_id = priv->dpbp_dev->obj_desc.id; 289934ff6846SIoana Radulescu pools_params.pools[0].backup_pool = 0; 290034ff6846SIoana Radulescu pools_params.pools[0].buffer_size = DPAA2_ETH_RX_BUF_SIZE; 290134ff6846SIoana Radulescu err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params); 290234ff6846SIoana Radulescu if (err) { 290334ff6846SIoana Radulescu dev_err(dev, "dpni_set_pools() failed\n"); 290434ff6846SIoana Radulescu return err; 290534ff6846SIoana Radulescu } 290634ff6846SIoana Radulescu 290734ff6846SIoana Radulescu /* have the interface 
implicitly distribute traffic based on 290834ff6846SIoana Radulescu * the default hash key 290934ff6846SIoana Radulescu */ 291034ff6846SIoana Radulescu err = dpaa2_eth_set_hash(net_dev, DPAA2_RXH_DEFAULT); 2911edad8d26SIoana Ciocoi Radulescu if (err && err != -EOPNOTSUPP) 291234ff6846SIoana Radulescu dev_err(dev, "Failed to configure hashing\n"); 291334ff6846SIoana Radulescu 29144aaaf9b9SIoana Radulescu /* Configure the flow classification key; it includes all 29154aaaf9b9SIoana Radulescu * supported header fields and cannot be modified at runtime 29164aaaf9b9SIoana Radulescu */ 29172d680237SIoana Ciocoi Radulescu err = dpaa2_eth_set_default_cls(priv); 29184aaaf9b9SIoana Radulescu if (err && err != -EOPNOTSUPP) 29194aaaf9b9SIoana Radulescu dev_err(dev, "Failed to configure Rx classification key\n"); 29204aaaf9b9SIoana Radulescu 292134ff6846SIoana Radulescu /* Configure handling of error frames */ 292234ff6846SIoana Radulescu err_cfg.errors = DPAA2_FAS_RX_ERR_MASK; 292334ff6846SIoana Radulescu err_cfg.set_frame_annotation = 1; 292434ff6846SIoana Radulescu err_cfg.error_action = DPNI_ERROR_ACTION_DISCARD; 292534ff6846SIoana Radulescu err = dpni_set_errors_behavior(priv->mc_io, 0, priv->mc_token, 292634ff6846SIoana Radulescu &err_cfg); 292734ff6846SIoana Radulescu if (err) { 292834ff6846SIoana Radulescu dev_err(dev, "dpni_set_errors_behavior failed\n"); 292934ff6846SIoana Radulescu return err; 293034ff6846SIoana Radulescu } 293134ff6846SIoana Radulescu 293234ff6846SIoana Radulescu /* Configure Rx and Tx conf queues to generate CDANs */ 293334ff6846SIoana Radulescu for (i = 0; i < priv->num_fqs; i++) { 293434ff6846SIoana Radulescu switch (priv->fq[i].type) { 293534ff6846SIoana Radulescu case DPAA2_RX_FQ: 293634ff6846SIoana Radulescu err = setup_rx_flow(priv, &priv->fq[i]); 293734ff6846SIoana Radulescu break; 293834ff6846SIoana Radulescu case DPAA2_TX_CONF_FQ: 293934ff6846SIoana Radulescu err = setup_tx_flow(priv, &priv->fq[i]); 294034ff6846SIoana Radulescu break; 
294134ff6846SIoana Radulescu default: 294234ff6846SIoana Radulescu dev_err(dev, "Invalid FQ type %d\n", priv->fq[i].type); 294334ff6846SIoana Radulescu return -EINVAL; 294434ff6846SIoana Radulescu } 294534ff6846SIoana Radulescu if (err) 294634ff6846SIoana Radulescu return err; 294734ff6846SIoana Radulescu } 294834ff6846SIoana Radulescu 294934ff6846SIoana Radulescu err = dpni_get_qdid(priv->mc_io, 0, priv->mc_token, 295034ff6846SIoana Radulescu DPNI_QUEUE_TX, &priv->tx_qdid); 295134ff6846SIoana Radulescu if (err) { 295234ff6846SIoana Radulescu dev_err(dev, "dpni_get_qdid() failed\n"); 295334ff6846SIoana Radulescu return err; 295434ff6846SIoana Radulescu } 295534ff6846SIoana Radulescu 295634ff6846SIoana Radulescu return 0; 295734ff6846SIoana Radulescu } 295834ff6846SIoana Radulescu 295934ff6846SIoana Radulescu /* Allocate rings for storing incoming frame descriptors */ 296034ff6846SIoana Radulescu static int alloc_rings(struct dpaa2_eth_priv *priv) 296134ff6846SIoana Radulescu { 296234ff6846SIoana Radulescu struct net_device *net_dev = priv->net_dev; 296334ff6846SIoana Radulescu struct device *dev = net_dev->dev.parent; 296434ff6846SIoana Radulescu int i; 296534ff6846SIoana Radulescu 296634ff6846SIoana Radulescu for (i = 0; i < priv->num_channels; i++) { 296734ff6846SIoana Radulescu priv->channel[i]->store = 296834ff6846SIoana Radulescu dpaa2_io_store_create(DPAA2_ETH_STORE_SIZE, dev); 296934ff6846SIoana Radulescu if (!priv->channel[i]->store) { 297034ff6846SIoana Radulescu netdev_err(net_dev, "dpaa2_io_store_create() failed\n"); 297134ff6846SIoana Radulescu goto err_ring; 297234ff6846SIoana Radulescu } 297334ff6846SIoana Radulescu } 297434ff6846SIoana Radulescu 297534ff6846SIoana Radulescu return 0; 297634ff6846SIoana Radulescu 297734ff6846SIoana Radulescu err_ring: 297834ff6846SIoana Radulescu for (i = 0; i < priv->num_channels; i++) { 297934ff6846SIoana Radulescu if (!priv->channel[i]->store) 298034ff6846SIoana Radulescu break; 298134ff6846SIoana Radulescu 
dpaa2_io_store_destroy(priv->channel[i]->store); 298234ff6846SIoana Radulescu } 298334ff6846SIoana Radulescu 298434ff6846SIoana Radulescu return -ENOMEM; 298534ff6846SIoana Radulescu } 298634ff6846SIoana Radulescu 298734ff6846SIoana Radulescu static void free_rings(struct dpaa2_eth_priv *priv) 298834ff6846SIoana Radulescu { 298934ff6846SIoana Radulescu int i; 299034ff6846SIoana Radulescu 299134ff6846SIoana Radulescu for (i = 0; i < priv->num_channels; i++) 299234ff6846SIoana Radulescu dpaa2_io_store_destroy(priv->channel[i]->store); 299334ff6846SIoana Radulescu } 299434ff6846SIoana Radulescu 299534ff6846SIoana Radulescu static int set_mac_addr(struct dpaa2_eth_priv *priv) 299634ff6846SIoana Radulescu { 299734ff6846SIoana Radulescu struct net_device *net_dev = priv->net_dev; 299834ff6846SIoana Radulescu struct device *dev = net_dev->dev.parent; 299934ff6846SIoana Radulescu u8 mac_addr[ETH_ALEN], dpni_mac_addr[ETH_ALEN]; 300034ff6846SIoana Radulescu int err; 300134ff6846SIoana Radulescu 300234ff6846SIoana Radulescu /* Get firmware address, if any */ 300334ff6846SIoana Radulescu err = dpni_get_port_mac_addr(priv->mc_io, 0, priv->mc_token, mac_addr); 300434ff6846SIoana Radulescu if (err) { 300534ff6846SIoana Radulescu dev_err(dev, "dpni_get_port_mac_addr() failed\n"); 300634ff6846SIoana Radulescu return err; 300734ff6846SIoana Radulescu } 300834ff6846SIoana Radulescu 300934ff6846SIoana Radulescu /* Get DPNI attributes address, if any */ 301034ff6846SIoana Radulescu err = dpni_get_primary_mac_addr(priv->mc_io, 0, priv->mc_token, 301134ff6846SIoana Radulescu dpni_mac_addr); 301234ff6846SIoana Radulescu if (err) { 301334ff6846SIoana Radulescu dev_err(dev, "dpni_get_primary_mac_addr() failed\n"); 301434ff6846SIoana Radulescu return err; 301534ff6846SIoana Radulescu } 301634ff6846SIoana Radulescu 301734ff6846SIoana Radulescu /* First check if firmware has any address configured by bootloader */ 301834ff6846SIoana Radulescu if (!is_zero_ether_addr(mac_addr)) { 
301934ff6846SIoana Radulescu /* If the DPMAC addr != DPNI addr, update it */ 302034ff6846SIoana Radulescu if (!ether_addr_equal(mac_addr, dpni_mac_addr)) { 302134ff6846SIoana Radulescu err = dpni_set_primary_mac_addr(priv->mc_io, 0, 302234ff6846SIoana Radulescu priv->mc_token, 302334ff6846SIoana Radulescu mac_addr); 302434ff6846SIoana Radulescu if (err) { 302534ff6846SIoana Radulescu dev_err(dev, "dpni_set_primary_mac_addr() failed\n"); 302634ff6846SIoana Radulescu return err; 302734ff6846SIoana Radulescu } 302834ff6846SIoana Radulescu } 302934ff6846SIoana Radulescu memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len); 303034ff6846SIoana Radulescu } else if (is_zero_ether_addr(dpni_mac_addr)) { 303134ff6846SIoana Radulescu /* No MAC address configured, fill in net_dev->dev_addr 303234ff6846SIoana Radulescu * with a random one 303334ff6846SIoana Radulescu */ 303434ff6846SIoana Radulescu eth_hw_addr_random(net_dev); 303534ff6846SIoana Radulescu dev_dbg_once(dev, "device(s) have all-zero hwaddr, replaced with random\n"); 303634ff6846SIoana Radulescu 303734ff6846SIoana Radulescu err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token, 303834ff6846SIoana Radulescu net_dev->dev_addr); 303934ff6846SIoana Radulescu if (err) { 304034ff6846SIoana Radulescu dev_err(dev, "dpni_set_primary_mac_addr() failed\n"); 304134ff6846SIoana Radulescu return err; 304234ff6846SIoana Radulescu } 304334ff6846SIoana Radulescu 304434ff6846SIoana Radulescu /* Override NET_ADDR_RANDOM set by eth_hw_addr_random(); for all 304534ff6846SIoana Radulescu * practical purposes, this will be our "permanent" mac address, 304634ff6846SIoana Radulescu * at least until the next reboot. This move will also permit 304734ff6846SIoana Radulescu * register_netdevice() to properly fill up net_dev->perm_addr. 
304834ff6846SIoana Radulescu */ 304934ff6846SIoana Radulescu net_dev->addr_assign_type = NET_ADDR_PERM; 305034ff6846SIoana Radulescu } else { 305134ff6846SIoana Radulescu /* NET_ADDR_PERM is default, all we have to do is 305234ff6846SIoana Radulescu * fill in the device addr. 305334ff6846SIoana Radulescu */ 305434ff6846SIoana Radulescu memcpy(net_dev->dev_addr, dpni_mac_addr, net_dev->addr_len); 305534ff6846SIoana Radulescu } 305634ff6846SIoana Radulescu 305734ff6846SIoana Radulescu return 0; 305834ff6846SIoana Radulescu } 305934ff6846SIoana Radulescu 306034ff6846SIoana Radulescu static int netdev_init(struct net_device *net_dev) 306134ff6846SIoana Radulescu { 306234ff6846SIoana Radulescu struct device *dev = net_dev->dev.parent; 306334ff6846SIoana Radulescu struct dpaa2_eth_priv *priv = netdev_priv(net_dev); 306434ff6846SIoana Radulescu u32 options = priv->dpni_attrs.options; 306534ff6846SIoana Radulescu u64 supported = 0, not_supported = 0; 306634ff6846SIoana Radulescu u8 bcast_addr[ETH_ALEN]; 306734ff6846SIoana Radulescu u8 num_queues; 306834ff6846SIoana Radulescu int err; 306934ff6846SIoana Radulescu 307034ff6846SIoana Radulescu net_dev->netdev_ops = &dpaa2_eth_ops; 307134ff6846SIoana Radulescu net_dev->ethtool_ops = &dpaa2_ethtool_ops; 307234ff6846SIoana Radulescu 307334ff6846SIoana Radulescu err = set_mac_addr(priv); 307434ff6846SIoana Radulescu if (err) 307534ff6846SIoana Radulescu return err; 307634ff6846SIoana Radulescu 307734ff6846SIoana Radulescu /* Explicitly add the broadcast address to the MAC filtering table */ 307834ff6846SIoana Radulescu eth_broadcast_addr(bcast_addr); 307934ff6846SIoana Radulescu err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token, bcast_addr); 308034ff6846SIoana Radulescu if (err) { 308134ff6846SIoana Radulescu dev_err(dev, "dpni_add_mac_addr() failed\n"); 308234ff6846SIoana Radulescu return err; 308334ff6846SIoana Radulescu } 308434ff6846SIoana Radulescu 308534ff6846SIoana Radulescu /* Set MTU upper limit; lower limit is 68B 
(default value) */ 308634ff6846SIoana Radulescu net_dev->max_mtu = DPAA2_ETH_MAX_MTU; 308734ff6846SIoana Radulescu err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token, 308834ff6846SIoana Radulescu DPAA2_ETH_MFL); 308934ff6846SIoana Radulescu if (err) { 309034ff6846SIoana Radulescu dev_err(dev, "dpni_set_max_frame_length() failed\n"); 309134ff6846SIoana Radulescu return err; 309234ff6846SIoana Radulescu } 309334ff6846SIoana Radulescu 309434ff6846SIoana Radulescu /* Set actual number of queues in the net device */ 309534ff6846SIoana Radulescu num_queues = dpaa2_eth_queue_count(priv); 309634ff6846SIoana Radulescu err = netif_set_real_num_tx_queues(net_dev, num_queues); 309734ff6846SIoana Radulescu if (err) { 309834ff6846SIoana Radulescu dev_err(dev, "netif_set_real_num_tx_queues() failed\n"); 309934ff6846SIoana Radulescu return err; 310034ff6846SIoana Radulescu } 310134ff6846SIoana Radulescu err = netif_set_real_num_rx_queues(net_dev, num_queues); 310234ff6846SIoana Radulescu if (err) { 310334ff6846SIoana Radulescu dev_err(dev, "netif_set_real_num_rx_queues() failed\n"); 310434ff6846SIoana Radulescu return err; 310534ff6846SIoana Radulescu } 310634ff6846SIoana Radulescu 310734ff6846SIoana Radulescu /* Capabilities listing */ 310834ff6846SIoana Radulescu supported |= IFF_LIVE_ADDR_CHANGE; 310934ff6846SIoana Radulescu 311034ff6846SIoana Radulescu if (options & DPNI_OPT_NO_MAC_FILTER) 311134ff6846SIoana Radulescu not_supported |= IFF_UNICAST_FLT; 311234ff6846SIoana Radulescu else 311334ff6846SIoana Radulescu supported |= IFF_UNICAST_FLT; 311434ff6846SIoana Radulescu 311534ff6846SIoana Radulescu net_dev->priv_flags |= supported; 311634ff6846SIoana Radulescu net_dev->priv_flags &= ~not_supported; 311734ff6846SIoana Radulescu 311834ff6846SIoana Radulescu /* Features */ 311934ff6846SIoana Radulescu net_dev->features = NETIF_F_RXCSUM | 312034ff6846SIoana Radulescu NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 312134ff6846SIoana Radulescu NETIF_F_SG | NETIF_F_HIGHDMA | 
312234ff6846SIoana Radulescu NETIF_F_LLTX; 312334ff6846SIoana Radulescu net_dev->hw_features = net_dev->features; 312434ff6846SIoana Radulescu 312534ff6846SIoana Radulescu return 0; 312634ff6846SIoana Radulescu } 312734ff6846SIoana Radulescu 312834ff6846SIoana Radulescu static int poll_link_state(void *arg) 312934ff6846SIoana Radulescu { 313034ff6846SIoana Radulescu struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)arg; 313134ff6846SIoana Radulescu int err; 313234ff6846SIoana Radulescu 313334ff6846SIoana Radulescu while (!kthread_should_stop()) { 313434ff6846SIoana Radulescu err = link_state_update(priv); 313534ff6846SIoana Radulescu if (unlikely(err)) 313634ff6846SIoana Radulescu return err; 313734ff6846SIoana Radulescu 313834ff6846SIoana Radulescu msleep(DPAA2_ETH_LINK_STATE_REFRESH); 313934ff6846SIoana Radulescu } 314034ff6846SIoana Radulescu 314134ff6846SIoana Radulescu return 0; 314234ff6846SIoana Radulescu } 314334ff6846SIoana Radulescu 314434ff6846SIoana Radulescu static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg) 314534ff6846SIoana Radulescu { 314634ff6846SIoana Radulescu u32 status = ~0; 314734ff6846SIoana Radulescu struct device *dev = (struct device *)arg; 314834ff6846SIoana Radulescu struct fsl_mc_device *dpni_dev = to_fsl_mc_device(dev); 314934ff6846SIoana Radulescu struct net_device *net_dev = dev_get_drvdata(dev); 315034ff6846SIoana Radulescu int err; 315134ff6846SIoana Radulescu 315234ff6846SIoana Radulescu err = dpni_get_irq_status(dpni_dev->mc_io, 0, dpni_dev->mc_handle, 315334ff6846SIoana Radulescu DPNI_IRQ_INDEX, &status); 315434ff6846SIoana Radulescu if (unlikely(err)) { 315534ff6846SIoana Radulescu netdev_err(net_dev, "Can't get irq status (err %d)\n", err); 315634ff6846SIoana Radulescu return IRQ_HANDLED; 315734ff6846SIoana Radulescu } 315834ff6846SIoana Radulescu 315934ff6846SIoana Radulescu if (status & DPNI_IRQ_EVENT_LINK_CHANGED) 316034ff6846SIoana Radulescu link_state_update(netdev_priv(net_dev)); 316134ff6846SIoana 
Radulescu 316234ff6846SIoana Radulescu return IRQ_HANDLED; 316334ff6846SIoana Radulescu } 316434ff6846SIoana Radulescu 316534ff6846SIoana Radulescu static int setup_irqs(struct fsl_mc_device *ls_dev) 316634ff6846SIoana Radulescu { 316734ff6846SIoana Radulescu int err = 0; 316834ff6846SIoana Radulescu struct fsl_mc_device_irq *irq; 316934ff6846SIoana Radulescu 317034ff6846SIoana Radulescu err = fsl_mc_allocate_irqs(ls_dev); 317134ff6846SIoana Radulescu if (err) { 317234ff6846SIoana Radulescu dev_err(&ls_dev->dev, "MC irqs allocation failed\n"); 317334ff6846SIoana Radulescu return err; 317434ff6846SIoana Radulescu } 317534ff6846SIoana Radulescu 317634ff6846SIoana Radulescu irq = ls_dev->irqs[0]; 317734ff6846SIoana Radulescu err = devm_request_threaded_irq(&ls_dev->dev, irq->msi_desc->irq, 317834ff6846SIoana Radulescu NULL, dpni_irq0_handler_thread, 317934ff6846SIoana Radulescu IRQF_NO_SUSPEND | IRQF_ONESHOT, 318034ff6846SIoana Radulescu dev_name(&ls_dev->dev), &ls_dev->dev); 318134ff6846SIoana Radulescu if (err < 0) { 318234ff6846SIoana Radulescu dev_err(&ls_dev->dev, "devm_request_threaded_irq(): %d\n", err); 318334ff6846SIoana Radulescu goto free_mc_irq; 318434ff6846SIoana Radulescu } 318534ff6846SIoana Radulescu 318634ff6846SIoana Radulescu err = dpni_set_irq_mask(ls_dev->mc_io, 0, ls_dev->mc_handle, 318734ff6846SIoana Radulescu DPNI_IRQ_INDEX, DPNI_IRQ_EVENT_LINK_CHANGED); 318834ff6846SIoana Radulescu if (err < 0) { 318934ff6846SIoana Radulescu dev_err(&ls_dev->dev, "dpni_set_irq_mask(): %d\n", err); 319034ff6846SIoana Radulescu goto free_irq; 319134ff6846SIoana Radulescu } 319234ff6846SIoana Radulescu 319334ff6846SIoana Radulescu err = dpni_set_irq_enable(ls_dev->mc_io, 0, ls_dev->mc_handle, 319434ff6846SIoana Radulescu DPNI_IRQ_INDEX, 1); 319534ff6846SIoana Radulescu if (err < 0) { 319634ff6846SIoana Radulescu dev_err(&ls_dev->dev, "dpni_set_irq_enable(): %d\n", err); 319734ff6846SIoana Radulescu goto free_irq; 319834ff6846SIoana Radulescu } 319934ff6846SIoana 
Radulescu 320034ff6846SIoana Radulescu return 0; 320134ff6846SIoana Radulescu 320234ff6846SIoana Radulescu free_irq: 320334ff6846SIoana Radulescu devm_free_irq(&ls_dev->dev, irq->msi_desc->irq, &ls_dev->dev); 320434ff6846SIoana Radulescu free_mc_irq: 320534ff6846SIoana Radulescu fsl_mc_free_irqs(ls_dev); 320634ff6846SIoana Radulescu 320734ff6846SIoana Radulescu return err; 320834ff6846SIoana Radulescu } 320934ff6846SIoana Radulescu 321034ff6846SIoana Radulescu static void add_ch_napi(struct dpaa2_eth_priv *priv) 321134ff6846SIoana Radulescu { 321234ff6846SIoana Radulescu int i; 321334ff6846SIoana Radulescu struct dpaa2_eth_channel *ch; 321434ff6846SIoana Radulescu 321534ff6846SIoana Radulescu for (i = 0; i < priv->num_channels; i++) { 321634ff6846SIoana Radulescu ch = priv->channel[i]; 321734ff6846SIoana Radulescu /* NAPI weight *MUST* be a multiple of DPAA2_ETH_STORE_SIZE */ 321834ff6846SIoana Radulescu netif_napi_add(priv->net_dev, &ch->napi, dpaa2_eth_poll, 321934ff6846SIoana Radulescu NAPI_POLL_WEIGHT); 322034ff6846SIoana Radulescu } 322134ff6846SIoana Radulescu } 322234ff6846SIoana Radulescu 322334ff6846SIoana Radulescu static void del_ch_napi(struct dpaa2_eth_priv *priv) 322434ff6846SIoana Radulescu { 322534ff6846SIoana Radulescu int i; 322634ff6846SIoana Radulescu struct dpaa2_eth_channel *ch; 322734ff6846SIoana Radulescu 322834ff6846SIoana Radulescu for (i = 0; i < priv->num_channels; i++) { 322934ff6846SIoana Radulescu ch = priv->channel[i]; 323034ff6846SIoana Radulescu netif_napi_del(&ch->napi); 323134ff6846SIoana Radulescu } 323234ff6846SIoana Radulescu } 323334ff6846SIoana Radulescu 323434ff6846SIoana Radulescu static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev) 323534ff6846SIoana Radulescu { 323634ff6846SIoana Radulescu struct device *dev; 323734ff6846SIoana Radulescu struct net_device *net_dev = NULL; 323834ff6846SIoana Radulescu struct dpaa2_eth_priv *priv = NULL; 323934ff6846SIoana Radulescu int err = 0; 324034ff6846SIoana Radulescu 
324134ff6846SIoana Radulescu dev = &dpni_dev->dev; 324234ff6846SIoana Radulescu 324334ff6846SIoana Radulescu /* Net device */ 324434ff6846SIoana Radulescu net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA2_ETH_MAX_TX_QUEUES); 324534ff6846SIoana Radulescu if (!net_dev) { 324634ff6846SIoana Radulescu dev_err(dev, "alloc_etherdev_mq() failed\n"); 324734ff6846SIoana Radulescu return -ENOMEM; 324834ff6846SIoana Radulescu } 324934ff6846SIoana Radulescu 325034ff6846SIoana Radulescu SET_NETDEV_DEV(net_dev, dev); 325134ff6846SIoana Radulescu dev_set_drvdata(dev, net_dev); 325234ff6846SIoana Radulescu 325334ff6846SIoana Radulescu priv = netdev_priv(net_dev); 325434ff6846SIoana Radulescu priv->net_dev = net_dev; 325534ff6846SIoana Radulescu 325634ff6846SIoana Radulescu priv->iommu_domain = iommu_get_domain_for_dev(dev); 325734ff6846SIoana Radulescu 325834ff6846SIoana Radulescu /* Obtain a MC portal */ 325934ff6846SIoana Radulescu err = fsl_mc_portal_allocate(dpni_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL, 326034ff6846SIoana Radulescu &priv->mc_io); 326134ff6846SIoana Radulescu if (err) { 326234ff6846SIoana Radulescu if (err == -ENXIO) 326334ff6846SIoana Radulescu err = -EPROBE_DEFER; 326434ff6846SIoana Radulescu else 326534ff6846SIoana Radulescu dev_err(dev, "MC portal allocation failed\n"); 326634ff6846SIoana Radulescu goto err_portal_alloc; 326734ff6846SIoana Radulescu } 326834ff6846SIoana Radulescu 326934ff6846SIoana Radulescu /* MC objects initialization and configuration */ 327034ff6846SIoana Radulescu err = setup_dpni(dpni_dev); 327134ff6846SIoana Radulescu if (err) 327234ff6846SIoana Radulescu goto err_dpni_setup; 327334ff6846SIoana Radulescu 327434ff6846SIoana Radulescu err = setup_dpio(priv); 327534ff6846SIoana Radulescu if (err) 327634ff6846SIoana Radulescu goto err_dpio_setup; 327734ff6846SIoana Radulescu 327834ff6846SIoana Radulescu setup_fqs(priv); 327934ff6846SIoana Radulescu 328034ff6846SIoana Radulescu err = setup_dpbp(priv); 328134ff6846SIoana Radulescu if (err) 
328234ff6846SIoana Radulescu goto err_dpbp_setup; 328334ff6846SIoana Radulescu 328434ff6846SIoana Radulescu err = bind_dpni(priv); 328534ff6846SIoana Radulescu if (err) 328634ff6846SIoana Radulescu goto err_bind; 328734ff6846SIoana Radulescu 328834ff6846SIoana Radulescu /* Add a NAPI context for each channel */ 328934ff6846SIoana Radulescu add_ch_napi(priv); 329034ff6846SIoana Radulescu 329134ff6846SIoana Radulescu /* Percpu statistics */ 329234ff6846SIoana Radulescu priv->percpu_stats = alloc_percpu(*priv->percpu_stats); 329334ff6846SIoana Radulescu if (!priv->percpu_stats) { 329434ff6846SIoana Radulescu dev_err(dev, "alloc_percpu(percpu_stats) failed\n"); 329534ff6846SIoana Radulescu err = -ENOMEM; 329634ff6846SIoana Radulescu goto err_alloc_percpu_stats; 329734ff6846SIoana Radulescu } 329834ff6846SIoana Radulescu priv->percpu_extras = alloc_percpu(*priv->percpu_extras); 329934ff6846SIoana Radulescu if (!priv->percpu_extras) { 330034ff6846SIoana Radulescu dev_err(dev, "alloc_percpu(percpu_extras) failed\n"); 330134ff6846SIoana Radulescu err = -ENOMEM; 330234ff6846SIoana Radulescu goto err_alloc_percpu_extras; 330334ff6846SIoana Radulescu } 330434ff6846SIoana Radulescu 330534ff6846SIoana Radulescu err = netdev_init(net_dev); 330634ff6846SIoana Radulescu if (err) 330734ff6846SIoana Radulescu goto err_netdev_init; 330834ff6846SIoana Radulescu 330934ff6846SIoana Radulescu /* Configure checksum offload based on current interface flags */ 331034ff6846SIoana Radulescu err = set_rx_csum(priv, !!(net_dev->features & NETIF_F_RXCSUM)); 331134ff6846SIoana Radulescu if (err) 331234ff6846SIoana Radulescu goto err_csum; 331334ff6846SIoana Radulescu 331434ff6846SIoana Radulescu err = set_tx_csum(priv, !!(net_dev->features & 331534ff6846SIoana Radulescu (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))); 331634ff6846SIoana Radulescu if (err) 331734ff6846SIoana Radulescu goto err_csum; 331834ff6846SIoana Radulescu 331934ff6846SIoana Radulescu err = alloc_rings(priv); 332034ff6846SIoana 
Radulescu if (err) 332134ff6846SIoana Radulescu goto err_alloc_rings; 332234ff6846SIoana Radulescu 332334ff6846SIoana Radulescu err = setup_irqs(dpni_dev); 332434ff6846SIoana Radulescu if (err) { 332534ff6846SIoana Radulescu netdev_warn(net_dev, "Failed to set link interrupt, fall back to polling\n"); 332634ff6846SIoana Radulescu priv->poll_thread = kthread_run(poll_link_state, priv, 332734ff6846SIoana Radulescu "%s_poll_link", net_dev->name); 332834ff6846SIoana Radulescu if (IS_ERR(priv->poll_thread)) { 332934ff6846SIoana Radulescu dev_err(dev, "Error starting polling thread\n"); 333034ff6846SIoana Radulescu goto err_poll_thread; 333134ff6846SIoana Radulescu } 333234ff6846SIoana Radulescu priv->do_link_poll = true; 333334ff6846SIoana Radulescu } 333434ff6846SIoana Radulescu 333534ff6846SIoana Radulescu err = register_netdev(net_dev); 333634ff6846SIoana Radulescu if (err < 0) { 333734ff6846SIoana Radulescu dev_err(dev, "register_netdev() failed\n"); 333834ff6846SIoana Radulescu goto err_netdev_reg; 333934ff6846SIoana Radulescu } 334034ff6846SIoana Radulescu 3341091a19eaSIoana Radulescu #ifdef CONFIG_DEBUG_FS 3342091a19eaSIoana Radulescu dpaa2_dbg_add(priv); 3343091a19eaSIoana Radulescu #endif 3344091a19eaSIoana Radulescu 334534ff6846SIoana Radulescu dev_info(dev, "Probed interface %s\n", net_dev->name); 334634ff6846SIoana Radulescu return 0; 334734ff6846SIoana Radulescu 334834ff6846SIoana Radulescu err_netdev_reg: 334934ff6846SIoana Radulescu if (priv->do_link_poll) 335034ff6846SIoana Radulescu kthread_stop(priv->poll_thread); 335134ff6846SIoana Radulescu else 335234ff6846SIoana Radulescu fsl_mc_free_irqs(dpni_dev); 335334ff6846SIoana Radulescu err_poll_thread: 335434ff6846SIoana Radulescu free_rings(priv); 335534ff6846SIoana Radulescu err_alloc_rings: 335634ff6846SIoana Radulescu err_csum: 335734ff6846SIoana Radulescu err_netdev_init: 335834ff6846SIoana Radulescu free_percpu(priv->percpu_extras); 335934ff6846SIoana Radulescu err_alloc_percpu_extras: 
336034ff6846SIoana Radulescu free_percpu(priv->percpu_stats); 336134ff6846SIoana Radulescu err_alloc_percpu_stats: 336234ff6846SIoana Radulescu del_ch_napi(priv); 336334ff6846SIoana Radulescu err_bind: 336434ff6846SIoana Radulescu free_dpbp(priv); 336534ff6846SIoana Radulescu err_dpbp_setup: 336634ff6846SIoana Radulescu free_dpio(priv); 336734ff6846SIoana Radulescu err_dpio_setup: 336834ff6846SIoana Radulescu free_dpni(priv); 336934ff6846SIoana Radulescu err_dpni_setup: 337034ff6846SIoana Radulescu fsl_mc_portal_free(priv->mc_io); 337134ff6846SIoana Radulescu err_portal_alloc: 337234ff6846SIoana Radulescu dev_set_drvdata(dev, NULL); 337334ff6846SIoana Radulescu free_netdev(net_dev); 337434ff6846SIoana Radulescu 337534ff6846SIoana Radulescu return err; 337634ff6846SIoana Radulescu } 337734ff6846SIoana Radulescu 337834ff6846SIoana Radulescu static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev) 337934ff6846SIoana Radulescu { 338034ff6846SIoana Radulescu struct device *dev; 338134ff6846SIoana Radulescu struct net_device *net_dev; 338234ff6846SIoana Radulescu struct dpaa2_eth_priv *priv; 338334ff6846SIoana Radulescu 338434ff6846SIoana Radulescu dev = &ls_dev->dev; 338534ff6846SIoana Radulescu net_dev = dev_get_drvdata(dev); 338634ff6846SIoana Radulescu priv = netdev_priv(net_dev); 338734ff6846SIoana Radulescu 3388091a19eaSIoana Radulescu #ifdef CONFIG_DEBUG_FS 3389091a19eaSIoana Radulescu dpaa2_dbg_remove(priv); 3390091a19eaSIoana Radulescu #endif 339134ff6846SIoana Radulescu unregister_netdev(net_dev); 339234ff6846SIoana Radulescu 339334ff6846SIoana Radulescu if (priv->do_link_poll) 339434ff6846SIoana Radulescu kthread_stop(priv->poll_thread); 339534ff6846SIoana Radulescu else 339634ff6846SIoana Radulescu fsl_mc_free_irqs(ls_dev); 339734ff6846SIoana Radulescu 339834ff6846SIoana Radulescu free_rings(priv); 339934ff6846SIoana Radulescu free_percpu(priv->percpu_stats); 340034ff6846SIoana Radulescu free_percpu(priv->percpu_extras); 340134ff6846SIoana Radulescu 
340234ff6846SIoana Radulescu del_ch_napi(priv); 340334ff6846SIoana Radulescu free_dpbp(priv); 340434ff6846SIoana Radulescu free_dpio(priv); 340534ff6846SIoana Radulescu free_dpni(priv); 340634ff6846SIoana Radulescu 340734ff6846SIoana Radulescu fsl_mc_portal_free(priv->mc_io); 340834ff6846SIoana Radulescu 340934ff6846SIoana Radulescu free_netdev(net_dev); 341034ff6846SIoana Radulescu 341134ff6846SIoana Radulescu dev_dbg(net_dev->dev.parent, "Removed interface %s\n", net_dev->name); 341234ff6846SIoana Radulescu 341334ff6846SIoana Radulescu return 0; 341434ff6846SIoana Radulescu } 341534ff6846SIoana Radulescu 341634ff6846SIoana Radulescu static const struct fsl_mc_device_id dpaa2_eth_match_id_table[] = { 341734ff6846SIoana Radulescu { 341834ff6846SIoana Radulescu .vendor = FSL_MC_VENDOR_FREESCALE, 341934ff6846SIoana Radulescu .obj_type = "dpni", 342034ff6846SIoana Radulescu }, 342134ff6846SIoana Radulescu { .vendor = 0x0 } 342234ff6846SIoana Radulescu }; 342334ff6846SIoana Radulescu MODULE_DEVICE_TABLE(fslmc, dpaa2_eth_match_id_table); 342434ff6846SIoana Radulescu 342534ff6846SIoana Radulescu static struct fsl_mc_driver dpaa2_eth_driver = { 342634ff6846SIoana Radulescu .driver = { 342734ff6846SIoana Radulescu .name = KBUILD_MODNAME, 342834ff6846SIoana Radulescu .owner = THIS_MODULE, 342934ff6846SIoana Radulescu }, 343034ff6846SIoana Radulescu .probe = dpaa2_eth_probe, 343134ff6846SIoana Radulescu .remove = dpaa2_eth_remove, 343234ff6846SIoana Radulescu .match_id_table = dpaa2_eth_match_id_table 343334ff6846SIoana Radulescu }; 343434ff6846SIoana Radulescu 3435091a19eaSIoana Radulescu static int __init dpaa2_eth_driver_init(void) 3436091a19eaSIoana Radulescu { 3437091a19eaSIoana Radulescu int err; 3438091a19eaSIoana Radulescu 3439091a19eaSIoana Radulescu dpaa2_eth_dbg_init(); 3440091a19eaSIoana Radulescu err = fsl_mc_driver_register(&dpaa2_eth_driver); 3441091a19eaSIoana Radulescu if (err) { 3442091a19eaSIoana Radulescu dpaa2_eth_dbg_exit(); 3443091a19eaSIoana Radulescu 
return err; 3444091a19eaSIoana Radulescu } 3445091a19eaSIoana Radulescu 3446091a19eaSIoana Radulescu return 0; 3447091a19eaSIoana Radulescu } 3448091a19eaSIoana Radulescu 3449091a19eaSIoana Radulescu static void __exit dpaa2_eth_driver_exit(void) 3450091a19eaSIoana Radulescu { 3451091a19eaSIoana Radulescu dpaa2_eth_dbg_exit(); 3452091a19eaSIoana Radulescu fsl_mc_driver_unregister(&dpaa2_eth_driver); 3453091a19eaSIoana Radulescu } 3454091a19eaSIoana Radulescu 3455091a19eaSIoana Radulescu module_init(dpaa2_eth_driver_init); 3456091a19eaSIoana Radulescu module_exit(dpaa2_eth_driver_exit); 3457