134ff6846SIoana Radulescu // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) 234ff6846SIoana Radulescu /* Copyright 2014-2016 Freescale Semiconductor Inc. 334ff6846SIoana Radulescu * Copyright 2016-2017 NXP 434ff6846SIoana Radulescu */ 534ff6846SIoana Radulescu #include <linux/init.h> 634ff6846SIoana Radulescu #include <linux/module.h> 734ff6846SIoana Radulescu #include <linux/platform_device.h> 834ff6846SIoana Radulescu #include <linux/etherdevice.h> 934ff6846SIoana Radulescu #include <linux/of_net.h> 1034ff6846SIoana Radulescu #include <linux/interrupt.h> 1134ff6846SIoana Radulescu #include <linux/msi.h> 1234ff6846SIoana Radulescu #include <linux/kthread.h> 1334ff6846SIoana Radulescu #include <linux/iommu.h> 1434ff6846SIoana Radulescu #include <linux/net_tstamp.h> 1534ff6846SIoana Radulescu #include <linux/fsl/mc.h> 1634ff6846SIoana Radulescu 1734ff6846SIoana Radulescu #include <net/sock.h> 1834ff6846SIoana Radulescu 1934ff6846SIoana Radulescu #include "dpaa2-eth.h" 2034ff6846SIoana Radulescu 2134ff6846SIoana Radulescu /* CREATE_TRACE_POINTS only needs to be defined once. Other dpa files 2234ff6846SIoana Radulescu * using trace events only need to #include <trace/events/sched.h> 2334ff6846SIoana Radulescu */ 2434ff6846SIoana Radulescu #define CREATE_TRACE_POINTS 2534ff6846SIoana Radulescu #include "dpaa2-eth-trace.h" 2634ff6846SIoana Radulescu 2734ff6846SIoana Radulescu MODULE_LICENSE("Dual BSD/GPL"); 2834ff6846SIoana Radulescu MODULE_AUTHOR("Freescale Semiconductor, Inc"); 2934ff6846SIoana Radulescu MODULE_DESCRIPTION("Freescale DPAA2 Ethernet Driver"); 3034ff6846SIoana Radulescu 3134ff6846SIoana Radulescu static void *dpaa2_iova_to_virt(struct iommu_domain *domain, 3234ff6846SIoana Radulescu dma_addr_t iova_addr) 3334ff6846SIoana Radulescu { 3434ff6846SIoana Radulescu phys_addr_t phys_addr; 3534ff6846SIoana Radulescu 3634ff6846SIoana Radulescu phys_addr = domain ? iommu_iova_to_phys(domain, iova_addr) : iova_addr; 3734ff6846SIoana Radulescu 3834ff6846SIoana Radulescu return phys_to_virt(phys_addr); 3934ff6846SIoana Radulescu } 4034ff6846SIoana Radulescu 4134ff6846SIoana Radulescu static void validate_rx_csum(struct dpaa2_eth_priv *priv, 4234ff6846SIoana Radulescu u32 fd_status, 4334ff6846SIoana Radulescu struct sk_buff *skb) 4434ff6846SIoana Radulescu { 4534ff6846SIoana Radulescu skb_checksum_none_assert(skb); 4634ff6846SIoana Radulescu 4734ff6846SIoana Radulescu /* HW checksum validation is disabled, nothing to do here */ 4834ff6846SIoana Radulescu if (!(priv->net_dev->features & NETIF_F_RXCSUM)) 4934ff6846SIoana Radulescu return; 5034ff6846SIoana Radulescu 5134ff6846SIoana Radulescu /* Read checksum validation bits */ 5234ff6846SIoana Radulescu if (!((fd_status & DPAA2_FAS_L3CV) && 5334ff6846SIoana Radulescu (fd_status & DPAA2_FAS_L4CV))) 5434ff6846SIoana Radulescu return; 5534ff6846SIoana Radulescu 5634ff6846SIoana Radulescu /* Inform the stack there's no need to compute L3/L4 csum anymore */ 5734ff6846SIoana Radulescu skb->ip_summed = CHECKSUM_UNNECESSARY; 5834ff6846SIoana Radulescu } 5934ff6846SIoana Radulescu 6034ff6846SIoana Radulescu /* Free a received FD. 6134ff6846SIoana Radulescu * Not to be used for Tx conf FDs or on any other paths. 6234ff6846SIoana Radulescu */ 6334ff6846SIoana Radulescu static void free_rx_fd(struct dpaa2_eth_priv *priv, 6434ff6846SIoana Radulescu const struct dpaa2_fd *fd, 6534ff6846SIoana Radulescu void *vaddr) 6634ff6846SIoana Radulescu { 6734ff6846SIoana Radulescu struct device *dev = priv->net_dev->dev.parent; 6834ff6846SIoana Radulescu dma_addr_t addr = dpaa2_fd_get_addr(fd); 6934ff6846SIoana Radulescu u8 fd_format = dpaa2_fd_get_format(fd); 7034ff6846SIoana Radulescu struct dpaa2_sg_entry *sgt; 7134ff6846SIoana Radulescu void *sg_vaddr; 7234ff6846SIoana Radulescu int i; 7334ff6846SIoana Radulescu 7434ff6846SIoana Radulescu /* If single buffer frame, just free the data buffer */ 7534ff6846SIoana Radulescu if (fd_format == dpaa2_fd_single) 7634ff6846SIoana Radulescu goto free_buf; 7734ff6846SIoana Radulescu else if (fd_format != dpaa2_fd_sg) 7834ff6846SIoana Radulescu /* We don't support any other format */ 7934ff6846SIoana Radulescu return; 8034ff6846SIoana Radulescu 8134ff6846SIoana Radulescu /* For S/G frames, we first need to free all SG entries 8234ff6846SIoana Radulescu * except the first one, which was taken care of already 8334ff6846SIoana Radulescu */ 8434ff6846SIoana Radulescu sgt = vaddr + dpaa2_fd_get_offset(fd); 8534ff6846SIoana Radulescu for (i = 1; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) { 8634ff6846SIoana Radulescu addr = dpaa2_sg_get_addr(&sgt[i]); 8734ff6846SIoana Radulescu sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr); 8834ff6846SIoana Radulescu dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE, 8934ff6846SIoana Radulescu DMA_FROM_DEVICE); 9034ff6846SIoana Radulescu 9134ff6846SIoana Radulescu skb_free_frag(sg_vaddr); 9234ff6846SIoana Radulescu if (dpaa2_sg_is_final(&sgt[i])) 9334ff6846SIoana Radulescu break; 9434ff6846SIoana Radulescu } 9534ff6846SIoana Radulescu 9634ff6846SIoana Radulescu free_buf: 9734ff6846SIoana Radulescu skb_free_frag(vaddr); 9834ff6846SIoana Radulescu } 9934ff6846SIoana Radulescu 10034ff6846SIoana Radulescu /* Build a linear skb based on a single-buffer frame descriptor */ 101fdb6ca9eSIoana Ciornei static struct sk_buff *build_linear_skb(struct dpaa2_eth_channel *ch, 10234ff6846SIoana Radulescu const struct dpaa2_fd *fd, 10334ff6846SIoana Radulescu void *fd_vaddr) 10434ff6846SIoana Radulescu { 10534ff6846SIoana Radulescu struct sk_buff *skb = NULL; 10634ff6846SIoana Radulescu u16 fd_offset = dpaa2_fd_get_offset(fd); 10734ff6846SIoana Radulescu u32 fd_length = dpaa2_fd_get_len(fd); 10834ff6846SIoana Radulescu 10934ff6846SIoana Radulescu ch->buf_count--; 11034ff6846SIoana Radulescu 11134ff6846SIoana Radulescu skb = build_skb(fd_vaddr, DPAA2_ETH_SKB_SIZE); 11234ff6846SIoana Radulescu if (unlikely(!skb)) 11334ff6846SIoana Radulescu return NULL; 11434ff6846SIoana Radulescu 11534ff6846SIoana Radulescu skb_reserve(skb, fd_offset); 11634ff6846SIoana Radulescu skb_put(skb, fd_length); 11734ff6846SIoana Radulescu 11834ff6846SIoana Radulescu return skb; 11934ff6846SIoana Radulescu } 12034ff6846SIoana Radulescu 12134ff6846SIoana Radulescu /* Build a non linear (fragmented) skb based on a S/G table */ 12234ff6846SIoana Radulescu static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv, 12334ff6846SIoana Radulescu struct dpaa2_eth_channel *ch, 12434ff6846SIoana Radulescu struct dpaa2_sg_entry *sgt) 12534ff6846SIoana Radulescu { 12634ff6846SIoana Radulescu struct sk_buff *skb = NULL; 12734ff6846SIoana Radulescu struct device *dev = priv->net_dev->dev.parent; 12834ff6846SIoana Radulescu void *sg_vaddr; 12934ff6846SIoana Radulescu dma_addr_t sg_addr; 13034ff6846SIoana Radulescu u16 sg_offset; 13134ff6846SIoana Radulescu u32 sg_length; 13234ff6846SIoana Radulescu struct page *page, *head_page; 13334ff6846SIoana Radulescu int page_offset; 13434ff6846SIoana Radulescu int i; 13534ff6846SIoana Radulescu 13634ff6846SIoana Radulescu for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) { 13734ff6846SIoana Radulescu struct dpaa2_sg_entry *sge = &sgt[i]; 13834ff6846SIoana Radulescu 13934ff6846SIoana Radulescu /* NOTE: We only support SG entries in dpaa2_sg_single format, 14034ff6846SIoana Radulescu * but this is the only format we may receive from HW anyway 14134ff6846SIoana Radulescu */ 14234ff6846SIoana Radulescu 14334ff6846SIoana Radulescu /* Get the address and length from the S/G entry */ 14434ff6846SIoana Radulescu sg_addr = dpaa2_sg_get_addr(sge); 14534ff6846SIoana Radulescu sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, sg_addr); 14634ff6846SIoana Radulescu dma_unmap_single(dev, sg_addr, DPAA2_ETH_RX_BUF_SIZE, 14734ff6846SIoana Radulescu DMA_FROM_DEVICE); 14834ff6846SIoana Radulescu 14934ff6846SIoana Radulescu sg_length = dpaa2_sg_get_len(sge); 15034ff6846SIoana Radulescu 15134ff6846SIoana Radulescu if (i == 0) { 15234ff6846SIoana Radulescu /* We build the skb around the first data buffer */ 15334ff6846SIoana Radulescu skb = build_skb(sg_vaddr, DPAA2_ETH_SKB_SIZE); 15434ff6846SIoana Radulescu if (unlikely(!skb)) { 15534ff6846SIoana Radulescu /* Free the first SG entry now, since we already 15634ff6846SIoana Radulescu * unmapped it and obtained the virtual address 15734ff6846SIoana Radulescu */ 15834ff6846SIoana Radulescu skb_free_frag(sg_vaddr); 15934ff6846SIoana Radulescu 16034ff6846SIoana Radulescu /* We still need to subtract the buffers used 16134ff6846SIoana Radulescu * by this FD from our software counter 16234ff6846SIoana Radulescu */ 16334ff6846SIoana Radulescu while (!dpaa2_sg_is_final(&sgt[i]) && 16434ff6846SIoana Radulescu i < DPAA2_ETH_MAX_SG_ENTRIES) 16534ff6846SIoana Radulescu i++; 16634ff6846SIoana Radulescu break; 16734ff6846SIoana Radulescu } 16834ff6846SIoana Radulescu 16934ff6846SIoana Radulescu sg_offset = dpaa2_sg_get_offset(sge); 17034ff6846SIoana Radulescu skb_reserve(skb, sg_offset); 17134ff6846SIoana Radulescu skb_put(skb, sg_length); 17234ff6846SIoana Radulescu } else { 17334ff6846SIoana Radulescu /* Rest of the data buffers are stored as skb frags */ 17434ff6846SIoana Radulescu page = virt_to_page(sg_vaddr); 17534ff6846SIoana Radulescu head_page = virt_to_head_page(sg_vaddr); 17634ff6846SIoana Radulescu 17734ff6846SIoana Radulescu /* Offset in page (which may be compound). 17834ff6846SIoana Radulescu * Data in subsequent SG entries is stored from the 17934ff6846SIoana Radulescu * beginning of the buffer, so we don't need to add the 18034ff6846SIoana Radulescu * sg_offset. 18134ff6846SIoana Radulescu */ 18234ff6846SIoana Radulescu page_offset = ((unsigned long)sg_vaddr & 18334ff6846SIoana Radulescu (PAGE_SIZE - 1)) + 18434ff6846SIoana Radulescu (page_address(page) - page_address(head_page)); 18534ff6846SIoana Radulescu 18634ff6846SIoana Radulescu skb_add_rx_frag(skb, i - 1, head_page, page_offset, 18734ff6846SIoana Radulescu sg_length, DPAA2_ETH_RX_BUF_SIZE); 18834ff6846SIoana Radulescu } 18934ff6846SIoana Radulescu 19034ff6846SIoana Radulescu if (dpaa2_sg_is_final(sge)) 19134ff6846SIoana Radulescu break; 19234ff6846SIoana Radulescu } 19334ff6846SIoana Radulescu 19434ff6846SIoana Radulescu WARN_ONCE(i == DPAA2_ETH_MAX_SG_ENTRIES, "Final bit not set in SGT"); 19534ff6846SIoana Radulescu 19634ff6846SIoana Radulescu /* Count all data buffers + SG table buffer */ 19734ff6846SIoana Radulescu ch->buf_count -= i + 2; 19834ff6846SIoana Radulescu 19934ff6846SIoana Radulescu return skb; 20034ff6846SIoana Radulescu } 20134ff6846SIoana Radulescu 20234ff6846SIoana Radulescu /* Main Rx frame processing routine */ 20334ff6846SIoana Radulescu static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv, 20434ff6846SIoana Radulescu struct dpaa2_eth_channel *ch, 20534ff6846SIoana Radulescu const struct dpaa2_fd *fd, 20634ff6846SIoana Radulescu struct napi_struct *napi, 20734ff6846SIoana Radulescu u16 queue_id) 20834ff6846SIoana Radulescu { 20934ff6846SIoana Radulescu dma_addr_t addr = dpaa2_fd_get_addr(fd); 21034ff6846SIoana Radulescu u8 fd_format = dpaa2_fd_get_format(fd); 21134ff6846SIoana Radulescu void *vaddr; 21234ff6846SIoana Radulescu struct sk_buff *skb; 21334ff6846SIoana Radulescu struct rtnl_link_stats64 *percpu_stats; 21434ff6846SIoana Radulescu struct dpaa2_eth_drv_stats *percpu_extras; 21534ff6846SIoana Radulescu struct device *dev = priv->net_dev->dev.parent; 21634ff6846SIoana Radulescu struct dpaa2_fas *fas; 21734ff6846SIoana Radulescu void *buf_data; 21834ff6846SIoana Radulescu u32 status = 0; 21934ff6846SIoana Radulescu 22034ff6846SIoana Radulescu /* Tracing point */ 22134ff6846SIoana Radulescu trace_dpaa2_rx_fd(priv->net_dev, fd); 22234ff6846SIoana Radulescu 22334ff6846SIoana Radulescu vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr); 22434ff6846SIoana Radulescu dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE, DMA_FROM_DEVICE); 22534ff6846SIoana Radulescu 22634ff6846SIoana Radulescu fas = dpaa2_get_fas(vaddr, false); 22734ff6846SIoana Radulescu prefetch(fas); 22834ff6846SIoana Radulescu buf_data = vaddr + dpaa2_fd_get_offset(fd); 22934ff6846SIoana Radulescu prefetch(buf_data); 23034ff6846SIoana Radulescu 23134ff6846SIoana Radulescu percpu_stats = this_cpu_ptr(priv->percpu_stats); 23234ff6846SIoana Radulescu percpu_extras = this_cpu_ptr(priv->percpu_extras); 23334ff6846SIoana Radulescu 23434ff6846SIoana Radulescu if (fd_format == dpaa2_fd_single) { 235fdb6ca9eSIoana Ciornei skb = build_linear_skb(ch, fd, vaddr); 23634ff6846SIoana Radulescu } else if (fd_format == dpaa2_fd_sg) { 23734ff6846SIoana Radulescu skb = build_frag_skb(priv, ch, buf_data); 23834ff6846SIoana Radulescu skb_free_frag(vaddr); 23934ff6846SIoana Radulescu percpu_extras->rx_sg_frames++; 24034ff6846SIoana Radulescu percpu_extras->rx_sg_bytes += dpaa2_fd_get_len(fd); 24134ff6846SIoana Radulescu } else { 24234ff6846SIoana Radulescu /* We don't support any other format */ 24334ff6846SIoana Radulescu goto err_frame_format; 24434ff6846SIoana Radulescu } 24534ff6846SIoana Radulescu 24634ff6846SIoana Radulescu if (unlikely(!skb)) 24734ff6846SIoana Radulescu goto err_build_skb; 24834ff6846SIoana Radulescu 24934ff6846SIoana Radulescu prefetch(skb->data); 25034ff6846SIoana Radulescu 25134ff6846SIoana Radulescu /* Get the timestamp value */ 25234ff6846SIoana Radulescu if (priv->rx_tstamp) { 25334ff6846SIoana Radulescu struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb); 25434ff6846SIoana Radulescu __le64 *ts = dpaa2_get_ts(vaddr, false); 25534ff6846SIoana Radulescu u64 ns; 25634ff6846SIoana Radulescu 25734ff6846SIoana Radulescu memset(shhwtstamps, 0, sizeof(*shhwtstamps)); 25834ff6846SIoana Radulescu 25934ff6846SIoana Radulescu ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts); 26034ff6846SIoana Radulescu shhwtstamps->hwtstamp = ns_to_ktime(ns); 26134ff6846SIoana Radulescu } 26234ff6846SIoana Radulescu 26334ff6846SIoana Radulescu /* Check if we need to validate the L4 csum */ 26434ff6846SIoana Radulescu if (likely(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV)) { 26534ff6846SIoana Radulescu status = le32_to_cpu(fas->status); 26634ff6846SIoana Radulescu validate_rx_csum(priv, status, skb); 26734ff6846SIoana Radulescu } 26834ff6846SIoana Radulescu 26934ff6846SIoana Radulescu skb->protocol = eth_type_trans(skb, priv->net_dev); 27034ff6846SIoana Radulescu skb_record_rx_queue(skb, queue_id); 27134ff6846SIoana Radulescu 27234ff6846SIoana Radulescu percpu_stats->rx_packets++; 27334ff6846SIoana Radulescu percpu_stats->rx_bytes += dpaa2_fd_get_len(fd); 27434ff6846SIoana Radulescu 27534ff6846SIoana Radulescu napi_gro_receive(napi, skb); 27634ff6846SIoana Radulescu 27734ff6846SIoana Radulescu return; 27834ff6846SIoana Radulescu 27934ff6846SIoana Radulescu err_build_skb: 28034ff6846SIoana Radulescu free_rx_fd(priv, fd, vaddr); 28134ff6846SIoana Radulescu err_frame_format: 28234ff6846SIoana Radulescu percpu_stats->rx_dropped++; 28334ff6846SIoana Radulescu } 28434ff6846SIoana Radulescu 28534ff6846SIoana Radulescu /* Consume all frames pull-dequeued into the store. This is the simplest way to 28634ff6846SIoana Radulescu * make sure we don't accidentally issue another volatile dequeue which would 28734ff6846SIoana Radulescu * overwrite (leak) frames already in the store. 28834ff6846SIoana Radulescu * 28934ff6846SIoana Radulescu * Observance of NAPI budget is not our concern, leaving that to the caller. 29034ff6846SIoana Radulescu */ 29168049a5fSIoana Ciocoi Radulescu static int consume_frames(struct dpaa2_eth_channel *ch, 29268049a5fSIoana Ciocoi Radulescu enum dpaa2_eth_fq_type *type) 29334ff6846SIoana Radulescu { 29434ff6846SIoana Radulescu struct dpaa2_eth_priv *priv = ch->priv; 29568049a5fSIoana Ciocoi Radulescu struct dpaa2_eth_fq *fq = NULL; 29634ff6846SIoana Radulescu struct dpaa2_dq *dq; 29734ff6846SIoana Radulescu const struct dpaa2_fd *fd; 29834ff6846SIoana Radulescu int cleaned = 0; 29934ff6846SIoana Radulescu int is_last; 30034ff6846SIoana Radulescu 30134ff6846SIoana Radulescu do { 30234ff6846SIoana Radulescu dq = dpaa2_io_store_next(ch->store, &is_last); 30334ff6846SIoana Radulescu if (unlikely(!dq)) { 30434ff6846SIoana Radulescu /* If we're here, we *must* have placed a 30534ff6846SIoana Radulescu * volatile dequeue comnmand, so keep reading through 30634ff6846SIoana Radulescu * the store until we get some sort of valid response 30734ff6846SIoana Radulescu * token (either a valid frame or an "empty dequeue") 30834ff6846SIoana Radulescu */ 30934ff6846SIoana Radulescu continue; 31034ff6846SIoana Radulescu } 31134ff6846SIoana Radulescu 31234ff6846SIoana Radulescu fd = dpaa2_dq_fd(dq); 31334ff6846SIoana Radulescu fq = (struct dpaa2_eth_fq *)(uintptr_t)dpaa2_dq_fqd_ctx(dq); 31434ff6846SIoana Radulescu 31534ff6846SIoana Radulescu fq->consume(priv, ch, fd, &ch->napi, fq->flowid); 31634ff6846SIoana Radulescu cleaned++; 31734ff6846SIoana Radulescu } while (!is_last); 31834ff6846SIoana Radulescu 31968049a5fSIoana Ciocoi Radulescu if (!cleaned) 32068049a5fSIoana Ciocoi Radulescu return 0; 32168049a5fSIoana Ciocoi Radulescu 32268049a5fSIoana Ciocoi Radulescu fq->stats.frames += cleaned; 32368049a5fSIoana Ciocoi Radulescu ch->stats.frames += cleaned; 32468049a5fSIoana Ciocoi Radulescu 32568049a5fSIoana Ciocoi Radulescu /* A dequeue operation only pulls frames from a single queue 32668049a5fSIoana Ciocoi Radulescu * into the store. Return the frame queue type as an out param. 32768049a5fSIoana Ciocoi Radulescu */ 32868049a5fSIoana Ciocoi Radulescu if (type) 32968049a5fSIoana Ciocoi Radulescu *type = fq->type; 33068049a5fSIoana Ciocoi Radulescu 33134ff6846SIoana Radulescu return cleaned; 33234ff6846SIoana Radulescu } 33334ff6846SIoana Radulescu 33434ff6846SIoana Radulescu /* Configure the egress frame annotation for timestamp update */ 33534ff6846SIoana Radulescu static void enable_tx_tstamp(struct dpaa2_fd *fd, void *buf_start) 33634ff6846SIoana Radulescu { 33734ff6846SIoana Radulescu struct dpaa2_faead *faead; 33834ff6846SIoana Radulescu u32 ctrl, frc; 33934ff6846SIoana Radulescu 34034ff6846SIoana Radulescu /* Mark the egress frame annotation area as valid */ 34134ff6846SIoana Radulescu frc = dpaa2_fd_get_frc(fd); 34234ff6846SIoana Radulescu dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV); 34334ff6846SIoana Radulescu 34434ff6846SIoana Radulescu /* Set hardware annotation size */ 34534ff6846SIoana Radulescu ctrl = dpaa2_fd_get_ctrl(fd); 34634ff6846SIoana Radulescu dpaa2_fd_set_ctrl(fd, ctrl | DPAA2_FD_CTRL_ASAL); 34734ff6846SIoana Radulescu 34834ff6846SIoana Radulescu /* enable UPD (update prepanded data) bit in FAEAD field of 34934ff6846SIoana Radulescu * hardware frame annotation area 35034ff6846SIoana Radulescu */ 35134ff6846SIoana Radulescu ctrl = DPAA2_FAEAD_A2V | DPAA2_FAEAD_UPDV | DPAA2_FAEAD_UPD; 35234ff6846SIoana Radulescu faead = dpaa2_get_faead(buf_start, true); 35334ff6846SIoana Radulescu faead->ctrl = cpu_to_le32(ctrl); 35434ff6846SIoana Radulescu } 35534ff6846SIoana Radulescu 35634ff6846SIoana Radulescu /* Create a frame descriptor based on a fragmented skb */ 35734ff6846SIoana Radulescu static int build_sg_fd(struct dpaa2_eth_priv *priv, 35834ff6846SIoana Radulescu struct sk_buff *skb, 35934ff6846SIoana Radulescu struct dpaa2_fd *fd) 36034ff6846SIoana Radulescu { 36134ff6846SIoana Radulescu struct device *dev = priv->net_dev->dev.parent; 36234ff6846SIoana Radulescu void *sgt_buf = NULL; 36334ff6846SIoana Radulescu dma_addr_t addr; 36434ff6846SIoana Radulescu int nr_frags = skb_shinfo(skb)->nr_frags; 36534ff6846SIoana Radulescu struct dpaa2_sg_entry *sgt; 36634ff6846SIoana Radulescu int i, err; 36734ff6846SIoana Radulescu int sgt_buf_size; 36834ff6846SIoana Radulescu struct scatterlist *scl, *crt_scl; 36934ff6846SIoana Radulescu int num_sg; 37034ff6846SIoana Radulescu int num_dma_bufs; 37134ff6846SIoana Radulescu struct dpaa2_eth_swa *swa; 37234ff6846SIoana Radulescu 37334ff6846SIoana Radulescu /* Create and map scatterlist. 37434ff6846SIoana Radulescu * We don't advertise NETIF_F_FRAGLIST, so skb_to_sgvec() will not have 37534ff6846SIoana Radulescu * to go beyond nr_frags+1. 37634ff6846SIoana Radulescu * Note: We don't support chained scatterlists 37734ff6846SIoana Radulescu */ 37834ff6846SIoana Radulescu if (unlikely(PAGE_SIZE / sizeof(struct scatterlist) < nr_frags + 1)) 37934ff6846SIoana Radulescu return -EINVAL; 38034ff6846SIoana Radulescu 38134ff6846SIoana Radulescu scl = kcalloc(nr_frags + 1, sizeof(struct scatterlist), GFP_ATOMIC); 38234ff6846SIoana Radulescu if (unlikely(!scl)) 38334ff6846SIoana Radulescu return -ENOMEM; 38434ff6846SIoana Radulescu 38534ff6846SIoana Radulescu sg_init_table(scl, nr_frags + 1); 38634ff6846SIoana Radulescu num_sg = skb_to_sgvec(skb, scl, 0, skb->len); 38734ff6846SIoana Radulescu num_dma_bufs = dma_map_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL); 38834ff6846SIoana Radulescu if (unlikely(!num_dma_bufs)) { 38934ff6846SIoana Radulescu err = -ENOMEM; 39034ff6846SIoana Radulescu goto dma_map_sg_failed; 39134ff6846SIoana Radulescu } 39234ff6846SIoana Radulescu 39334ff6846SIoana Radulescu /* Prepare the HW SGT structure */ 39434ff6846SIoana Radulescu sgt_buf_size = priv->tx_data_offset + 39534ff6846SIoana Radulescu sizeof(struct dpaa2_sg_entry) * num_dma_bufs; 39634ff6846SIoana Radulescu sgt_buf = netdev_alloc_frag(sgt_buf_size + DPAA2_ETH_TX_BUF_ALIGN); 39734ff6846SIoana Radulescu if (unlikely(!sgt_buf)) { 39834ff6846SIoana Radulescu err = -ENOMEM; 39934ff6846SIoana Radulescu goto sgt_buf_alloc_failed; 40034ff6846SIoana Radulescu } 40134ff6846SIoana Radulescu sgt_buf = PTR_ALIGN(sgt_buf, DPAA2_ETH_TX_BUF_ALIGN); 40234ff6846SIoana Radulescu memset(sgt_buf, 0, sgt_buf_size); 40334ff6846SIoana Radulescu 40434ff6846SIoana Radulescu sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset); 40534ff6846SIoana Radulescu 40634ff6846SIoana Radulescu /* Fill in the HW SGT structure. 40734ff6846SIoana Radulescu * 40834ff6846SIoana Radulescu * sgt_buf is zeroed out, so the following fields are implicit 40934ff6846SIoana Radulescu * in all sgt entries: 41034ff6846SIoana Radulescu * - offset is 0 41134ff6846SIoana Radulescu * - format is 'dpaa2_sg_single' 41234ff6846SIoana Radulescu */ 41334ff6846SIoana Radulescu for_each_sg(scl, crt_scl, num_dma_bufs, i) { 41434ff6846SIoana Radulescu dpaa2_sg_set_addr(&sgt[i], sg_dma_address(crt_scl)); 41534ff6846SIoana Radulescu dpaa2_sg_set_len(&sgt[i], sg_dma_len(crt_scl)); 41634ff6846SIoana Radulescu } 41734ff6846SIoana Radulescu dpaa2_sg_set_final(&sgt[i - 1], true); 41834ff6846SIoana Radulescu 41934ff6846SIoana Radulescu /* Store the skb backpointer in the SGT buffer. 42034ff6846SIoana Radulescu * Fit the scatterlist and the number of buffers alongside the 42134ff6846SIoana Radulescu * skb backpointer in the software annotation area. We'll need 42234ff6846SIoana Radulescu * all of them on Tx Conf. 42334ff6846SIoana Radulescu */ 42434ff6846SIoana Radulescu swa = (struct dpaa2_eth_swa *)sgt_buf; 42534ff6846SIoana Radulescu swa->skb = skb; 42634ff6846SIoana Radulescu swa->scl = scl; 42734ff6846SIoana Radulescu swa->num_sg = num_sg; 42834ff6846SIoana Radulescu swa->sgt_size = sgt_buf_size; 42934ff6846SIoana Radulescu 43034ff6846SIoana Radulescu /* Separately map the SGT buffer */ 43134ff6846SIoana Radulescu addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL); 43234ff6846SIoana Radulescu if (unlikely(dma_mapping_error(dev, addr))) { 43334ff6846SIoana Radulescu err = -ENOMEM; 43434ff6846SIoana Radulescu goto dma_map_single_failed; 43534ff6846SIoana Radulescu } 43634ff6846SIoana Radulescu dpaa2_fd_set_offset(fd, priv->tx_data_offset); 43734ff6846SIoana Radulescu dpaa2_fd_set_format(fd, dpaa2_fd_sg); 43834ff6846SIoana Radulescu dpaa2_fd_set_addr(fd, addr); 43934ff6846SIoana Radulescu dpaa2_fd_set_len(fd, skb->len); 44034ff6846SIoana Radulescu dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA | FD_CTRL_PTV1); 44134ff6846SIoana Radulescu 44234ff6846SIoana Radulescu if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) 44334ff6846SIoana Radulescu enable_tx_tstamp(fd, sgt_buf); 44434ff6846SIoana Radulescu 44534ff6846SIoana Radulescu return 0; 44634ff6846SIoana Radulescu 44734ff6846SIoana Radulescu dma_map_single_failed: 44834ff6846SIoana Radulescu skb_free_frag(sgt_buf); 44934ff6846SIoana Radulescu sgt_buf_alloc_failed: 45034ff6846SIoana Radulescu dma_unmap_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL); 45134ff6846SIoana Radulescu dma_map_sg_failed: 45234ff6846SIoana Radulescu kfree(scl); 45334ff6846SIoana Radulescu return err; 45434ff6846SIoana Radulescu } 45534ff6846SIoana Radulescu 45634ff6846SIoana Radulescu /* Create a frame descriptor based on a linear skb */ 45734ff6846SIoana Radulescu static int build_single_fd(struct dpaa2_eth_priv *priv, 45834ff6846SIoana Radulescu struct sk_buff *skb, 45934ff6846SIoana Radulescu struct dpaa2_fd *fd) 46034ff6846SIoana Radulescu { 46134ff6846SIoana Radulescu struct device *dev = priv->net_dev->dev.parent; 46234ff6846SIoana Radulescu u8 *buffer_start, *aligned_start; 46334ff6846SIoana Radulescu struct sk_buff **skbh; 46434ff6846SIoana Radulescu dma_addr_t addr; 46534ff6846SIoana Radulescu 46634ff6846SIoana Radulescu buffer_start = skb->data - dpaa2_eth_needed_headroom(priv, skb); 46734ff6846SIoana Radulescu 46834ff6846SIoana Radulescu /* If there's enough room to align the FD address, do it. 46934ff6846SIoana Radulescu * It will help hardware optimize accesses. 47034ff6846SIoana Radulescu */ 47134ff6846SIoana Radulescu aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN, 47234ff6846SIoana Radulescu DPAA2_ETH_TX_BUF_ALIGN); 47334ff6846SIoana Radulescu if (aligned_start >= skb->head) 47434ff6846SIoana Radulescu buffer_start = aligned_start; 47534ff6846SIoana Radulescu 47634ff6846SIoana Radulescu /* Store a backpointer to the skb at the beginning of the buffer 47734ff6846SIoana Radulescu * (in the private data area) such that we can release it 47834ff6846SIoana Radulescu * on Tx confirm 47934ff6846SIoana Radulescu */ 48034ff6846SIoana Radulescu skbh = (struct sk_buff **)buffer_start; 48134ff6846SIoana Radulescu *skbh = skb; 48234ff6846SIoana Radulescu 48334ff6846SIoana Radulescu addr = dma_map_single(dev, buffer_start, 48434ff6846SIoana Radulescu skb_tail_pointer(skb) - buffer_start, 48534ff6846SIoana Radulescu DMA_BIDIRECTIONAL); 48634ff6846SIoana Radulescu if (unlikely(dma_mapping_error(dev, addr))) 48734ff6846SIoana Radulescu return -ENOMEM; 48834ff6846SIoana Radulescu 48934ff6846SIoana Radulescu dpaa2_fd_set_addr(fd, addr); 49034ff6846SIoana Radulescu dpaa2_fd_set_offset(fd, (u16)(skb->data - buffer_start)); 49134ff6846SIoana Radulescu dpaa2_fd_set_len(fd, skb->len); 49234ff6846SIoana Radulescu dpaa2_fd_set_format(fd, dpaa2_fd_single); 49334ff6846SIoana Radulescu dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA | FD_CTRL_PTV1); 49434ff6846SIoana Radulescu 49534ff6846SIoana Radulescu if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) 49634ff6846SIoana Radulescu enable_tx_tstamp(fd, buffer_start); 49734ff6846SIoana Radulescu 49834ff6846SIoana Radulescu return 0; 49934ff6846SIoana Radulescu } 50034ff6846SIoana Radulescu 50134ff6846SIoana Radulescu /* FD freeing routine on the Tx path 50234ff6846SIoana Radulescu * 50334ff6846SIoana Radulescu * DMA-unmap and free FD and possibly SGT buffer allocated on Tx. The skb 50434ff6846SIoana Radulescu * back-pointed to is also freed. 50534ff6846SIoana Radulescu * This can be called either from dpaa2_eth_tx_conf() or on the error path of 50634ff6846SIoana Radulescu * dpaa2_eth_tx(). 50734ff6846SIoana Radulescu */ 50834ff6846SIoana Radulescu static void free_tx_fd(const struct dpaa2_eth_priv *priv, 50934ff6846SIoana Radulescu const struct dpaa2_fd *fd) 51034ff6846SIoana Radulescu { 51134ff6846SIoana Radulescu struct device *dev = priv->net_dev->dev.parent; 51234ff6846SIoana Radulescu dma_addr_t fd_addr; 51334ff6846SIoana Radulescu struct sk_buff **skbh, *skb; 51434ff6846SIoana Radulescu unsigned char *buffer_start; 51534ff6846SIoana Radulescu struct dpaa2_eth_swa *swa; 51634ff6846SIoana Radulescu u8 fd_format = dpaa2_fd_get_format(fd); 51734ff6846SIoana Radulescu 51834ff6846SIoana Radulescu fd_addr = dpaa2_fd_get_addr(fd); 51934ff6846SIoana Radulescu skbh = dpaa2_iova_to_virt(priv->iommu_domain, fd_addr); 52034ff6846SIoana Radulescu 52134ff6846SIoana Radulescu if (fd_format == dpaa2_fd_single) { 52234ff6846SIoana Radulescu skb = *skbh; 52334ff6846SIoana Radulescu buffer_start = (unsigned char *)skbh; 52434ff6846SIoana Radulescu /* Accessing the skb buffer is safe before dma unmap, because 52534ff6846SIoana Radulescu * we didn't map the actual skb shell. 52634ff6846SIoana Radulescu */ 52734ff6846SIoana Radulescu dma_unmap_single(dev, fd_addr, 52834ff6846SIoana Radulescu skb_tail_pointer(skb) - buffer_start, 52934ff6846SIoana Radulescu DMA_BIDIRECTIONAL); 53034ff6846SIoana Radulescu } else if (fd_format == dpaa2_fd_sg) { 53134ff6846SIoana Radulescu swa = (struct dpaa2_eth_swa *)skbh; 53234ff6846SIoana Radulescu skb = swa->skb; 53334ff6846SIoana Radulescu 53434ff6846SIoana Radulescu /* Unmap the scatterlist */ 53534ff6846SIoana Radulescu dma_unmap_sg(dev, swa->scl, swa->num_sg, DMA_BIDIRECTIONAL); 53634ff6846SIoana Radulescu kfree(swa->scl); 53734ff6846SIoana Radulescu 53834ff6846SIoana Radulescu /* Unmap the SGT buffer */ 53934ff6846SIoana Radulescu dma_unmap_single(dev, fd_addr, swa->sgt_size, 54034ff6846SIoana Radulescu DMA_BIDIRECTIONAL); 54134ff6846SIoana Radulescu } else { 54234ff6846SIoana Radulescu netdev_dbg(priv->net_dev, "Invalid FD format\n"); 54334ff6846SIoana Radulescu return; 54434ff6846SIoana Radulescu } 54534ff6846SIoana Radulescu 54634ff6846SIoana Radulescu /* Get the timestamp value */ 54734ff6846SIoana Radulescu if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) { 54834ff6846SIoana Radulescu struct skb_shared_hwtstamps shhwtstamps; 54934ff6846SIoana Radulescu __le64 *ts = dpaa2_get_ts(skbh, true); 55034ff6846SIoana Radulescu u64 ns; 55134ff6846SIoana Radulescu 55234ff6846SIoana Radulescu memset(&shhwtstamps, 0, sizeof(shhwtstamps)); 55334ff6846SIoana Radulescu 55434ff6846SIoana Radulescu ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts); 55534ff6846SIoana Radulescu shhwtstamps.hwtstamp = ns_to_ktime(ns); 55634ff6846SIoana Radulescu skb_tstamp_tx(skb, &shhwtstamps); 55734ff6846SIoana Radulescu } 55834ff6846SIoana Radulescu 55934ff6846SIoana Radulescu /* Free SGT buffer allocated on tx */ 56034ff6846SIoana Radulescu if (fd_format != dpaa2_fd_single) 56134ff6846SIoana Radulescu skb_free_frag(skbh); 56234ff6846SIoana Radulescu 56334ff6846SIoana Radulescu /* Move on with skb release */ 56434ff6846SIoana Radulescu dev_kfree_skb(skb); 56534ff6846SIoana Radulescu } 56634ff6846SIoana Radulescu 56734ff6846SIoana Radulescu static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev) 56834ff6846SIoana Radulescu { 56934ff6846SIoana Radulescu struct dpaa2_eth_priv *priv = netdev_priv(net_dev); 57034ff6846SIoana Radulescu struct dpaa2_fd fd; 57134ff6846SIoana Radulescu struct rtnl_link_stats64 *percpu_stats; 57234ff6846SIoana Radulescu struct dpaa2_eth_drv_stats *percpu_extras; 57334ff6846SIoana Radulescu struct dpaa2_eth_fq *fq; 57434ff6846SIoana Radulescu u16 queue_mapping; 57534ff6846SIoana Radulescu unsigned int needed_headroom; 57634ff6846SIoana Radulescu int err, i; 57734ff6846SIoana Radulescu 57834ff6846SIoana Radulescu percpu_stats = this_cpu_ptr(priv->percpu_stats); 57934ff6846SIoana Radulescu percpu_extras = this_cpu_ptr(priv->percpu_extras); 58034ff6846SIoana Radulescu 58134ff6846SIoana Radulescu needed_headroom = dpaa2_eth_needed_headroom(priv, skb); 58234ff6846SIoana Radulescu if (skb_headroom(skb) < needed_headroom) { 58334ff6846SIoana Radulescu struct sk_buff *ns; 58434ff6846SIoana Radulescu 58534ff6846SIoana Radulescu ns = skb_realloc_headroom(skb, needed_headroom); 58634ff6846SIoana Radulescu if (unlikely(!ns)) { 58734ff6846SIoana Radulescu percpu_stats->tx_dropped++; 58834ff6846SIoana Radulescu goto err_alloc_headroom; 58934ff6846SIoana Radulescu } 59034ff6846SIoana Radulescu percpu_extras->tx_reallocs++; 59134ff6846SIoana Radulescu 59234ff6846SIoana Radulescu if (skb->sk) 59334ff6846SIoana Radulescu skb_set_owner_w(ns, skb->sk); 59434ff6846SIoana Radulescu 59534ff6846SIoana Radulescu dev_kfree_skb(skb); 59634ff6846SIoana Radulescu skb = ns; 59734ff6846SIoana Radulescu } 59834ff6846SIoana Radulescu 59934ff6846SIoana Radulescu /* We'll be holding a back-reference to the skb until Tx Confirmation; 60034ff6846SIoana Radulescu * we don't want that overwritten by a concurrent Tx with a cloned skb. 60134ff6846SIoana Radulescu */ 60234ff6846SIoana Radulescu skb = skb_unshare(skb, GFP_ATOMIC); 60334ff6846SIoana Radulescu if (unlikely(!skb)) { 60434ff6846SIoana Radulescu /* skb_unshare() has already freed the skb */ 60534ff6846SIoana Radulescu percpu_stats->tx_dropped++; 60634ff6846SIoana Radulescu return NETDEV_TX_OK; 60734ff6846SIoana Radulescu } 60834ff6846SIoana Radulescu 60934ff6846SIoana Radulescu /* Setup the FD fields */ 61034ff6846SIoana Radulescu memset(&fd, 0, sizeof(fd)); 61134ff6846SIoana Radulescu 61234ff6846SIoana Radulescu if (skb_is_nonlinear(skb)) { 61334ff6846SIoana Radulescu err = build_sg_fd(priv, skb, &fd); 61434ff6846SIoana Radulescu percpu_extras->tx_sg_frames++; 61534ff6846SIoana Radulescu percpu_extras->tx_sg_bytes += skb->len; 61634ff6846SIoana Radulescu } else { 61734ff6846SIoana Radulescu err = build_single_fd(priv, skb, &fd); 61834ff6846SIoana Radulescu } 61934ff6846SIoana Radulescu 62034ff6846SIoana Radulescu if (unlikely(err)) { 62134ff6846SIoana Radulescu percpu_stats->tx_dropped++; 62234ff6846SIoana Radulescu goto err_build_fd; 62334ff6846SIoana Radulescu } 62434ff6846SIoana Radulescu 62534ff6846SIoana Radulescu /* Tracing point */ 62634ff6846SIoana Radulescu trace_dpaa2_tx_fd(net_dev, &fd); 62734ff6846SIoana Radulescu 62834ff6846SIoana Radulescu /* TxConf FQ selection relies on queue id from the stack. 62934ff6846SIoana Radulescu * In case of a forwarded frame from another DPNI interface, we choose 63034ff6846SIoana Radulescu * a queue affined to the same core that processed the Rx frame 63134ff6846SIoana Radulescu */ 63234ff6846SIoana Radulescu queue_mapping = skb_get_queue_mapping(skb); 63334ff6846SIoana Radulescu fq = &priv->fq[queue_mapping]; 63434ff6846SIoana Radulescu for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) { 63534ff6846SIoana Radulescu err = dpaa2_io_service_enqueue_qd(fq->channel->dpio, 63634ff6846SIoana Radulescu priv->tx_qdid, 0, 63734ff6846SIoana Radulescu fq->tx_qdbin, &fd); 63834ff6846SIoana Radulescu if (err != -EBUSY) 63934ff6846SIoana Radulescu break; 64034ff6846SIoana Radulescu } 64134ff6846SIoana Radulescu percpu_extras->tx_portal_busy += i; 64234ff6846SIoana Radulescu if (unlikely(err < 0)) { 64334ff6846SIoana Radulescu percpu_stats->tx_errors++; 64434ff6846SIoana Radulescu /* Clean up everything, including freeing the skb */ 64534ff6846SIoana Radulescu free_tx_fd(priv, &fd); 64634ff6846SIoana Radulescu } else { 64734ff6846SIoana Radulescu percpu_stats->tx_packets++; 64834ff6846SIoana Radulescu percpu_stats->tx_bytes += dpaa2_fd_get_len(&fd); 64934ff6846SIoana Radulescu } 65034ff6846SIoana Radulescu 65134ff6846SIoana Radulescu return NETDEV_TX_OK; 65234ff6846SIoana Radulescu 65334ff6846SIoana Radulescu err_build_fd: 65434ff6846SIoana Radulescu err_alloc_headroom: 65534ff6846SIoana Radulescu dev_kfree_skb(skb); 65634ff6846SIoana Radulescu 65734ff6846SIoana Radulescu return NETDEV_TX_OK; 65834ff6846SIoana Radulescu } 65934ff6846SIoana Radulescu 66034ff6846SIoana Radulescu /* Tx confirmation frame processing routine */ 66134ff6846SIoana Radulescu static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv, 662b00c898cSIoana Ciornei struct dpaa2_eth_channel *ch __always_unused, 66334ff6846SIoana Radulescu const struct dpaa2_fd *fd, 66434ff6846SIoana Radulescu struct napi_struct *napi __always_unused, 66534ff6846SIoana Radulescu u16 queue_id __always_unused) 66634ff6846SIoana Radulescu { 66734ff6846SIoana Radulescu struct rtnl_link_stats64 *percpu_stats; 66834ff6846SIoana Radulescu struct dpaa2_eth_drv_stats *percpu_extras; 66934ff6846SIoana Radulescu u32 fd_errors; 67034ff6846SIoana Radulescu 67134ff6846SIoana Radulescu /* Tracing point */ 67234ff6846SIoana Radulescu trace_dpaa2_tx_conf_fd(priv->net_dev, fd); 67334ff6846SIoana Radulescu 67434ff6846SIoana Radulescu percpu_extras = this_cpu_ptr(priv->percpu_extras); 67534ff6846SIoana Radulescu percpu_extras->tx_conf_frames++; 67634ff6846SIoana Radulescu percpu_extras->tx_conf_bytes += dpaa2_fd_get_len(fd); 67734ff6846SIoana Radulescu 67834ff6846SIoana Radulescu /* Check frame errors in the FD field */ 67934ff6846SIoana Radulescu fd_errors = dpaa2_fd_get_ctrl(fd) & DPAA2_FD_TX_ERR_MASK; 68034ff6846SIoana Radulescu free_tx_fd(priv, fd); 68134ff6846SIoana Radulescu 68234ff6846SIoana Radulescu if (likely(!fd_errors)) 68334ff6846SIoana Radulescu return; 68434ff6846SIoana Radulescu 68534ff6846SIoana Radulescu if (net_ratelimit()) 68634ff6846SIoana Radulescu netdev_dbg(priv->net_dev, "TX frame FD error: 0x%08x\n", 68734ff6846SIoana Radulescu fd_errors); 68834ff6846SIoana Radulescu 68934ff6846SIoana Radulescu percpu_stats = this_cpu_ptr(priv->percpu_stats); 69034ff6846SIoana Radulescu /* Tx-conf logically pertains to the egress path. */ 69134ff6846SIoana Radulescu percpu_stats->tx_errors++; 69234ff6846SIoana Radulescu } 69334ff6846SIoana Radulescu 69434ff6846SIoana Radulescu static int set_rx_csum(struct dpaa2_eth_priv *priv, bool enable) 69534ff6846SIoana Radulescu { 69634ff6846SIoana Radulescu int err; 69734ff6846SIoana Radulescu 69834ff6846SIoana Radulescu err = dpni_set_offload(priv->mc_io, 0, priv->mc_token, 69934ff6846SIoana Radulescu DPNI_OFF_RX_L3_CSUM, enable); 70034ff6846SIoana Radulescu if (err) { 70134ff6846SIoana Radulescu netdev_err(priv->net_dev, 70234ff6846SIoana Radulescu "dpni_set_offload(RX_L3_CSUM) failed\n"); 70334ff6846SIoana Radulescu return err; 70434ff6846SIoana Radulescu } 70534ff6846SIoana Radulescu 70634ff6846SIoana Radulescu err = dpni_set_offload(priv->mc_io, 0, priv->mc_token, 70734ff6846SIoana Radulescu DPNI_OFF_RX_L4_CSUM, enable); 70834ff6846SIoana Radulescu if (err) { 70934ff6846SIoana Radulescu netdev_err(priv->net_dev, 71034ff6846SIoana Radulescu "dpni_set_offload(RX_L4_CSUM) failed\n"); 71134ff6846SIoana Radulescu return err; 71234ff6846SIoana Radulescu } 71334ff6846SIoana Radulescu 71434ff6846SIoana Radulescu return 0; 71534ff6846SIoana Radulescu } 71634ff6846SIoana Radulescu 71734ff6846SIoana Radulescu static int set_tx_csum(struct dpaa2_eth_priv *priv, bool enable) 71834ff6846SIoana Radulescu { 71934ff6846SIoana Radulescu int err; 72034ff6846SIoana Radulescu 72134ff6846SIoana Radulescu err = dpni_set_offload(priv->mc_io, 0, priv->mc_token, 72234ff6846SIoana Radulescu DPNI_OFF_TX_L3_CSUM, enable); 72334ff6846SIoana Radulescu if (err) { 72434ff6846SIoana Radulescu netdev_err(priv->net_dev, "dpni_set_offload(TX_L3_CSUM) failed\n"); 72534ff6846SIoana Radulescu return err; 72634ff6846SIoana Radulescu } 72734ff6846SIoana Radulescu 72834ff6846SIoana Radulescu err = dpni_set_offload(priv->mc_io, 0, priv->mc_token, 72934ff6846SIoana Radulescu DPNI_OFF_TX_L4_CSUM, enable); 73034ff6846SIoana Radulescu if (err) { 73134ff6846SIoana Radulescu netdev_err(priv->net_dev, "dpni_set_offload(TX_L4_CSUM) failed\n"); 73234ff6846SIoana Radulescu return err; 73334ff6846SIoana Radulescu } 73434ff6846SIoana Radulescu 73534ff6846SIoana Radulescu return 0; 73634ff6846SIoana Radulescu } 73734ff6846SIoana Radulescu 73834ff6846SIoana Radulescu /* Free buffers acquired from the buffer pool or which were meant to 73934ff6846SIoana Radulescu * be released in the pool 74034ff6846SIoana Radulescu */ 74134ff6846SIoana Radulescu static void free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array, int count) 74234ff6846SIoana Radulescu { 74334ff6846SIoana Radulescu struct device *dev = priv->net_dev->dev.parent; 74434ff6846SIoana Radulescu void *vaddr; 74534ff6846SIoana Radulescu int i; 74634ff6846SIoana Radulescu 74734ff6846SIoana Radulescu for (i = 0; i < count; i++) { 74834ff6846SIoana Radulescu vaddr = dpaa2_iova_to_virt(priv->iommu_domain, buf_array[i]); 74934ff6846SIoana Radulescu dma_unmap_single(dev, buf_array[i], DPAA2_ETH_RX_BUF_SIZE, 75034ff6846SIoana Radulescu DMA_FROM_DEVICE); 75134ff6846SIoana Radulescu skb_free_frag(vaddr); 75234ff6846SIoana Radulescu } 75334ff6846SIoana Radulescu } 75434ff6846SIoana Radulescu 75534ff6846SIoana Radulescu /* Perform a single release command to add buffers 75634ff6846SIoana Radulescu * to the specified buffer pool 75734ff6846SIoana Radulescu */ 75834ff6846SIoana Radulescu static int add_bufs(struct dpaa2_eth_priv *priv, 75934ff6846SIoana Radulescu struct dpaa2_eth_channel *ch, u16 bpid) 76034ff6846SIoana Radulescu { 76134ff6846SIoana Radulescu struct device *dev = priv->net_dev->dev.parent; 76234ff6846SIoana Radulescu u64 buf_array[DPAA2_ETH_BUFS_PER_CMD]; 76334ff6846SIoana Radulescu void *buf; 76434ff6846SIoana Radulescu dma_addr_t addr; 76534ff6846SIoana Radulescu int i, err; 76634ff6846SIoana Radulescu 76734ff6846SIoana Radulescu for (i = 0; i < DPAA2_ETH_BUFS_PER_CMD; i++) { 76834ff6846SIoana Radulescu /* Allocate buffer visible to WRIOP + skb shared info + 76934ff6846SIoana Radulescu * alignment padding 77034ff6846SIoana Radulescu */ 77134ff6846SIoana Radulescu buf = napi_alloc_frag(dpaa2_eth_buf_raw_size(priv)); 77234ff6846SIoana Radulescu if (unlikely(!buf)) 77334ff6846SIoana Radulescu goto err_alloc; 77434ff6846SIoana Radulescu 77534ff6846SIoana Radulescu buf = PTR_ALIGN(buf, priv->rx_buf_align); 77634ff6846SIoana Radulescu 77734ff6846SIoana Radulescu addr = dma_map_single(dev, buf, DPAA2_ETH_RX_BUF_SIZE, 77834ff6846SIoana Radulescu DMA_FROM_DEVICE); 77934ff6846SIoana Radulescu if (unlikely(dma_mapping_error(dev, addr))) 78034ff6846SIoana Radulescu goto err_map; 78134ff6846SIoana Radulescu 78234ff6846SIoana Radulescu buf_array[i] = addr; 78334ff6846SIoana Radulescu 78434ff6846SIoana Radulescu /* tracing point */ 78534ff6846SIoana Radulescu trace_dpaa2_eth_buf_seed(priv->net_dev, 78634ff6846SIoana Radulescu buf, dpaa2_eth_buf_raw_size(priv), 78734ff6846SIoana Radulescu addr, DPAA2_ETH_RX_BUF_SIZE, 78834ff6846SIoana Radulescu bpid); 78934ff6846SIoana Radulescu } 79034ff6846SIoana Radulescu 79134ff6846SIoana Radulescu release_bufs: 79234ff6846SIoana Radulescu /* In case the portal is busy, retry until successful */ 79334ff6846SIoana Radulescu while ((err = dpaa2_io_service_release(ch->dpio, bpid, 79434ff6846SIoana Radulescu buf_array, i)) == -EBUSY) 79534ff6846SIoana Radulescu cpu_relax(); 79634ff6846SIoana Radulescu 79734ff6846SIoana Radulescu /* If release command failed, clean up and bail out; 79834ff6846SIoana Radulescu * not much else we can do about it 79934ff6846SIoana Radulescu */ 80034ff6846SIoana Radulescu if (err) { 80134ff6846SIoana Radulescu free_bufs(priv, buf_array, i); 80234ff6846SIoana Radulescu return 0; 80334ff6846SIoana Radulescu } 80434ff6846SIoana Radulescu 80534ff6846SIoana Radulescu return i; 80634ff6846SIoana Radulescu 80734ff6846SIoana Radulescu err_map: 80834ff6846SIoana Radulescu skb_free_frag(buf); 80934ff6846SIoana Radulescu err_alloc: 81034ff6846SIoana Radulescu /* If we managed to allocate at least some buffers, 81134ff6846SIoana Radulescu * release them to hardware 81234ff6846SIoana Radulescu */ 81334ff6846SIoana Radulescu if (i) 81434ff6846SIoana Radulescu goto release_bufs; 81534ff6846SIoana Radulescu 81634ff6846SIoana Radulescu return 0; 81734ff6846SIoana Radulescu } 81834ff6846SIoana Radulescu 81934ff6846SIoana Radulescu static int seed_pool(struct dpaa2_eth_priv *priv, u16 bpid) 82034ff6846SIoana Radulescu { 82134ff6846SIoana Radulescu int i, j; 82234ff6846SIoana Radulescu int new_count; 82334ff6846SIoana Radulescu 82434ff6846SIoana Radulescu /* This is the lazy seeding of Rx buffer pools. 82534ff6846SIoana Radulescu * dpaa2_add_bufs() is also used on the Rx hotpath and calls 82634ff6846SIoana Radulescu * napi_alloc_frag(). The trouble with that is that it in turn ends up 82734ff6846SIoana Radulescu * calling this_cpu_ptr(), which mandates execution in atomic context. 82834ff6846SIoana Radulescu * Rather than splitting up the code, do a one-off preempt disable. 82934ff6846SIoana Radulescu */ 83034ff6846SIoana Radulescu preempt_disable(); 83134ff6846SIoana Radulescu for (j = 0; j < priv->num_channels; j++) { 83234ff6846SIoana Radulescu for (i = 0; i < DPAA2_ETH_NUM_BUFS; 83334ff6846SIoana Radulescu i += DPAA2_ETH_BUFS_PER_CMD) { 83434ff6846SIoana Radulescu new_count = add_bufs(priv, priv->channel[j], bpid); 83534ff6846SIoana Radulescu priv->channel[j]->buf_count += new_count; 83634ff6846SIoana Radulescu 83734ff6846SIoana Radulescu if (new_count < DPAA2_ETH_BUFS_PER_CMD) { 83834ff6846SIoana Radulescu preempt_enable(); 83934ff6846SIoana Radulescu return -ENOMEM; 84034ff6846SIoana Radulescu } 84134ff6846SIoana Radulescu } 84234ff6846SIoana Radulescu } 84334ff6846SIoana Radulescu preempt_enable(); 84434ff6846SIoana Radulescu 84534ff6846SIoana Radulescu return 0; 84634ff6846SIoana Radulescu } 84734ff6846SIoana Radulescu 84834ff6846SIoana Radulescu /** 84934ff6846SIoana Radulescu * Drain the specified number of buffers from the DPNI's private buffer pool. 85034ff6846SIoana Radulescu * @count must not exceeed DPAA2_ETH_BUFS_PER_CMD 85134ff6846SIoana Radulescu */ 85234ff6846SIoana Radulescu static void drain_bufs(struct dpaa2_eth_priv *priv, int count) 85334ff6846SIoana Radulescu { 85434ff6846SIoana Radulescu u64 buf_array[DPAA2_ETH_BUFS_PER_CMD]; 85534ff6846SIoana Radulescu int ret; 85634ff6846SIoana Radulescu 85734ff6846SIoana Radulescu do { 85834ff6846SIoana Radulescu ret = dpaa2_io_service_acquire(NULL, priv->bpid, 85934ff6846SIoana Radulescu buf_array, count); 86034ff6846SIoana Radulescu if (ret < 0) { 86134ff6846SIoana Radulescu netdev_err(priv->net_dev, "dpaa2_io_service_acquire() failed\n"); 86234ff6846SIoana Radulescu return; 86334ff6846SIoana Radulescu } 86434ff6846SIoana Radulescu free_bufs(priv, buf_array, ret); 86534ff6846SIoana Radulescu } while (ret); 86634ff6846SIoana Radulescu } 86734ff6846SIoana Radulescu 86834ff6846SIoana Radulescu static void drain_pool(struct dpaa2_eth_priv *priv) 86934ff6846SIoana Radulescu { 87034ff6846SIoana Radulescu int i; 87134ff6846SIoana Radulescu 87234ff6846SIoana Radulescu drain_bufs(priv, DPAA2_ETH_BUFS_PER_CMD); 87334ff6846SIoana Radulescu drain_bufs(priv, 1); 87434ff6846SIoana Radulescu 87534ff6846SIoana Radulescu for (i = 0; i < priv->num_channels; i++) 87634ff6846SIoana Radulescu priv->channel[i]->buf_count = 0; 87734ff6846SIoana Radulescu } 87834ff6846SIoana Radulescu 87934ff6846SIoana Radulescu /* Function is called from softirq context only, so we don't need to guard 88034ff6846SIoana Radulescu * the access to percpu count 88134ff6846SIoana Radulescu */ 88234ff6846SIoana Radulescu static int refill_pool(struct dpaa2_eth_priv *priv, 88334ff6846SIoana Radulescu struct dpaa2_eth_channel *ch, 88434ff6846SIoana Radulescu u16 bpid) 88534ff6846SIoana Radulescu { 88634ff6846SIoana Radulescu int new_count; 88734ff6846SIoana Radulescu 88834ff6846SIoana Radulescu if (likely(ch->buf_count >= DPAA2_ETH_REFILL_THRESH)) 88934ff6846SIoana Radulescu return 0; 89034ff6846SIoana Radulescu 89134ff6846SIoana Radulescu do { 89234ff6846SIoana Radulescu new_count = add_bufs(priv, ch, bpid); 89334ff6846SIoana Radulescu if (unlikely(!new_count)) { 89434ff6846SIoana Radulescu /* Out of memory; abort for now, we'll try later on */ 89534ff6846SIoana Radulescu break; 89634ff6846SIoana Radulescu } 89734ff6846SIoana Radulescu ch->buf_count += new_count; 89834ff6846SIoana Radulescu } while (ch->buf_count < DPAA2_ETH_NUM_BUFS); 89934ff6846SIoana Radulescu 90034ff6846SIoana Radulescu if (unlikely(ch->buf_count < DPAA2_ETH_NUM_BUFS)) 90134ff6846SIoana Radulescu return -ENOMEM; 90234ff6846SIoana Radulescu 90334ff6846SIoana Radulescu return 0; 90434ff6846SIoana Radulescu } 90534ff6846SIoana Radulescu 90634ff6846SIoana Radulescu static int pull_channel(struct dpaa2_eth_channel *ch) 90734ff6846SIoana Radulescu { 90834ff6846SIoana Radulescu int err; 90934ff6846SIoana Radulescu int dequeues = -1; 91034ff6846SIoana Radulescu 91134ff6846SIoana Radulescu /* Retry while portal is busy */ 91234ff6846SIoana Radulescu do { 91334ff6846SIoana Radulescu err = dpaa2_io_service_pull_channel(ch->dpio, ch->ch_id, 91434ff6846SIoana Radulescu ch->store); 91534ff6846SIoana Radulescu dequeues++; 91634ff6846SIoana Radulescu cpu_relax(); 91734ff6846SIoana Radulescu } while (err == -EBUSY); 91834ff6846SIoana Radulescu 91934ff6846SIoana Radulescu ch->stats.dequeue_portal_busy += dequeues; 92034ff6846SIoana Radulescu if (unlikely(err)) 92134ff6846SIoana Radulescu ch->stats.pull_err++; 92234ff6846SIoana Radulescu 92334ff6846SIoana Radulescu return err; 92434ff6846SIoana Radulescu } 92534ff6846SIoana Radulescu 92634ff6846SIoana Radulescu /* NAPI poll routine 92734ff6846SIoana Radulescu * 92834ff6846SIoana Radulescu * Frames are dequeued from the QMan channel associated with this NAPI context. 92934ff6846SIoana Radulescu * Rx, Tx confirmation and (if configured) Rx error frames all count 93034ff6846SIoana Radulescu * towards the NAPI budget. 93134ff6846SIoana Radulescu */ 93234ff6846SIoana Radulescu static int dpaa2_eth_poll(struct napi_struct *napi, int budget) 93334ff6846SIoana Radulescu { 93434ff6846SIoana Radulescu struct dpaa2_eth_channel *ch; 93534ff6846SIoana Radulescu struct dpaa2_eth_priv *priv; 93668049a5fSIoana Ciocoi Radulescu int rx_cleaned = 0, txconf_cleaned = 0; 93785b7a342SIoana Ciornei enum dpaa2_eth_fq_type type = 0; 93868049a5fSIoana Ciocoi Radulescu int store_cleaned; 93934ff6846SIoana Radulescu int err; 94034ff6846SIoana Radulescu 94134ff6846SIoana Radulescu ch = container_of(napi, struct dpaa2_eth_channel, napi); 94234ff6846SIoana Radulescu priv = ch->priv; 94334ff6846SIoana Radulescu 94468049a5fSIoana Ciocoi Radulescu do { 94534ff6846SIoana Radulescu err = pull_channel(ch); 94634ff6846SIoana Radulescu if (unlikely(err)) 94734ff6846SIoana Radulescu break; 94834ff6846SIoana Radulescu 94934ff6846SIoana Radulescu /* Refill pool if appropriate */ 95034ff6846SIoana Radulescu refill_pool(priv, ch, priv->bpid); 95134ff6846SIoana Radulescu 95268049a5fSIoana Ciocoi Radulescu store_cleaned = consume_frames(ch, &type); 95368049a5fSIoana Ciocoi Radulescu if (type == DPAA2_RX_FQ) 95468049a5fSIoana Ciocoi Radulescu rx_cleaned += store_cleaned; 95568049a5fSIoana Ciocoi Radulescu else 95668049a5fSIoana Ciocoi Radulescu txconf_cleaned += store_cleaned; 95734ff6846SIoana Radulescu 95868049a5fSIoana Ciocoi Radulescu /* If we either consumed the whole NAPI budget with Rx frames 95968049a5fSIoana Ciocoi Radulescu * or we reached the Tx confirmations threshold, we're done. 96034ff6846SIoana Radulescu */ 96168049a5fSIoana Ciocoi Radulescu if (rx_cleaned >= budget || 96268049a5fSIoana Ciocoi Radulescu txconf_cleaned >= DPAA2_ETH_TXCONF_PER_NAPI) 96368049a5fSIoana Ciocoi Radulescu return budget; 96468049a5fSIoana Ciocoi Radulescu } while (store_cleaned); 96534ff6846SIoana Radulescu 96668049a5fSIoana Ciocoi Radulescu /* We didn't consume the entire budget, so finish napi and 96768049a5fSIoana Ciocoi Radulescu * re-enable data availability notifications 96868049a5fSIoana Ciocoi Radulescu */ 96968049a5fSIoana Ciocoi Radulescu napi_complete_done(napi, rx_cleaned); 97034ff6846SIoana Radulescu do { 97134ff6846SIoana Radulescu err = dpaa2_io_service_rearm(ch->dpio, &ch->nctx); 97234ff6846SIoana Radulescu cpu_relax(); 97334ff6846SIoana Radulescu } while (err == -EBUSY); 97434ff6846SIoana Radulescu WARN_ONCE(err, "CDAN notifications rearm failed on core %d", 97534ff6846SIoana Radulescu ch->nctx.desired_cpu); 97634ff6846SIoana Radulescu 97768049a5fSIoana Ciocoi Radulescu return max(rx_cleaned, 1); 97834ff6846SIoana Radulescu } 97934ff6846SIoana Radulescu 98034ff6846SIoana Radulescu static void enable_ch_napi(struct dpaa2_eth_priv *priv) 98134ff6846SIoana Radulescu { 98234ff6846SIoana Radulescu struct dpaa2_eth_channel *ch; 98334ff6846SIoana Radulescu int i; 98434ff6846SIoana Radulescu 98534ff6846SIoana Radulescu for (i = 0; i < priv->num_channels; i++) { 98634ff6846SIoana Radulescu ch = priv->channel[i]; 98734ff6846SIoana Radulescu napi_enable(&ch->napi); 98834ff6846SIoana Radulescu } 98934ff6846SIoana Radulescu } 99034ff6846SIoana Radulescu 99134ff6846SIoana Radulescu static void disable_ch_napi(struct dpaa2_eth_priv *priv) 99234ff6846SIoana Radulescu { 99334ff6846SIoana Radulescu struct dpaa2_eth_channel *ch; 99434ff6846SIoana Radulescu int i; 99534ff6846SIoana Radulescu 99634ff6846SIoana Radulescu for (i = 0; i < priv->num_channels; i++) { 99734ff6846SIoana Radulescu ch = priv->channel[i]; 99834ff6846SIoana Radulescu napi_disable(&ch->napi); 99934ff6846SIoana Radulescu } 100034ff6846SIoana Radulescu } 100134ff6846SIoana Radulescu 100234ff6846SIoana Radulescu static int link_state_update(struct dpaa2_eth_priv *priv) 100334ff6846SIoana Radulescu { 100485b7a342SIoana Ciornei struct dpni_link_state state = {0}; 100534ff6846SIoana Radulescu int err; 100634ff6846SIoana Radulescu 100734ff6846SIoana Radulescu err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state); 100834ff6846SIoana Radulescu if (unlikely(err)) { 100934ff6846SIoana Radulescu netdev_err(priv->net_dev, 101034ff6846SIoana Radulescu "dpni_get_link_state() failed\n"); 101134ff6846SIoana Radulescu return err; 101234ff6846SIoana Radulescu } 101334ff6846SIoana Radulescu 101434ff6846SIoana Radulescu /* Chech link state; speed / duplex changes are not treated yet */ 101534ff6846SIoana Radulescu if (priv->link_state.up == state.up) 101634ff6846SIoana Radulescu return 0; 101734ff6846SIoana Radulescu 101834ff6846SIoana Radulescu priv->link_state = state; 101934ff6846SIoana Radulescu if (state.up) { 102034ff6846SIoana Radulescu netif_carrier_on(priv->net_dev); 102134ff6846SIoana Radulescu netif_tx_start_all_queues(priv->net_dev); 102234ff6846SIoana Radulescu } else { 102334ff6846SIoana Radulescu netif_tx_stop_all_queues(priv->net_dev); 102434ff6846SIoana Radulescu netif_carrier_off(priv->net_dev); 102534ff6846SIoana Radulescu } 102634ff6846SIoana Radulescu 102734ff6846SIoana Radulescu netdev_info(priv->net_dev, "Link Event: state %s\n", 102834ff6846SIoana Radulescu state.up ? "up" : "down"); 102934ff6846SIoana Radulescu 103034ff6846SIoana Radulescu return 0; 103134ff6846SIoana Radulescu } 103234ff6846SIoana Radulescu 103334ff6846SIoana Radulescu static int dpaa2_eth_open(struct net_device *net_dev) 103434ff6846SIoana Radulescu { 103534ff6846SIoana Radulescu struct dpaa2_eth_priv *priv = netdev_priv(net_dev); 103634ff6846SIoana Radulescu int err; 103734ff6846SIoana Radulescu 103834ff6846SIoana Radulescu err = seed_pool(priv, priv->bpid); 103934ff6846SIoana Radulescu if (err) { 104034ff6846SIoana Radulescu /* Not much to do; the buffer pool, though not filled up, 104134ff6846SIoana Radulescu * may still contain some buffers which would enable us 104234ff6846SIoana Radulescu * to limp on. 104334ff6846SIoana Radulescu */ 104434ff6846SIoana Radulescu netdev_err(net_dev, "Buffer seeding failed for DPBP %d (bpid=%d)\n", 104534ff6846SIoana Radulescu priv->dpbp_dev->obj_desc.id, priv->bpid); 104634ff6846SIoana Radulescu } 104734ff6846SIoana Radulescu 104834ff6846SIoana Radulescu /* We'll only start the txqs when the link is actually ready; make sure 104934ff6846SIoana Radulescu * we don't race against the link up notification, which may come 105034ff6846SIoana Radulescu * immediately after dpni_enable(); 105134ff6846SIoana Radulescu */ 105234ff6846SIoana Radulescu netif_tx_stop_all_queues(net_dev); 105334ff6846SIoana Radulescu enable_ch_napi(priv); 105434ff6846SIoana Radulescu /* Also, explicitly set carrier off, otherwise netif_carrier_ok() will 105534ff6846SIoana Radulescu * return true and cause 'ip link show' to report the LOWER_UP flag, 105634ff6846SIoana Radulescu * even though the link notification wasn't even received. 105734ff6846SIoana Radulescu */ 105834ff6846SIoana Radulescu netif_carrier_off(net_dev); 105934ff6846SIoana Radulescu 106034ff6846SIoana Radulescu err = dpni_enable(priv->mc_io, 0, priv->mc_token); 106134ff6846SIoana Radulescu if (err < 0) { 106234ff6846SIoana Radulescu netdev_err(net_dev, "dpni_enable() failed\n"); 106334ff6846SIoana Radulescu goto enable_err; 106434ff6846SIoana Radulescu } 106534ff6846SIoana Radulescu 106634ff6846SIoana Radulescu /* If the DPMAC object has already processed the link up interrupt, 106734ff6846SIoana Radulescu * we have to learn the link state ourselves. 106834ff6846SIoana Radulescu */ 106934ff6846SIoana Radulescu err = link_state_update(priv); 107034ff6846SIoana Radulescu if (err < 0) { 107134ff6846SIoana Radulescu netdev_err(net_dev, "Can't update link state\n"); 107234ff6846SIoana Radulescu goto link_state_err; 107334ff6846SIoana Radulescu } 107434ff6846SIoana Radulescu 107534ff6846SIoana Radulescu return 0; 107634ff6846SIoana Radulescu 107734ff6846SIoana Radulescu link_state_err: 107834ff6846SIoana Radulescu enable_err: 107934ff6846SIoana Radulescu disable_ch_napi(priv); 108034ff6846SIoana Radulescu drain_pool(priv); 108134ff6846SIoana Radulescu return err; 108234ff6846SIoana Radulescu } 108334ff6846SIoana Radulescu 108434ff6846SIoana Radulescu /* The DPIO store must be empty when we call this, 108534ff6846SIoana Radulescu * at the end of every NAPI cycle. 108634ff6846SIoana Radulescu */ 1087fdb6ca9eSIoana Ciornei static u32 drain_channel(struct dpaa2_eth_channel *ch) 108834ff6846SIoana Radulescu { 108934ff6846SIoana Radulescu u32 drained = 0, total = 0; 109034ff6846SIoana Radulescu 109134ff6846SIoana Radulescu do { 109234ff6846SIoana Radulescu pull_channel(ch); 109368049a5fSIoana Ciocoi Radulescu drained = consume_frames(ch, NULL); 109434ff6846SIoana Radulescu total += drained; 109534ff6846SIoana Radulescu } while (drained); 109634ff6846SIoana Radulescu 109734ff6846SIoana Radulescu return total; 109834ff6846SIoana Radulescu } 109934ff6846SIoana Radulescu 110034ff6846SIoana Radulescu static u32 drain_ingress_frames(struct dpaa2_eth_priv *priv) 110134ff6846SIoana Radulescu { 110234ff6846SIoana Radulescu struct dpaa2_eth_channel *ch; 110334ff6846SIoana Radulescu int i; 110434ff6846SIoana Radulescu u32 drained = 0; 110534ff6846SIoana Radulescu 110634ff6846SIoana Radulescu for (i = 0; i < priv->num_channels; i++) { 110734ff6846SIoana Radulescu ch = priv->channel[i]; 1108fdb6ca9eSIoana Ciornei drained += drain_channel(ch); 110934ff6846SIoana Radulescu } 111034ff6846SIoana Radulescu 111134ff6846SIoana Radulescu return drained; 111234ff6846SIoana Radulescu } 111334ff6846SIoana Radulescu 111434ff6846SIoana Radulescu static int dpaa2_eth_stop(struct net_device *net_dev) 111534ff6846SIoana Radulescu { 111634ff6846SIoana Radulescu struct dpaa2_eth_priv *priv = netdev_priv(net_dev); 111785b7a342SIoana Ciornei int dpni_enabled = 0; 111834ff6846SIoana Radulescu int retries = 10; 111934ff6846SIoana Radulescu u32 drained; 112034ff6846SIoana Radulescu 112134ff6846SIoana Radulescu netif_tx_stop_all_queues(net_dev); 112234ff6846SIoana Radulescu netif_carrier_off(net_dev); 112334ff6846SIoana Radulescu 112434ff6846SIoana Radulescu /* Loop while dpni_disable() attempts to drain the egress FQs 112534ff6846SIoana Radulescu * and confirm them back to us. 112634ff6846SIoana Radulescu */ 112734ff6846SIoana Radulescu do { 112834ff6846SIoana Radulescu dpni_disable(priv->mc_io, 0, priv->mc_token); 112934ff6846SIoana Radulescu dpni_is_enabled(priv->mc_io, 0, priv->mc_token, &dpni_enabled); 113034ff6846SIoana Radulescu if (dpni_enabled) 113134ff6846SIoana Radulescu /* Allow the hardware some slack */ 113234ff6846SIoana Radulescu msleep(100); 113334ff6846SIoana Radulescu } while (dpni_enabled && --retries); 113434ff6846SIoana Radulescu if (!retries) { 113534ff6846SIoana Radulescu netdev_warn(net_dev, "Retry count exceeded disabling DPNI\n"); 113634ff6846SIoana Radulescu /* Must go on and disable NAPI nonetheless, so we don't crash at 113734ff6846SIoana Radulescu * the next "ifconfig up" 113834ff6846SIoana Radulescu */ 113934ff6846SIoana Radulescu } 114034ff6846SIoana Radulescu 114134ff6846SIoana Radulescu /* Wait for NAPI to complete on every core and disable it. 114234ff6846SIoana Radulescu * In particular, this will also prevent NAPI from being rescheduled if 114334ff6846SIoana Radulescu * a new CDAN is serviced, effectively discarding the CDAN. We therefore 114434ff6846SIoana Radulescu * don't even need to disarm the channels, except perhaps for the case 114534ff6846SIoana Radulescu * of a huge coalescing value. 114634ff6846SIoana Radulescu */ 114734ff6846SIoana Radulescu disable_ch_napi(priv); 114834ff6846SIoana Radulescu 114934ff6846SIoana Radulescu /* Manually drain the Rx and TxConf queues */ 115034ff6846SIoana Radulescu drained = drain_ingress_frames(priv); 115134ff6846SIoana Radulescu if (drained) 115234ff6846SIoana Radulescu netdev_dbg(net_dev, "Drained %d frames.\n", drained); 115334ff6846SIoana Radulescu 115434ff6846SIoana Radulescu /* Empty the buffer pool */ 115534ff6846SIoana Radulescu drain_pool(priv); 115634ff6846SIoana Radulescu 115734ff6846SIoana Radulescu return 0; 115834ff6846SIoana Radulescu } 115934ff6846SIoana Radulescu 116034ff6846SIoana Radulescu static int dpaa2_eth_set_addr(struct net_device *net_dev, void *addr) 116134ff6846SIoana Radulescu { 116234ff6846SIoana Radulescu struct dpaa2_eth_priv *priv = netdev_priv(net_dev); 116334ff6846SIoana Radulescu struct device *dev = net_dev->dev.parent; 116434ff6846SIoana Radulescu int err; 116534ff6846SIoana Radulescu 116634ff6846SIoana Radulescu err = eth_mac_addr(net_dev, addr); 116734ff6846SIoana Radulescu if (err < 0) { 116834ff6846SIoana Radulescu dev_err(dev, "eth_mac_addr() failed (%d)\n", err); 116934ff6846SIoana Radulescu return err; 117034ff6846SIoana Radulescu } 117134ff6846SIoana Radulescu 117234ff6846SIoana Radulescu err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token, 117334ff6846SIoana Radulescu net_dev->dev_addr); 117434ff6846SIoana Radulescu if (err) { 117534ff6846SIoana Radulescu dev_err(dev, "dpni_set_primary_mac_addr() failed (%d)\n", err); 117634ff6846SIoana Radulescu return err; 117734ff6846SIoana Radulescu } 117834ff6846SIoana Radulescu 117934ff6846SIoana Radulescu return 0; 118034ff6846SIoana Radulescu } 118134ff6846SIoana Radulescu 118234ff6846SIoana Radulescu /** Fill in counters maintained by the GPP driver. These may be different from 118334ff6846SIoana Radulescu * the hardware counters obtained by ethtool. 118434ff6846SIoana Radulescu */ 118534ff6846SIoana Radulescu static void dpaa2_eth_get_stats(struct net_device *net_dev, 118634ff6846SIoana Radulescu struct rtnl_link_stats64 *stats) 118734ff6846SIoana Radulescu { 118834ff6846SIoana Radulescu struct dpaa2_eth_priv *priv = netdev_priv(net_dev); 118934ff6846SIoana Radulescu struct rtnl_link_stats64 *percpu_stats; 119034ff6846SIoana Radulescu u64 *cpustats; 119134ff6846SIoana Radulescu u64 *netstats = (u64 *)stats; 119234ff6846SIoana Radulescu int i, j; 119334ff6846SIoana Radulescu int num = sizeof(struct rtnl_link_stats64) / sizeof(u64); 119434ff6846SIoana Radulescu 119534ff6846SIoana Radulescu for_each_possible_cpu(i) { 119634ff6846SIoana Radulescu percpu_stats = per_cpu_ptr(priv->percpu_stats, i); 119734ff6846SIoana Radulescu cpustats = (u64 *)percpu_stats; 119834ff6846SIoana Radulescu for (j = 0; j < num; j++) 119934ff6846SIoana Radulescu netstats[j] += cpustats[j]; 120034ff6846SIoana Radulescu } 120134ff6846SIoana Radulescu } 120234ff6846SIoana Radulescu 120334ff6846SIoana Radulescu /* Copy mac unicast addresses from @net_dev to @priv. 120434ff6846SIoana Radulescu * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable. 120534ff6846SIoana Radulescu */ 120634ff6846SIoana Radulescu static void add_uc_hw_addr(const struct net_device *net_dev, 120734ff6846SIoana Radulescu struct dpaa2_eth_priv *priv) 120834ff6846SIoana Radulescu { 120934ff6846SIoana Radulescu struct netdev_hw_addr *ha; 121034ff6846SIoana Radulescu int err; 121134ff6846SIoana Radulescu 121234ff6846SIoana Radulescu netdev_for_each_uc_addr(ha, net_dev) { 121334ff6846SIoana Radulescu err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token, 121434ff6846SIoana Radulescu ha->addr); 121534ff6846SIoana Radulescu if (err) 121634ff6846SIoana Radulescu netdev_warn(priv->net_dev, 121734ff6846SIoana Radulescu "Could not add ucast MAC %pM to the filtering table (err %d)\n", 121834ff6846SIoana Radulescu ha->addr, err); 121934ff6846SIoana Radulescu } 122034ff6846SIoana Radulescu } 122134ff6846SIoana Radulescu 122234ff6846SIoana Radulescu /* Copy mac multicast addresses from @net_dev to @priv 122334ff6846SIoana Radulescu * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable. 122434ff6846SIoana Radulescu */ 122534ff6846SIoana Radulescu static void add_mc_hw_addr(const struct net_device *net_dev, 122634ff6846SIoana Radulescu struct dpaa2_eth_priv *priv) 122734ff6846SIoana Radulescu { 122834ff6846SIoana Radulescu struct netdev_hw_addr *ha; 122934ff6846SIoana Radulescu int err; 123034ff6846SIoana Radulescu 123134ff6846SIoana Radulescu netdev_for_each_mc_addr(ha, net_dev) { 123234ff6846SIoana Radulescu err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token, 123334ff6846SIoana Radulescu ha->addr); 123434ff6846SIoana Radulescu if (err) 123534ff6846SIoana Radulescu netdev_warn(priv->net_dev, 123634ff6846SIoana Radulescu "Could not add mcast MAC %pM to the filtering table (err %d)\n", 123734ff6846SIoana Radulescu ha->addr, err); 123834ff6846SIoana Radulescu } 123934ff6846SIoana Radulescu } 124034ff6846SIoana Radulescu 124134ff6846SIoana Radulescu static void dpaa2_eth_set_rx_mode(struct net_device *net_dev) 124234ff6846SIoana Radulescu { 124334ff6846SIoana Radulescu struct dpaa2_eth_priv *priv = netdev_priv(net_dev); 124434ff6846SIoana Radulescu int uc_count = netdev_uc_count(net_dev); 124534ff6846SIoana Radulescu int mc_count = netdev_mc_count(net_dev); 124634ff6846SIoana Radulescu u8 max_mac = priv->dpni_attrs.mac_filter_entries; 124734ff6846SIoana Radulescu u32 options = priv->dpni_attrs.options; 124834ff6846SIoana Radulescu u16 mc_token = priv->mc_token; 124934ff6846SIoana Radulescu struct fsl_mc_io *mc_io = priv->mc_io; 125034ff6846SIoana Radulescu int err; 125134ff6846SIoana Radulescu 125234ff6846SIoana Radulescu /* Basic sanity checks; these probably indicate a misconfiguration */ 125334ff6846SIoana Radulescu if (options & DPNI_OPT_NO_MAC_FILTER && max_mac != 0) 125434ff6846SIoana Radulescu netdev_info(net_dev, 125534ff6846SIoana Radulescu "mac_filter_entries=%d, DPNI_OPT_NO_MAC_FILTER option must be disabled\n", 125634ff6846SIoana Radulescu max_mac); 125734ff6846SIoana Radulescu 125834ff6846SIoana Radulescu /* Force promiscuous if the uc or mc counts exceed our capabilities. */ 125934ff6846SIoana Radulescu if (uc_count > max_mac) { 126034ff6846SIoana Radulescu netdev_info(net_dev, 126134ff6846SIoana Radulescu "Unicast addr count reached %d, max allowed is %d; forcing promisc\n", 126234ff6846SIoana Radulescu uc_count, max_mac); 126334ff6846SIoana Radulescu goto force_promisc; 126434ff6846SIoana Radulescu } 126534ff6846SIoana Radulescu if (mc_count + uc_count > max_mac) { 126634ff6846SIoana Radulescu netdev_info(net_dev, 126734ff6846SIoana Radulescu "Unicast + multicast addr count reached %d, max allowed is %d; forcing promisc\n", 126834ff6846SIoana Radulescu uc_count + mc_count, max_mac); 126934ff6846SIoana Radulescu goto force_mc_promisc; 127034ff6846SIoana Radulescu } 127134ff6846SIoana Radulescu 127234ff6846SIoana Radulescu /* Adjust promisc settings due to flag combinations */ 127334ff6846SIoana Radulescu if (net_dev->flags & IFF_PROMISC) 127434ff6846SIoana Radulescu goto force_promisc; 127534ff6846SIoana Radulescu if (net_dev->flags & IFF_ALLMULTI) { 127634ff6846SIoana Radulescu /* First, rebuild unicast filtering table. This should be done 127734ff6846SIoana Radulescu * in promisc mode, in order to avoid frame loss while we 127834ff6846SIoana Radulescu * progressively add entries to the table. 127934ff6846SIoana Radulescu * We don't know whether we had been in promisc already, and 128034ff6846SIoana Radulescu * making an MC call to find out is expensive; so set uc promisc 128134ff6846SIoana Radulescu * nonetheless. 128234ff6846SIoana Radulescu */ 128334ff6846SIoana Radulescu err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1); 128434ff6846SIoana Radulescu if (err) 128534ff6846SIoana Radulescu netdev_warn(net_dev, "Can't set uc promisc\n"); 128634ff6846SIoana Radulescu 128734ff6846SIoana Radulescu /* Actual uc table reconstruction. */ 128834ff6846SIoana Radulescu err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 0); 128934ff6846SIoana Radulescu if (err) 129034ff6846SIoana Radulescu netdev_warn(net_dev, "Can't clear uc filters\n"); 129134ff6846SIoana Radulescu add_uc_hw_addr(net_dev, priv); 129234ff6846SIoana Radulescu 129334ff6846SIoana Radulescu /* Finally, clear uc promisc and set mc promisc as requested. */ 129434ff6846SIoana Radulescu err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0); 129534ff6846SIoana Radulescu if (err) 129634ff6846SIoana Radulescu netdev_warn(net_dev, "Can't clear uc promisc\n"); 129734ff6846SIoana Radulescu goto force_mc_promisc; 129834ff6846SIoana Radulescu } 129934ff6846SIoana Radulescu 130034ff6846SIoana Radulescu /* Neither unicast, nor multicast promisc will be on... eventually. 130134ff6846SIoana Radulescu * For now, rebuild mac filtering tables while forcing both of them on. 130234ff6846SIoana Radulescu */ 130334ff6846SIoana Radulescu err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1); 130434ff6846SIoana Radulescu if (err) 130534ff6846SIoana Radulescu netdev_warn(net_dev, "Can't set uc promisc (%d)\n", err); 130634ff6846SIoana Radulescu err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1); 130734ff6846SIoana Radulescu if (err) 130834ff6846SIoana Radulescu netdev_warn(net_dev, "Can't set mc promisc (%d)\n", err); 130934ff6846SIoana Radulescu 131034ff6846SIoana Radulescu /* Actual mac filtering tables reconstruction */ 131134ff6846SIoana Radulescu err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 1); 131234ff6846SIoana Radulescu if (err) 131334ff6846SIoana Radulescu netdev_warn(net_dev, "Can't clear mac filters\n"); 131434ff6846SIoana Radulescu add_mc_hw_addr(net_dev, priv); 131534ff6846SIoana Radulescu add_uc_hw_addr(net_dev, priv); 131634ff6846SIoana Radulescu 131734ff6846SIoana Radulescu /* Now we can clear both ucast and mcast promisc, without risking 131834ff6846SIoana Radulescu * to drop legitimate frames anymore. 131934ff6846SIoana Radulescu */ 132034ff6846SIoana Radulescu err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0); 132134ff6846SIoana Radulescu if (err) 132234ff6846SIoana Radulescu netdev_warn(net_dev, "Can't clear ucast promisc\n"); 132334ff6846SIoana Radulescu err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 0); 132434ff6846SIoana Radulescu if (err) 132534ff6846SIoana Radulescu netdev_warn(net_dev, "Can't clear mcast promisc\n"); 132634ff6846SIoana Radulescu 132734ff6846SIoana Radulescu return; 132834ff6846SIoana Radulescu 132934ff6846SIoana Radulescu force_promisc: 133034ff6846SIoana Radulescu err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1); 133134ff6846SIoana Radulescu if (err) 133234ff6846SIoana Radulescu netdev_warn(net_dev, "Can't set ucast promisc\n"); 133334ff6846SIoana Radulescu force_mc_promisc: 133434ff6846SIoana Radulescu err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1); 133534ff6846SIoana Radulescu if (err) 133634ff6846SIoana Radulescu netdev_warn(net_dev, "Can't set mcast promisc\n"); 133734ff6846SIoana Radulescu } 133834ff6846SIoana Radulescu 133934ff6846SIoana Radulescu static int dpaa2_eth_set_features(struct net_device *net_dev, 134034ff6846SIoana Radulescu netdev_features_t features) 134134ff6846SIoana Radulescu { 134234ff6846SIoana Radulescu struct dpaa2_eth_priv *priv = netdev_priv(net_dev); 134334ff6846SIoana Radulescu netdev_features_t changed = features ^ net_dev->features; 134434ff6846SIoana Radulescu bool enable; 134534ff6846SIoana Radulescu int err; 134634ff6846SIoana Radulescu 134734ff6846SIoana Radulescu if (changed & NETIF_F_RXCSUM) { 134834ff6846SIoana Radulescu enable = !!(features & NETIF_F_RXCSUM); 134934ff6846SIoana Radulescu err = set_rx_csum(priv, enable); 135034ff6846SIoana Radulescu if (err) 135134ff6846SIoana Radulescu return err; 135234ff6846SIoana Radulescu } 135334ff6846SIoana Radulescu 135434ff6846SIoana Radulescu if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) { 135534ff6846SIoana Radulescu enable = !!(features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)); 135634ff6846SIoana Radulescu err = set_tx_csum(priv, enable); 135734ff6846SIoana Radulescu if (err) 135834ff6846SIoana Radulescu return err; 135934ff6846SIoana Radulescu } 136034ff6846SIoana Radulescu 136134ff6846SIoana Radulescu return 0; 136234ff6846SIoana Radulescu } 136334ff6846SIoana Radulescu 136434ff6846SIoana Radulescu static int dpaa2_eth_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 136534ff6846SIoana Radulescu { 136634ff6846SIoana Radulescu struct dpaa2_eth_priv *priv = netdev_priv(dev); 136734ff6846SIoana Radulescu struct hwtstamp_config config; 136834ff6846SIoana Radulescu 136934ff6846SIoana Radulescu if (copy_from_user(&config, rq->ifr_data, sizeof(config))) 137034ff6846SIoana Radulescu return -EFAULT; 137134ff6846SIoana Radulescu 137234ff6846SIoana Radulescu switch (config.tx_type) { 137334ff6846SIoana Radulescu case HWTSTAMP_TX_OFF: 137434ff6846SIoana Radulescu priv->tx_tstamp = false; 137534ff6846SIoana Radulescu break; 137634ff6846SIoana Radulescu case HWTSTAMP_TX_ON: 137734ff6846SIoana Radulescu priv->tx_tstamp = true; 137834ff6846SIoana Radulescu break; 137934ff6846SIoana Radulescu default: 138034ff6846SIoana Radulescu return -ERANGE; 138134ff6846SIoana Radulescu } 138234ff6846SIoana Radulescu 138334ff6846SIoana Radulescu if (config.rx_filter == HWTSTAMP_FILTER_NONE) { 138434ff6846SIoana Radulescu priv->rx_tstamp = false; 138534ff6846SIoana Radulescu } else { 138634ff6846SIoana Radulescu priv->rx_tstamp = true; 138734ff6846SIoana Radulescu /* TS is set for all frame types, not only those requested */ 138834ff6846SIoana Radulescu config.rx_filter = HWTSTAMP_FILTER_ALL; 138934ff6846SIoana Radulescu } 139034ff6846SIoana Radulescu 139134ff6846SIoana Radulescu return copy_to_user(rq->ifr_data, &config, sizeof(config)) ? 139234ff6846SIoana Radulescu -EFAULT : 0; 139334ff6846SIoana Radulescu } 139434ff6846SIoana Radulescu 139534ff6846SIoana Radulescu static int dpaa2_eth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 139634ff6846SIoana Radulescu { 139734ff6846SIoana Radulescu if (cmd == SIOCSHWTSTAMP) 139834ff6846SIoana Radulescu return dpaa2_eth_ts_ioctl(dev, rq, cmd); 139934ff6846SIoana Radulescu 140034ff6846SIoana Radulescu return -EINVAL; 140134ff6846SIoana Radulescu } 140234ff6846SIoana Radulescu 140334ff6846SIoana Radulescu static const struct net_device_ops dpaa2_eth_ops = { 140434ff6846SIoana Radulescu .ndo_open = dpaa2_eth_open, 140534ff6846SIoana Radulescu .ndo_start_xmit = dpaa2_eth_tx, 140634ff6846SIoana Radulescu .ndo_stop = dpaa2_eth_stop, 140734ff6846SIoana Radulescu .ndo_set_mac_address = dpaa2_eth_set_addr, 140834ff6846SIoana Radulescu .ndo_get_stats64 = dpaa2_eth_get_stats, 140934ff6846SIoana Radulescu .ndo_set_rx_mode = dpaa2_eth_set_rx_mode, 141034ff6846SIoana Radulescu .ndo_set_features = dpaa2_eth_set_features, 141134ff6846SIoana Radulescu .ndo_do_ioctl = dpaa2_eth_ioctl, 141234ff6846SIoana Radulescu }; 141334ff6846SIoana Radulescu 141434ff6846SIoana Radulescu static void cdan_cb(struct dpaa2_io_notification_ctx *ctx) 141534ff6846SIoana Radulescu { 141634ff6846SIoana Radulescu struct dpaa2_eth_channel *ch; 141734ff6846SIoana Radulescu 141834ff6846SIoana Radulescu ch = container_of(ctx, struct dpaa2_eth_channel, nctx); 141934ff6846SIoana Radulescu 142034ff6846SIoana Radulescu /* Update NAPI statistics */ 142134ff6846SIoana Radulescu ch->stats.cdan++; 142234ff6846SIoana Radulescu 142334ff6846SIoana Radulescu napi_schedule_irqoff(&ch->napi); 142434ff6846SIoana Radulescu } 142534ff6846SIoana Radulescu 142634ff6846SIoana Radulescu /* Allocate and configure a DPCON object */ 142734ff6846SIoana Radulescu static struct fsl_mc_device *setup_dpcon(struct dpaa2_eth_priv *priv) 142834ff6846SIoana Radulescu { 142934ff6846SIoana Radulescu struct fsl_mc_device *dpcon; 143034ff6846SIoana Radulescu struct device *dev = priv->net_dev->dev.parent; 143134ff6846SIoana Radulescu struct dpcon_attr attrs; 143234ff6846SIoana Radulescu int err; 143334ff6846SIoana Radulescu 143434ff6846SIoana Radulescu err = fsl_mc_object_allocate(to_fsl_mc_device(dev), 143534ff6846SIoana Radulescu FSL_MC_POOL_DPCON, &dpcon); 143634ff6846SIoana Radulescu if (err) { 143734ff6846SIoana Radulescu dev_info(dev, "Not enough DPCONs, will go on as-is\n"); 143834ff6846SIoana Radulescu return NULL; 143934ff6846SIoana Radulescu } 144034ff6846SIoana Radulescu 144134ff6846SIoana Radulescu err = dpcon_open(priv->mc_io, 0, dpcon->obj_desc.id, &dpcon->mc_handle); 144234ff6846SIoana Radulescu if (err) { 144334ff6846SIoana Radulescu dev_err(dev, "dpcon_open() failed\n"); 144434ff6846SIoana Radulescu goto free; 144534ff6846SIoana Radulescu } 144634ff6846SIoana Radulescu 144734ff6846SIoana Radulescu err = dpcon_reset(priv->mc_io, 0, dpcon->mc_handle); 144834ff6846SIoana Radulescu if (err) { 144934ff6846SIoana Radulescu dev_err(dev, "dpcon_reset() failed\n"); 145034ff6846SIoana Radulescu goto close; 145134ff6846SIoana Radulescu } 145234ff6846SIoana Radulescu 145334ff6846SIoana Radulescu err = dpcon_get_attributes(priv->mc_io, 0, dpcon->mc_handle, &attrs); 145434ff6846SIoana Radulescu if (err) { 145534ff6846SIoana Radulescu dev_err(dev, "dpcon_get_attributes() failed\n"); 145634ff6846SIoana Radulescu goto close; 145734ff6846SIoana Radulescu } 145834ff6846SIoana Radulescu 145934ff6846SIoana Radulescu err = dpcon_enable(priv->mc_io, 0, dpcon->mc_handle); 146034ff6846SIoana Radulescu if (err) { 146134ff6846SIoana Radulescu dev_err(dev, "dpcon_enable() failed\n"); 146234ff6846SIoana Radulescu goto close; 146334ff6846SIoana Radulescu } 146434ff6846SIoana Radulescu 146534ff6846SIoana Radulescu return dpcon; 146634ff6846SIoana Radulescu 146734ff6846SIoana Radulescu close: 146834ff6846SIoana Radulescu dpcon_close(priv->mc_io, 0, dpcon->mc_handle); 146934ff6846SIoana Radulescu free: 147034ff6846SIoana Radulescu fsl_mc_object_free(dpcon); 147134ff6846SIoana Radulescu 147234ff6846SIoana Radulescu return NULL; 147334ff6846SIoana Radulescu } 147434ff6846SIoana Radulescu 147534ff6846SIoana Radulescu static void free_dpcon(struct dpaa2_eth_priv *priv, 147634ff6846SIoana Radulescu struct fsl_mc_device *dpcon) 147734ff6846SIoana Radulescu { 147834ff6846SIoana Radulescu dpcon_disable(priv->mc_io, 0, dpcon->mc_handle); 147934ff6846SIoana Radulescu dpcon_close(priv->mc_io, 0, dpcon->mc_handle); 148034ff6846SIoana Radulescu fsl_mc_object_free(dpcon); 148134ff6846SIoana Radulescu } 148234ff6846SIoana Radulescu 148334ff6846SIoana Radulescu static struct dpaa2_eth_channel * 148434ff6846SIoana Radulescu alloc_channel(struct dpaa2_eth_priv *priv) 148534ff6846SIoana Radulescu { 148634ff6846SIoana Radulescu struct dpaa2_eth_channel *channel; 148734ff6846SIoana Radulescu struct dpcon_attr attr; 148834ff6846SIoana Radulescu struct device *dev = priv->net_dev->dev.parent; 148934ff6846SIoana Radulescu int err; 149034ff6846SIoana Radulescu 149134ff6846SIoana Radulescu channel = kzalloc(sizeof(*channel), GFP_KERNEL); 149234ff6846SIoana Radulescu if (!channel) 149334ff6846SIoana Radulescu return NULL; 149434ff6846SIoana Radulescu 149534ff6846SIoana Radulescu channel->dpcon = setup_dpcon(priv); 149634ff6846SIoana Radulescu if (!channel->dpcon) 149734ff6846SIoana Radulescu goto err_setup; 149834ff6846SIoana Radulescu 149934ff6846SIoana Radulescu err = dpcon_get_attributes(priv->mc_io, 0, channel->dpcon->mc_handle, 150034ff6846SIoana Radulescu &attr); 150134ff6846SIoana Radulescu if (err) { 150234ff6846SIoana Radulescu dev_err(dev, "dpcon_get_attributes() failed\n"); 150334ff6846SIoana Radulescu goto err_get_attr; 150434ff6846SIoana Radulescu } 150534ff6846SIoana Radulescu 150634ff6846SIoana Radulescu channel->dpcon_id = attr.id; 150734ff6846SIoana Radulescu channel->ch_id = attr.qbman_ch_id; 150834ff6846SIoana Radulescu channel->priv = priv; 150934ff6846SIoana Radulescu 151034ff6846SIoana Radulescu return channel; 151134ff6846SIoana Radulescu 151234ff6846SIoana Radulescu err_get_attr: 151334ff6846SIoana Radulescu free_dpcon(priv, channel->dpcon); 151434ff6846SIoana Radulescu err_setup: 151534ff6846SIoana Radulescu kfree(channel); 151634ff6846SIoana Radulescu return NULL; 151734ff6846SIoana Radulescu } 151834ff6846SIoana Radulescu 151934ff6846SIoana Radulescu static void free_channel(struct dpaa2_eth_priv *priv, 152034ff6846SIoana Radulescu struct dpaa2_eth_channel *channel) 152134ff6846SIoana Radulescu { 152234ff6846SIoana Radulescu free_dpcon(priv, channel->dpcon); 152334ff6846SIoana Radulescu kfree(channel); 152434ff6846SIoana Radulescu } 152534ff6846SIoana Radulescu 152634ff6846SIoana Radulescu /* DPIO setup: allocate and configure QBMan channels, setup core affinity 152734ff6846SIoana Radulescu * and register data availability notifications 152834ff6846SIoana Radulescu */ 152934ff6846SIoana Radulescu static int setup_dpio(struct dpaa2_eth_priv *priv) 153034ff6846SIoana Radulescu { 153134ff6846SIoana Radulescu struct dpaa2_io_notification_ctx *nctx; 153234ff6846SIoana Radulescu struct dpaa2_eth_channel *channel; 153334ff6846SIoana Radulescu struct dpcon_notification_cfg dpcon_notif_cfg; 153434ff6846SIoana Radulescu struct device *dev = priv->net_dev->dev.parent; 153534ff6846SIoana Radulescu int i, err; 153634ff6846SIoana Radulescu 153734ff6846SIoana Radulescu /* We want the ability to spread ingress traffic (RX, TX conf) to as 153834ff6846SIoana Radulescu * many cores as possible, so we need one channel for each core 153934ff6846SIoana Radulescu * (unless there's fewer queues than cores, in which case the extra 154034ff6846SIoana Radulescu * channels would be wasted). 154134ff6846SIoana Radulescu * Allocate one channel per core and register it to the core's 154234ff6846SIoana Radulescu * affine DPIO. If not enough channels are available for all cores 154334ff6846SIoana Radulescu * or if some cores don't have an affine DPIO, there will be no 154434ff6846SIoana Radulescu * ingress frame processing on those cores. 154534ff6846SIoana Radulescu */ 154634ff6846SIoana Radulescu cpumask_clear(&priv->dpio_cpumask); 154734ff6846SIoana Radulescu for_each_online_cpu(i) { 154834ff6846SIoana Radulescu /* Try to allocate a channel */ 154934ff6846SIoana Radulescu channel = alloc_channel(priv); 155034ff6846SIoana Radulescu if (!channel) { 155134ff6846SIoana Radulescu dev_info(dev, 155234ff6846SIoana Radulescu "No affine channel for cpu %d and above\n", i); 155334ff6846SIoana Radulescu err = -ENODEV; 155434ff6846SIoana Radulescu goto err_alloc_ch; 155534ff6846SIoana Radulescu } 155634ff6846SIoana Radulescu 155734ff6846SIoana Radulescu priv->channel[priv->num_channels] = channel; 155834ff6846SIoana Radulescu 155934ff6846SIoana Radulescu nctx = &channel->nctx; 156034ff6846SIoana Radulescu nctx->is_cdan = 1; 156134ff6846SIoana Radulescu nctx->cb = cdan_cb; 156234ff6846SIoana Radulescu nctx->id = channel->ch_id; 156334ff6846SIoana Radulescu nctx->desired_cpu = i; 156434ff6846SIoana Radulescu 156534ff6846SIoana Radulescu /* Register the new context */ 156634ff6846SIoana Radulescu channel->dpio = dpaa2_io_service_select(i); 156734ff6846SIoana Radulescu err = dpaa2_io_service_register(channel->dpio, nctx); 156834ff6846SIoana Radulescu if (err) { 156934ff6846SIoana Radulescu dev_dbg(dev, "No affine DPIO for cpu %d\n", i); 157034ff6846SIoana Radulescu /* If no affine DPIO for this core, there's probably 157134ff6846SIoana Radulescu * none available for next cores either. Signal we want 157234ff6846SIoana Radulescu * to retry later, in case the DPIO devices weren't 157334ff6846SIoana Radulescu * probed yet. 157434ff6846SIoana Radulescu */ 157534ff6846SIoana Radulescu err = -EPROBE_DEFER; 157634ff6846SIoana Radulescu goto err_service_reg; 157734ff6846SIoana Radulescu } 157834ff6846SIoana Radulescu 157934ff6846SIoana Radulescu /* Register DPCON notification with MC */ 158034ff6846SIoana Radulescu dpcon_notif_cfg.dpio_id = nctx->dpio_id; 158134ff6846SIoana Radulescu dpcon_notif_cfg.priority = 0; 158234ff6846SIoana Radulescu dpcon_notif_cfg.user_ctx = nctx->qman64; 158334ff6846SIoana Radulescu err = dpcon_set_notification(priv->mc_io, 0, 158434ff6846SIoana Radulescu channel->dpcon->mc_handle, 158534ff6846SIoana Radulescu &dpcon_notif_cfg); 158634ff6846SIoana Radulescu if (err) { 158734ff6846SIoana Radulescu dev_err(dev, "dpcon_set_notification failed()\n"); 158834ff6846SIoana Radulescu goto err_set_cdan; 158934ff6846SIoana Radulescu } 159034ff6846SIoana Radulescu 159134ff6846SIoana Radulescu /* If we managed to allocate a channel and also found an affine 159234ff6846SIoana Radulescu * DPIO for this core, add it to the final mask 159334ff6846SIoana Radulescu */ 159434ff6846SIoana Radulescu cpumask_set_cpu(i, &priv->dpio_cpumask); 159534ff6846SIoana Radulescu priv->num_channels++; 159634ff6846SIoana Radulescu 159734ff6846SIoana Radulescu /* Stop if we already have enough channels to accommodate all 159834ff6846SIoana Radulescu * RX and TX conf queues 159934ff6846SIoana Radulescu */ 160034ff6846SIoana Radulescu if (priv->num_channels == dpaa2_eth_queue_count(priv)) 160134ff6846SIoana Radulescu break; 160234ff6846SIoana Radulescu } 160334ff6846SIoana Radulescu 160434ff6846SIoana Radulescu return 0; 160534ff6846SIoana Radulescu 160634ff6846SIoana Radulescu err_set_cdan: 160734ff6846SIoana Radulescu dpaa2_io_service_deregister(channel->dpio, nctx); 160834ff6846SIoana Radulescu err_service_reg: 160934ff6846SIoana Radulescu free_channel(priv, channel); 161034ff6846SIoana Radulescu err_alloc_ch: 161134ff6846SIoana Radulescu if (cpumask_empty(&priv->dpio_cpumask)) { 161234ff6846SIoana Radulescu dev_err(dev, "No cpu with an affine DPIO/DPCON\n"); 161334ff6846SIoana Radulescu return err; 161434ff6846SIoana Radulescu } 161534ff6846SIoana Radulescu 161634ff6846SIoana Radulescu dev_info(dev, "Cores %*pbl available for processing ingress traffic\n", 161734ff6846SIoana Radulescu cpumask_pr_args(&priv->dpio_cpumask)); 161834ff6846SIoana Radulescu 161934ff6846SIoana Radulescu return 0; 162034ff6846SIoana Radulescu } 162134ff6846SIoana Radulescu 162234ff6846SIoana Radulescu static void free_dpio(struct dpaa2_eth_priv *priv) 162334ff6846SIoana Radulescu { 162434ff6846SIoana Radulescu int i; 162534ff6846SIoana Radulescu struct dpaa2_eth_channel *ch; 162634ff6846SIoana Radulescu 162734ff6846SIoana Radulescu /* deregister CDAN notifications and free channels */ 162834ff6846SIoana Radulescu for (i = 0; i < priv->num_channels; i++) { 162934ff6846SIoana Radulescu ch = priv->channel[i]; 163034ff6846SIoana Radulescu dpaa2_io_service_deregister(ch->dpio, &ch->nctx); 163134ff6846SIoana Radulescu free_channel(priv, ch); 163234ff6846SIoana Radulescu } 163334ff6846SIoana Radulescu } 163434ff6846SIoana Radulescu 163534ff6846SIoana Radulescu static struct dpaa2_eth_channel *get_affine_channel(struct dpaa2_eth_priv *priv, 163634ff6846SIoana Radulescu int cpu) 163734ff6846SIoana Radulescu { 163834ff6846SIoana Radulescu struct device *dev = priv->net_dev->dev.parent; 163934ff6846SIoana Radulescu int i; 164034ff6846SIoana Radulescu 164134ff6846SIoana Radulescu for (i = 0; i < priv->num_channels; i++) 164234ff6846SIoana Radulescu if (priv->channel[i]->nctx.desired_cpu == cpu) 164334ff6846SIoana Radulescu return priv->channel[i]; 164434ff6846SIoana Radulescu 164534ff6846SIoana Radulescu /* We should never get here. Issue a warning and return 164634ff6846SIoana Radulescu * the first channel, because it's still better than nothing 164734ff6846SIoana Radulescu */ 164834ff6846SIoana Radulescu dev_warn(dev, "No affine channel found for cpu %d\n", cpu); 164934ff6846SIoana Radulescu 165034ff6846SIoana Radulescu return priv->channel[0]; 165134ff6846SIoana Radulescu } 165234ff6846SIoana Radulescu 165334ff6846SIoana Radulescu static void set_fq_affinity(struct dpaa2_eth_priv *priv) 165434ff6846SIoana Radulescu { 165534ff6846SIoana Radulescu struct device *dev = priv->net_dev->dev.parent; 165634ff6846SIoana Radulescu struct cpumask xps_mask; 165734ff6846SIoana Radulescu struct dpaa2_eth_fq *fq; 165834ff6846SIoana Radulescu int rx_cpu, txc_cpu; 165934ff6846SIoana Radulescu int i, err; 166034ff6846SIoana Radulescu 166134ff6846SIoana Radulescu /* For each FQ, pick one channel/CPU to deliver frames to. 166234ff6846SIoana Radulescu * This may well change at runtime, either through irqbalance or 166334ff6846SIoana Radulescu * through direct user intervention. 166434ff6846SIoana Radulescu */ 166534ff6846SIoana Radulescu rx_cpu = txc_cpu = cpumask_first(&priv->dpio_cpumask); 166634ff6846SIoana Radulescu 166734ff6846SIoana Radulescu for (i = 0; i < priv->num_fqs; i++) { 166834ff6846SIoana Radulescu fq = &priv->fq[i]; 166934ff6846SIoana Radulescu switch (fq->type) { 167034ff6846SIoana Radulescu case DPAA2_RX_FQ: 167134ff6846SIoana Radulescu fq->target_cpu = rx_cpu; 167234ff6846SIoana Radulescu rx_cpu = cpumask_next(rx_cpu, &priv->dpio_cpumask); 167334ff6846SIoana Radulescu if (rx_cpu >= nr_cpu_ids) 167434ff6846SIoana Radulescu rx_cpu = cpumask_first(&priv->dpio_cpumask); 167534ff6846SIoana Radulescu break; 167634ff6846SIoana Radulescu case DPAA2_TX_CONF_FQ: 167734ff6846SIoana Radulescu fq->target_cpu = txc_cpu; 167834ff6846SIoana Radulescu 167934ff6846SIoana Radulescu /* Tell the stack to affine to txc_cpu the Tx queue 168034ff6846SIoana Radulescu * associated with the confirmation one 168134ff6846SIoana Radulescu */ 168234ff6846SIoana Radulescu cpumask_clear(&xps_mask); 168334ff6846SIoana Radulescu cpumask_set_cpu(txc_cpu, &xps_mask); 168434ff6846SIoana Radulescu err = netif_set_xps_queue(priv->net_dev, &xps_mask, 168534ff6846SIoana Radulescu fq->flowid); 168634ff6846SIoana Radulescu if (err) 168734ff6846SIoana Radulescu dev_err(dev, "Error setting XPS queue\n"); 168834ff6846SIoana Radulescu 168934ff6846SIoana Radulescu txc_cpu = cpumask_next(txc_cpu, &priv->dpio_cpumask); 169034ff6846SIoana Radulescu if (txc_cpu >= nr_cpu_ids) 169134ff6846SIoana Radulescu txc_cpu = cpumask_first(&priv->dpio_cpumask); 169234ff6846SIoana Radulescu break; 169334ff6846SIoana Radulescu default: 169434ff6846SIoana Radulescu dev_err(dev, "Unknown FQ type: %d\n", fq->type); 169534ff6846SIoana Radulescu } 169634ff6846SIoana Radulescu fq->channel = get_affine_channel(priv, fq->target_cpu); 169734ff6846SIoana Radulescu } 169834ff6846SIoana Radulescu } 169934ff6846SIoana Radulescu 170034ff6846SIoana Radulescu static void setup_fqs(struct dpaa2_eth_priv *priv) 170134ff6846SIoana Radulescu { 170234ff6846SIoana Radulescu int i; 170334ff6846SIoana Radulescu 170434ff6846SIoana Radulescu /* We have one TxConf FQ per Tx flow. 170534ff6846SIoana Radulescu * The number of Tx and Rx queues is the same. 170634ff6846SIoana Radulescu * Tx queues come first in the fq array. 170734ff6846SIoana Radulescu */ 170834ff6846SIoana Radulescu for (i = 0; i < dpaa2_eth_queue_count(priv); i++) { 170934ff6846SIoana Radulescu priv->fq[priv->num_fqs].type = DPAA2_TX_CONF_FQ; 171034ff6846SIoana Radulescu priv->fq[priv->num_fqs].consume = dpaa2_eth_tx_conf; 171134ff6846SIoana Radulescu priv->fq[priv->num_fqs++].flowid = (u16)i; 171234ff6846SIoana Radulescu } 171334ff6846SIoana Radulescu 171434ff6846SIoana Radulescu for (i = 0; i < dpaa2_eth_queue_count(priv); i++) { 171534ff6846SIoana Radulescu priv->fq[priv->num_fqs].type = DPAA2_RX_FQ; 171634ff6846SIoana Radulescu priv->fq[priv->num_fqs].consume = dpaa2_eth_rx; 171734ff6846SIoana Radulescu priv->fq[priv->num_fqs++].flowid = (u16)i; 171834ff6846SIoana Radulescu } 171934ff6846SIoana Radulescu 172034ff6846SIoana Radulescu /* For each FQ, decide on which core to process incoming frames */ 172134ff6846SIoana Radulescu set_fq_affinity(priv); 172234ff6846SIoana Radulescu } 172334ff6846SIoana Radulescu 172434ff6846SIoana Radulescu /* Allocate and configure one buffer pool for each interface */ 172534ff6846SIoana Radulescu static int setup_dpbp(struct dpaa2_eth_priv *priv) 172634ff6846SIoana Radulescu { 172734ff6846SIoana Radulescu int err; 172834ff6846SIoana Radulescu struct fsl_mc_device *dpbp_dev; 172934ff6846SIoana Radulescu struct device *dev = priv->net_dev->dev.parent; 173034ff6846SIoana Radulescu struct dpbp_attr dpbp_attrs; 173134ff6846SIoana Radulescu 173234ff6846SIoana Radulescu err = fsl_mc_object_allocate(to_fsl_mc_device(dev), FSL_MC_POOL_DPBP, 173334ff6846SIoana Radulescu &dpbp_dev); 173434ff6846SIoana Radulescu if (err) { 173534ff6846SIoana Radulescu dev_err(dev, "DPBP device allocation failed\n"); 173634ff6846SIoana Radulescu return err; 173734ff6846SIoana Radulescu } 173834ff6846SIoana Radulescu 173934ff6846SIoana Radulescu priv->dpbp_dev = dpbp_dev; 174034ff6846SIoana Radulescu 174134ff6846SIoana Radulescu err = dpbp_open(priv->mc_io, 0, priv->dpbp_dev->obj_desc.id, 174234ff6846SIoana Radulescu &dpbp_dev->mc_handle); 174334ff6846SIoana Radulescu if (err) { 174434ff6846SIoana Radulescu dev_err(dev, "dpbp_open() failed\n"); 174534ff6846SIoana Radulescu goto err_open; 174634ff6846SIoana Radulescu } 174734ff6846SIoana Radulescu 174834ff6846SIoana Radulescu err = dpbp_reset(priv->mc_io, 0, dpbp_dev->mc_handle); 174934ff6846SIoana Radulescu if (err) { 175034ff6846SIoana Radulescu dev_err(dev, "dpbp_reset() failed\n"); 175134ff6846SIoana Radulescu goto err_reset; 175234ff6846SIoana Radulescu } 175334ff6846SIoana Radulescu 175434ff6846SIoana Radulescu err = dpbp_enable(priv->mc_io, 0, dpbp_dev->mc_handle); 175534ff6846SIoana Radulescu if (err) { 175634ff6846SIoana Radulescu dev_err(dev, "dpbp_enable() failed\n"); 175734ff6846SIoana Radulescu goto err_enable; 175834ff6846SIoana Radulescu } 175934ff6846SIoana Radulescu 176034ff6846SIoana Radulescu err = dpbp_get_attributes(priv->mc_io, 0, dpbp_dev->mc_handle, 176134ff6846SIoana Radulescu &dpbp_attrs); 176234ff6846SIoana Radulescu if (err) { 176334ff6846SIoana Radulescu dev_err(dev, "dpbp_get_attributes() failed\n"); 176434ff6846SIoana Radulescu goto err_get_attr; 176534ff6846SIoana Radulescu } 176634ff6846SIoana Radulescu priv->bpid = dpbp_attrs.bpid; 176734ff6846SIoana Radulescu 176834ff6846SIoana Radulescu return 0; 176934ff6846SIoana Radulescu 177034ff6846SIoana Radulescu err_get_attr: 177134ff6846SIoana Radulescu dpbp_disable(priv->mc_io, 0, dpbp_dev->mc_handle); 177234ff6846SIoana Radulescu err_enable: 177334ff6846SIoana Radulescu err_reset: 177434ff6846SIoana Radulescu dpbp_close(priv->mc_io, 0, dpbp_dev->mc_handle); 177534ff6846SIoana Radulescu err_open: 177634ff6846SIoana Radulescu fsl_mc_object_free(dpbp_dev); 177734ff6846SIoana Radulescu 177834ff6846SIoana Radulescu return err; 177934ff6846SIoana Radulescu } 178034ff6846SIoana Radulescu 178134ff6846SIoana Radulescu static void free_dpbp(struct dpaa2_eth_priv *priv) 178234ff6846SIoana Radulescu { 178334ff6846SIoana Radulescu drain_pool(priv); 178434ff6846SIoana Radulescu dpbp_disable(priv->mc_io, 0, priv->dpbp_dev->mc_handle); 178534ff6846SIoana Radulescu dpbp_close(priv->mc_io, 0, priv->dpbp_dev->mc_handle); 178634ff6846SIoana Radulescu fsl_mc_object_free(priv->dpbp_dev); 178734ff6846SIoana Radulescu } 178834ff6846SIoana Radulescu 178934ff6846SIoana Radulescu static int set_buffer_layout(struct dpaa2_eth_priv *priv) 179034ff6846SIoana Radulescu { 179134ff6846SIoana Radulescu struct device *dev = priv->net_dev->dev.parent; 179234ff6846SIoana Radulescu struct dpni_buffer_layout buf_layout = {0}; 179334ff6846SIoana Radulescu int err; 179434ff6846SIoana Radulescu 179534ff6846SIoana Radulescu /* We need to check for WRIOP version 1.0.0, but depending on the MC 179634ff6846SIoana Radulescu * version, this number is not always provided correctly on rev1. 179734ff6846SIoana Radulescu * We need to check for both alternatives in this situation. 179834ff6846SIoana Radulescu */ 179934ff6846SIoana Radulescu if (priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(0, 0, 0) || 180034ff6846SIoana Radulescu priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(1, 0, 0)) 180134ff6846SIoana Radulescu priv->rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN_REV1; 180234ff6846SIoana Radulescu else 180334ff6846SIoana Radulescu priv->rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN; 180434ff6846SIoana Radulescu 180534ff6846SIoana Radulescu /* tx buffer */ 180634ff6846SIoana Radulescu buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE; 180734ff6846SIoana Radulescu buf_layout.pass_timestamp = true; 180834ff6846SIoana Radulescu buf_layout.options = DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE | 180934ff6846SIoana Radulescu DPNI_BUF_LAYOUT_OPT_TIMESTAMP; 181034ff6846SIoana Radulescu err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token, 181134ff6846SIoana Radulescu DPNI_QUEUE_TX, &buf_layout); 181234ff6846SIoana Radulescu if (err) { 181334ff6846SIoana Radulescu dev_err(dev, "dpni_set_buffer_layout(TX) failed\n"); 181434ff6846SIoana Radulescu return err; 181534ff6846SIoana Radulescu } 181634ff6846SIoana Radulescu 181734ff6846SIoana Radulescu /* tx-confirm buffer */ 181834ff6846SIoana Radulescu buf_layout.options = DPNI_BUF_LAYOUT_OPT_TIMESTAMP; 181934ff6846SIoana Radulescu err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token, 182034ff6846SIoana Radulescu DPNI_QUEUE_TX_CONFIRM, &buf_layout); 182134ff6846SIoana Radulescu if (err) { 182234ff6846SIoana Radulescu dev_err(dev, "dpni_set_buffer_layout(TX_CONF) failed\n"); 182334ff6846SIoana Radulescu return err; 182434ff6846SIoana Radulescu } 182534ff6846SIoana Radulescu 182634ff6846SIoana Radulescu /* Now that we've set our tx buffer layout, retrieve the minimum 182734ff6846SIoana Radulescu * required tx data offset. 182834ff6846SIoana Radulescu */ 182934ff6846SIoana Radulescu err = dpni_get_tx_data_offset(priv->mc_io, 0, priv->mc_token, 183034ff6846SIoana Radulescu &priv->tx_data_offset); 183134ff6846SIoana Radulescu if (err) { 183234ff6846SIoana Radulescu dev_err(dev, "dpni_get_tx_data_offset() failed\n"); 183334ff6846SIoana Radulescu return err; 183434ff6846SIoana Radulescu } 183534ff6846SIoana Radulescu 183634ff6846SIoana Radulescu if ((priv->tx_data_offset % 64) != 0) 183734ff6846SIoana Radulescu dev_warn(dev, "Tx data offset (%d) not a multiple of 64B\n", 183834ff6846SIoana Radulescu priv->tx_data_offset); 183934ff6846SIoana Radulescu 184034ff6846SIoana Radulescu /* rx buffer */ 184134ff6846SIoana Radulescu buf_layout.pass_frame_status = true; 184234ff6846SIoana Radulescu buf_layout.pass_parser_result = true; 184334ff6846SIoana Radulescu buf_layout.data_align = priv->rx_buf_align; 184434ff6846SIoana Radulescu buf_layout.data_head_room = dpaa2_eth_rx_head_room(priv); 184534ff6846SIoana Radulescu buf_layout.private_data_size = 0; 184634ff6846SIoana Radulescu buf_layout.options = DPNI_BUF_LAYOUT_OPT_PARSER_RESULT | 184734ff6846SIoana Radulescu DPNI_BUF_LAYOUT_OPT_FRAME_STATUS | 184834ff6846SIoana Radulescu DPNI_BUF_LAYOUT_OPT_DATA_ALIGN | 184934ff6846SIoana Radulescu DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM | 185034ff6846SIoana Radulescu DPNI_BUF_LAYOUT_OPT_TIMESTAMP; 185134ff6846SIoana Radulescu err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token, 185234ff6846SIoana Radulescu DPNI_QUEUE_RX, &buf_layout); 185334ff6846SIoana Radulescu if (err) { 185434ff6846SIoana Radulescu dev_err(dev, "dpni_set_buffer_layout(RX) failed\n"); 185534ff6846SIoana Radulescu return err; 185634ff6846SIoana Radulescu } 185734ff6846SIoana Radulescu 185834ff6846SIoana Radulescu return 0; 185934ff6846SIoana Radulescu } 186034ff6846SIoana Radulescu 186134ff6846SIoana Radulescu /* Configure the DPNI object this interface is associated with */ 186234ff6846SIoana Radulescu static int setup_dpni(struct fsl_mc_device *ls_dev) 186334ff6846SIoana Radulescu { 186434ff6846SIoana Radulescu struct device *dev = &ls_dev->dev; 186534ff6846SIoana Radulescu struct dpaa2_eth_priv *priv; 186634ff6846SIoana Radulescu struct net_device *net_dev; 186734ff6846SIoana Radulescu int err; 186834ff6846SIoana Radulescu 186934ff6846SIoana Radulescu net_dev = dev_get_drvdata(dev); 187034ff6846SIoana Radulescu priv = netdev_priv(net_dev); 187134ff6846SIoana Radulescu 187234ff6846SIoana Radulescu /* get a handle for the DPNI object */ 187334ff6846SIoana Radulescu err = dpni_open(priv->mc_io, 0, ls_dev->obj_desc.id, &priv->mc_token); 187434ff6846SIoana Radulescu if (err) { 187534ff6846SIoana Radulescu dev_err(dev, "dpni_open() failed\n"); 187634ff6846SIoana Radulescu return err; 187734ff6846SIoana Radulescu } 187834ff6846SIoana Radulescu 187934ff6846SIoana Radulescu /* Check if we can work with this DPNI object */ 188034ff6846SIoana Radulescu err = dpni_get_api_version(priv->mc_io, 0, &priv->dpni_ver_major, 188134ff6846SIoana Radulescu &priv->dpni_ver_minor); 188234ff6846SIoana Radulescu if (err) { 188334ff6846SIoana Radulescu dev_err(dev, "dpni_get_api_version() failed\n"); 188434ff6846SIoana Radulescu goto close; 188534ff6846SIoana Radulescu } 188634ff6846SIoana Radulescu if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_VER_MAJOR, DPNI_VER_MINOR) < 0) { 188734ff6846SIoana Radulescu dev_err(dev, "DPNI version %u.%u not supported, need >= %u.%u\n", 188834ff6846SIoana Radulescu priv->dpni_ver_major, priv->dpni_ver_minor, 188934ff6846SIoana Radulescu DPNI_VER_MAJOR, DPNI_VER_MINOR); 189034ff6846SIoana Radulescu err = -ENOTSUPP; 189134ff6846SIoana Radulescu goto close; 189234ff6846SIoana Radulescu } 189334ff6846SIoana Radulescu 189434ff6846SIoana Radulescu ls_dev->mc_io = priv->mc_io; 189534ff6846SIoana Radulescu ls_dev->mc_handle = priv->mc_token; 189634ff6846SIoana Radulescu 189734ff6846SIoana Radulescu err = dpni_reset(priv->mc_io, 0, priv->mc_token); 189834ff6846SIoana Radulescu if (err) { 189934ff6846SIoana Radulescu dev_err(dev, "dpni_reset() failed\n"); 190034ff6846SIoana Radulescu goto close; 190134ff6846SIoana Radulescu } 190234ff6846SIoana Radulescu 190334ff6846SIoana Radulescu err = dpni_get_attributes(priv->mc_io, 0, priv->mc_token, 190434ff6846SIoana Radulescu &priv->dpni_attrs); 190534ff6846SIoana Radulescu if (err) { 190634ff6846SIoana Radulescu dev_err(dev, "dpni_get_attributes() failed (err=%d)\n", err); 190734ff6846SIoana Radulescu goto close; 190834ff6846SIoana Radulescu } 190934ff6846SIoana Radulescu 191034ff6846SIoana Radulescu err = set_buffer_layout(priv); 191134ff6846SIoana Radulescu if (err) 191234ff6846SIoana Radulescu goto close; 191334ff6846SIoana Radulescu 1914afb90dbbSIoana Radulescu priv->cls_rules = devm_kzalloc(dev, sizeof(struct dpaa2_eth_cls_rule) * 1915afb90dbbSIoana Radulescu dpaa2_eth_fs_count(priv), GFP_KERNEL); 1916afb90dbbSIoana Radulescu if (!priv->cls_rules) 1917afb90dbbSIoana Radulescu goto close; 1918afb90dbbSIoana Radulescu 191934ff6846SIoana Radulescu return 0; 192034ff6846SIoana Radulescu 192134ff6846SIoana Radulescu close: 192234ff6846SIoana Radulescu dpni_close(priv->mc_io, 0, priv->mc_token); 192334ff6846SIoana Radulescu 192434ff6846SIoana Radulescu return err; 192534ff6846SIoana Radulescu } 192634ff6846SIoana Radulescu 192734ff6846SIoana Radulescu static void free_dpni(struct dpaa2_eth_priv *priv) 192834ff6846SIoana Radulescu { 192934ff6846SIoana Radulescu int err; 193034ff6846SIoana Radulescu 193134ff6846SIoana Radulescu err = dpni_reset(priv->mc_io, 0, priv->mc_token); 193234ff6846SIoana Radulescu if (err) 193334ff6846SIoana Radulescu netdev_warn(priv->net_dev, "dpni_reset() failed (err %d)\n", 193434ff6846SIoana Radulescu err); 193534ff6846SIoana Radulescu 193634ff6846SIoana Radulescu dpni_close(priv->mc_io, 0, priv->mc_token); 193734ff6846SIoana Radulescu } 193834ff6846SIoana Radulescu 193934ff6846SIoana Radulescu static int setup_rx_flow(struct dpaa2_eth_priv *priv, 194034ff6846SIoana Radulescu struct dpaa2_eth_fq *fq) 194134ff6846SIoana Radulescu { 194234ff6846SIoana Radulescu struct device *dev = priv->net_dev->dev.parent; 194334ff6846SIoana Radulescu struct dpni_queue queue; 194434ff6846SIoana Radulescu struct dpni_queue_id qid; 194534ff6846SIoana Radulescu struct dpni_taildrop td; 194634ff6846SIoana Radulescu int err; 194734ff6846SIoana Radulescu 194834ff6846SIoana Radulescu err = dpni_get_queue(priv->mc_io, 0, priv->mc_token, 194934ff6846SIoana Radulescu DPNI_QUEUE_RX, 0, fq->flowid, &queue, &qid); 195034ff6846SIoana Radulescu if (err) { 195134ff6846SIoana Radulescu dev_err(dev, "dpni_get_queue(RX) failed\n"); 195234ff6846SIoana Radulescu return err; 195334ff6846SIoana Radulescu } 195434ff6846SIoana Radulescu 195534ff6846SIoana Radulescu fq->fqid = qid.fqid; 195634ff6846SIoana Radulescu 195734ff6846SIoana Radulescu queue.destination.id = fq->channel->dpcon_id; 195834ff6846SIoana Radulescu queue.destination.type = DPNI_DEST_DPCON; 195934ff6846SIoana Radulescu queue.destination.priority = 1; 196034ff6846SIoana Radulescu queue.user_context = (u64)(uintptr_t)fq; 196134ff6846SIoana Radulescu err = dpni_set_queue(priv->mc_io, 0, priv->mc_token, 196234ff6846SIoana Radulescu DPNI_QUEUE_RX, 0, fq->flowid, 196334ff6846SIoana Radulescu DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST, 196434ff6846SIoana Radulescu &queue); 196534ff6846SIoana Radulescu if (err) { 196634ff6846SIoana Radulescu dev_err(dev, "dpni_set_queue(RX) failed\n"); 196734ff6846SIoana Radulescu return err; 196834ff6846SIoana Radulescu } 196934ff6846SIoana Radulescu 197034ff6846SIoana Radulescu td.enable = 1; 197134ff6846SIoana Radulescu td.threshold = DPAA2_ETH_TAILDROP_THRESH; 197234ff6846SIoana Radulescu err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token, DPNI_CP_QUEUE, 197334ff6846SIoana Radulescu DPNI_QUEUE_RX, 0, fq->flowid, &td); 197434ff6846SIoana Radulescu if (err) { 197534ff6846SIoana Radulescu dev_err(dev, "dpni_set_threshold() failed\n"); 197634ff6846SIoana Radulescu return err; 197734ff6846SIoana Radulescu } 197834ff6846SIoana Radulescu 197934ff6846SIoana Radulescu return 0; 198034ff6846SIoana Radulescu } 198134ff6846SIoana Radulescu 198234ff6846SIoana Radulescu static int setup_tx_flow(struct dpaa2_eth_priv *priv, 198334ff6846SIoana Radulescu struct dpaa2_eth_fq *fq) 198434ff6846SIoana Radulescu { 198534ff6846SIoana Radulescu struct device *dev = priv->net_dev->dev.parent; 198634ff6846SIoana Radulescu struct dpni_queue queue; 198734ff6846SIoana Radulescu struct dpni_queue_id qid; 198834ff6846SIoana Radulescu int err; 198934ff6846SIoana Radulescu 199034ff6846SIoana Radulescu err = dpni_get_queue(priv->mc_io, 0, priv->mc_token, 199134ff6846SIoana Radulescu DPNI_QUEUE_TX, 0, fq->flowid, &queue, &qid); 199234ff6846SIoana Radulescu if (err) { 199334ff6846SIoana Radulescu dev_err(dev, "dpni_get_queue(TX) failed\n"); 199434ff6846SIoana Radulescu return err; 199534ff6846SIoana Radulescu } 199634ff6846SIoana Radulescu 199734ff6846SIoana Radulescu fq->tx_qdbin = qid.qdbin; 199834ff6846SIoana Radulescu 199934ff6846SIoana Radulescu err = dpni_get_queue(priv->mc_io, 0, priv->mc_token, 200034ff6846SIoana Radulescu DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid, 200134ff6846SIoana Radulescu &queue, &qid); 200234ff6846SIoana Radulescu if (err) { 200334ff6846SIoana Radulescu dev_err(dev, "dpni_get_queue(TX_CONF) failed\n"); 200434ff6846SIoana Radulescu return err; 200534ff6846SIoana Radulescu } 200634ff6846SIoana Radulescu 200734ff6846SIoana Radulescu fq->fqid = qid.fqid; 200834ff6846SIoana Radulescu 200934ff6846SIoana Radulescu queue.destination.id = fq->channel->dpcon_id; 201034ff6846SIoana Radulescu queue.destination.type = DPNI_DEST_DPCON; 201134ff6846SIoana Radulescu queue.destination.priority = 0; 201234ff6846SIoana Radulescu queue.user_context = (u64)(uintptr_t)fq; 201334ff6846SIoana Radulescu err = dpni_set_queue(priv->mc_io, 0, priv->mc_token, 201434ff6846SIoana Radulescu DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid, 201534ff6846SIoana Radulescu DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST, 201634ff6846SIoana Radulescu &queue); 201734ff6846SIoana Radulescu if (err) { 201834ff6846SIoana Radulescu dev_err(dev, "dpni_set_queue(TX_CONF) failed\n"); 201934ff6846SIoana Radulescu return err; 202034ff6846SIoana Radulescu } 202134ff6846SIoana Radulescu 202234ff6846SIoana Radulescu return 0; 202334ff6846SIoana Radulescu } 202434ff6846SIoana Radulescu 2025edad8d26SIoana Ciocoi Radulescu /* Supported header fields for Rx hash distribution key */ 2026f76c483aSIoana Radulescu static const struct dpaa2_eth_dist_fields dist_fields[] = { 202734ff6846SIoana Radulescu { 2028edad8d26SIoana Ciocoi Radulescu /* L2 header */ 2029edad8d26SIoana Ciocoi Radulescu .rxnfc_field = RXH_L2DA, 2030edad8d26SIoana Ciocoi Radulescu .cls_prot = NET_PROT_ETH, 2031edad8d26SIoana Ciocoi Radulescu .cls_field = NH_FLD_ETH_DA, 2032edad8d26SIoana Ciocoi Radulescu .size = 6, 2033edad8d26SIoana Ciocoi Radulescu }, { 2034afb90dbbSIoana Radulescu .cls_prot = NET_PROT_ETH, 2035afb90dbbSIoana Radulescu .cls_field = NH_FLD_ETH_SA, 2036afb90dbbSIoana Radulescu .size = 6, 2037afb90dbbSIoana Radulescu }, { 2038afb90dbbSIoana Radulescu /* This is the last ethertype field parsed: 2039afb90dbbSIoana Radulescu * depending on frame format, it can be the MAC ethertype 2040afb90dbbSIoana Radulescu * or the VLAN etype. 2041afb90dbbSIoana Radulescu */ 2042afb90dbbSIoana Radulescu .cls_prot = NET_PROT_ETH, 2043afb90dbbSIoana Radulescu .cls_field = NH_FLD_ETH_TYPE, 2044afb90dbbSIoana Radulescu .size = 2, 2045afb90dbbSIoana Radulescu }, { 2046edad8d26SIoana Ciocoi Radulescu /* VLAN header */ 2047edad8d26SIoana Ciocoi Radulescu .rxnfc_field = RXH_VLAN, 2048edad8d26SIoana Ciocoi Radulescu .cls_prot = NET_PROT_VLAN, 2049edad8d26SIoana Ciocoi Radulescu .cls_field = NH_FLD_VLAN_TCI, 2050edad8d26SIoana Ciocoi Radulescu .size = 2, 2051edad8d26SIoana Ciocoi Radulescu }, { 205234ff6846SIoana Radulescu /* IP header */ 205334ff6846SIoana Radulescu .rxnfc_field = RXH_IP_SRC, 205434ff6846SIoana Radulescu .cls_prot = NET_PROT_IP, 205534ff6846SIoana Radulescu .cls_field = NH_FLD_IP_SRC, 205634ff6846SIoana Radulescu .size = 4, 205734ff6846SIoana Radulescu }, { 205834ff6846SIoana Radulescu .rxnfc_field = RXH_IP_DST, 205934ff6846SIoana Radulescu .cls_prot = NET_PROT_IP, 206034ff6846SIoana Radulescu .cls_field = NH_FLD_IP_DST, 206134ff6846SIoana Radulescu .size = 4, 206234ff6846SIoana Radulescu }, { 206334ff6846SIoana Radulescu .rxnfc_field = RXH_L3_PROTO, 206434ff6846SIoana Radulescu .cls_prot = NET_PROT_IP, 206534ff6846SIoana Radulescu .cls_field = NH_FLD_IP_PROTO, 206634ff6846SIoana Radulescu .size = 1, 206734ff6846SIoana Radulescu }, { 206834ff6846SIoana Radulescu /* Using UDP ports, this is functionally equivalent to raw 206934ff6846SIoana Radulescu * byte pairs from L4 header. 207034ff6846SIoana Radulescu */ 207134ff6846SIoana Radulescu .rxnfc_field = RXH_L4_B_0_1, 207234ff6846SIoana Radulescu .cls_prot = NET_PROT_UDP, 207334ff6846SIoana Radulescu .cls_field = NH_FLD_UDP_PORT_SRC, 207434ff6846SIoana Radulescu .size = 2, 207534ff6846SIoana Radulescu }, { 207634ff6846SIoana Radulescu .rxnfc_field = RXH_L4_B_2_3, 207734ff6846SIoana Radulescu .cls_prot = NET_PROT_UDP, 207834ff6846SIoana Radulescu .cls_field = NH_FLD_UDP_PORT_DST, 207934ff6846SIoana Radulescu .size = 2, 208034ff6846SIoana Radulescu }, 208134ff6846SIoana Radulescu }; 208234ff6846SIoana Radulescu 2083df85aeb9SIoana Radulescu /* Configure the Rx hash key using the legacy API */ 2084df85aeb9SIoana Radulescu static int config_legacy_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key) 2085df85aeb9SIoana Radulescu { 2086df85aeb9SIoana Radulescu struct device *dev = priv->net_dev->dev.parent; 2087df85aeb9SIoana Radulescu struct dpni_rx_tc_dist_cfg dist_cfg; 2088df85aeb9SIoana Radulescu int err; 2089df85aeb9SIoana Radulescu 2090df85aeb9SIoana Radulescu memset(&dist_cfg, 0, sizeof(dist_cfg)); 2091df85aeb9SIoana Radulescu 2092df85aeb9SIoana Radulescu dist_cfg.key_cfg_iova = key; 2093df85aeb9SIoana Radulescu dist_cfg.dist_size = dpaa2_eth_queue_count(priv); 2094df85aeb9SIoana Radulescu dist_cfg.dist_mode = DPNI_DIST_MODE_HASH; 2095df85aeb9SIoana Radulescu 2096df85aeb9SIoana Radulescu err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token, 0, &dist_cfg); 2097df85aeb9SIoana Radulescu if (err) 2098df85aeb9SIoana Radulescu dev_err(dev, "dpni_set_rx_tc_dist failed\n"); 2099df85aeb9SIoana Radulescu 2100df85aeb9SIoana Radulescu return err; 2101df85aeb9SIoana Radulescu } 2102df85aeb9SIoana Radulescu 2103df85aeb9SIoana Radulescu /* Configure the Rx hash key using the new API */ 2104df85aeb9SIoana Radulescu static int config_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key) 2105df85aeb9SIoana Radulescu { 2106df85aeb9SIoana Radulescu struct device *dev = priv->net_dev->dev.parent; 2107df85aeb9SIoana Radulescu struct dpni_rx_dist_cfg dist_cfg; 2108df85aeb9SIoana Radulescu int err; 2109df85aeb9SIoana Radulescu 2110df85aeb9SIoana Radulescu memset(&dist_cfg, 0, sizeof(dist_cfg)); 2111df85aeb9SIoana Radulescu 2112df85aeb9SIoana Radulescu dist_cfg.key_cfg_iova = key; 2113df85aeb9SIoana Radulescu dist_cfg.dist_size = dpaa2_eth_queue_count(priv); 2114df85aeb9SIoana Radulescu dist_cfg.enable = 1; 2115df85aeb9SIoana Radulescu 2116df85aeb9SIoana Radulescu err = dpni_set_rx_hash_dist(priv->mc_io, 0, priv->mc_token, &dist_cfg); 2117df85aeb9SIoana Radulescu if (err) 2118df85aeb9SIoana Radulescu dev_err(dev, "dpni_set_rx_hash_dist failed\n"); 2119df85aeb9SIoana Radulescu 2120df85aeb9SIoana Radulescu return err; 2121df85aeb9SIoana Radulescu } 2122df85aeb9SIoana Radulescu 21234aaaf9b9SIoana Radulescu /* Configure the Rx flow classification key */ 21244aaaf9b9SIoana Radulescu static int config_cls_key(struct dpaa2_eth_priv *priv, dma_addr_t key) 21254aaaf9b9SIoana Radulescu { 21264aaaf9b9SIoana Radulescu struct device *dev = priv->net_dev->dev.parent; 21274aaaf9b9SIoana Radulescu struct dpni_rx_dist_cfg dist_cfg; 21284aaaf9b9SIoana Radulescu int err; 21294aaaf9b9SIoana Radulescu 21304aaaf9b9SIoana Radulescu memset(&dist_cfg, 0, sizeof(dist_cfg)); 21314aaaf9b9SIoana Radulescu 21324aaaf9b9SIoana Radulescu dist_cfg.key_cfg_iova = key; 21334aaaf9b9SIoana Radulescu dist_cfg.dist_size = dpaa2_eth_queue_count(priv); 21344aaaf9b9SIoana Radulescu dist_cfg.enable = 1; 21354aaaf9b9SIoana Radulescu 21364aaaf9b9SIoana Radulescu err = dpni_set_rx_fs_dist(priv->mc_io, 0, priv->mc_token, &dist_cfg); 21374aaaf9b9SIoana Radulescu if (err) 21384aaaf9b9SIoana Radulescu dev_err(dev, "dpni_set_rx_fs_dist failed\n"); 21394aaaf9b9SIoana Radulescu 21404aaaf9b9SIoana Radulescu return err; 21414aaaf9b9SIoana Radulescu } 21424aaaf9b9SIoana Radulescu 2143afb90dbbSIoana Radulescu /* Size of the Rx flow classification key */ 2144afb90dbbSIoana Radulescu int dpaa2_eth_cls_key_size(void) 2145afb90dbbSIoana Radulescu { 2146afb90dbbSIoana Radulescu int i, size = 0; 2147afb90dbbSIoana Radulescu 2148afb90dbbSIoana Radulescu for (i = 0; i < ARRAY_SIZE(dist_fields); i++) 2149afb90dbbSIoana Radulescu size += dist_fields[i].size; 2150afb90dbbSIoana Radulescu 2151afb90dbbSIoana Radulescu return size; 2152afb90dbbSIoana Radulescu } 2153afb90dbbSIoana Radulescu 2154afb90dbbSIoana Radulescu /* Offset of header field in Rx classification key */ 2155afb90dbbSIoana Radulescu int dpaa2_eth_cls_fld_off(int prot, int field) 2156afb90dbbSIoana Radulescu { 2157afb90dbbSIoana Radulescu int i, off = 0; 2158afb90dbbSIoana Radulescu 2159afb90dbbSIoana Radulescu for (i = 0; i < ARRAY_SIZE(dist_fields); i++) { 2160afb90dbbSIoana Radulescu if (dist_fields[i].cls_prot == prot && 2161afb90dbbSIoana Radulescu dist_fields[i].cls_field == field) 2162afb90dbbSIoana Radulescu return off; 2163afb90dbbSIoana Radulescu off += dist_fields[i].size; 2164afb90dbbSIoana Radulescu } 2165afb90dbbSIoana Radulescu 2166afb90dbbSIoana Radulescu WARN_ONCE(1, "Unsupported header field used for Rx flow cls\n"); 2167afb90dbbSIoana Radulescu return 0; 2168afb90dbbSIoana Radulescu } 2169afb90dbbSIoana Radulescu 21704aaaf9b9SIoana Radulescu /* Set Rx distribution (hash or flow classification) key 217134ff6846SIoana Radulescu * flags is a combination of RXH_ bits 217234ff6846SIoana Radulescu */ 21733233c151SIoana Ciornei static int dpaa2_eth_set_dist_key(struct net_device *net_dev, 21744aaaf9b9SIoana Radulescu enum dpaa2_eth_rx_dist type, u64 flags) 217534ff6846SIoana Radulescu { 217634ff6846SIoana Radulescu struct device *dev = net_dev->dev.parent; 217734ff6846SIoana Radulescu struct dpaa2_eth_priv *priv = netdev_priv(net_dev); 217834ff6846SIoana Radulescu struct dpkg_profile_cfg cls_cfg; 2179edad8d26SIoana Ciocoi Radulescu u32 rx_hash_fields = 0; 2180df85aeb9SIoana Radulescu dma_addr_t key_iova; 218134ff6846SIoana Radulescu u8 *dma_mem; 218234ff6846SIoana Radulescu int i; 218334ff6846SIoana Radulescu int err = 0; 218434ff6846SIoana Radulescu 218534ff6846SIoana Radulescu memset(&cls_cfg, 0, sizeof(cls_cfg)); 218634ff6846SIoana Radulescu 2187f76c483aSIoana Radulescu for (i = 0; i < ARRAY_SIZE(dist_fields); i++) { 218834ff6846SIoana Radulescu struct dpkg_extract *key = 218934ff6846SIoana Radulescu &cls_cfg.extracts[cls_cfg.num_extracts]; 219034ff6846SIoana Radulescu 21914aaaf9b9SIoana Radulescu /* For Rx hashing key we set only the selected fields. 21924aaaf9b9SIoana Radulescu * For Rx flow classification key we set all supported fields 21934aaaf9b9SIoana Radulescu */ 21944aaaf9b9SIoana Radulescu if (type == DPAA2_ETH_RX_DIST_HASH) { 2195f76c483aSIoana Radulescu if (!(flags & dist_fields[i].rxnfc_field)) 219634ff6846SIoana Radulescu continue; 21974aaaf9b9SIoana Radulescu rx_hash_fields |= dist_fields[i].rxnfc_field; 21984aaaf9b9SIoana Radulescu } 219934ff6846SIoana Radulescu 220034ff6846SIoana Radulescu if (cls_cfg.num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) { 220134ff6846SIoana Radulescu dev_err(dev, "error adding key extraction rule, too many rules?\n"); 220234ff6846SIoana Radulescu return -E2BIG; 220334ff6846SIoana Radulescu } 220434ff6846SIoana Radulescu 220534ff6846SIoana Radulescu key->type = DPKG_EXTRACT_FROM_HDR; 2206f76c483aSIoana Radulescu key->extract.from_hdr.prot = dist_fields[i].cls_prot; 220734ff6846SIoana Radulescu key->extract.from_hdr.type = DPKG_FULL_FIELD; 2208f76c483aSIoana Radulescu key->extract.from_hdr.field = dist_fields[i].cls_field; 220934ff6846SIoana Radulescu cls_cfg.num_extracts++; 221034ff6846SIoana Radulescu } 221134ff6846SIoana Radulescu 221234ff6846SIoana Radulescu dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL); 221334ff6846SIoana Radulescu if (!dma_mem) 221434ff6846SIoana Radulescu return -ENOMEM; 221534ff6846SIoana Radulescu 221634ff6846SIoana Radulescu err = dpni_prepare_key_cfg(&cls_cfg, dma_mem); 221734ff6846SIoana Radulescu if (err) { 221834ff6846SIoana Radulescu dev_err(dev, "dpni_prepare_key_cfg error %d\n", err); 2219df85aeb9SIoana Radulescu goto free_key; 222034ff6846SIoana Radulescu } 222134ff6846SIoana Radulescu 222234ff6846SIoana Radulescu /* Prepare for setting the rx dist */ 2223df85aeb9SIoana Radulescu key_iova = dma_map_single(dev, dma_mem, DPAA2_CLASSIFIER_DMA_SIZE, 222434ff6846SIoana Radulescu DMA_TO_DEVICE); 2225df85aeb9SIoana Radulescu if (dma_mapping_error(dev, key_iova)) { 222634ff6846SIoana Radulescu dev_err(dev, "DMA mapping failed\n"); 222734ff6846SIoana Radulescu err = -ENOMEM; 2228df85aeb9SIoana Radulescu goto free_key; 222934ff6846SIoana Radulescu } 223034ff6846SIoana Radulescu 22314aaaf9b9SIoana Radulescu if (type == DPAA2_ETH_RX_DIST_HASH) { 2232df85aeb9SIoana Radulescu if (dpaa2_eth_has_legacy_dist(priv)) 2233df85aeb9SIoana Radulescu err = config_legacy_hash_key(priv, key_iova); 2234edad8d26SIoana Ciocoi Radulescu else 2235df85aeb9SIoana Radulescu err = config_hash_key(priv, key_iova); 22364aaaf9b9SIoana Radulescu } else { 22374aaaf9b9SIoana Radulescu err = config_cls_key(priv, key_iova); 22384aaaf9b9SIoana Radulescu } 2239df85aeb9SIoana Radulescu 2240df85aeb9SIoana Radulescu dma_unmap_single(dev, key_iova, DPAA2_CLASSIFIER_DMA_SIZE, 2241df85aeb9SIoana Radulescu DMA_TO_DEVICE); 22424aaaf9b9SIoana Radulescu if (!err && type == DPAA2_ETH_RX_DIST_HASH) 2243edad8d26SIoana Ciocoi Radulescu priv->rx_hash_fields = rx_hash_fields; 224434ff6846SIoana Radulescu 2245df85aeb9SIoana Radulescu free_key: 224634ff6846SIoana Radulescu kfree(dma_mem); 224734ff6846SIoana Radulescu return err; 224834ff6846SIoana Radulescu } 224934ff6846SIoana Radulescu 22504aaaf9b9SIoana Radulescu int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags) 22514aaaf9b9SIoana Radulescu { 22524aaaf9b9SIoana Radulescu struct dpaa2_eth_priv *priv = netdev_priv(net_dev); 22534aaaf9b9SIoana Radulescu 22544aaaf9b9SIoana Radulescu if (!dpaa2_eth_hash_enabled(priv)) 22554aaaf9b9SIoana Radulescu return -EOPNOTSUPP; 22564aaaf9b9SIoana Radulescu 22574aaaf9b9SIoana Radulescu return dpaa2_eth_set_dist_key(net_dev, DPAA2_ETH_RX_DIST_HASH, flags); 22584aaaf9b9SIoana Radulescu } 22594aaaf9b9SIoana Radulescu 22604aaaf9b9SIoana Radulescu static int dpaa2_eth_set_cls(struct dpaa2_eth_priv *priv) 22614aaaf9b9SIoana Radulescu { 22624aaaf9b9SIoana Radulescu struct device *dev = priv->net_dev->dev.parent; 22634aaaf9b9SIoana Radulescu 22644aaaf9b9SIoana Radulescu /* Check if we actually support Rx flow classification */ 22654aaaf9b9SIoana Radulescu if (dpaa2_eth_has_legacy_dist(priv)) { 22664aaaf9b9SIoana Radulescu dev_dbg(dev, "Rx cls not supported by current MC version\n"); 22674aaaf9b9SIoana Radulescu return -EOPNOTSUPP; 22684aaaf9b9SIoana Radulescu } 22694aaaf9b9SIoana Radulescu 22704aaaf9b9SIoana Radulescu if (priv->dpni_attrs.options & DPNI_OPT_NO_FS || 22714aaaf9b9SIoana Radulescu !(priv->dpni_attrs.options & DPNI_OPT_HAS_KEY_MASKING)) { 22724aaaf9b9SIoana Radulescu dev_dbg(dev, "Rx cls disabled in DPNI options\n"); 22734aaaf9b9SIoana Radulescu return -EOPNOTSUPP; 22744aaaf9b9SIoana Radulescu } 22754aaaf9b9SIoana Radulescu 22764aaaf9b9SIoana Radulescu if (!dpaa2_eth_hash_enabled(priv)) { 22774aaaf9b9SIoana Radulescu dev_dbg(dev, "Rx cls disabled for single queue DPNIs\n"); 22784aaaf9b9SIoana Radulescu return -EOPNOTSUPP; 22794aaaf9b9SIoana Radulescu } 22804aaaf9b9SIoana Radulescu 22814aaaf9b9SIoana Radulescu priv->rx_cls_enabled = 1; 22824aaaf9b9SIoana Radulescu 22834aaaf9b9SIoana Radulescu return dpaa2_eth_set_dist_key(priv->net_dev, DPAA2_ETH_RX_DIST_CLS, 0); 22844aaaf9b9SIoana Radulescu } 22854aaaf9b9SIoana Radulescu 228634ff6846SIoana Radulescu /* Bind the DPNI to its needed objects and resources: buffer pool, DPIOs, 228734ff6846SIoana Radulescu * frame queues and channels 228834ff6846SIoana Radulescu */ 228934ff6846SIoana Radulescu static int bind_dpni(struct dpaa2_eth_priv *priv) 229034ff6846SIoana Radulescu { 229134ff6846SIoana Radulescu struct net_device *net_dev = priv->net_dev; 229234ff6846SIoana Radulescu struct device *dev = net_dev->dev.parent; 229334ff6846SIoana Radulescu struct dpni_pools_cfg pools_params; 229434ff6846SIoana Radulescu struct dpni_error_cfg err_cfg; 229534ff6846SIoana Radulescu int err = 0; 229634ff6846SIoana Radulescu int i; 229734ff6846SIoana Radulescu 229834ff6846SIoana Radulescu pools_params.num_dpbp = 1; 229934ff6846SIoana Radulescu pools_params.pools[0].dpbp_id = priv->dpbp_dev->obj_desc.id; 230034ff6846SIoana Radulescu pools_params.pools[0].backup_pool = 0; 230134ff6846SIoana Radulescu pools_params.pools[0].buffer_size = DPAA2_ETH_RX_BUF_SIZE; 230234ff6846SIoana Radulescu err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params); 230334ff6846SIoana Radulescu if (err) { 230434ff6846SIoana Radulescu dev_err(dev, "dpni_set_pools() failed\n"); 230534ff6846SIoana Radulescu return err; 230634ff6846SIoana Radulescu } 230734ff6846SIoana Radulescu 230834ff6846SIoana Radulescu /* have the interface implicitly distribute traffic based on 230934ff6846SIoana Radulescu * the default hash key 231034ff6846SIoana Radulescu */ 231134ff6846SIoana Radulescu err = dpaa2_eth_set_hash(net_dev, DPAA2_RXH_DEFAULT); 2312edad8d26SIoana Ciocoi Radulescu if (err && err != -EOPNOTSUPP) 231334ff6846SIoana Radulescu dev_err(dev, "Failed to configure hashing\n"); 231434ff6846SIoana Radulescu 23154aaaf9b9SIoana Radulescu /* Configure the flow classification key; it includes all 23164aaaf9b9SIoana Radulescu * supported header fields and cannot be modified at runtime 23174aaaf9b9SIoana Radulescu */ 23184aaaf9b9SIoana Radulescu err = dpaa2_eth_set_cls(priv); 23194aaaf9b9SIoana Radulescu if (err && err != -EOPNOTSUPP) 23204aaaf9b9SIoana Radulescu dev_err(dev, "Failed to configure Rx classification key\n"); 23214aaaf9b9SIoana Radulescu 232234ff6846SIoana Radulescu /* Configure handling of error frames */ 232334ff6846SIoana Radulescu err_cfg.errors = DPAA2_FAS_RX_ERR_MASK; 232434ff6846SIoana Radulescu err_cfg.set_frame_annotation = 1; 232534ff6846SIoana Radulescu err_cfg.error_action = DPNI_ERROR_ACTION_DISCARD; 232634ff6846SIoana Radulescu err = dpni_set_errors_behavior(priv->mc_io, 0, priv->mc_token, 232734ff6846SIoana Radulescu &err_cfg); 232834ff6846SIoana Radulescu if (err) { 232934ff6846SIoana Radulescu dev_err(dev, "dpni_set_errors_behavior failed\n"); 233034ff6846SIoana Radulescu return err; 233134ff6846SIoana Radulescu } 233234ff6846SIoana Radulescu 233334ff6846SIoana Radulescu /* Configure Rx and Tx conf queues to generate CDANs */ 233434ff6846SIoana Radulescu for (i = 0; i < priv->num_fqs; i++) { 233534ff6846SIoana Radulescu switch (priv->fq[i].type) { 233634ff6846SIoana Radulescu case DPAA2_RX_FQ: 233734ff6846SIoana Radulescu err = setup_rx_flow(priv, &priv->fq[i]); 233834ff6846SIoana Radulescu break; 233934ff6846SIoana Radulescu case DPAA2_TX_CONF_FQ: 234034ff6846SIoana Radulescu err = setup_tx_flow(priv, &priv->fq[i]); 234134ff6846SIoana Radulescu break; 234234ff6846SIoana Radulescu default: 234334ff6846SIoana Radulescu dev_err(dev, "Invalid FQ type %d\n", priv->fq[i].type); 234434ff6846SIoana Radulescu return -EINVAL; 234534ff6846SIoana Radulescu } 234634ff6846SIoana Radulescu if (err) 234734ff6846SIoana Radulescu return err; 234834ff6846SIoana Radulescu } 234934ff6846SIoana Radulescu 235034ff6846SIoana Radulescu err = dpni_get_qdid(priv->mc_io, 0, priv->mc_token, 235134ff6846SIoana Radulescu DPNI_QUEUE_TX, &priv->tx_qdid); 235234ff6846SIoana Radulescu if (err) { 235334ff6846SIoana Radulescu dev_err(dev, "dpni_get_qdid() failed\n"); 235434ff6846SIoana Radulescu return err; 235534ff6846SIoana Radulescu } 235634ff6846SIoana Radulescu 235734ff6846SIoana Radulescu return 0; 235834ff6846SIoana Radulescu } 235934ff6846SIoana Radulescu 236034ff6846SIoana Radulescu /* Allocate rings for storing incoming frame descriptors */ 236134ff6846SIoana Radulescu static int alloc_rings(struct dpaa2_eth_priv *priv) 236234ff6846SIoana Radulescu { 236334ff6846SIoana Radulescu struct net_device *net_dev = priv->net_dev; 236434ff6846SIoana Radulescu struct device *dev = net_dev->dev.parent; 236534ff6846SIoana Radulescu int i; 236634ff6846SIoana Radulescu 236734ff6846SIoana Radulescu for (i = 0; i < priv->num_channels; i++) { 236834ff6846SIoana Radulescu priv->channel[i]->store = 236934ff6846SIoana Radulescu dpaa2_io_store_create(DPAA2_ETH_STORE_SIZE, dev); 237034ff6846SIoana Radulescu if (!priv->channel[i]->store) { 237134ff6846SIoana Radulescu netdev_err(net_dev, "dpaa2_io_store_create() failed\n"); 237234ff6846SIoana Radulescu goto err_ring; 237334ff6846SIoana Radulescu } 237434ff6846SIoana Radulescu } 237534ff6846SIoana Radulescu 237634ff6846SIoana Radulescu return 0; 237734ff6846SIoana Radulescu 237834ff6846SIoana Radulescu err_ring: 237934ff6846SIoana Radulescu for (i = 0; i < priv->num_channels; i++) { 238034ff6846SIoana Radulescu if (!priv->channel[i]->store) 238134ff6846SIoana Radulescu break; 238234ff6846SIoana Radulescu dpaa2_io_store_destroy(priv->channel[i]->store); 238334ff6846SIoana Radulescu } 238434ff6846SIoana Radulescu 238534ff6846SIoana Radulescu return -ENOMEM; 238634ff6846SIoana Radulescu } 238734ff6846SIoana Radulescu 238834ff6846SIoana Radulescu static void free_rings(struct dpaa2_eth_priv *priv) 238934ff6846SIoana Radulescu { 239034ff6846SIoana Radulescu int i; 239134ff6846SIoana Radulescu 239234ff6846SIoana Radulescu for (i = 0; i < priv->num_channels; i++) 239334ff6846SIoana Radulescu dpaa2_io_store_destroy(priv->channel[i]->store); 239434ff6846SIoana Radulescu } 239534ff6846SIoana Radulescu 239634ff6846SIoana Radulescu static int set_mac_addr(struct dpaa2_eth_priv *priv) 239734ff6846SIoana Radulescu { 239834ff6846SIoana Radulescu struct net_device *net_dev = priv->net_dev; 239934ff6846SIoana Radulescu struct device *dev = net_dev->dev.parent; 240034ff6846SIoana Radulescu u8 mac_addr[ETH_ALEN], dpni_mac_addr[ETH_ALEN]; 240134ff6846SIoana Radulescu int err; 240234ff6846SIoana Radulescu 240334ff6846SIoana Radulescu /* Get firmware address, if any */ 240434ff6846SIoana Radulescu err = dpni_get_port_mac_addr(priv->mc_io, 0, priv->mc_token, mac_addr); 240534ff6846SIoana Radulescu if (err) { 240634ff6846SIoana Radulescu dev_err(dev, "dpni_get_port_mac_addr() failed\n"); 240734ff6846SIoana Radulescu return err; 240834ff6846SIoana Radulescu } 240934ff6846SIoana Radulescu 241034ff6846SIoana Radulescu /* Get DPNI attributes address, if any */ 241134ff6846SIoana Radulescu err = dpni_get_primary_mac_addr(priv->mc_io, 0, priv->mc_token, 241234ff6846SIoana Radulescu dpni_mac_addr); 241334ff6846SIoana Radulescu if (err) { 241434ff6846SIoana Radulescu dev_err(dev, "dpni_get_primary_mac_addr() failed\n"); 241534ff6846SIoana Radulescu return err; 241634ff6846SIoana Radulescu } 241734ff6846SIoana Radulescu 241834ff6846SIoana Radulescu /* First check if firmware has any address configured by bootloader */ 241934ff6846SIoana Radulescu if (!is_zero_ether_addr(mac_addr)) { 242034ff6846SIoana Radulescu /* If the DPMAC addr != DPNI addr, update it */ 242134ff6846SIoana Radulescu if (!ether_addr_equal(mac_addr, dpni_mac_addr)) { 242234ff6846SIoana Radulescu err = dpni_set_primary_mac_addr(priv->mc_io, 0, 242334ff6846SIoana Radulescu priv->mc_token, 242434ff6846SIoana Radulescu mac_addr); 242534ff6846SIoana Radulescu if (err) { 242634ff6846SIoana Radulescu dev_err(dev, "dpni_set_primary_mac_addr() failed\n"); 242734ff6846SIoana Radulescu return err; 242834ff6846SIoana Radulescu } 242934ff6846SIoana Radulescu } 243034ff6846SIoana Radulescu memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len); 243134ff6846SIoana Radulescu } else if (is_zero_ether_addr(dpni_mac_addr)) { 243234ff6846SIoana Radulescu /* No MAC address configured, fill in net_dev->dev_addr 243334ff6846SIoana Radulescu * with a random one 243434ff6846SIoana Radulescu */ 243534ff6846SIoana Radulescu eth_hw_addr_random(net_dev); 243634ff6846SIoana Radulescu dev_dbg_once(dev, "device(s) have all-zero hwaddr, replaced with random\n"); 243734ff6846SIoana Radulescu 243834ff6846SIoana Radulescu err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token, 243934ff6846SIoana Radulescu net_dev->dev_addr); 244034ff6846SIoana Radulescu if (err) { 244134ff6846SIoana Radulescu dev_err(dev, "dpni_set_primary_mac_addr() failed\n"); 244234ff6846SIoana Radulescu return err; 244334ff6846SIoana Radulescu } 244434ff6846SIoana Radulescu 244534ff6846SIoana Radulescu /* Override NET_ADDR_RANDOM set by eth_hw_addr_random(); for all 244634ff6846SIoana Radulescu * practical purposes, this will be our "permanent" mac address, 244734ff6846SIoana Radulescu * at least until the next reboot. This move will also permit 244834ff6846SIoana Radulescu * register_netdevice() to properly fill up net_dev->perm_addr. 244934ff6846SIoana Radulescu */ 245034ff6846SIoana Radulescu net_dev->addr_assign_type = NET_ADDR_PERM; 245134ff6846SIoana Radulescu } else { 245234ff6846SIoana Radulescu /* NET_ADDR_PERM is default, all we have to do is 245334ff6846SIoana Radulescu * fill in the device addr. 245434ff6846SIoana Radulescu */ 245534ff6846SIoana Radulescu memcpy(net_dev->dev_addr, dpni_mac_addr, net_dev->addr_len); 245634ff6846SIoana Radulescu } 245734ff6846SIoana Radulescu 245834ff6846SIoana Radulescu return 0; 245934ff6846SIoana Radulescu } 246034ff6846SIoana Radulescu 246134ff6846SIoana Radulescu static int netdev_init(struct net_device *net_dev) 246234ff6846SIoana Radulescu { 246334ff6846SIoana Radulescu struct device *dev = net_dev->dev.parent; 246434ff6846SIoana Radulescu struct dpaa2_eth_priv *priv = netdev_priv(net_dev); 246534ff6846SIoana Radulescu u32 options = priv->dpni_attrs.options; 246634ff6846SIoana Radulescu u64 supported = 0, not_supported = 0; 246734ff6846SIoana Radulescu u8 bcast_addr[ETH_ALEN]; 246834ff6846SIoana Radulescu u8 num_queues; 246934ff6846SIoana Radulescu int err; 247034ff6846SIoana Radulescu 247134ff6846SIoana Radulescu net_dev->netdev_ops = &dpaa2_eth_ops; 247234ff6846SIoana Radulescu net_dev->ethtool_ops = &dpaa2_ethtool_ops; 247334ff6846SIoana Radulescu 247434ff6846SIoana Radulescu err = set_mac_addr(priv); 247534ff6846SIoana Radulescu if (err) 247634ff6846SIoana Radulescu return err; 247734ff6846SIoana Radulescu 247834ff6846SIoana Radulescu /* Explicitly add the broadcast address to the MAC filtering table */ 247934ff6846SIoana Radulescu eth_broadcast_addr(bcast_addr); 248034ff6846SIoana Radulescu err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token, bcast_addr); 248134ff6846SIoana Radulescu if (err) { 248234ff6846SIoana Radulescu dev_err(dev, "dpni_add_mac_addr() failed\n"); 248334ff6846SIoana Radulescu return err; 248434ff6846SIoana Radulescu } 248534ff6846SIoana Radulescu 248634ff6846SIoana Radulescu /* Set MTU upper limit; lower limit is 68B (default value) */ 248734ff6846SIoana Radulescu net_dev->max_mtu = DPAA2_ETH_MAX_MTU; 248834ff6846SIoana Radulescu err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token, 248934ff6846SIoana Radulescu DPAA2_ETH_MFL); 249034ff6846SIoana Radulescu if (err) { 249134ff6846SIoana Radulescu dev_err(dev, "dpni_set_max_frame_length() failed\n"); 249234ff6846SIoana Radulescu return err; 249334ff6846SIoana Radulescu } 249434ff6846SIoana Radulescu 249534ff6846SIoana Radulescu /* Set actual number of queues in the net device */ 249634ff6846SIoana Radulescu num_queues = dpaa2_eth_queue_count(priv); 249734ff6846SIoana Radulescu err = netif_set_real_num_tx_queues(net_dev, num_queues); 249834ff6846SIoana Radulescu if (err) { 249934ff6846SIoana Radulescu dev_err(dev, "netif_set_real_num_tx_queues() failed\n"); 250034ff6846SIoana Radulescu return err; 250134ff6846SIoana Radulescu } 250234ff6846SIoana Radulescu err = netif_set_real_num_rx_queues(net_dev, num_queues); 250334ff6846SIoana Radulescu if (err) { 250434ff6846SIoana Radulescu dev_err(dev, "netif_set_real_num_rx_queues() failed\n"); 250534ff6846SIoana Radulescu return err; 250634ff6846SIoana Radulescu } 250734ff6846SIoana Radulescu 250834ff6846SIoana Radulescu /* Capabilities listing */ 250934ff6846SIoana Radulescu supported |= IFF_LIVE_ADDR_CHANGE; 251034ff6846SIoana Radulescu 251134ff6846SIoana Radulescu if (options & DPNI_OPT_NO_MAC_FILTER) 251234ff6846SIoana Radulescu not_supported |= IFF_UNICAST_FLT; 251334ff6846SIoana Radulescu else 251434ff6846SIoana Radulescu supported |= IFF_UNICAST_FLT; 251534ff6846SIoana Radulescu 251634ff6846SIoana Radulescu net_dev->priv_flags |= supported; 251734ff6846SIoana Radulescu net_dev->priv_flags &= ~not_supported; 251834ff6846SIoana Radulescu 251934ff6846SIoana Radulescu /* Features */ 252034ff6846SIoana Radulescu net_dev->features = NETIF_F_RXCSUM | 252134ff6846SIoana Radulescu NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 252234ff6846SIoana Radulescu NETIF_F_SG | NETIF_F_HIGHDMA | 252334ff6846SIoana Radulescu NETIF_F_LLTX; 252434ff6846SIoana Radulescu net_dev->hw_features = net_dev->features; 252534ff6846SIoana Radulescu 252634ff6846SIoana Radulescu return 0; 252734ff6846SIoana Radulescu } 252834ff6846SIoana Radulescu 252934ff6846SIoana Radulescu static int poll_link_state(void *arg) 253034ff6846SIoana Radulescu { 253134ff6846SIoana Radulescu struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)arg; 253234ff6846SIoana Radulescu int err; 253334ff6846SIoana Radulescu 253434ff6846SIoana Radulescu while (!kthread_should_stop()) { 253534ff6846SIoana Radulescu err = link_state_update(priv); 253634ff6846SIoana Radulescu if (unlikely(err)) 253734ff6846SIoana Radulescu return err; 253834ff6846SIoana Radulescu 253934ff6846SIoana Radulescu msleep(DPAA2_ETH_LINK_STATE_REFRESH); 254034ff6846SIoana Radulescu } 254134ff6846SIoana Radulescu 254234ff6846SIoana Radulescu return 0; 254334ff6846SIoana Radulescu } 254434ff6846SIoana Radulescu 254534ff6846SIoana Radulescu static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg) 254634ff6846SIoana Radulescu { 254734ff6846SIoana Radulescu u32 status = ~0; 254834ff6846SIoana Radulescu struct device *dev = (struct device *)arg; 254934ff6846SIoana Radulescu struct fsl_mc_device *dpni_dev = to_fsl_mc_device(dev); 255034ff6846SIoana Radulescu struct net_device *net_dev = dev_get_drvdata(dev); 255134ff6846SIoana Radulescu int err; 255234ff6846SIoana Radulescu 255334ff6846SIoana Radulescu err = dpni_get_irq_status(dpni_dev->mc_io, 0, dpni_dev->mc_handle, 255434ff6846SIoana Radulescu DPNI_IRQ_INDEX, &status); 255534ff6846SIoana Radulescu if (unlikely(err)) { 255634ff6846SIoana Radulescu netdev_err(net_dev, "Can't get irq status (err %d)\n", err); 255734ff6846SIoana Radulescu return IRQ_HANDLED; 255834ff6846SIoana Radulescu } 255934ff6846SIoana Radulescu 256034ff6846SIoana Radulescu if (status & DPNI_IRQ_EVENT_LINK_CHANGED) 256134ff6846SIoana Radulescu link_state_update(netdev_priv(net_dev)); 256234ff6846SIoana Radulescu 256334ff6846SIoana Radulescu return IRQ_HANDLED; 256434ff6846SIoana Radulescu } 256534ff6846SIoana Radulescu 256634ff6846SIoana Radulescu static int setup_irqs(struct fsl_mc_device *ls_dev) 256734ff6846SIoana Radulescu { 256834ff6846SIoana Radulescu int err = 0; 256934ff6846SIoana Radulescu struct fsl_mc_device_irq *irq; 257034ff6846SIoana Radulescu 257134ff6846SIoana Radulescu err = fsl_mc_allocate_irqs(ls_dev); 257234ff6846SIoana Radulescu if (err) { 257334ff6846SIoana Radulescu dev_err(&ls_dev->dev, "MC irqs allocation failed\n"); 257434ff6846SIoana Radulescu return err; 257534ff6846SIoana Radulescu } 257634ff6846SIoana Radulescu 257734ff6846SIoana Radulescu irq = ls_dev->irqs[0]; 257834ff6846SIoana Radulescu err = devm_request_threaded_irq(&ls_dev->dev, irq->msi_desc->irq, 257934ff6846SIoana Radulescu NULL, dpni_irq0_handler_thread, 258034ff6846SIoana Radulescu IRQF_NO_SUSPEND | IRQF_ONESHOT, 258134ff6846SIoana Radulescu dev_name(&ls_dev->dev), &ls_dev->dev); 258234ff6846SIoana Radulescu if (err < 0) { 258334ff6846SIoana Radulescu dev_err(&ls_dev->dev, "devm_request_threaded_irq(): %d\n", err); 258434ff6846SIoana Radulescu goto free_mc_irq; 258534ff6846SIoana Radulescu } 258634ff6846SIoana Radulescu 258734ff6846SIoana Radulescu err = dpni_set_irq_mask(ls_dev->mc_io, 0, ls_dev->mc_handle, 258834ff6846SIoana Radulescu DPNI_IRQ_INDEX, DPNI_IRQ_EVENT_LINK_CHANGED); 258934ff6846SIoana Radulescu if (err < 0) { 259034ff6846SIoana Radulescu dev_err(&ls_dev->dev, "dpni_set_irq_mask(): %d\n", err); 259134ff6846SIoana Radulescu goto free_irq; 259234ff6846SIoana Radulescu } 259334ff6846SIoana Radulescu 259434ff6846SIoana Radulescu err = dpni_set_irq_enable(ls_dev->mc_io, 0, ls_dev->mc_handle, 259534ff6846SIoana Radulescu DPNI_IRQ_INDEX, 1); 259634ff6846SIoana Radulescu if (err < 0) { 259734ff6846SIoana Radulescu dev_err(&ls_dev->dev, "dpni_set_irq_enable(): %d\n", err); 259834ff6846SIoana Radulescu goto free_irq; 259934ff6846SIoana Radulescu } 260034ff6846SIoana Radulescu 260134ff6846SIoana Radulescu return 0; 260234ff6846SIoana Radulescu 260334ff6846SIoana Radulescu free_irq: 260434ff6846SIoana Radulescu devm_free_irq(&ls_dev->dev, irq->msi_desc->irq, &ls_dev->dev); 260534ff6846SIoana Radulescu free_mc_irq: 260634ff6846SIoana Radulescu fsl_mc_free_irqs(ls_dev); 260734ff6846SIoana Radulescu 260834ff6846SIoana Radulescu return err; 260934ff6846SIoana Radulescu } 261034ff6846SIoana Radulescu 261134ff6846SIoana Radulescu static void add_ch_napi(struct dpaa2_eth_priv *priv) 261234ff6846SIoana Radulescu { 261334ff6846SIoana Radulescu int i; 261434ff6846SIoana Radulescu struct dpaa2_eth_channel *ch; 261534ff6846SIoana Radulescu 261634ff6846SIoana Radulescu for (i = 0; i < priv->num_channels; i++) { 261734ff6846SIoana Radulescu ch = priv->channel[i]; 261834ff6846SIoana Radulescu /* NAPI weight *MUST* be a multiple of DPAA2_ETH_STORE_SIZE */ 261934ff6846SIoana Radulescu netif_napi_add(priv->net_dev, &ch->napi, dpaa2_eth_poll, 262034ff6846SIoana Radulescu NAPI_POLL_WEIGHT); 262134ff6846SIoana Radulescu } 262234ff6846SIoana Radulescu } 262334ff6846SIoana Radulescu 262434ff6846SIoana Radulescu static void del_ch_napi(struct dpaa2_eth_priv *priv) 262534ff6846SIoana Radulescu { 262634ff6846SIoana Radulescu int i; 262734ff6846SIoana Radulescu struct dpaa2_eth_channel *ch; 262834ff6846SIoana Radulescu 262934ff6846SIoana Radulescu for (i = 0; i < priv->num_channels; i++) { 263034ff6846SIoana Radulescu ch = priv->channel[i]; 263134ff6846SIoana Radulescu netif_napi_del(&ch->napi); 263234ff6846SIoana Radulescu } 263334ff6846SIoana Radulescu } 263434ff6846SIoana Radulescu 263534ff6846SIoana Radulescu static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev) 263634ff6846SIoana Radulescu { 263734ff6846SIoana Radulescu struct device *dev; 263834ff6846SIoana Radulescu struct net_device *net_dev = NULL; 263934ff6846SIoana Radulescu struct dpaa2_eth_priv *priv = NULL; 264034ff6846SIoana Radulescu int err = 0; 264134ff6846SIoana Radulescu 264234ff6846SIoana Radulescu dev = &dpni_dev->dev; 264334ff6846SIoana Radulescu 264434ff6846SIoana Radulescu /* Net device */ 264534ff6846SIoana Radulescu net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA2_ETH_MAX_TX_QUEUES); 264634ff6846SIoana Radulescu if (!net_dev) { 264734ff6846SIoana Radulescu dev_err(dev, "alloc_etherdev_mq() failed\n"); 264834ff6846SIoana Radulescu return -ENOMEM; 264934ff6846SIoana Radulescu } 265034ff6846SIoana Radulescu 265134ff6846SIoana Radulescu SET_NETDEV_DEV(net_dev, dev); 265234ff6846SIoana Radulescu dev_set_drvdata(dev, net_dev); 265334ff6846SIoana Radulescu 265434ff6846SIoana Radulescu priv = netdev_priv(net_dev); 265534ff6846SIoana Radulescu priv->net_dev = net_dev; 265634ff6846SIoana Radulescu 265734ff6846SIoana Radulescu priv->iommu_domain = iommu_get_domain_for_dev(dev); 265834ff6846SIoana Radulescu 265934ff6846SIoana Radulescu /* Obtain a MC portal */ 266034ff6846SIoana Radulescu err = fsl_mc_portal_allocate(dpni_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL, 266134ff6846SIoana Radulescu &priv->mc_io); 266234ff6846SIoana Radulescu if (err) { 266334ff6846SIoana Radulescu if (err == -ENXIO) 266434ff6846SIoana Radulescu err = -EPROBE_DEFER; 266534ff6846SIoana Radulescu else 266634ff6846SIoana Radulescu dev_err(dev, "MC portal allocation failed\n"); 266734ff6846SIoana Radulescu goto err_portal_alloc; 266834ff6846SIoana Radulescu } 266934ff6846SIoana Radulescu 267034ff6846SIoana Radulescu /* MC objects initialization and configuration */ 267134ff6846SIoana Radulescu err = setup_dpni(dpni_dev); 267234ff6846SIoana Radulescu if (err) 267334ff6846SIoana Radulescu goto err_dpni_setup; 267434ff6846SIoana Radulescu 267534ff6846SIoana Radulescu err = setup_dpio(priv); 267634ff6846SIoana Radulescu if (err) 267734ff6846SIoana Radulescu goto err_dpio_setup; 267834ff6846SIoana Radulescu 267934ff6846SIoana Radulescu setup_fqs(priv); 268034ff6846SIoana Radulescu 268134ff6846SIoana Radulescu err = setup_dpbp(priv); 268234ff6846SIoana Radulescu if (err) 268334ff6846SIoana Radulescu goto err_dpbp_setup; 268434ff6846SIoana Radulescu 268534ff6846SIoana Radulescu err = bind_dpni(priv); 268634ff6846SIoana Radulescu if (err) 268734ff6846SIoana Radulescu goto err_bind; 268834ff6846SIoana Radulescu 268934ff6846SIoana Radulescu /* Add a NAPI context for each channel */ 269034ff6846SIoana Radulescu add_ch_napi(priv); 269134ff6846SIoana Radulescu 269234ff6846SIoana Radulescu /* Percpu statistics */ 269334ff6846SIoana Radulescu priv->percpu_stats = alloc_percpu(*priv->percpu_stats); 269434ff6846SIoana Radulescu if (!priv->percpu_stats) { 269534ff6846SIoana Radulescu dev_err(dev, "alloc_percpu(percpu_stats) failed\n"); 269634ff6846SIoana Radulescu err = -ENOMEM; 269734ff6846SIoana Radulescu goto err_alloc_percpu_stats; 269834ff6846SIoana Radulescu } 269934ff6846SIoana Radulescu priv->percpu_extras = alloc_percpu(*priv->percpu_extras); 270034ff6846SIoana Radulescu if (!priv->percpu_extras) { 270134ff6846SIoana Radulescu dev_err(dev, "alloc_percpu(percpu_extras) failed\n"); 270234ff6846SIoana Radulescu err = -ENOMEM; 270334ff6846SIoana Radulescu goto err_alloc_percpu_extras; 270434ff6846SIoana Radulescu } 270534ff6846SIoana Radulescu 270634ff6846SIoana Radulescu err = netdev_init(net_dev); 270734ff6846SIoana Radulescu if (err) 270834ff6846SIoana Radulescu goto err_netdev_init; 270934ff6846SIoana Radulescu 271034ff6846SIoana Radulescu /* Configure checksum offload based on current interface flags */ 271134ff6846SIoana Radulescu err = set_rx_csum(priv, !!(net_dev->features & NETIF_F_RXCSUM)); 271234ff6846SIoana Radulescu if (err) 271334ff6846SIoana Radulescu goto err_csum; 271434ff6846SIoana Radulescu 271534ff6846SIoana Radulescu err = set_tx_csum(priv, !!(net_dev->features & 271634ff6846SIoana Radulescu (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))); 271734ff6846SIoana Radulescu if (err) 271834ff6846SIoana Radulescu goto err_csum; 271934ff6846SIoana Radulescu 272034ff6846SIoana Radulescu err = alloc_rings(priv); 272134ff6846SIoana Radulescu if (err) 272234ff6846SIoana Radulescu goto err_alloc_rings; 272334ff6846SIoana Radulescu 272434ff6846SIoana Radulescu err = setup_irqs(dpni_dev); 272534ff6846SIoana Radulescu if (err) { 272634ff6846SIoana Radulescu netdev_warn(net_dev, "Failed to set link interrupt, fall back to polling\n"); 272734ff6846SIoana Radulescu priv->poll_thread = kthread_run(poll_link_state, priv, 272834ff6846SIoana Radulescu "%s_poll_link", net_dev->name); 272934ff6846SIoana Radulescu if (IS_ERR(priv->poll_thread)) { 273034ff6846SIoana Radulescu dev_err(dev, "Error starting polling thread\n"); 273134ff6846SIoana Radulescu goto err_poll_thread; 273234ff6846SIoana Radulescu } 273334ff6846SIoana Radulescu priv->do_link_poll = true; 273434ff6846SIoana Radulescu } 273534ff6846SIoana Radulescu 273634ff6846SIoana Radulescu err = register_netdev(net_dev); 273734ff6846SIoana Radulescu if (err < 0) { 273834ff6846SIoana Radulescu dev_err(dev, "register_netdev() failed\n"); 273934ff6846SIoana Radulescu goto err_netdev_reg; 274034ff6846SIoana Radulescu } 274134ff6846SIoana Radulescu 274234ff6846SIoana Radulescu dev_info(dev, "Probed interface %s\n", net_dev->name); 274334ff6846SIoana Radulescu return 0; 274434ff6846SIoana Radulescu 274534ff6846SIoana Radulescu err_netdev_reg: 274634ff6846SIoana Radulescu if (priv->do_link_poll) 274734ff6846SIoana Radulescu kthread_stop(priv->poll_thread); 274834ff6846SIoana Radulescu else 274934ff6846SIoana Radulescu fsl_mc_free_irqs(dpni_dev); 275034ff6846SIoana Radulescu err_poll_thread: 275134ff6846SIoana Radulescu free_rings(priv); 275234ff6846SIoana Radulescu err_alloc_rings: 275334ff6846SIoana Radulescu err_csum: 275434ff6846SIoana Radulescu err_netdev_init: 275534ff6846SIoana Radulescu free_percpu(priv->percpu_extras); 275634ff6846SIoana Radulescu err_alloc_percpu_extras: 275734ff6846SIoana Radulescu free_percpu(priv->percpu_stats); 275834ff6846SIoana Radulescu err_alloc_percpu_stats: 275934ff6846SIoana Radulescu del_ch_napi(priv); 276034ff6846SIoana Radulescu err_bind: 276134ff6846SIoana Radulescu free_dpbp(priv); 276234ff6846SIoana Radulescu err_dpbp_setup: 276334ff6846SIoana Radulescu free_dpio(priv); 276434ff6846SIoana Radulescu err_dpio_setup: 276534ff6846SIoana Radulescu free_dpni(priv); 276634ff6846SIoana Radulescu err_dpni_setup: 276734ff6846SIoana Radulescu fsl_mc_portal_free(priv->mc_io); 276834ff6846SIoana Radulescu err_portal_alloc: 276934ff6846SIoana Radulescu dev_set_drvdata(dev, NULL); 277034ff6846SIoana Radulescu free_netdev(net_dev); 277134ff6846SIoana Radulescu 277234ff6846SIoana Radulescu return err; 277334ff6846SIoana Radulescu } 277434ff6846SIoana Radulescu 277534ff6846SIoana Radulescu static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev) 277634ff6846SIoana Radulescu { 277734ff6846SIoana Radulescu struct device *dev; 277834ff6846SIoana Radulescu struct net_device *net_dev; 277934ff6846SIoana Radulescu struct dpaa2_eth_priv *priv; 278034ff6846SIoana Radulescu 278134ff6846SIoana Radulescu dev = &ls_dev->dev; 278234ff6846SIoana Radulescu net_dev = dev_get_drvdata(dev); 278334ff6846SIoana Radulescu priv = netdev_priv(net_dev); 278434ff6846SIoana Radulescu 278534ff6846SIoana Radulescu unregister_netdev(net_dev); 278634ff6846SIoana Radulescu 278734ff6846SIoana Radulescu if (priv->do_link_poll) 278834ff6846SIoana Radulescu kthread_stop(priv->poll_thread); 278934ff6846SIoana Radulescu else 279034ff6846SIoana Radulescu fsl_mc_free_irqs(ls_dev); 279134ff6846SIoana Radulescu 279234ff6846SIoana Radulescu free_rings(priv); 279334ff6846SIoana Radulescu free_percpu(priv->percpu_stats); 279434ff6846SIoana Radulescu free_percpu(priv->percpu_extras); 279534ff6846SIoana Radulescu 279634ff6846SIoana Radulescu del_ch_napi(priv); 279734ff6846SIoana Radulescu free_dpbp(priv); 279834ff6846SIoana Radulescu free_dpio(priv); 279934ff6846SIoana Radulescu free_dpni(priv); 280034ff6846SIoana Radulescu 280134ff6846SIoana Radulescu fsl_mc_portal_free(priv->mc_io); 280234ff6846SIoana Radulescu 280334ff6846SIoana Radulescu free_netdev(net_dev); 280434ff6846SIoana Radulescu 280534ff6846SIoana Radulescu dev_dbg(net_dev->dev.parent, "Removed interface %s\n", net_dev->name); 280634ff6846SIoana Radulescu 280734ff6846SIoana Radulescu return 0; 280834ff6846SIoana Radulescu } 280934ff6846SIoana Radulescu 281034ff6846SIoana Radulescu static const struct fsl_mc_device_id dpaa2_eth_match_id_table[] = { 281134ff6846SIoana Radulescu { 281234ff6846SIoana Radulescu .vendor = FSL_MC_VENDOR_FREESCALE, 281334ff6846SIoana Radulescu .obj_type = "dpni", 281434ff6846SIoana Radulescu }, 281534ff6846SIoana Radulescu { .vendor = 0x0 } 281634ff6846SIoana Radulescu }; 281734ff6846SIoana Radulescu MODULE_DEVICE_TABLE(fslmc, dpaa2_eth_match_id_table); 281834ff6846SIoana Radulescu 281934ff6846SIoana Radulescu static struct fsl_mc_driver dpaa2_eth_driver = { 282034ff6846SIoana Radulescu .driver = { 282134ff6846SIoana Radulescu .name = KBUILD_MODNAME, 282234ff6846SIoana Radulescu .owner = THIS_MODULE, 282334ff6846SIoana Radulescu }, 282434ff6846SIoana Radulescu .probe = dpaa2_eth_probe, 282534ff6846SIoana Radulescu .remove = dpaa2_eth_remove, 282634ff6846SIoana Radulescu .match_id_table = dpaa2_eth_match_id_table 282734ff6846SIoana Radulescu }; 282834ff6846SIoana Radulescu 282934ff6846SIoana Radulescu module_fsl_mc_driver(dpaa2_eth_driver); 2830