1f1e37e31SAntoine Tenart // SPDX-License-Identifier: GPL-2.0
2db9d7d36SMaxime Chevallier /*
3db9d7d36SMaxime Chevallier * Driver for Marvell PPv2 network controller for Armada 375 SoC.
4db9d7d36SMaxime Chevallier *
5db9d7d36SMaxime Chevallier * Copyright (C) 2014 Marvell
6db9d7d36SMaxime Chevallier *
7db9d7d36SMaxime Chevallier * Marcin Wojtas <mw@semihalf.com>
8db9d7d36SMaxime Chevallier */
9db9d7d36SMaxime Chevallier
10db9d7d36SMaxime Chevallier #include <linux/acpi.h>
11db9d7d36SMaxime Chevallier #include <linux/kernel.h>
12db9d7d36SMaxime Chevallier #include <linux/netdevice.h>
13db9d7d36SMaxime Chevallier #include <linux/etherdevice.h>
14db9d7d36SMaxime Chevallier #include <linux/platform_device.h>
15db9d7d36SMaxime Chevallier #include <linux/skbuff.h>
16db9d7d36SMaxime Chevallier #include <linux/inetdevice.h>
17db9d7d36SMaxime Chevallier #include <linux/mbus.h>
18db9d7d36SMaxime Chevallier #include <linux/module.h>
19db9d7d36SMaxime Chevallier #include <linux/mfd/syscon.h>
20db9d7d36SMaxime Chevallier #include <linux/interrupt.h>
21db9d7d36SMaxime Chevallier #include <linux/cpumask.h>
22db9d7d36SMaxime Chevallier #include <linux/of.h>
23db9d7d36SMaxime Chevallier #include <linux/of_irq.h>
24db9d7d36SMaxime Chevallier #include <linux/of_mdio.h>
25db9d7d36SMaxime Chevallier #include <linux/of_net.h>
26db9d7d36SMaxime Chevallier #include <linux/of_address.h>
27db9d7d36SMaxime Chevallier #include <linux/phy.h>
28db9d7d36SMaxime Chevallier #include <linux/phylink.h>
29db9d7d36SMaxime Chevallier #include <linux/phy/phy.h>
30f5015a59SRussell King #include <linux/ptp_classify.h>
31db9d7d36SMaxime Chevallier #include <linux/clk.h>
32db9d7d36SMaxime Chevallier #include <linux/hrtimer.h>
33db9d7d36SMaxime Chevallier #include <linux/ktime.h>
34db9d7d36SMaxime Chevallier #include <linux/regmap.h>
35db9d7d36SMaxime Chevallier #include <uapi/linux/ppp_defs.h>
36db9d7d36SMaxime Chevallier #include <net/ip.h>
37db9d7d36SMaxime Chevallier #include <net/ipv6.h>
38a9ca9f9cSYunsheng Lin #include <net/page_pool/helpers.h>
39db9d7d36SMaxime Chevallier #include <net/tso.h>
4007dd0a7aSMatteo Croce #include <linux/bpf_trace.h>
41db9d7d36SMaxime Chevallier
42db9d7d36SMaxime Chevallier #include "mvpp2.h"
43db9d7d36SMaxime Chevallier #include "mvpp2_prs.h"
44db9d7d36SMaxime Chevallier #include "mvpp2_cls.h"
45db9d7d36SMaxime Chevallier
/* Logical BM pool indices: one pool per buffer size class. */
enum mvpp2_bm_pool_log_num {
	MVPP2_BM_SHORT,
	MVPP2_BM_LONG,
	MVPP2_BM_JUMBO,
	MVPP2_BM_POOLS_NUM
};

/* Per-class pool defaults (packet size and buffer count); filled in
 * elsewhere in this file before the pools are created.
 */
static struct {
	int pkt_size;
	int buf_num;
} mvpp2_pools[MVPP2_BM_POOLS_NUM];

/* The prototype is added here to be used in start_dev when using ACPI. This
 * will be removed once phylink is used for all modes (dt+ACPI).
 */
static void mvpp2_acpi_start(struct mvpp2_port *port);

/* Queue modes */
#define MVPP2_QDIST_SINGLE_MODE	0
#define MVPP2_QDIST_MULTI_MODE	1

/* RX queue distribution mode, overridable at module load time */
static int queue_mode = MVPP2_QDIST_MULTI_MODE;

module_param(queue_mode, int, 0444);
MODULE_PARM_DESC(queue_mode, "Set queue_mode (single=0, multi=1)");
71db9d7d36SMaxime Chevallier
72db9d7d36SMaxime Chevallier /* Utility/helper methods */
73db9d7d36SMaxime Chevallier
/* Write @data to register @offset through SW thread 0's register window. */
void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data)
{
	writel(data, priv->swth_base[0] + offset);
}
78db9d7d36SMaxime Chevallier
/* Read register @offset through SW thread 0's register window. */
u32 mvpp2_read(struct mvpp2 *priv, u32 offset)
{
	return readl(priv->swth_base[0] + offset);
}
83db9d7d36SMaxime Chevallier
/* Same as mvpp2_read() but without the read memory barrier (relaxed
 * ordering); for hot paths where ordering against DMA is not needed.
 */
static u32 mvpp2_read_relaxed(struct mvpp2 *priv, u32 offset)
{
	return readl_relaxed(priv->swth_base[0] + offset);
}
88543ec376SAntoine Tenart
/* Map a CPU number to a HW SW-thread index. When there are more CPUs than
 * threads, several CPUs share the same thread (simple modulo mapping).
 */
static inline u32 mvpp2_cpu_to_thread(struct mvpp2 *priv, int cpu)
{
	return cpu % priv->nthreads;
}
93543ec376SAntoine Tenart
/* Write @data to register @offset in the CM3 co-processor address space. */
static void mvpp2_cm3_write(struct mvpp2 *priv, u32 offset, u32 data)
{
	writel(data, priv->cm3_base + offset);
}
98a59d3542SStefan Chulski
/* Read register @offset from the CM3 co-processor address space. */
static u32 mvpp2_cm3_read(struct mvpp2 *priv, u32 offset)
{
	return readl(priv->cm3_base + offset);
}
103a59d3542SStefan Chulski
/* Create a page_pool of @num pages for RX buffer recycling. The pool does
 * the DMA mapping itself (PP_FLAG_DMA_MAP) and syncs up to @len bytes for
 * the device on recycle (PP_FLAG_DMA_SYNC_DEV). @dma_dir is FROM_DEVICE
 * or BIDIRECTIONAL depending on whether XDP may transmit from the buffer.
 * Returns a pool pointer or an ERR_PTR() from page_pool_create().
 */
static struct page_pool *
mvpp2_create_page_pool(struct device *dev, int num, int len,
		       enum dma_data_direction dma_dir)
{
	struct page_pool_params pp_params = {
		/* internal DMA mapping in page_pool */
		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.pool_size = num,
		.nid = NUMA_NO_NODE,
		.dev = dev,
		.dma_dir = dma_dir,
		/* leave headroom for XDP and skb_shared_info */
		.offset = MVPP2_SKB_HEADROOM,
		.max_len = len,
	};

	return page_pool_create(&pp_params);
}
121b27db227SMatteo Croce
122db9d7d36SMaxime Chevallier /* These accessors should be used to access:
123db9d7d36SMaxime Chevallier *
124543ec376SAntoine Tenart * - per-thread registers, where each thread has its own copy of the
125db9d7d36SMaxime Chevallier * register.
126db9d7d36SMaxime Chevallier *
127db9d7d36SMaxime Chevallier * MVPP2_BM_VIRT_ALLOC_REG
128db9d7d36SMaxime Chevallier * MVPP2_BM_ADDR_HIGH_ALLOC
129db9d7d36SMaxime Chevallier * MVPP22_BM_ADDR_HIGH_RLS_REG
130db9d7d36SMaxime Chevallier * MVPP2_BM_VIRT_RLS_REG
131db9d7d36SMaxime Chevallier * MVPP2_ISR_RX_TX_CAUSE_REG
132db9d7d36SMaxime Chevallier * MVPP2_ISR_RX_TX_MASK_REG
133db9d7d36SMaxime Chevallier * MVPP2_TXQ_NUM_REG
134db9d7d36SMaxime Chevallier * MVPP2_AGGR_TXQ_UPDATE_REG
135db9d7d36SMaxime Chevallier * MVPP2_TXQ_RSVD_REQ_REG
136db9d7d36SMaxime Chevallier * MVPP2_TXQ_RSVD_RSLT_REG
137db9d7d36SMaxime Chevallier * MVPP2_TXQ_SENT_REG
138db9d7d36SMaxime Chevallier * MVPP2_RXQ_NUM_REG
139db9d7d36SMaxime Chevallier *
140543ec376SAntoine Tenart * - global registers that must be accessed through a specific thread
141543ec376SAntoine Tenart * window, because they are related to an access to a per-thread
142db9d7d36SMaxime Chevallier * register
143db9d7d36SMaxime Chevallier *
144db9d7d36SMaxime Chevallier * MVPP2_BM_PHY_ALLOC_REG (related to MVPP2_BM_VIRT_ALLOC_REG)
145db9d7d36SMaxime Chevallier * MVPP2_BM_PHY_RLS_REG (related to MVPP2_BM_VIRT_RLS_REG)
146db9d7d36SMaxime Chevallier * MVPP2_RXQ_THRESH_REG (related to MVPP2_RXQ_NUM_REG)
147db9d7d36SMaxime Chevallier * MVPP2_RXQ_DESC_ADDR_REG (related to MVPP2_RXQ_NUM_REG)
148db9d7d36SMaxime Chevallier * MVPP2_RXQ_DESC_SIZE_REG (related to MVPP2_RXQ_NUM_REG)
149db9d7d36SMaxime Chevallier * MVPP2_RXQ_INDEX_REG (related to MVPP2_RXQ_NUM_REG)
150db9d7d36SMaxime Chevallier * MVPP2_TXQ_PENDING_REG (related to MVPP2_TXQ_NUM_REG)
151db9d7d36SMaxime Chevallier * MVPP2_TXQ_DESC_ADDR_REG (related to MVPP2_TXQ_NUM_REG)
152db9d7d36SMaxime Chevallier * MVPP2_TXQ_DESC_SIZE_REG (related to MVPP2_TXQ_NUM_REG)
153db9d7d36SMaxime Chevallier * MVPP2_TXQ_INDEX_REG (related to MVPP2_TXQ_NUM_REG)
154db9d7d36SMaxime Chevallier * MVPP2_TXQ_PENDING_REG (related to MVPP2_TXQ_NUM_REG)
155db9d7d36SMaxime Chevallier * MVPP2_TXQ_PREF_BUF_REG (related to MVPP2_TXQ_NUM_REG)
156db9d7d36SMaxime Chevallier * MVPP2_TXQ_PREF_BUF_REG (related to MVPP2_TXQ_NUM_REG)
157db9d7d36SMaxime Chevallier */
/* Write @data to @offset through the register window of SW thread
 * @thread (see the per-thread register list in the comment above).
 */
static void mvpp2_thread_write(struct mvpp2 *priv, unsigned int thread,
			       u32 offset, u32 data)
{
	writel(data, priv->swth_base[thread] + offset);
}
163db9d7d36SMaxime Chevallier
/* Read @offset through the register window of SW thread @thread. */
static u32 mvpp2_thread_read(struct mvpp2 *priv, unsigned int thread,
			     u32 offset)
{
	return readl(priv->swth_base[thread] + offset);
}
169db9d7d36SMaxime Chevallier
/* Relaxed (no write barrier) variant of mvpp2_thread_write(), for hot
 * paths where ordering against DMA is handled by the caller.
 */
static void mvpp2_thread_write_relaxed(struct mvpp2 *priv, unsigned int thread,
				       u32 offset, u32 data)
{
	writel_relaxed(data, priv->swth_base[thread] + offset);
}
175db9d7d36SMaxime Chevallier
/* Relaxed (no read barrier) variant of mvpp2_thread_read(). */
static u32 mvpp2_thread_read_relaxed(struct mvpp2 *priv, unsigned int thread,
				     u32 offset)
{
	return readl_relaxed(priv->swth_base[thread] + offset);
}
181db9d7d36SMaxime Chevallier
mvpp2_txdesc_dma_addr_get(struct mvpp2_port * port,struct mvpp2_tx_desc * tx_desc)182db9d7d36SMaxime Chevallier static dma_addr_t mvpp2_txdesc_dma_addr_get(struct mvpp2_port *port,
183db9d7d36SMaxime Chevallier struct mvpp2_tx_desc *tx_desc)
184db9d7d36SMaxime Chevallier {
185db9d7d36SMaxime Chevallier if (port->priv->hw_version == MVPP21)
1867b9c7d7dSMaxime Chevallier return le32_to_cpu(tx_desc->pp21.buf_dma_addr);
187db9d7d36SMaxime Chevallier else
1887b9c7d7dSMaxime Chevallier return le64_to_cpu(tx_desc->pp22.buf_dma_addr_ptp) &
1897b9c7d7dSMaxime Chevallier MVPP2_DESC_DMA_MASK;
190db9d7d36SMaxime Chevallier }
191db9d7d36SMaxime Chevallier
/* Store @dma_addr in a TX descriptor. The HW address field only holds
 * aligned addresses, so the low bits go into the separate packet_offset
 * field. On PPv2.2+ the address shares a 64-bit word with PTP bits, which
 * must be preserved (read-modify-write under MVPP2_DESC_DMA_MASK).
 */
static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port,
				      struct mvpp2_tx_desc *tx_desc,
				      dma_addr_t dma_addr)
{
	dma_addr_t addr, offset;

	/* Split the address into its aligned part and the in-line offset */
	addr = dma_addr & ~MVPP2_TX_DESC_ALIGN;
	offset = dma_addr & MVPP2_TX_DESC_ALIGN;

	if (port->priv->hw_version == MVPP21) {
		tx_desc->pp21.buf_dma_addr = cpu_to_le32(addr);
		tx_desc->pp21.packet_offset = offset;
	} else {
		__le64 val = cpu_to_le64(addr);

		/* Clear only the address bits, keeping the PTP bits */
		tx_desc->pp22.buf_dma_addr_ptp &= ~cpu_to_le64(MVPP2_DESC_DMA_MASK);
		tx_desc->pp22.buf_dma_addr_ptp |= val;
		tx_desc->pp22.packet_offset = offset;
	}
}
212db9d7d36SMaxime Chevallier
mvpp2_txdesc_size_get(struct mvpp2_port * port,struct mvpp2_tx_desc * tx_desc)213db9d7d36SMaxime Chevallier static size_t mvpp2_txdesc_size_get(struct mvpp2_port *port,
214db9d7d36SMaxime Chevallier struct mvpp2_tx_desc *tx_desc)
215db9d7d36SMaxime Chevallier {
216db9d7d36SMaxime Chevallier if (port->priv->hw_version == MVPP21)
2177b9c7d7dSMaxime Chevallier return le16_to_cpu(tx_desc->pp21.data_size);
218db9d7d36SMaxime Chevallier else
2197b9c7d7dSMaxime Chevallier return le16_to_cpu(tx_desc->pp22.data_size);
220db9d7d36SMaxime Chevallier }
221db9d7d36SMaxime Chevallier
/* Record the data size @size in a TX descriptor (little-endian 16-bit
 * field in both descriptor layouts).
 */
static void mvpp2_txdesc_size_set(struct mvpp2_port *port,
				  struct mvpp2_tx_desc *tx_desc,
				  size_t size)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.data_size = cpu_to_le16(size);
	else
		tx_desc->pp22.data_size = cpu_to_le16(size);
}
231db9d7d36SMaxime Chevallier
mvpp2_txdesc_txq_set(struct mvpp2_port * port,struct mvpp2_tx_desc * tx_desc,unsigned int txq)232db9d7d36SMaxime Chevallier static void mvpp2_txdesc_txq_set(struct mvpp2_port *port,
233db9d7d36SMaxime Chevallier struct mvpp2_tx_desc *tx_desc,
234db9d7d36SMaxime Chevallier unsigned int txq)
235db9d7d36SMaxime Chevallier {
236db9d7d36SMaxime Chevallier if (port->priv->hw_version == MVPP21)
237db9d7d36SMaxime Chevallier tx_desc->pp21.phys_txq = txq;
238db9d7d36SMaxime Chevallier else
239db9d7d36SMaxime Chevallier tx_desc->pp22.phys_txq = txq;
240db9d7d36SMaxime Chevallier }
241db9d7d36SMaxime Chevallier
/* Record the command/status word (L3/L4 offload flags, first/last
 * descriptor bits, ...) in a TX descriptor.
 */
static void mvpp2_txdesc_cmd_set(struct mvpp2_port *port,
				 struct mvpp2_tx_desc *tx_desc,
				 unsigned int command)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.command = cpu_to_le32(command);
	else
		tx_desc->pp22.command = cpu_to_le32(command);
}
251db9d7d36SMaxime Chevallier
mvpp2_txdesc_offset_get(struct mvpp2_port * port,struct mvpp2_tx_desc * tx_desc)252db9d7d36SMaxime Chevallier static unsigned int mvpp2_txdesc_offset_get(struct mvpp2_port *port,
253db9d7d36SMaxime Chevallier struct mvpp2_tx_desc *tx_desc)
254db9d7d36SMaxime Chevallier {
255db9d7d36SMaxime Chevallier if (port->priv->hw_version == MVPP21)
256db9d7d36SMaxime Chevallier return tx_desc->pp21.packet_offset;
257db9d7d36SMaxime Chevallier else
258db9d7d36SMaxime Chevallier return tx_desc->pp22.packet_offset;
259db9d7d36SMaxime Chevallier }
260db9d7d36SMaxime Chevallier
/* Extract the buffer DMA address from an RX descriptor. On PPv2.2+ the
 * address shares a 64-bit field with key-hash bits, masked off here.
 */
static dma_addr_t mvpp2_rxdesc_dma_addr_get(struct mvpp2_port *port,
					    struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le32_to_cpu(rx_desc->pp21.buf_dma_addr);
	else
		return le64_to_cpu(rx_desc->pp22.buf_dma_addr_key_hash) &
		       MVPP2_DESC_DMA_MASK;
}
270db9d7d36SMaxime Chevallier
/* Extract the buffer cookie (the buffer's physical address, as written at
 * refill time) from an RX descriptor. On PPv2.2+ the cookie shares a
 * 64-bit field with misc bits; MVPP2_DESC_DMA_MASK is reused here since
 * the cookie occupies the same bit span as the DMA address.
 */
static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port,
					     struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le32_to_cpu(rx_desc->pp21.buf_cookie);
	else
		return le64_to_cpu(rx_desc->pp22.buf_cookie_misc) &
		       MVPP2_DESC_DMA_MASK;
}
280db9d7d36SMaxime Chevallier
mvpp2_rxdesc_size_get(struct mvpp2_port * port,struct mvpp2_rx_desc * rx_desc)281db9d7d36SMaxime Chevallier static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port,
282db9d7d36SMaxime Chevallier struct mvpp2_rx_desc *rx_desc)
283db9d7d36SMaxime Chevallier {
284db9d7d36SMaxime Chevallier if (port->priv->hw_version == MVPP21)
2857b9c7d7dSMaxime Chevallier return le16_to_cpu(rx_desc->pp21.data_size);
286db9d7d36SMaxime Chevallier else
2877b9c7d7dSMaxime Chevallier return le16_to_cpu(rx_desc->pp22.data_size);
288db9d7d36SMaxime Chevallier }
289db9d7d36SMaxime Chevallier
mvpp2_rxdesc_status_get(struct mvpp2_port * port,struct mvpp2_rx_desc * rx_desc)290db9d7d36SMaxime Chevallier static u32 mvpp2_rxdesc_status_get(struct mvpp2_port *port,
291db9d7d36SMaxime Chevallier struct mvpp2_rx_desc *rx_desc)
292db9d7d36SMaxime Chevallier {
293db9d7d36SMaxime Chevallier if (port->priv->hw_version == MVPP21)
2947b9c7d7dSMaxime Chevallier return le32_to_cpu(rx_desc->pp21.status);
295db9d7d36SMaxime Chevallier else
2967b9c7d7dSMaxime Chevallier return le32_to_cpu(rx_desc->pp22.status);
297db9d7d36SMaxime Chevallier }
298db9d7d36SMaxime Chevallier
mvpp2_txq_inc_get(struct mvpp2_txq_pcpu * txq_pcpu)299db9d7d36SMaxime Chevallier static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
300db9d7d36SMaxime Chevallier {
301db9d7d36SMaxime Chevallier txq_pcpu->txq_get_index++;
302db9d7d36SMaxime Chevallier if (txq_pcpu->txq_get_index == txq_pcpu->size)
303db9d7d36SMaxime Chevallier txq_pcpu->txq_get_index = 0;
304db9d7d36SMaxime Chevallier }
305db9d7d36SMaxime Chevallier
/* Record a transmitted buffer in the per-thread TXQ shadow ring so it can
 * be unmapped and freed at TX-completion time, then advance the "put"
 * index with wrap-around. @data is either an skb or an xdp_frame,
 * disambiguated by @buf_type.
 */
static void mvpp2_txq_inc_put(struct mvpp2_port *port,
			      struct mvpp2_txq_pcpu *txq_pcpu,
			      void *data,
			      struct mvpp2_tx_desc *tx_desc,
			      enum mvpp2_tx_buf_type buf_type)
{
	struct mvpp2_txq_pcpu_buf *tx_buf =
		txq_pcpu->buffs + txq_pcpu->txq_put_index;
	tx_buf->type = buf_type;
	if (buf_type == MVPP2_TYPE_SKB)
		tx_buf->skb = data;
	else
		tx_buf->xdpf = data;
	tx_buf->size = mvpp2_txdesc_size_get(port, tx_desc);
	/* Full DMA address = aligned descriptor address + packet offset */
	tx_buf->dma = mvpp2_txdesc_dma_addr_get(port, tx_desc) +
		      mvpp2_txdesc_offset_get(port, tx_desc);
	txq_pcpu->txq_put_index++;
	if (txq_pcpu->txq_put_index == txq_pcpu->size)
		txq_pcpu->txq_put_index = 0;
}
326db9d7d36SMaxime Chevallier
3277d04b0b1SMatteo Croce /* Get number of maximum RXQ */
/* Get number of maximum RXQ: 1 in single-queue mode on PPv2.2+, else one
 * per possible CPU rounded up to the HW allocation granularity and capped
 * at MVPP2_PORT_MAX_RXQ.
 */
static int mvpp2_get_nrxqs(struct mvpp2 *priv)
{
	unsigned int nrxqs;

	if (priv->hw_version >= MVPP22 && queue_mode == MVPP2_QDIST_SINGLE_MODE)
		return 1;

	/* According to the PPv2.2 datasheet and our experiments on
	 * PPv2.1, RX queues have an allocation granularity of 4 (when
	 * more than a single one on PPv2.2).
	 * Round up to nearest multiple of 4.
	 */
	nrxqs = (num_possible_cpus() + 3) & ~0x3;
	if (nrxqs > MVPP2_PORT_MAX_RXQ)
		nrxqs = MVPP2_PORT_MAX_RXQ;

	return nrxqs;
}
3467d04b0b1SMatteo Croce
347db9d7d36SMaxime Chevallier /* Get number of physical egress port */
/* Get number of physical egress port: egress ports are numbered after the
 * MVPP2_MAX_TCONT traffic containers.
 */
static inline int mvpp2_egress_port(struct mvpp2_port *port)
{
	return MVPP2_MAX_TCONT + port->id;
}
352db9d7d36SMaxime Chevallier
353db9d7d36SMaxime Chevallier /* Get number of physical TXQ */
/* Get number of physical TXQ: each egress port owns a contiguous bank of
 * MVPP2_MAX_TXQ physical queues.
 */
static inline int mvpp2_txq_phys(int port, int txq)
{
	return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq;
}
358db9d7d36SMaxime Chevallier
359b27db227SMatteo Croce /* Returns a struct page if page_pool is set, otherwise a buffer */
mvpp2_frag_alloc(const struct mvpp2_bm_pool * pool,struct page_pool * page_pool)360b27db227SMatteo Croce static void *mvpp2_frag_alloc(const struct mvpp2_bm_pool *pool,
361b27db227SMatteo Croce struct page_pool *page_pool)
362db9d7d36SMaxime Chevallier {
363b27db227SMatteo Croce if (page_pool)
364b27db227SMatteo Croce return page_pool_dev_alloc_pages(page_pool);
365b27db227SMatteo Croce
366db9d7d36SMaxime Chevallier if (likely(pool->frag_size <= PAGE_SIZE))
367db9d7d36SMaxime Chevallier return netdev_alloc_frag(pool->frag_size);
368b27db227SMatteo Croce
369db9d7d36SMaxime Chevallier return kmalloc(pool->frag_size, GFP_ATOMIC);
370db9d7d36SMaxime Chevallier }
371db9d7d36SMaxime Chevallier
mvpp2_frag_free(const struct mvpp2_bm_pool * pool,struct page_pool * page_pool,void * data)372b27db227SMatteo Croce static void mvpp2_frag_free(const struct mvpp2_bm_pool *pool,
373b27db227SMatteo Croce struct page_pool *page_pool, void *data)
374db9d7d36SMaxime Chevallier {
375b27db227SMatteo Croce if (page_pool)
376b27db227SMatteo Croce page_pool_put_full_page(page_pool, virt_to_head_page(data), false);
377b27db227SMatteo Croce else if (likely(pool->frag_size <= PAGE_SIZE))
378db9d7d36SMaxime Chevallier skb_free_frag(data);
379db9d7d36SMaxime Chevallier else
380db9d7d36SMaxime Chevallier kfree(data);
381db9d7d36SMaxime Chevallier }
382db9d7d36SMaxime Chevallier
383db9d7d36SMaxime Chevallier /* Buffer Manager configuration routines */
384db9d7d36SMaxime Chevallier
385db9d7d36SMaxime Chevallier /* Create pool */
/* Create pool: allocate the DMA-coherent buffer-pointer ring, program its
 * base and size into the BM, then start the pool with the BPPI thresholds
 * appropriate for the HW version.
 * Returns 0 on success, -EINVAL for a size that is not a multiple of 16,
 * -ENOMEM on allocation failure or misaligned ring.
 */
static int mvpp2_bm_pool_create(struct device *dev, struct mvpp2 *priv,
				struct mvpp2_bm_pool *bm_pool, int size)
{
	u32 val;

	/* Number of buffer pointers must be a multiple of 16, as per
	 * hardware constraints
	 */
	if (!IS_ALIGNED(size, 16))
		return -EINVAL;

	/* PPv2.1 needs 8 bytes per buffer pointer, PPv2.2 and PPv2.3 needs 16
	 * bytes per buffer pointer
	 */
	if (priv->hw_version == MVPP21)
		bm_pool->size_bytes = 2 * sizeof(u32) * size;
	else
		bm_pool->size_bytes = 2 * sizeof(u64) * size;

	bm_pool->virt_addr = dma_alloc_coherent(dev, bm_pool->size_bytes,
						&bm_pool->dma_addr,
						GFP_KERNEL);
	if (!bm_pool->virt_addr)
		return -ENOMEM;

	/* The HW needs the ring base aligned to MVPP2_BM_POOL_PTR_ALIGN */
	if (!IS_ALIGNED((unsigned long)bm_pool->virt_addr,
			MVPP2_BM_POOL_PTR_ALIGN)) {
		dma_free_coherent(dev, bm_pool->size_bytes,
				  bm_pool->virt_addr, bm_pool->dma_addr);
		dev_err(dev, "BM pool %d is not %d bytes aligned\n",
			bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN);
		return -ENOMEM;
	}

	/* Program ring base and size, then start the pool */
	mvpp2_write(priv, MVPP2_BM_POOL_BASE_REG(bm_pool->id),
		    lower_32_bits(bm_pool->dma_addr));
	mvpp2_write(priv, MVPP2_BM_POOL_SIZE_REG(bm_pool->id), size);

	val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
	val |= MVPP2_BM_START_MASK;

	val &= ~MVPP2_BM_LOW_THRESH_MASK;
	val &= ~MVPP2_BM_HIGH_THRESH_MASK;

	/* Set 8 Pools BPPI threshold for MVPP23 */
	if (priv->hw_version == MVPP23) {
		val |= MVPP2_BM_LOW_THRESH_VALUE(MVPP23_BM_BPPI_LOW_THRESH);
		val |= MVPP2_BM_HIGH_THRESH_VALUE(MVPP23_BM_BPPI_HIGH_THRESH);
	} else {
		val |= MVPP2_BM_LOW_THRESH_VALUE(MVPP2_BM_BPPI_LOW_THRESH);
		val |= MVPP2_BM_HIGH_THRESH_VALUE(MVPP2_BM_BPPI_HIGH_THRESH);
	}

	mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);

	/* Pool starts empty; buffers are added later */
	bm_pool->size = size;
	bm_pool->pkt_size = 0;
	bm_pool->buf_num = 0;

	return 0;
}
447db9d7d36SMaxime Chevallier
448db9d7d36SMaxime Chevallier /* Set pool buffer size */
/* Set pool buffer size: cache it in the pool struct and program it into
 * the HW, aligned up to the register's granularity.
 */
static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv,
				      struct mvpp2_bm_pool *bm_pool,
				      int buf_size)
{
	u32 val;

	bm_pool->buf_size = buf_size;

	/* HW expects the size aligned to 1 << MVPP2_POOL_BUF_SIZE_OFFSET */
	val = ALIGN(buf_size, 1 << MVPP2_POOL_BUF_SIZE_OFFSET);
	mvpp2_write(priv, MVPP2_POOL_BUF_SIZE_REG(bm_pool->id), val);
}
460db9d7d36SMaxime Chevallier
/* Pop one buffer from the pool through the per-thread alloc registers and
 * return its DMA and physical addresses. The read of
 * MVPP2_BM_PHY_ALLOC_REG triggers the allocation; the virtual/high-bits
 * registers are read afterwards. Runs between get_cpu()/put_cpu() so the
 * thread register window does not change mid-sequence.
 */
static void mvpp2_bm_bufs_get_addrs(struct device *dev, struct mvpp2 *priv,
				    struct mvpp2_bm_pool *bm_pool,
				    dma_addr_t *dma_addr,
				    phys_addr_t *phys_addr)
{
	unsigned int thread = mvpp2_cpu_to_thread(priv, get_cpu());

	*dma_addr = mvpp2_thread_read(priv, thread,
				      MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
	*phys_addr = mvpp2_thread_read(priv, thread, MVPP2_BM_VIRT_ALLOC_REG);

	/* PPv2.2+ keeps bits 32+ of both addresses in a separate register */
	if (priv->hw_version >= MVPP22) {
		u32 val;
		u32 dma_addr_highbits, phys_addr_highbits;

		val = mvpp2_thread_read(priv, thread, MVPP22_BM_ADDR_HIGH_ALLOC);
		dma_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_PHYS_MASK);
		phys_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_VIRT_MASK) >>
			MVPP22_BM_ADDR_HIGH_VIRT_SHIFT;

		/* Only merge high bits when the kernel types can hold them */
		if (sizeof(dma_addr_t) == 8)
			*dma_addr |= (u64)dma_addr_highbits << 32;

		if (sizeof(phys_addr_t) == 8)
			*phys_addr |= (u64)phys_addr_highbits << 32;
	}

	put_cpu();
}
490db9d7d36SMaxime Chevallier
491db9d7d36SMaxime Chevallier /* Free all buffers from the pool */
/* Free all buffers from the pool: pop up to @buf_num buffers from the BM
 * and release each one. When per-CPU page pools are used the DMA unmap is
 * done by the page_pool itself, otherwise the buffer is unmapped here.
 */
static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv,
			       struct mvpp2_bm_pool *bm_pool, int buf_num)
{
	struct page_pool *pp = NULL;
	int i;

	/* Clamp to the driver's buffer accounting to avoid over-popping */
	if (buf_num > bm_pool->buf_num) {
		WARN(1, "Pool does not have so many bufs pool(%d) bufs(%d)\n",
		     bm_pool->id, buf_num);
		buf_num = bm_pool->buf_num;
	}

	if (priv->percpu_pools)
		pp = priv->page_pool[bm_pool->id];

	for (i = 0; i < buf_num; i++) {
		dma_addr_t buf_dma_addr;
		phys_addr_t buf_phys_addr;
		void *data;

		mvpp2_bm_bufs_get_addrs(dev, priv, bm_pool,
					&buf_dma_addr, &buf_phys_addr);

		/* page_pool buffers are unmapped by the pool on release */
		if (!pp)
			dma_unmap_single(dev, buf_dma_addr,
					 bm_pool->buf_size, DMA_FROM_DEVICE);

		data = (void *)phys_to_virt(buf_phys_addr);
		if (!data)
			break;

		mvpp2_frag_free(bm_pool, pp, data);
	}

	/* Update BM driver with number of buffers removed from pool */
	bm_pool->buf_num -= i;
}
529db9d7d36SMaxime Chevallier
530db9d7d36SMaxime Chevallier /* Check number of buffers in BM pool */
/* Check number of buffers in BM pool: sum the external (BPPE) and
 * internal (BPPI) HW counters for the pool.
 */
static int mvpp2_check_hw_buf_num(struct mvpp2 *priv, struct mvpp2_bm_pool *bm_pool)
{
	int buf_num = 0;

	buf_num += mvpp2_read(priv, MVPP2_BM_POOL_PTRS_NUM_REG(bm_pool->id)) &
		MVPP22_BM_POOL_PTRS_NUM_MASK;
	buf_num += mvpp2_read(priv, MVPP2_BM_BPPI_PTRS_NUM_REG(bm_pool->id)) &
		MVPP2_BM_BPPI_PTR_NUM_MASK;

	/* HW has one buffer ready which is not reflected in the counters */
	if (buf_num)
		buf_num += 1;

	return buf_num;
}
546db9d7d36SMaxime Chevallier
547db9d7d36SMaxime Chevallier /* Cleanup pool */
mvpp2_bm_pool_destroy(struct device * dev,struct mvpp2 * priv,struct mvpp2_bm_pool * bm_pool)54813616361SMatteo Croce static int mvpp2_bm_pool_destroy(struct device *dev, struct mvpp2 *priv,
549db9d7d36SMaxime Chevallier struct mvpp2_bm_pool *bm_pool)
550db9d7d36SMaxime Chevallier {
551db9d7d36SMaxime Chevallier int buf_num;
552db9d7d36SMaxime Chevallier u32 val;
553db9d7d36SMaxime Chevallier
554db9d7d36SMaxime Chevallier buf_num = mvpp2_check_hw_buf_num(priv, bm_pool);
55513616361SMatteo Croce mvpp2_bm_bufs_free(dev, priv, bm_pool, buf_num);
556db9d7d36SMaxime Chevallier
557db9d7d36SMaxime Chevallier /* Check buffer counters after free */
558db9d7d36SMaxime Chevallier buf_num = mvpp2_check_hw_buf_num(priv, bm_pool);
559db9d7d36SMaxime Chevallier if (buf_num) {
560db9d7d36SMaxime Chevallier WARN(1, "cannot free all buffers in pool %d, buf_num left %d\n",
561db9d7d36SMaxime Chevallier bm_pool->id, bm_pool->buf_num);
562db9d7d36SMaxime Chevallier return 0;
563db9d7d36SMaxime Chevallier }
564db9d7d36SMaxime Chevallier
565db9d7d36SMaxime Chevallier val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
566db9d7d36SMaxime Chevallier val |= MVPP2_BM_STOP_MASK;
567db9d7d36SMaxime Chevallier mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);
568db9d7d36SMaxime Chevallier
5694e48978cSMatteo Croce if (priv->percpu_pools) {
570c2d6fe61SMatteo Croce page_pool_destroy(priv->page_pool[bm_pool->id]);
5714e48978cSMatteo Croce priv->page_pool[bm_pool->id] = NULL;
5724e48978cSMatteo Croce }
573c2d6fe61SMatteo Croce
57413616361SMatteo Croce dma_free_coherent(dev, bm_pool->size_bytes,
575db9d7d36SMaxime Chevallier bm_pool->virt_addr,
576db9d7d36SMaxime Chevallier bm_pool->dma_addr);
577db9d7d36SMaxime Chevallier return 0;
578db9d7d36SMaxime Chevallier }
579db9d7d36SMaxime Chevallier
mvpp2_bm_pools_init(struct device * dev,struct mvpp2 * priv)58013616361SMatteo Croce static int mvpp2_bm_pools_init(struct device *dev, struct mvpp2 *priv)
581db9d7d36SMaxime Chevallier {
5827d04b0b1SMatteo Croce int i, err, size, poolnum = MVPP2_BM_POOLS_NUM;
583db9d7d36SMaxime Chevallier struct mvpp2_bm_pool *bm_pool;
584db9d7d36SMaxime Chevallier
5857d04b0b1SMatteo Croce if (priv->percpu_pools)
5867d04b0b1SMatteo Croce poolnum = mvpp2_get_nrxqs(priv) * 2;
5877d04b0b1SMatteo Croce
588db9d7d36SMaxime Chevallier /* Create all pools with maximum size */
589db9d7d36SMaxime Chevallier size = MVPP2_BM_POOL_SIZE_MAX;
5907d04b0b1SMatteo Croce for (i = 0; i < poolnum; i++) {
591db9d7d36SMaxime Chevallier bm_pool = &priv->bm_pools[i];
592db9d7d36SMaxime Chevallier bm_pool->id = i;
59313616361SMatteo Croce err = mvpp2_bm_pool_create(dev, priv, bm_pool, size);
594db9d7d36SMaxime Chevallier if (err)
595db9d7d36SMaxime Chevallier goto err_unroll_pools;
596db9d7d36SMaxime Chevallier mvpp2_bm_pool_bufsize_set(priv, bm_pool, 0);
597db9d7d36SMaxime Chevallier }
598db9d7d36SMaxime Chevallier return 0;
599db9d7d36SMaxime Chevallier
600db9d7d36SMaxime Chevallier err_unroll_pools:
60113616361SMatteo Croce dev_err(dev, "failed to create BM pool %d, size %d\n", i, size);
602db9d7d36SMaxime Chevallier for (i = i - 1; i >= 0; i--)
60313616361SMatteo Croce mvpp2_bm_pool_destroy(dev, priv, &priv->bm_pools[i]);
604db9d7d36SMaxime Chevallier return err;
605db9d7d36SMaxime Chevallier }
606db9d7d36SMaxime Chevallier
607eb30b269SStefan Chulski /* Routine enable PPv23 8 pool mode */
mvpp23_bm_set_8pool_mode(struct mvpp2 * priv)608eb30b269SStefan Chulski static void mvpp23_bm_set_8pool_mode(struct mvpp2 *priv)
609eb30b269SStefan Chulski {
610eb30b269SStefan Chulski int val;
611eb30b269SStefan Chulski
612eb30b269SStefan Chulski val = mvpp2_read(priv, MVPP22_BM_POOL_BASE_ADDR_HIGH_REG);
613eb30b269SStefan Chulski val |= MVPP23_BM_8POOL_MODE;
614eb30b269SStefan Chulski mvpp2_write(priv, MVPP22_BM_POOL_BASE_ADDR_HIGH_REG, val);
615eb30b269SStefan Chulski }
616eb30b269SStefan Chulski
61793872948SJenishkumar Maheshbhai Patel /* Cleanup pool before actual initialization in the OS */
mvpp2_bm_pool_cleanup(struct mvpp2 * priv,int pool_id)61893872948SJenishkumar Maheshbhai Patel static void mvpp2_bm_pool_cleanup(struct mvpp2 *priv, int pool_id)
61993872948SJenishkumar Maheshbhai Patel {
62093872948SJenishkumar Maheshbhai Patel unsigned int thread = mvpp2_cpu_to_thread(priv, get_cpu());
62193872948SJenishkumar Maheshbhai Patel u32 val;
62293872948SJenishkumar Maheshbhai Patel int i;
62393872948SJenishkumar Maheshbhai Patel
62493872948SJenishkumar Maheshbhai Patel /* Drain the BM from all possible residues left by firmware */
62593872948SJenishkumar Maheshbhai Patel for (i = 0; i < MVPP2_BM_POOL_SIZE_MAX; i++)
62693872948SJenishkumar Maheshbhai Patel mvpp2_thread_read(priv, thread, MVPP2_BM_PHY_ALLOC_REG(pool_id));
62793872948SJenishkumar Maheshbhai Patel
62893872948SJenishkumar Maheshbhai Patel put_cpu();
62993872948SJenishkumar Maheshbhai Patel
63093872948SJenishkumar Maheshbhai Patel /* Stop the BM pool */
63193872948SJenishkumar Maheshbhai Patel val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(pool_id));
63293872948SJenishkumar Maheshbhai Patel val |= MVPP2_BM_STOP_MASK;
63393872948SJenishkumar Maheshbhai Patel mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(pool_id), val);
63493872948SJenishkumar Maheshbhai Patel }
63593872948SJenishkumar Maheshbhai Patel
/* One-time BM (buffer manager) init: drain stale pool state left by
 * firmware, create the per-cpu page pools when enabled, mask/clear BM
 * interrupts and allocate the BM pool descriptors.
 * Returns 0 on success or a negative errno.
 */
static int mvpp2_bm_init(struct device *dev, struct mvpp2 *priv)
{
	enum dma_data_direction dma_dir = DMA_FROM_DEVICE;
	int i, err, poolnum = MVPP2_BM_POOLS_NUM;
	struct mvpp2_port *port;

	/* Per-cpu mode uses two pools (short + long) per RXQ */
	if (priv->percpu_pools)
		poolnum = mvpp2_get_nrxqs(priv) * 2;

	/* Clean up the pool state in case it contains stale state */
	for (i = 0; i < poolnum; i++)
		mvpp2_bm_pool_cleanup(priv, i);

	if (priv->percpu_pools) {
		/* If any port has an XDP program attached, the CPU may also
		 * write to the buffers, so map them bidirectionally.
		 */
		for (i = 0; i < priv->port_count; i++) {
			port = priv->port_list[i];
			if (port->xdp_prog) {
				dma_dir = DMA_BIDIRECTIONAL;
				break;
			}
		}

		for (i = 0; i < poolnum; i++) {
			/* the pool template in use: 0 for the first half of
			 * the pools, 1 for the second half
			 */
			int pn = i / (poolnum / 2);

			priv->page_pool[i] =
				mvpp2_create_page_pool(dev,
						       mvpp2_pools[pn].buf_num,
						       mvpp2_pools[pn].pkt_size,
						       dma_dir);
			if (IS_ERR(priv->page_pool[i])) {
				int j;

				/* Unwind the page pools created so far */
				for (j = 0; j < i; j++) {
					page_pool_destroy(priv->page_pool[j]);
					priv->page_pool[j] = NULL;
				}
				return PTR_ERR(priv->page_pool[i]);
			}
		}
	}

	dev_info(dev, "using %d %s buffers\n", poolnum,
		 priv->percpu_pools ? "per-cpu" : "shared");

	for (i = 0; i < poolnum; i++) {
		/* Mask BM all interrupts */
		mvpp2_write(priv, MVPP2_BM_INTR_MASK_REG(i), 0);
		/* Clear BM cause register */
		mvpp2_write(priv, MVPP2_BM_INTR_CAUSE_REG(i), 0);
	}

	/* Allocate and initialize BM pools */
	priv->bm_pools = devm_kcalloc(dev, poolnum,
				      sizeof(*priv->bm_pools), GFP_KERNEL);
	if (!priv->bm_pools)
		return -ENOMEM;

	/* PPv23 must be switched into 8-pool mode explicitly */
	if (priv->hw_version == MVPP23)
		mvpp23_bm_set_8pool_mode(priv);

	err = mvpp2_bm_pools_init(dev, priv);
	if (err < 0)
		return err;
	return 0;
}
703db9d7d36SMaxime Chevallier
mvpp2_setup_bm_pool(void)704db9d7d36SMaxime Chevallier static void mvpp2_setup_bm_pool(void)
705db9d7d36SMaxime Chevallier {
706db9d7d36SMaxime Chevallier /* Short pool */
707db9d7d36SMaxime Chevallier mvpp2_pools[MVPP2_BM_SHORT].buf_num = MVPP2_BM_SHORT_BUF_NUM;
708db9d7d36SMaxime Chevallier mvpp2_pools[MVPP2_BM_SHORT].pkt_size = MVPP2_BM_SHORT_PKT_SIZE;
709db9d7d36SMaxime Chevallier
710db9d7d36SMaxime Chevallier /* Long pool */
711db9d7d36SMaxime Chevallier mvpp2_pools[MVPP2_BM_LONG].buf_num = MVPP2_BM_LONG_BUF_NUM;
712db9d7d36SMaxime Chevallier mvpp2_pools[MVPP2_BM_LONG].pkt_size = MVPP2_BM_LONG_PKT_SIZE;
713db9d7d36SMaxime Chevallier
714db9d7d36SMaxime Chevallier /* Jumbo pool */
715db9d7d36SMaxime Chevallier mvpp2_pools[MVPP2_BM_JUMBO].buf_num = MVPP2_BM_JUMBO_BUF_NUM;
716db9d7d36SMaxime Chevallier mvpp2_pools[MVPP2_BM_JUMBO].pkt_size = MVPP2_BM_JUMBO_PKT_SIZE;
717db9d7d36SMaxime Chevallier }
718db9d7d36SMaxime Chevallier
719db9d7d36SMaxime Chevallier /* Attach long pool to rxq */
mvpp2_rxq_long_pool_set(struct mvpp2_port * port,int lrxq,int long_pool)720db9d7d36SMaxime Chevallier static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port,
721db9d7d36SMaxime Chevallier int lrxq, int long_pool)
722db9d7d36SMaxime Chevallier {
723db9d7d36SMaxime Chevallier u32 val, mask;
724db9d7d36SMaxime Chevallier int prxq;
725db9d7d36SMaxime Chevallier
726db9d7d36SMaxime Chevallier /* Get queue physical ID */
727db9d7d36SMaxime Chevallier prxq = port->rxqs[lrxq]->id;
728db9d7d36SMaxime Chevallier
729db9d7d36SMaxime Chevallier if (port->priv->hw_version == MVPP21)
730db9d7d36SMaxime Chevallier mask = MVPP21_RXQ_POOL_LONG_MASK;
731db9d7d36SMaxime Chevallier else
732db9d7d36SMaxime Chevallier mask = MVPP22_RXQ_POOL_LONG_MASK;
733db9d7d36SMaxime Chevallier
734db9d7d36SMaxime Chevallier val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
735db9d7d36SMaxime Chevallier val &= ~mask;
736db9d7d36SMaxime Chevallier val |= (long_pool << MVPP2_RXQ_POOL_LONG_OFFS) & mask;
737db9d7d36SMaxime Chevallier mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
738db9d7d36SMaxime Chevallier }
739db9d7d36SMaxime Chevallier
740db9d7d36SMaxime Chevallier /* Attach short pool to rxq */
mvpp2_rxq_short_pool_set(struct mvpp2_port * port,int lrxq,int short_pool)741db9d7d36SMaxime Chevallier static void mvpp2_rxq_short_pool_set(struct mvpp2_port *port,
742db9d7d36SMaxime Chevallier int lrxq, int short_pool)
743db9d7d36SMaxime Chevallier {
744db9d7d36SMaxime Chevallier u32 val, mask;
745db9d7d36SMaxime Chevallier int prxq;
746db9d7d36SMaxime Chevallier
747db9d7d36SMaxime Chevallier /* Get queue physical ID */
748db9d7d36SMaxime Chevallier prxq = port->rxqs[lrxq]->id;
749db9d7d36SMaxime Chevallier
750db9d7d36SMaxime Chevallier if (port->priv->hw_version == MVPP21)
751db9d7d36SMaxime Chevallier mask = MVPP21_RXQ_POOL_SHORT_MASK;
752db9d7d36SMaxime Chevallier else
753db9d7d36SMaxime Chevallier mask = MVPP22_RXQ_POOL_SHORT_MASK;
754db9d7d36SMaxime Chevallier
755db9d7d36SMaxime Chevallier val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
756db9d7d36SMaxime Chevallier val &= ~mask;
757db9d7d36SMaxime Chevallier val |= (short_pool << MVPP2_RXQ_POOL_SHORT_OFFS) & mask;
758db9d7d36SMaxime Chevallier mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
759db9d7d36SMaxime Chevallier }
760db9d7d36SMaxime Chevallier
static void *mvpp2_buf_alloc(struct mvpp2_port *port,
			     struct mvpp2_bm_pool *bm_pool,
			     struct page_pool *page_pool,
			     dma_addr_t *buf_dma_addr,
			     phys_addr_t *buf_phys_addr,
			     gfp_t gfp_mask)
{
	struct device *dma_dev = port->dev->dev.parent;
	dma_addr_t dma_addr;
	void *data;

	data = mvpp2_frag_alloc(bm_pool, page_pool);
	if (!data)
		return NULL;

	if (!page_pool) {
		/* Legacy path: map the fragment ourselves */
		dma_addr = dma_map_single(dma_dev, data,
					  MVPP2_RX_BUF_SIZE(bm_pool->pkt_size),
					  DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(dma_dev, dma_addr))) {
			mvpp2_frag_free(bm_pool, NULL, data);
			return NULL;
		}
	} else {
		/* Page-pool path: mvpp2_frag_alloc() returned a page that
		 * already carries its DMA mapping.
		 */
		struct page *page = data;

		dma_addr = page_pool_get_dma_addr(page);
		data = page_to_virt(page);
	}

	*buf_dma_addr = dma_addr;
	*buf_phys_addr = virt_to_phys(data);

	return data;
}
794db9d7d36SMaxime Chevallier
7953bd17fdcSStefan Chulski /* Routine enable flow control for RXQs condition */
static void mvpp2_rxq_enable_fc(struct mvpp2_port *port)
{
	int val, cm3_state, host_id, q;
	int fq = port->first_rxq;
	unsigned long flags;

	/* Serialize access to the CM3 MSS registers shared between ports */
	spin_lock_irqsave(&port->priv->mss_spinlock, flags);

	/* Remove Flow control enable bit to prevent race between FW and Kernel
	 * If Flow control was enabled, it would be re-enabled.
	 */
	val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG);
	cm3_state = (val & FLOW_CONTROL_ENABLE_BIT);
	val &= ~FLOW_CONTROL_ENABLE_BIT;
	mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val);

	/* Set same Flow control for all RXQs */
	for (q = 0; q < port->nrxqs; q++) {
		/* Set stop and start Flow control RXQ thresholds */
		val = MSS_THRESHOLD_START;
		val |= (MSS_THRESHOLD_STOP << MSS_RXQ_TRESH_STOP_OFFS);
		mvpp2_cm3_write(port->priv, MSS_RXQ_TRESH_REG(q, fq), val);

		val = mvpp2_cm3_read(port->priv, MSS_RXQ_ASS_REG(q, fq));
		/* Set RXQ port ID */
		val &= ~(MSS_RXQ_ASS_PORTID_MASK << MSS_RXQ_ASS_Q_BASE(q, fq));
		val |= (port->id << MSS_RXQ_ASS_Q_BASE(q, fq));
		val &= ~(MSS_RXQ_ASS_HOSTID_MASK << (MSS_RXQ_ASS_Q_BASE(q, fq)
			+ MSS_RXQ_ASS_HOSTID_OFFS));

		/* Calculate RXQ host ID:
		 * In Single queue mode: Host ID equal to Host ID used for
		 * shared RX interrupt
		 * In Multi queue mode: Host ID equal to number of
		 * RXQ ID / number of CoS queues
		 * In Single resource mode: Host ID always equal to 0
		 */
		if (queue_mode == MVPP2_QDIST_SINGLE_MODE)
			host_id = port->nqvecs;
		else if (queue_mode == MVPP2_QDIST_MULTI_MODE)
			host_id = q;
		else
			host_id = 0;

		/* Set RXQ host ID */
		val |= (host_id << (MSS_RXQ_ASS_Q_BASE(q, fq)
			+ MSS_RXQ_ASS_HOSTID_OFFS));

		mvpp2_cm3_write(port->priv, MSS_RXQ_ASS_REG(q, fq), val);
	}

	/* Notify Firmware that Flow control config space ready for update */
	val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG);
	val |= FLOW_CONTROL_UPDATE_COMMAND_BIT;
	val |= cm3_state;	/* restore FC enable state captured on entry */
	mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val);

	spin_unlock_irqrestore(&port->priv->mss_spinlock, flags);
}
8553bd17fdcSStefan Chulski
8563bd17fdcSStefan Chulski /* Routine disable flow control for RXQs condition */
static void mvpp2_rxq_disable_fc(struct mvpp2_port *port)
{
	int val, cm3_state, q;
	unsigned long flags;
	int fq = port->first_rxq;

	/* Serialize access to the CM3 MSS registers shared between ports */
	spin_lock_irqsave(&port->priv->mss_spinlock, flags);

	/* Remove Flow control enable bit to prevent race between FW and Kernel
	 * If Flow control was enabled, it would be re-enabled.
	 */
	val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG);
	cm3_state = (val & FLOW_CONTROL_ENABLE_BIT);
	val &= ~FLOW_CONTROL_ENABLE_BIT;
	mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val);

	/* Disable Flow control for all RXQs */
	for (q = 0; q < port->nrxqs; q++) {
		/* Set threshold 0 to disable Flow control */
		val = 0;
		val |= (0 << MSS_RXQ_TRESH_STOP_OFFS);
		mvpp2_cm3_write(port->priv, MSS_RXQ_TRESH_REG(q, fq), val);

		val = mvpp2_cm3_read(port->priv, MSS_RXQ_ASS_REG(q, fq));

		/* Clear the RXQ's port ID assignment */
		val &= ~(MSS_RXQ_ASS_PORTID_MASK << MSS_RXQ_ASS_Q_BASE(q, fq));

		/* Clear the RXQ's host ID assignment */
		val &= ~(MSS_RXQ_ASS_HOSTID_MASK << (MSS_RXQ_ASS_Q_BASE(q, fq)
			+ MSS_RXQ_ASS_HOSTID_OFFS));

		mvpp2_cm3_write(port->priv, MSS_RXQ_ASS_REG(q, fq), val);
	}

	/* Notify Firmware that Flow control config space ready for update */
	val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG);
	val |= FLOW_CONTROL_UPDATE_COMMAND_BIT;
	val |= cm3_state;	/* restore FC enable state captured on entry */
	mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val);

	spin_unlock_irqrestore(&port->priv->mss_spinlock, flags);
}
8983bd17fdcSStefan Chulski
89976055831SStefan Chulski /* Routine disable/enable flow control for BM pool condition */
static void mvpp2_bm_pool_update_fc(struct mvpp2_port *port,
				    struct mvpp2_bm_pool *pool,
				    bool en)
{
	int val, cm3_state;
	unsigned long flags;

	/* Serialize access to the CM3 MSS registers shared between ports */
	spin_lock_irqsave(&port->priv->mss_spinlock, flags);

	/* Remove Flow control enable bit to prevent race between FW and Kernel
	 * If Flow control were enabled, it would be re-enabled.
	 */
	val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG);
	cm3_state = (val & FLOW_CONTROL_ENABLE_BIT);
	val &= ~FLOW_CONTROL_ENABLE_BIT;
	mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val);

	/* Check if BM pool should be enabled/disable */
	if (en) {
		/* Set BM pool start and stop thresholds per port */
		val = mvpp2_cm3_read(port->priv, MSS_BUF_POOL_REG(pool->id));
		val |= MSS_BUF_POOL_PORT_OFFS(port->id);
		val &= ~MSS_BUF_POOL_START_MASK;
		val |= (MSS_THRESHOLD_START << MSS_BUF_POOL_START_OFFS);
		val &= ~MSS_BUF_POOL_STOP_MASK;
		val |= MSS_THRESHOLD_STOP;
		mvpp2_cm3_write(port->priv, MSS_BUF_POOL_REG(pool->id), val);
	} else {
		/* Remove BM pool from the port */
		val = mvpp2_cm3_read(port->priv, MSS_BUF_POOL_REG(pool->id));
		val &= ~MSS_BUF_POOL_PORT_OFFS(port->id);

		/* Zero BM pool start and stop thresholds to disable pool
		 * flow control if pool empty (not used by any port)
		 */
		if (!pool->buf_num) {
			val &= ~MSS_BUF_POOL_START_MASK;
			val &= ~MSS_BUF_POOL_STOP_MASK;
		}

		mvpp2_cm3_write(port->priv, MSS_BUF_POOL_REG(pool->id), val);
	}

	/* Notify Firmware that Flow control config space ready for update */
	val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG);
	val |= FLOW_CONTROL_UPDATE_COMMAND_BIT;
	val |= cm3_state;	/* restore FC enable state captured on entry */
	mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val);

	spin_unlock_irqrestore(&port->priv->mss_spinlock, flags);
}
95176055831SStefan Chulski
9523a616b92SStefan Chulski /* disable/enable flow control for BM pool on all ports */
static void mvpp2_bm_pool_update_priv_fc(struct mvpp2 *priv, bool en)
{
	int i, j;

	for (i = 0; i < priv->port_count; i++) {
		struct mvpp2_port *port = priv->port_list[i];
		bool fc = port->tx_fc & en;

		if (!port->priv->percpu_pools) {
			/* Shared mode: just the port's long and short pools */
			mvpp2_bm_pool_update_fc(port, port->pool_long, fc);
			mvpp2_bm_pool_update_fc(port, port->pool_short, fc);
			continue;
		}

		/* Per-cpu mode: one pool per RXQ */
		for (j = 0; j < port->nrxqs; j++)
			mvpp2_bm_pool_update_fc(port, &port->priv->bm_pools[j],
						fc);
	}
}
9703a616b92SStefan Chulski
mvpp2_enable_global_fc(struct mvpp2 * priv)9719ca5e767SStefan Chulski static int mvpp2_enable_global_fc(struct mvpp2 *priv)
9729ca5e767SStefan Chulski {
9739ca5e767SStefan Chulski int val, timeout = 0;
9749ca5e767SStefan Chulski
9759ca5e767SStefan Chulski /* Enable global flow control. In this stage global
9769ca5e767SStefan Chulski * flow control enabled, but still disabled per port.
9779ca5e767SStefan Chulski */
9789ca5e767SStefan Chulski val = mvpp2_cm3_read(priv, MSS_FC_COM_REG);
9799ca5e767SStefan Chulski val |= FLOW_CONTROL_ENABLE_BIT;
9809ca5e767SStefan Chulski mvpp2_cm3_write(priv, MSS_FC_COM_REG, val);
9819ca5e767SStefan Chulski
9829ca5e767SStefan Chulski /* Check if Firmware running and disable FC if not*/
9839ca5e767SStefan Chulski val |= FLOW_CONTROL_UPDATE_COMMAND_BIT;
9849ca5e767SStefan Chulski mvpp2_cm3_write(priv, MSS_FC_COM_REG, val);
9859ca5e767SStefan Chulski
9869ca5e767SStefan Chulski while (timeout < MSS_FC_MAX_TIMEOUT) {
9879ca5e767SStefan Chulski val = mvpp2_cm3_read(priv, MSS_FC_COM_REG);
9889ca5e767SStefan Chulski
9899ca5e767SStefan Chulski if (!(val & FLOW_CONTROL_UPDATE_COMMAND_BIT))
9909ca5e767SStefan Chulski return 0;
9919ca5e767SStefan Chulski usleep_range(10, 20);
9929ca5e767SStefan Chulski timeout++;
9939ca5e767SStefan Chulski }
9949ca5e767SStefan Chulski
9959ca5e767SStefan Chulski priv->global_tx_fc = false;
9969ca5e767SStefan Chulski return -EOPNOTSUPP;
9979ca5e767SStefan Chulski }
9989ca5e767SStefan Chulski
999db9d7d36SMaxime Chevallier /* Release buffer to BM */
/* Release buffer to BM */
static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
				     dma_addr_t buf_dma_addr,
				     phys_addr_t buf_phys_addr)
{
	/* get_cpu() pins us to the current CPU until put_cpu() below */
	unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
	unsigned long flags = 0;

	/* Serialize the release on HW threads flagged in lock_map */
	if (test_bit(thread, &port->priv->lock_map))
		spin_lock_irqsave(&port->bm_lock[thread], flags);

	if (port->priv->hw_version >= MVPP22) {
		u32 val = 0;

		/* Stage the upper 32 bits of the DMA and physical addresses;
		 * the release registers written below carry only the lower
		 * 32 bits.
		 */
		if (sizeof(dma_addr_t) == 8)
			val |= upper_32_bits(buf_dma_addr) &
				MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK;

		if (sizeof(phys_addr_t) == 8)
			val |= (upper_32_bits(buf_phys_addr)
				<< MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) &
				MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK;

		mvpp2_thread_write_relaxed(port->priv, thread,
					   MVPP22_BM_ADDR_HIGH_RLS_REG, val);
	}

	/* MVPP2_BM_VIRT_RLS_REG is not interpreted by HW, and simply
	 * returned in the "cookie" field of the RX
	 * descriptor. Instead of storing the virtual address, we
	 * store the physical address
	 */
	mvpp2_thread_write_relaxed(port->priv, thread,
				   MVPP2_BM_VIRT_RLS_REG, buf_phys_addr);
	mvpp2_thread_write_relaxed(port->priv, thread,
				   MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr);

	if (test_bit(thread, &port->priv->lock_map))
		spin_unlock_irqrestore(&port->bm_lock[thread], flags);

	put_cpu();
}
1041db9d7d36SMaxime Chevallier
1042db9d7d36SMaxime Chevallier /* Allocate buffers for the pool */
mvpp2_bm_bufs_add(struct mvpp2_port * port,struct mvpp2_bm_pool * bm_pool,int buf_num)1043db9d7d36SMaxime Chevallier static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
1044db9d7d36SMaxime Chevallier struct mvpp2_bm_pool *bm_pool, int buf_num)
1045db9d7d36SMaxime Chevallier {
1046db9d7d36SMaxime Chevallier int i, buf_size, total_size;
1047db9d7d36SMaxime Chevallier dma_addr_t dma_addr;
1048db9d7d36SMaxime Chevallier phys_addr_t phys_addr;
1049b27db227SMatteo Croce struct page_pool *pp = NULL;
1050db9d7d36SMaxime Chevallier void *buf;
1051db9d7d36SMaxime Chevallier
10527d04b0b1SMatteo Croce if (port->priv->percpu_pools &&
10537d04b0b1SMatteo Croce bm_pool->pkt_size > MVPP2_BM_LONG_PKT_SIZE) {
10547d04b0b1SMatteo Croce netdev_err(port->dev,
10557d04b0b1SMatteo Croce "attempted to use jumbo frames with per-cpu pools");
10567d04b0b1SMatteo Croce return 0;
10577d04b0b1SMatteo Croce }
10587d04b0b1SMatteo Croce
1059db9d7d36SMaxime Chevallier buf_size = MVPP2_RX_BUF_SIZE(bm_pool->pkt_size);
1060db9d7d36SMaxime Chevallier total_size = MVPP2_RX_TOTAL_SIZE(buf_size);
1061db9d7d36SMaxime Chevallier
1062db9d7d36SMaxime Chevallier if (buf_num < 0 ||
1063db9d7d36SMaxime Chevallier (buf_num + bm_pool->buf_num > bm_pool->size)) {
1064db9d7d36SMaxime Chevallier netdev_err(port->dev,
1065db9d7d36SMaxime Chevallier "cannot allocate %d buffers for pool %d\n",
1066db9d7d36SMaxime Chevallier buf_num, bm_pool->id);
1067db9d7d36SMaxime Chevallier return 0;
1068db9d7d36SMaxime Chevallier }
1069db9d7d36SMaxime Chevallier
1070b27db227SMatteo Croce if (port->priv->percpu_pools)
1071b27db227SMatteo Croce pp = port->priv->page_pool[bm_pool->id];
1072db9d7d36SMaxime Chevallier for (i = 0; i < buf_num; i++) {
1073b27db227SMatteo Croce buf = mvpp2_buf_alloc(port, bm_pool, pp, &dma_addr,
1074db9d7d36SMaxime Chevallier &phys_addr, GFP_KERNEL);
1075db9d7d36SMaxime Chevallier if (!buf)
1076db9d7d36SMaxime Chevallier break;
1077db9d7d36SMaxime Chevallier
1078db9d7d36SMaxime Chevallier mvpp2_bm_pool_put(port, bm_pool->id, dma_addr,
1079db9d7d36SMaxime Chevallier phys_addr);
1080db9d7d36SMaxime Chevallier }
1081db9d7d36SMaxime Chevallier
1082db9d7d36SMaxime Chevallier /* Update BM driver with number of buffers added to pool */
1083db9d7d36SMaxime Chevallier bm_pool->buf_num += i;
1084db9d7d36SMaxime Chevallier
1085db9d7d36SMaxime Chevallier netdev_dbg(port->dev,
1086db9d7d36SMaxime Chevallier "pool %d: pkt_size=%4d, buf_size=%4d, total_size=%4d\n",
1087db9d7d36SMaxime Chevallier bm_pool->id, bm_pool->pkt_size, buf_size, total_size);
1088db9d7d36SMaxime Chevallier
1089db9d7d36SMaxime Chevallier netdev_dbg(port->dev,
1090db9d7d36SMaxime Chevallier "pool %d: %d of %d buffers added\n",
1091db9d7d36SMaxime Chevallier bm_pool->id, i, buf_num);
1092db9d7d36SMaxime Chevallier return i;
1093db9d7d36SMaxime Chevallier }
1094db9d7d36SMaxime Chevallier
/* Notify the driver that BM pool is being used as specific type and return the
 * pool pointer on success.
 *
 * On first use (pkt_size == 0) the pool is sized, filled with buffers and
 * its fragment size computed; subsequent calls only re-program the HW
 * buffer size. Returns NULL on invalid pool id or allocation failure.
 */
static struct mvpp2_bm_pool *
mvpp2_bm_pool_use(struct mvpp2_port *port, unsigned pool, int pkt_size)
{
	struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
	int num;

	/* Reject out-of-range pool ids for the active pool layout.
	 * NOTE(review): the percpu bound uses '>' (allows pool == nrxqs * 2)
	 * while the shared bound uses '>=' -- looks off-by-one; confirm
	 * against the bm_pools array size before changing.
	 */
	if ((port->priv->percpu_pools && pool > mvpp2_get_nrxqs(port->priv) * 2) ||
	    (!port->priv->percpu_pools && pool >= MVPP2_BM_POOLS_NUM)) {
		netdev_err(port->dev, "Invalid pool %d\n", pool);
		return NULL;
	}

	/* Allocate buffers in case BM pool is used as long pool, but packet
	 * size doesn't match MTU or BM pool hasn't being used yet
	 */
	if (new_pool->pkt_size == 0) {
		int pkts_num;

		/* Set default buffer number or free all the buffers in case
		 * the pool is not empty
		 */
		pkts_num = new_pool->buf_num;
		if (pkts_num == 0) {
			if (port->priv->percpu_pools) {
				/* percpu layout: first nrxqs ids are short
				 * pools, the following nrxqs ids are long
				 */
				if (pool < port->nrxqs)
					pkts_num = mvpp2_pools[MVPP2_BM_SHORT].buf_num;
				else
					pkts_num = mvpp2_pools[MVPP2_BM_LONG].buf_num;
			} else {
				pkts_num = mvpp2_pools[pool].buf_num;
			}
		} else {
			mvpp2_bm_bufs_free(port->dev->dev.parent,
					   port->priv, new_pool, pkts_num);
		}

		new_pool->pkt_size = pkt_size;
		/* Fragment size covers the aligned RX buffer plus room for
		 * the skb_shared_info tail
		 */
		new_pool->frag_size =
			SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) +
			MVPP2_SKB_SHINFO_SIZE;

		/* Allocate buffers for this pool */
		num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
		if (num != pkts_num) {
			WARN(1, "pool %d: %d of %d allocated\n",
			     new_pool->id, num, pkts_num);
			return NULL;
		}
	}

	mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
				  MVPP2_RX_BUF_SIZE(new_pool->pkt_size));

	return new_pool;
}
11537d04b0b1SMatteo Croce
/* Per-cpu variant of mvpp2_bm_pool_use(): @type selects the default buffer
 * count (MVPP2_BM_SHORT or MVPP2_BM_LONG) instead of deriving it from the
 * pool id. Returns the pool pointer on success, NULL on error.
 */
static struct mvpp2_bm_pool *
mvpp2_bm_pool_use_percpu(struct mvpp2_port *port, int type,
			 unsigned int pool, int pkt_size)
{
	struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
	int num;

	/* Valid percpu ids span two pools (short + long) per rxq.
	 * NOTE(review): '>' permits pool == nrxqs * 2 -- same apparent
	 * off-by-one as in mvpp2_bm_pool_use(); confirm intended.
	 */
	if (pool > port->nrxqs * 2) {
		netdev_err(port->dev, "Invalid pool %d\n", pool);
		return NULL;
	}

	/* Allocate buffers in case BM pool is used as long pool, but packet
	 * size doesn't match MTU or BM pool hasn't being used yet
	 */
	if (new_pool->pkt_size == 0) {
		int pkts_num;

		/* Set default buffer number or free all the buffers in case
		 * the pool is not empty
		 */
		pkts_num = new_pool->buf_num;
		if (pkts_num == 0)
			pkts_num = mvpp2_pools[type].buf_num;
		else
			mvpp2_bm_bufs_free(port->dev->dev.parent,
					   port->priv, new_pool, pkts_num);

		new_pool->pkt_size = pkt_size;
		/* Aligned RX buffer plus skb_shared_info tail */
		new_pool->frag_size =
			SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) +
			MVPP2_SKB_SHINFO_SIZE;

		/* Allocate buffers for this pool */
		num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
		if (num != pkts_num) {
			WARN(1, "pool %d: %d of %d allocated\n",
			     new_pool->id, num, pkts_num);
			return NULL;
		}
	}

	mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
				  MVPP2_RX_BUF_SIZE(new_pool->pkt_size));

	return new_pool;
}
1201db9d7d36SMaxime Chevallier
/* Initialize pools for swf, shared buffers variant.
 *
 * Picks the (long, short) logical pool pair based on the port packet size,
 * attaches the port to both pools and programs every rxq to use them.
 * Returns 0 on success, -ENOMEM if a pool could not be obtained.
 */
static int mvpp2_swf_bm_pool_init_shared(struct mvpp2_port *port)
{
	enum mvpp2_bm_pool_log_num long_log_pool, short_log_pool;
	int rxq;

	/* If port pkt_size is higher than 1518B:
	 * HW Long pool - SW Jumbo pool, HW Short pool - SW Long pool
	 * else: HW Long pool - SW Long pool, HW Short pool - SW Short pool
	 */
	if (port->pkt_size > MVPP2_BM_LONG_PKT_SIZE) {
		long_log_pool = MVPP2_BM_JUMBO;
		short_log_pool = MVPP2_BM_LONG;
	} else {
		long_log_pool = MVPP2_BM_LONG;
		short_log_pool = MVPP2_BM_SHORT;
	}

	/* Only (re)attach pools the port is not already using */
	if (!port->pool_long) {
		port->pool_long =
			mvpp2_bm_pool_use(port, long_log_pool,
					  mvpp2_pools[long_log_pool].pkt_size);
		if (!port->pool_long)
			return -ENOMEM;

		port->pool_long->port_map |= BIT(port->id);

		for (rxq = 0; rxq < port->nrxqs; rxq++)
			mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);
	}

	if (!port->pool_short) {
		port->pool_short =
			mvpp2_bm_pool_use(port, short_log_pool,
					  mvpp2_pools[short_log_pool].pkt_size);
		if (!port->pool_short)
			return -ENOMEM;

		port->pool_short->port_map |= BIT(port->id);

		for (rxq = 0; rxq < port->nrxqs; rxq++)
			mvpp2_rxq_short_pool_set(port, rxq,
						 port->pool_short->id);
	}

	return 0;
}
1249db9d7d36SMaxime Chevallier
12507d04b0b1SMatteo Croce /* Initialize pools for swf, percpu buffers variant */
mvpp2_swf_bm_pool_init_percpu(struct mvpp2_port * port)12517d04b0b1SMatteo Croce static int mvpp2_swf_bm_pool_init_percpu(struct mvpp2_port *port)
12527d04b0b1SMatteo Croce {
1253136bcd84SMatteo Croce struct mvpp2_bm_pool *bm_pool;
12547d04b0b1SMatteo Croce int i;
12557d04b0b1SMatteo Croce
12567d04b0b1SMatteo Croce for (i = 0; i < port->nrxqs; i++) {
1257136bcd84SMatteo Croce bm_pool = mvpp2_bm_pool_use_percpu(port, MVPP2_BM_SHORT, i,
12587d04b0b1SMatteo Croce mvpp2_pools[MVPP2_BM_SHORT].pkt_size);
1259136bcd84SMatteo Croce if (!bm_pool)
12607d04b0b1SMatteo Croce return -ENOMEM;
12617d04b0b1SMatteo Croce
1262136bcd84SMatteo Croce bm_pool->port_map |= BIT(port->id);
1263136bcd84SMatteo Croce mvpp2_rxq_short_pool_set(port, i, bm_pool->id);
12647d04b0b1SMatteo Croce }
12657d04b0b1SMatteo Croce
12667d04b0b1SMatteo Croce for (i = 0; i < port->nrxqs; i++) {
1267136bcd84SMatteo Croce bm_pool = mvpp2_bm_pool_use_percpu(port, MVPP2_BM_LONG, i + port->nrxqs,
12687d04b0b1SMatteo Croce mvpp2_pools[MVPP2_BM_LONG].pkt_size);
1269136bcd84SMatteo Croce if (!bm_pool)
12707d04b0b1SMatteo Croce return -ENOMEM;
12717d04b0b1SMatteo Croce
1272136bcd84SMatteo Croce bm_pool->port_map |= BIT(port->id);
1273136bcd84SMatteo Croce mvpp2_rxq_long_pool_set(port, i, bm_pool->id);
12747d04b0b1SMatteo Croce }
12757d04b0b1SMatteo Croce
12767d04b0b1SMatteo Croce port->pool_long = NULL;
12777d04b0b1SMatteo Croce port->pool_short = NULL;
12787d04b0b1SMatteo Croce
12797d04b0b1SMatteo Croce return 0;
12807d04b0b1SMatteo Croce }
12817d04b0b1SMatteo Croce
mvpp2_swf_bm_pool_init(struct mvpp2_port * port)12827d04b0b1SMatteo Croce static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
12837d04b0b1SMatteo Croce {
12847d04b0b1SMatteo Croce if (port->priv->percpu_pools)
12857d04b0b1SMatteo Croce return mvpp2_swf_bm_pool_init_percpu(port);
12867d04b0b1SMatteo Croce else
12877d04b0b1SMatteo Croce return mvpp2_swf_bm_pool_init_shared(port);
12887d04b0b1SMatteo Croce }
12897d04b0b1SMatteo Croce
mvpp2_set_hw_csum(struct mvpp2_port * port,enum mvpp2_bm_pool_log_num new_long_pool)1290d66503c4SMatteo Croce static void mvpp2_set_hw_csum(struct mvpp2_port *port,
1291d66503c4SMatteo Croce enum mvpp2_bm_pool_log_num new_long_pool)
1292d66503c4SMatteo Croce {
1293d66503c4SMatteo Croce const netdev_features_t csums = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
1294d66503c4SMatteo Croce
1295d66503c4SMatteo Croce /* Update L4 checksum when jumbo enable/disable on port.
1296d66503c4SMatteo Croce * Only port 0 supports hardware checksum offload due to
1297d66503c4SMatteo Croce * the Tx FIFO size limitation.
1298d66503c4SMatteo Croce * Also, don't set NETIF_F_HW_CSUM because L3_offset in TX descriptor
1299d66503c4SMatteo Croce * has 7 bits, so the maximum L3 offset is 128.
1300d66503c4SMatteo Croce */
1301d66503c4SMatteo Croce if (new_long_pool == MVPP2_BM_JUMBO && port->id != 0) {
1302d66503c4SMatteo Croce port->dev->features &= ~csums;
1303d66503c4SMatteo Croce port->dev->hw_features &= ~csums;
1304d66503c4SMatteo Croce } else {
1305d66503c4SMatteo Croce port->dev->features |= csums;
1306d66503c4SMatteo Croce port->dev->hw_features |= csums;
1307d66503c4SMatteo Croce }
1308d66503c4SMatteo Croce }
1309d66503c4SMatteo Croce
mvpp2_bm_update_mtu(struct net_device * dev,int mtu)1310db9d7d36SMaxime Chevallier static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu)
1311db9d7d36SMaxime Chevallier {
1312db9d7d36SMaxime Chevallier struct mvpp2_port *port = netdev_priv(dev);
1313db9d7d36SMaxime Chevallier enum mvpp2_bm_pool_log_num new_long_pool;
1314db9d7d36SMaxime Chevallier int pkt_size = MVPP2_RX_PKT_SIZE(mtu);
1315db9d7d36SMaxime Chevallier
13167d04b0b1SMatteo Croce if (port->priv->percpu_pools)
13177d04b0b1SMatteo Croce goto out_set;
13187d04b0b1SMatteo Croce
1319db9d7d36SMaxime Chevallier /* If port MTU is higher than 1518B:
1320db9d7d36SMaxime Chevallier * HW Long pool - SW Jumbo pool, HW Short pool - SW Long pool
1321db9d7d36SMaxime Chevallier * else: HW Long pool - SW Long pool, HW Short pool - SW Short pool
1322db9d7d36SMaxime Chevallier */
1323db9d7d36SMaxime Chevallier if (pkt_size > MVPP2_BM_LONG_PKT_SIZE)
1324db9d7d36SMaxime Chevallier new_long_pool = MVPP2_BM_JUMBO;
1325db9d7d36SMaxime Chevallier else
1326db9d7d36SMaxime Chevallier new_long_pool = MVPP2_BM_LONG;
1327db9d7d36SMaxime Chevallier
1328db9d7d36SMaxime Chevallier if (new_long_pool != port->pool_long->id) {
132976055831SStefan Chulski if (port->tx_fc) {
133076055831SStefan Chulski if (pkt_size > MVPP2_BM_LONG_PKT_SIZE)
133176055831SStefan Chulski mvpp2_bm_pool_update_fc(port,
133276055831SStefan Chulski port->pool_short,
133376055831SStefan Chulski false);
133476055831SStefan Chulski else
133576055831SStefan Chulski mvpp2_bm_pool_update_fc(port, port->pool_long,
133676055831SStefan Chulski false);
133776055831SStefan Chulski }
133876055831SStefan Chulski
1339db9d7d36SMaxime Chevallier /* Remove port from old short & long pool */
1340db9d7d36SMaxime Chevallier port->pool_long = mvpp2_bm_pool_use(port, port->pool_long->id,
1341db9d7d36SMaxime Chevallier port->pool_long->pkt_size);
1342db9d7d36SMaxime Chevallier port->pool_long->port_map &= ~BIT(port->id);
1343db9d7d36SMaxime Chevallier port->pool_long = NULL;
1344db9d7d36SMaxime Chevallier
1345db9d7d36SMaxime Chevallier port->pool_short = mvpp2_bm_pool_use(port, port->pool_short->id,
1346db9d7d36SMaxime Chevallier port->pool_short->pkt_size);
1347db9d7d36SMaxime Chevallier port->pool_short->port_map &= ~BIT(port->id);
1348db9d7d36SMaxime Chevallier port->pool_short = NULL;
1349db9d7d36SMaxime Chevallier
1350db9d7d36SMaxime Chevallier port->pkt_size = pkt_size;
1351db9d7d36SMaxime Chevallier
1352db9d7d36SMaxime Chevallier /* Add port to new short & long pool */
1353db9d7d36SMaxime Chevallier mvpp2_swf_bm_pool_init(port);
1354db9d7d36SMaxime Chevallier
1355d66503c4SMatteo Croce mvpp2_set_hw_csum(port, new_long_pool);
135676055831SStefan Chulski
135776055831SStefan Chulski if (port->tx_fc) {
135876055831SStefan Chulski if (pkt_size > MVPP2_BM_LONG_PKT_SIZE)
135976055831SStefan Chulski mvpp2_bm_pool_update_fc(port, port->pool_long,
136076055831SStefan Chulski true);
136176055831SStefan Chulski else
136276055831SStefan Chulski mvpp2_bm_pool_update_fc(port, port->pool_short,
136376055831SStefan Chulski true);
136476055831SStefan Chulski }
136576055831SStefan Chulski
136676055831SStefan Chulski /* Update L4 checksum when jumbo enable/disable on port */
136776055831SStefan Chulski if (new_long_pool == MVPP2_BM_JUMBO && port->id != 0) {
136876055831SStefan Chulski dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
136976055831SStefan Chulski dev->hw_features &= ~(NETIF_F_IP_CSUM |
137076055831SStefan Chulski NETIF_F_IPV6_CSUM);
137176055831SStefan Chulski } else {
137276055831SStefan Chulski dev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
137376055831SStefan Chulski dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
137476055831SStefan Chulski }
1375db9d7d36SMaxime Chevallier }
1376db9d7d36SMaxime Chevallier
13777d04b0b1SMatteo Croce out_set:
1378db9d7d36SMaxime Chevallier dev->mtu = mtu;
1379db9d7d36SMaxime Chevallier dev->wanted_features = dev->features;
1380db9d7d36SMaxime Chevallier
1381db9d7d36SMaxime Chevallier netdev_update_features(dev);
1382db9d7d36SMaxime Chevallier return 0;
1383db9d7d36SMaxime Chevallier }
1384db9d7d36SMaxime Chevallier
mvpp2_interrupts_enable(struct mvpp2_port * port)1385db9d7d36SMaxime Chevallier static inline void mvpp2_interrupts_enable(struct mvpp2_port *port)
1386db9d7d36SMaxime Chevallier {
1387db9d7d36SMaxime Chevallier int i, sw_thread_mask = 0;
1388db9d7d36SMaxime Chevallier
1389db9d7d36SMaxime Chevallier for (i = 0; i < port->nqvecs; i++)
1390db9d7d36SMaxime Chevallier sw_thread_mask |= port->qvecs[i].sw_thread_mask;
1391db9d7d36SMaxime Chevallier
1392db9d7d36SMaxime Chevallier mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
1393db9d7d36SMaxime Chevallier MVPP2_ISR_ENABLE_INTERRUPT(sw_thread_mask));
1394db9d7d36SMaxime Chevallier }
1395db9d7d36SMaxime Chevallier
mvpp2_interrupts_disable(struct mvpp2_port * port)1396db9d7d36SMaxime Chevallier static inline void mvpp2_interrupts_disable(struct mvpp2_port *port)
1397db9d7d36SMaxime Chevallier {
1398db9d7d36SMaxime Chevallier int i, sw_thread_mask = 0;
1399db9d7d36SMaxime Chevallier
1400db9d7d36SMaxime Chevallier for (i = 0; i < port->nqvecs; i++)
1401db9d7d36SMaxime Chevallier sw_thread_mask |= port->qvecs[i].sw_thread_mask;
1402db9d7d36SMaxime Chevallier
1403db9d7d36SMaxime Chevallier mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
1404db9d7d36SMaxime Chevallier MVPP2_ISR_DISABLE_INTERRUPT(sw_thread_mask));
1405db9d7d36SMaxime Chevallier }
1406db9d7d36SMaxime Chevallier
mvpp2_qvec_interrupt_enable(struct mvpp2_queue_vector * qvec)1407db9d7d36SMaxime Chevallier static inline void mvpp2_qvec_interrupt_enable(struct mvpp2_queue_vector *qvec)
1408db9d7d36SMaxime Chevallier {
1409db9d7d36SMaxime Chevallier struct mvpp2_port *port = qvec->port;
1410db9d7d36SMaxime Chevallier
1411db9d7d36SMaxime Chevallier mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
1412db9d7d36SMaxime Chevallier MVPP2_ISR_ENABLE_INTERRUPT(qvec->sw_thread_mask));
1413db9d7d36SMaxime Chevallier }
1414db9d7d36SMaxime Chevallier
mvpp2_qvec_interrupt_disable(struct mvpp2_queue_vector * qvec)1415db9d7d36SMaxime Chevallier static inline void mvpp2_qvec_interrupt_disable(struct mvpp2_queue_vector *qvec)
1416db9d7d36SMaxime Chevallier {
1417db9d7d36SMaxime Chevallier struct mvpp2_port *port = qvec->port;
1418db9d7d36SMaxime Chevallier
1419db9d7d36SMaxime Chevallier mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
1420db9d7d36SMaxime Chevallier MVPP2_ISR_DISABLE_INTERRUPT(qvec->sw_thread_mask));
1421db9d7d36SMaxime Chevallier }
1422db9d7d36SMaxime Chevallier
1423543ec376SAntoine Tenart /* Mask the current thread's Rx/Tx interrupts
1424db9d7d36SMaxime Chevallier * Called by on_each_cpu(), guaranteed to run with migration disabled,
1425db9d7d36SMaxime Chevallier * using smp_processor_id() is OK.
1426db9d7d36SMaxime Chevallier */
mvpp2_interrupts_mask(void * arg)1427db9d7d36SMaxime Chevallier static void mvpp2_interrupts_mask(void *arg)
1428db9d7d36SMaxime Chevallier {
1429db9d7d36SMaxime Chevallier struct mvpp2_port *port = arg;
1430bf270fa3SStefan Chulski int cpu = smp_processor_id();
1431bf270fa3SStefan Chulski u32 thread;
1432db9d7d36SMaxime Chevallier
1433e531f767SAntoine Tenart /* If the thread isn't used, don't do anything */
1434bf270fa3SStefan Chulski if (cpu > port->priv->nthreads)
1435e531f767SAntoine Tenart return;
1436e531f767SAntoine Tenart
1437bf270fa3SStefan Chulski thread = mvpp2_cpu_to_thread(port->priv, cpu);
1438bf270fa3SStefan Chulski
1439bf270fa3SStefan Chulski mvpp2_thread_write(port->priv, thread,
1440db9d7d36SMaxime Chevallier MVPP2_ISR_RX_TX_MASK_REG(port->id), 0);
1441bf270fa3SStefan Chulski mvpp2_thread_write(port->priv, thread,
1442bf270fa3SStefan Chulski MVPP2_ISR_RX_ERR_CAUSE_REG(port->id), 0);
1443db9d7d36SMaxime Chevallier }
1444db9d7d36SMaxime Chevallier
/* Unmask the current thread's Rx/Tx interrupts.
 * Called by on_each_cpu(), guaranteed to run with migration disabled,
 * using smp_processor_id() is OK.
 */
static void mvpp2_interrupts_unmask(void *arg)
{
	struct mvpp2_port *port = arg;
	int cpu = smp_processor_id();
	u32 val, thread;

	/* If the thread isn't used, don't do anything */
	if (cpu >= port->priv->nthreads)
		return;

	thread = mvpp2_cpu_to_thread(port->priv, cpu);

	/* Always unmask misc and Rx-occupied causes; Tx causes only when
	 * the port has dedicated Tx interrupts
	 */
	val = MVPP2_CAUSE_MISC_SUM_MASK |
	      MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(port->priv->hw_version);
	if (port->has_tx_irqs)
		val |= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;

	mvpp2_thread_write(port->priv, thread,
			   MVPP2_ISR_RX_TX_MASK_REG(port->id), val);
	mvpp2_thread_write(port->priv, thread,
			   MVPP2_ISR_RX_ERR_CAUSE_REG(port->id),
			   MVPP2_ISR_RX_ERR_CAUSE_NONOCC_MASK);
}
1472db9d7d36SMaxime Chevallier
/* Mask (@mask == true) or unmask the shared (non per-cpu) queue vectors'
 * Rx interrupts. Only relevant on PPv2.2+; PPv2.1 has no shared vectors.
 */
static void
mvpp2_shared_interrupt_mask_unmask(struct mvpp2_port *port, bool mask)
{
	u32 val;
	int i;

	if (port->priv->hw_version == MVPP21)
		return;

	/* 0 masks everything; otherwise unmask all Rx-occupied causes */
	if (mask)
		val = 0;
	else
		val = MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(MVPP22);

	for (i = 0; i < port->nqvecs; i++) {
		struct mvpp2_queue_vector *v = port->qvecs + i;

		/* Skip per-cpu vectors; only shared ones are touched here */
		if (v->type != MVPP2_QUEUE_VECTOR_SHARED)
			continue;

		mvpp2_thread_write(port->priv, v->sw_thread_id,
				   MVPP2_ISR_RX_TX_MASK_REG(port->id), val);
		mvpp2_thread_write(port->priv, v->sw_thread_id,
				   MVPP2_ISR_RX_ERR_CAUSE_REG(port->id),
				   MVPP2_ISR_RX_ERR_CAUSE_NONOCC_MASK);
	}
}
1500db9d7d36SMaxime Chevallier
1501a9a33202SRussell King /* Only GOP port 0 has an XLG MAC */
mvpp2_port_supports_xlg(struct mvpp2_port * port)1502a9a33202SRussell King static bool mvpp2_port_supports_xlg(struct mvpp2_port *port)
1503a9a33202SRussell King {
1504a9a33202SRussell King return port->gop_id == 0;
1505a9a33202SRussell King }
1506a9a33202SRussell King
mvpp2_port_supports_rgmii(struct mvpp2_port * port)1507a9a33202SRussell King static bool mvpp2_port_supports_rgmii(struct mvpp2_port *port)
1508a9a33202SRussell King {
1509f704177eSStefan Chulski return !(port->priv->hw_version >= MVPP22 && port->gop_id == 0);
1510a9a33202SRussell King }
1511a9a33202SRussell King
1512db9d7d36SMaxime Chevallier /* Port configuration routines */
mvpp2_is_xlg(phy_interface_t interface)1513b7d286f0SRussell King static bool mvpp2_is_xlg(phy_interface_t interface)
1514b7d286f0SRussell King {
1515e0f909bcSRussell King return interface == PHY_INTERFACE_MODE_10GBASER ||
15164043ec70SMarek Behún interface == PHY_INTERFACE_MODE_5GBASER ||
1517b7d286f0SRussell King interface == PHY_INTERFACE_MODE_XAUI;
1518b7d286f0SRussell King }
1519db9d7d36SMaxime Chevallier
/* Read-modify-write helper: clear @mask, OR in @set, and write back only
 * if the value actually changed (avoids redundant MMIO writes).
 */
static void mvpp2_modify(void __iomem *ptr, u32 mask, u32 set)
{
	u32 cur, val;

	cur = readl(ptr);
	val = (cur & ~mask) | set;
	if (val != cur)
		writel(val, ptr);
}
1530bd45f644SRussell King
/* Configure the system controller GoP registers for RGMII on this port.
 * Only GOP ports 2 and 3 have an RGMII mux bit to flip.
 */
static void mvpp22_gop_init_rgmii(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	u32 val;

	regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
	val |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT;
	regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);

	regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val);
	/* Select RGMII (rather than the SGMII default) for this GOP port */
	if (port->gop_id == 2)
		val |= GENCONF_CTRL0_PORT2_RGMII;
	else if (port->gop_id == 3)
		val |= GENCONF_CTRL0_PORT3_RGMII_MII;
	regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val);
}
1547db9d7d36SMaxime Chevallier
/* Configure the system controller GoP registers for SGMII/1000BaseX/
 * 2500BaseX on this port; clears the RGMII mux bit for GOP ports 2/3.
 */
static void mvpp22_gop_init_sgmii(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	u32 val;

	regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
	val |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT |
	       GENCONF_PORT_CTRL0_RX_DATA_SAMPLE;
	regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);

	/* Only GOP ports 2 and 3 have an RGMII mux bit to clear */
	if (port->gop_id > 1) {
		regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val);
		if (port->gop_id == 2)
			val &= ~GENCONF_CTRL0_PORT2_RGMII;
		else if (port->gop_id == 3)
			val &= ~GENCONF_CTRL0_PORT3_RGMII_MII;
		regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val);
	}
}
1567db9d7d36SMaxime Chevallier
/* Configure the XPCS and MPCS blocks for 10GBase-R style operation on
 * this GOP port.
 */
static void mvpp22_gop_init_10gkr(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	void __iomem *mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id);
	void __iomem *xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id);
	u32 val;

	/* XPCS: clear PCS mode/active-lane fields, then select lane 2 */
	val = readl(xpcs + MVPP22_XPCS_CFG0);
	val &= ~(MVPP22_XPCS_CFG0_PCS_MODE(0x3) |
		 MVPP22_XPCS_CFG0_ACTIVE_LANE(0x3));
	val |= MVPP22_XPCS_CFG0_ACTIVE_LANE(2);
	writel(val, xpcs + MVPP22_XPCS_CFG0);

	/* MPCS: disable forwarding of error connections */
	val = readl(mpcs + MVPP22_MPCS_CTRL);
	val &= ~MVPP22_MPCS_CTRL_FWD_ERR_CONN;
	writel(val, mpcs + MVPP22_MPCS_CTRL);

	/* MPCS: set the clock reset divider ratio to 1 */
	val = readl(mpcs + MVPP22_MPCS_CLK_RESET);
	val &= ~MVPP22_MPCS_CLK_RESET_DIV_RATIO(0x7);
	val |= MVPP22_MPCS_CLK_RESET_DIV_RATIO(1);
	writel(val, mpcs + MVPP22_MPCS_CLK_RESET);
}
1590db9d7d36SMaxime Chevallier
/* Turn the FCA periodic flow-control counter on or off for this port */
static void mvpp22_gop_fca_enable_periodic(struct mvpp2_port *port, bool en)
{
	void __iomem *fca = port->priv->iface_base +
			    MVPP22_FCA_BASE(port->gop_id);
	u32 ctrl;

	ctrl = readl(fca + MVPP22_FCA_CONTROL_REG);
	if (en)
		ctrl |= MVPP22_FCA_ENABLE_PERIODIC;
	else
		ctrl &= ~MVPP22_FCA_ENABLE_PERIODIC;
	writel(ctrl, fca + MVPP22_FCA_CONTROL_REG);
}
16032788d841SStefan Chulski
/* Program the FCA periodic counter, splitting @timer into the low and
 * high halves expected by the LSB/MSB counter registers.
 */
static void mvpp22_gop_fca_set_timer(struct mvpp2_port *port, u32 timer)
{
	void __iomem *fca = port->priv->iface_base +
			    MVPP22_FCA_BASE(port->gop_id);

	writel(timer & MVPP22_FCA_REG_MASK,
	       fca + MVPP22_PERIODIC_COUNTER_LSB_REG);
	writel(timer >> MVPP22_FCA_REG_SIZE,
	       fca + MVPP22_PERIODIC_COUNTER_MSB_REG);
}
16162788d841SStefan Chulski
16172788d841SStefan Chulski /* Set Flow Control timer x100 faster than pause quanta to ensure that link
16182788d841SStefan Chulski * partner won't send traffic if port is in XOFF mode.
16192788d841SStefan Chulski */
mvpp22_gop_fca_set_periodic_timer(struct mvpp2_port * port)16202788d841SStefan Chulski static void mvpp22_gop_fca_set_periodic_timer(struct mvpp2_port *port)
16212788d841SStefan Chulski {
16222788d841SStefan Chulski u32 timer;
16232788d841SStefan Chulski
16242788d841SStefan Chulski timer = (port->priv->tclk / (USEC_PER_SEC * FC_CLK_DIVIDER))
16252788d841SStefan Chulski * FC_QUANTA;
16262788d841SStefan Chulski
16272788d841SStefan Chulski mvpp22_gop_fca_enable_periodic(port, false);
16282788d841SStefan Chulski
16292788d841SStefan Chulski mvpp22_gop_fca_set_timer(port, timer);
16302788d841SStefan Chulski
16312788d841SStefan Chulski mvpp22_gop_fca_enable_periodic(port, true);
16322788d841SStefan Chulski }
16332788d841SStefan Chulski
/* Initialize the GoP (group of ports) glue logic for @interface.
 *
 * Dispatches to the per-interface setup, then releases the port from
 * reset, enables it and programs the FCA periodic timer. Returns 0 on
 * success (including when no sysctrl regmap is available, or when the
 * interface needs no GoP setup), -EINVAL for an invalid port/interface
 * combination.
 */
static int mvpp22_gop_init(struct mvpp2_port *port, phy_interface_t interface)
{
	struct mvpp2 *priv = port->priv;
	u32 val;

	/* Nothing to do without access to the system controller */
	if (!priv->sysctrl_base)
		return 0;

	switch (interface) {
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
		if (!mvpp2_port_supports_rgmii(port))
			goto invalid_conf;
		mvpp22_gop_init_rgmii(port);
		break;
	case PHY_INTERFACE_MODE_SGMII:
	case PHY_INTERFACE_MODE_1000BASEX:
	case PHY_INTERFACE_MODE_2500BASEX:
		mvpp22_gop_init_sgmii(port);
		break;
	case PHY_INTERFACE_MODE_5GBASER:
	case PHY_INTERFACE_MODE_10GBASER:
		if (!mvpp2_port_supports_xlg(port))
			goto invalid_conf;
		mvpp22_gop_init_10gkr(port);
		break;
	default:
		goto unsupported_conf;
	}

	/* Release the port from reset and enable it */
	regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL1, &val);
	val |= GENCONF_PORT_CTRL1_RESET(port->gop_id) |
	       GENCONF_PORT_CTRL1_EN(port->gop_id);
	regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL1, val);

	regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
	val |= GENCONF_PORT_CTRL0_CLK_DIV_PHASE_CLR;
	regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);

	regmap_read(priv->sysctrl_base, GENCONF_SOFT_RESET1, &val);
	val |= GENCONF_SOFT_RESET1_GOP;
	regmap_write(priv->sysctrl_base, GENCONF_SOFT_RESET1, val);

	mvpp22_gop_fca_set_periodic_timer(port);

unsupported_conf:
	return 0;

invalid_conf:
	netdev_err(port->dev, "Invalid port configuration\n");
	return -EINVAL;
}
1688db9d7d36SMaxime Chevallier
mvpp22_gop_unmask_irq(struct mvpp2_port * port)1689db9d7d36SMaxime Chevallier static void mvpp22_gop_unmask_irq(struct mvpp2_port *port)
1690db9d7d36SMaxime Chevallier {
1691db9d7d36SMaxime Chevallier u32 val;
1692db9d7d36SMaxime Chevallier
1693db9d7d36SMaxime Chevallier if (phy_interface_mode_is_rgmii(port->phy_interface) ||
16944a4cec72SRussell King phy_interface_mode_is_8023z(port->phy_interface) ||
16954a4cec72SRussell King port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
1696db9d7d36SMaxime Chevallier /* Enable the GMAC link status irq for this port */
1697db9d7d36SMaxime Chevallier val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK);
1698db9d7d36SMaxime Chevallier val |= MVPP22_GMAC_INT_SUM_MASK_LINK_STAT;
1699db9d7d36SMaxime Chevallier writel(val, port->base + MVPP22_GMAC_INT_SUM_MASK);
1700db9d7d36SMaxime Chevallier }
1701db9d7d36SMaxime Chevallier
1702a9a33202SRussell King if (mvpp2_port_supports_xlg(port)) {
1703db9d7d36SMaxime Chevallier /* Enable the XLG/GIG irqs for this port */
1704db9d7d36SMaxime Chevallier val = readl(port->base + MVPP22_XLG_EXT_INT_MASK);
17051d9b041eSRussell King if (mvpp2_is_xlg(port->phy_interface))
1706db9d7d36SMaxime Chevallier val |= MVPP22_XLG_EXT_INT_MASK_XLG;
1707db9d7d36SMaxime Chevallier else
1708db9d7d36SMaxime Chevallier val |= MVPP22_XLG_EXT_INT_MASK_GIG;
1709db9d7d36SMaxime Chevallier writel(val, port->base + MVPP22_XLG_EXT_INT_MASK);
1710db9d7d36SMaxime Chevallier }
1711db9d7d36SMaxime Chevallier }
1712db9d7d36SMaxime Chevallier
mvpp22_gop_mask_irq(struct mvpp2_port * port)1713db9d7d36SMaxime Chevallier static void mvpp22_gop_mask_irq(struct mvpp2_port *port)
1714db9d7d36SMaxime Chevallier {
1715db9d7d36SMaxime Chevallier u32 val;
1716db9d7d36SMaxime Chevallier
1717a9a33202SRussell King if (mvpp2_port_supports_xlg(port)) {
1718db9d7d36SMaxime Chevallier val = readl(port->base + MVPP22_XLG_EXT_INT_MASK);
1719db9d7d36SMaxime Chevallier val &= ~(MVPP22_XLG_EXT_INT_MASK_XLG |
1720db9d7d36SMaxime Chevallier MVPP22_XLG_EXT_INT_MASK_GIG);
1721db9d7d36SMaxime Chevallier writel(val, port->base + MVPP22_XLG_EXT_INT_MASK);
1722db9d7d36SMaxime Chevallier }
1723db9d7d36SMaxime Chevallier
1724db9d7d36SMaxime Chevallier if (phy_interface_mode_is_rgmii(port->phy_interface) ||
17254a4cec72SRussell King phy_interface_mode_is_8023z(port->phy_interface) ||
17264a4cec72SRussell King port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
1727db9d7d36SMaxime Chevallier val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK);
1728db9d7d36SMaxime Chevallier val &= ~MVPP22_GMAC_INT_SUM_MASK_LINK_STAT;
1729db9d7d36SMaxime Chevallier writel(val, port->base + MVPP22_GMAC_INT_SUM_MASK);
1730db9d7d36SMaxime Chevallier }
1731db9d7d36SMaxime Chevallier }
1732db9d7d36SMaxime Chevallier
/* One-time interrupt setup for the GoP: enable the PTP summary line,
 * the GMAC link-status interrupt (always when phylink drives the port,
 * otherwise only for RGMII/802.3z/SGMII), and the XLG link + PTP
 * interrupts on XLG-capable ports. Finishes by unmasking the summary
 * interrupts via mvpp22_gop_unmask_irq().
 */
static void mvpp22_gop_setup_irq(struct mvpp2_port *port)
{
	u32 val;

	/* Always allow the PTP summary interrupt through */
	mvpp2_modify(port->base + MVPP22_GMAC_INT_SUM_MASK,
		     MVPP22_GMAC_INT_SUM_MASK_PTP,
		     MVPP22_GMAC_INT_SUM_MASK_PTP);

	if (port->phylink ||
	    phy_interface_mode_is_rgmii(port->phy_interface) ||
	    phy_interface_mode_is_8023z(port->phy_interface) ||
	    port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
		/* Enable the GMAC link status irq for this port */
		val = readl(port->base + MVPP22_GMAC_INT_MASK);
		val |= MVPP22_GMAC_INT_MASK_LINK_STAT;
		writel(val, port->base + MVPP22_GMAC_INT_MASK);
	}

	if (mvpp2_port_supports_xlg(port)) {
		/* Enable the XLG link and PTP interrupts for this port */
		val = readl(port->base + MVPP22_XLG_INT_MASK);
		val |= MVPP22_XLG_INT_MASK_LINK;
		writel(val, port->base + MVPP22_XLG_INT_MASK);

		mvpp2_modify(port->base + MVPP22_XLG_EXT_INT_MASK,
			     MVPP22_XLG_EXT_INT_MASK_PTP,
			     MVPP22_XLG_EXT_INT_MASK_PTP);
	}

	mvpp22_gop_unmask_irq(port);
}
1762db9d7d36SMaxime Chevallier
1763db9d7d36SMaxime Chevallier /* Sets the PHY mode of the COMPHY (which configures the serdes lanes).
1764db9d7d36SMaxime Chevallier *
1765db9d7d36SMaxime Chevallier * The PHY mode used by the PPv2 driver comes from the network subsystem, while
1766db9d7d36SMaxime Chevallier * the one given to the COMPHY comes from the generic PHY subsystem. Hence they
1767db9d7d36SMaxime Chevallier * differ.
1768db9d7d36SMaxime Chevallier *
1769db9d7d36SMaxime Chevallier * The COMPHY configures the serdes lanes regardless of the actual use of the
1770db9d7d36SMaxime Chevallier * lanes by the physical layer. This is why configurations like
1771db9d7d36SMaxime Chevallier * "PPv2 (2500BaseX) - COMPHY (2500SGMII)" are valid.
1772db9d7d36SMaxime Chevallier */
/* Configure and power on the port's COMPHY serdes lane for @interface.
 * Returns 0 when the port has no COMPHY, otherwise the result of the
 * mode-set / power-on sequence.
 */
static int mvpp22_comphy_init(struct mvpp2_port *port,
			      phy_interface_t interface)
{
	int err;

	if (!port->comphy)
		return 0;

	err = phy_set_mode_ext(port->comphy, PHY_MODE_ETHERNET, interface);

	return err ? err : phy_power_on(port->comphy);
}
1787db9d7d36SMaxime Chevallier
mvpp2_port_enable(struct mvpp2_port * port)1788db9d7d36SMaxime Chevallier static void mvpp2_port_enable(struct mvpp2_port *port)
1789db9d7d36SMaxime Chevallier {
1790db9d7d36SMaxime Chevallier u32 val;
1791db9d7d36SMaxime Chevallier
1792a9a33202SRussell King if (mvpp2_port_supports_xlg(port) &&
1793a9a33202SRussell King mvpp2_is_xlg(port->phy_interface)) {
1794db9d7d36SMaxime Chevallier val = readl(port->base + MVPP22_XLG_CTRL0_REG);
1795649e51d5SAntoine Tenart val |= MVPP22_XLG_CTRL0_PORT_EN;
1796db9d7d36SMaxime Chevallier val &= ~MVPP22_XLG_CTRL0_MIB_CNT_DIS;
1797db9d7d36SMaxime Chevallier writel(val, port->base + MVPP22_XLG_CTRL0_REG);
1798db9d7d36SMaxime Chevallier } else {
1799db9d7d36SMaxime Chevallier val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
1800db9d7d36SMaxime Chevallier val |= MVPP2_GMAC_PORT_EN_MASK;
1801db9d7d36SMaxime Chevallier val |= MVPP2_GMAC_MIB_CNTR_EN_MASK;
1802db9d7d36SMaxime Chevallier writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
1803db9d7d36SMaxime Chevallier }
1804db9d7d36SMaxime Chevallier }
1805db9d7d36SMaxime Chevallier
mvpp2_port_disable(struct mvpp2_port * port)1806db9d7d36SMaxime Chevallier static void mvpp2_port_disable(struct mvpp2_port *port)
1807db9d7d36SMaxime Chevallier {
1808db9d7d36SMaxime Chevallier u32 val;
1809db9d7d36SMaxime Chevallier
1810a9a33202SRussell King if (mvpp2_port_supports_xlg(port) &&
1811a9a33202SRussell King mvpp2_is_xlg(port->phy_interface)) {
1812db9d7d36SMaxime Chevallier val = readl(port->base + MVPP22_XLG_CTRL0_REG);
1813db9d7d36SMaxime Chevallier val &= ~MVPP22_XLG_CTRL0_PORT_EN;
1814db9d7d36SMaxime Chevallier writel(val, port->base + MVPP22_XLG_CTRL0_REG);
18156b10bfc5SAntoine Tenart }
18166b10bfc5SAntoine Tenart
1817db9d7d36SMaxime Chevallier val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
1818db9d7d36SMaxime Chevallier val &= ~(MVPP2_GMAC_PORT_EN_MASK);
1819db9d7d36SMaxime Chevallier writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
1820db9d7d36SMaxime Chevallier }
1821db9d7d36SMaxime Chevallier
1822db9d7d36SMaxime Chevallier /* Set IEEE 802.3x Flow Control Xon Packet Transmission Mode */
mvpp2_port_periodic_xon_disable(struct mvpp2_port * port)1823db9d7d36SMaxime Chevallier static void mvpp2_port_periodic_xon_disable(struct mvpp2_port *port)
1824db9d7d36SMaxime Chevallier {
1825db9d7d36SMaxime Chevallier u32 val;
1826db9d7d36SMaxime Chevallier
1827db9d7d36SMaxime Chevallier val = readl(port->base + MVPP2_GMAC_CTRL_1_REG) &
1828db9d7d36SMaxime Chevallier ~MVPP2_GMAC_PERIODIC_XON_EN_MASK;
1829db9d7d36SMaxime Chevallier writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
1830db9d7d36SMaxime Chevallier }
1831db9d7d36SMaxime Chevallier
1832db9d7d36SMaxime Chevallier /* Configure loopback port */
mvpp2_port_loopback_set(struct mvpp2_port * port,const struct phylink_link_state * state)1833db9d7d36SMaxime Chevallier static void mvpp2_port_loopback_set(struct mvpp2_port *port,
1834db9d7d36SMaxime Chevallier const struct phylink_link_state *state)
1835db9d7d36SMaxime Chevallier {
1836db9d7d36SMaxime Chevallier u32 val;
1837db9d7d36SMaxime Chevallier
1838db9d7d36SMaxime Chevallier val = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
1839db9d7d36SMaxime Chevallier
1840db9d7d36SMaxime Chevallier if (state->speed == 1000)
1841db9d7d36SMaxime Chevallier val |= MVPP2_GMAC_GMII_LB_EN_MASK;
1842db9d7d36SMaxime Chevallier else
1843db9d7d36SMaxime Chevallier val &= ~MVPP2_GMAC_GMII_LB_EN_MASK;
1844db9d7d36SMaxime Chevallier
184594bfe438SRussell King if (phy_interface_mode_is_8023z(state->interface) ||
184694bfe438SRussell King state->interface == PHY_INTERFACE_MODE_SGMII)
1847db9d7d36SMaxime Chevallier val |= MVPP2_GMAC_PCS_LB_EN_MASK;
1848db9d7d36SMaxime Chevallier else
1849db9d7d36SMaxime Chevallier val &= ~MVPP2_GMAC_PCS_LB_EN_MASK;
1850db9d7d36SMaxime Chevallier
1851db9d7d36SMaxime Chevallier writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
1852db9d7d36SMaxime Chevallier }
1853db9d7d36SMaxime Chevallier
/* Indices for the XDP software counters. They are stored in the ->offset
 * field of the mvpp2_ethtool_xdp[] entries and switched on when filling
 * the stats buffer in mvpp2_read_stats().
 */
enum {
	ETHTOOL_XDP_REDIRECT,
	ETHTOOL_XDP_PASS,
	ETHTOOL_XDP_DROP,
	ETHTOOL_XDP_TX,
	ETHTOOL_XDP_TX_ERR,
	ETHTOOL_XDP_XMIT,
	ETHTOOL_XDP_XMIT_ERR,
};
186339b96315SSven Auhagen
/* Descriptor for one ethtool statistic entry */
struct mvpp2_ethtool_counter {
	unsigned int offset;	/* register offset, or an ETHTOOL_XDP_* index */
	const char string[ETH_GSTRING_LEN]; /* name reported to userspace,
					     * may contain a %d queue slot
					     */
	bool reg_is_64b;	/* counter spans two consecutive 32-bit regs */
};
1869db9d7d36SMaxime Chevallier
mvpp2_read_count(struct mvpp2_port * port,const struct mvpp2_ethtool_counter * counter)1870db9d7d36SMaxime Chevallier static u64 mvpp2_read_count(struct mvpp2_port *port,
1871db9d7d36SMaxime Chevallier const struct mvpp2_ethtool_counter *counter)
1872db9d7d36SMaxime Chevallier {
1873db9d7d36SMaxime Chevallier u64 val;
1874db9d7d36SMaxime Chevallier
1875db9d7d36SMaxime Chevallier val = readl(port->stats_base + counter->offset);
1876db9d7d36SMaxime Chevallier if (counter->reg_is_64b)
1877db9d7d36SMaxime Chevallier val += (u64)readl(port->stats_base + counter->offset + 4) << 32;
1878db9d7d36SMaxime Chevallier
1879db9d7d36SMaxime Chevallier return val;
1880db9d7d36SMaxime Chevallier }
1881db9d7d36SMaxime Chevallier
18829bea6897SMaxime Chevallier /* Some counters are accessed indirectly by first writing an index to
18839bea6897SMaxime Chevallier * MVPP2_CTRS_IDX. The index can represent various resources depending on the
18849bea6897SMaxime Chevallier * register we access, it can be a hit counter for some classification tables,
18859bea6897SMaxime Chevallier * a counter specific to a rxq, a txq or a buffer pool.
18869bea6897SMaxime Chevallier */
/* Select @index in MVPP2_CTRS_IDX, then read the counter at @reg.
 * NOTE(review): the write+read pair is not protected here - presumably
 * callers serialize access to the index register; verify at call sites.
 */
static u32 mvpp2_read_index(struct mvpp2 *priv, u32 index, u32 reg)
{
	mvpp2_write(priv, MVPP2_CTRS_IDX, index);
	return mvpp2_read(priv, reg);
}
18929bea6897SMaxime Chevallier
1893db9d7d36SMaxime Chevallier /* Due to the fact that software statistics and hardware statistics are, by
1894db9d7d36SMaxime Chevallier * design, incremented at different moments in the chain of packet processing,
1895db9d7d36SMaxime Chevallier * it is very likely that incoming packets could have been dropped after being
1896db9d7d36SMaxime Chevallier * counted by hardware but before reaching software statistics (most probably
1897e34be16bSJulia Lawall * multicast packets), and in the opposite way, during transmission, FCS bytes
1898db9d7d36SMaxime Chevallier * are added in between as well as TSO skb will be split and header bytes added.
1899db9d7d36SMaxime Chevallier * Hence, statistics gathered from userspace with ifconfig (software) and
1900db9d7d36SMaxime Chevallier * ethtool (hardware) cannot be compared.
1901db9d7d36SMaxime Chevallier */
/* Hardware MIB counters. The strings are userspace-visible ethtool stat
 * names and must not change; entries flagged 'true' are 64-bit counters
 * read as two consecutive 32-bit registers.
 */
static const struct mvpp2_ethtool_counter mvpp2_ethtool_mib_regs[] = {
	{ MVPP2_MIB_GOOD_OCTETS_RCVD, "good_octets_received", true },
	{ MVPP2_MIB_BAD_OCTETS_RCVD, "bad_octets_received" },
	{ MVPP2_MIB_CRC_ERRORS_SENT, "crc_errors_sent" },
	{ MVPP2_MIB_UNICAST_FRAMES_RCVD, "unicast_frames_received" },
	{ MVPP2_MIB_BROADCAST_FRAMES_RCVD, "broadcast_frames_received" },
	{ MVPP2_MIB_MULTICAST_FRAMES_RCVD, "multicast_frames_received" },
	{ MVPP2_MIB_FRAMES_64_OCTETS, "frames_64_octets" },
	{ MVPP2_MIB_FRAMES_65_TO_127_OCTETS, "frames_65_to_127_octet" },
	{ MVPP2_MIB_FRAMES_128_TO_255_OCTETS, "frames_128_to_255_octet" },
	{ MVPP2_MIB_FRAMES_256_TO_511_OCTETS, "frames_256_to_511_octet" },
	{ MVPP2_MIB_FRAMES_512_TO_1023_OCTETS, "frames_512_to_1023_octet" },
	{ MVPP2_MIB_FRAMES_1024_TO_MAX_OCTETS, "frames_1024_to_max_octet" },
	{ MVPP2_MIB_GOOD_OCTETS_SENT, "good_octets_sent", true },
	{ MVPP2_MIB_UNICAST_FRAMES_SENT, "unicast_frames_sent" },
	{ MVPP2_MIB_MULTICAST_FRAMES_SENT, "multicast_frames_sent" },
	{ MVPP2_MIB_BROADCAST_FRAMES_SENT, "broadcast_frames_sent" },
	{ MVPP2_MIB_FC_SENT, "fc_sent" },
	{ MVPP2_MIB_FC_RCVD, "fc_received" },
	{ MVPP2_MIB_RX_FIFO_OVERRUN, "rx_fifo_overrun" },
	{ MVPP2_MIB_UNDERSIZE_RCVD, "undersize_received" },
	{ MVPP2_MIB_FRAGMENTS_RCVD, "fragments_received" },
	{ MVPP2_MIB_OVERSIZE_RCVD, "oversize_received" },
	{ MVPP2_MIB_JABBER_RCVD, "jabber_received" },
	{ MVPP2_MIB_MAC_RCV_ERROR, "mac_receive_error" },
	{ MVPP2_MIB_BAD_CRC_EVENT, "bad_crc_event" },
	{ MVPP2_MIB_COLLISION, "collision" },
	{ MVPP2_MIB_LATE_COLLISION, "late_collision" },
};
1931db9d7d36SMaxime Chevallier
/* Per-port drop counters, read at offset + 4 * port->id (see
 * mvpp2_read_stats()).
 */
static const struct mvpp2_ethtool_counter mvpp2_ethtool_port_regs[] = {
	{ MVPP2_OVERRUN_ETH_DROP, "rx_fifo_or_parser_overrun_drops" },
	{ MVPP2_CLS_ETH_DROP, "rx_classifier_drops" },
};
19369bea6897SMaxime Chevallier
/* Per-txq counters, read indirectly through MVPP2_CTRS_IDX. The %d in
 * each string is filled with the queue number by snprintf() in
 * mvpp2_ethtool_get_strings(). Note: the "euqueue" typo below is kept
 * as-is since these names are reported to userspace.
 */
static const struct mvpp2_ethtool_counter mvpp2_ethtool_txq_regs[] = {
	{ MVPP2_TX_DESC_ENQ_CTR, "txq_%d_desc_enqueue" },
	{ MVPP2_TX_DESC_ENQ_TO_DDR_CTR, "txq_%d_desc_enqueue_to_ddr" },
	{ MVPP2_TX_BUFF_ENQ_TO_DDR_CTR, "txq_%d_buff_euqueue_to_ddr" },
	{ MVPP2_TX_DESC_ENQ_HW_FWD_CTR, "txq_%d_desc_hardware_forwarded" },
	{ MVPP2_TX_PKTS_DEQ_CTR, "txq_%d_packets_dequeued" },
	{ MVPP2_TX_PKTS_FULL_QUEUE_DROP_CTR, "txq_%d_queue_full_drops" },
	{ MVPP2_TX_PKTS_EARLY_DROP_CTR, "txq_%d_packets_early_drops" },
	{ MVPP2_TX_PKTS_BM_DROP_CTR, "txq_%d_packets_bm_drops" },
	{ MVPP2_TX_PKTS_BM_MC_DROP_CTR, "txq_%d_packets_rep_bm_drops" },
};
19489bea6897SMaxime Chevallier
/* Per-rxq counters, read indirectly with the global rxq number
 * (port->first_rxq + q) as the index.
 */
static const struct mvpp2_ethtool_counter mvpp2_ethtool_rxq_regs[] = {
	{ MVPP2_RX_DESC_ENQ_CTR, "rxq_%d_desc_enqueue" },
	{ MVPP2_RX_PKTS_FULL_QUEUE_DROP_CTR, "rxq_%d_queue_full_drops" },
	{ MVPP2_RX_PKTS_EARLY_DROP_CTR, "rxq_%d_packets_early_drops" },
	{ MVPP2_RX_PKTS_BM_DROP_CTR, "rxq_%d_packets_bm_drops" },
};
19559bea6897SMaxime Chevallier
/* XDP software counters: here ->offset is an ETHTOOL_XDP_* index into
 * the per-cpu stats, not a register offset.
 */
static const struct mvpp2_ethtool_counter mvpp2_ethtool_xdp[] = {
	{ ETHTOOL_XDP_REDIRECT, "rx_xdp_redirect", },
	{ ETHTOOL_XDP_PASS, "rx_xdp_pass", },
	{ ETHTOOL_XDP_DROP, "rx_xdp_drop", },
	{ ETHTOOL_XDP_TX, "rx_xdp_tx", },
	{ ETHTOOL_XDP_TX_ERR, "rx_xdp_tx_errors", },
	{ ETHTOOL_XDP_XMIT, "tx_xdp_xmit", },
	{ ETHTOOL_XDP_XMIT_ERR, "tx_xdp_xmit_errors", },
};
196539b96315SSven Auhagen
/* Total number of ethtool statistics for a port with ntxqs/nrxqs queues:
 * all MIB and port counters, plus one set of txq/rxq counters per queue,
 * plus the XDP software counters.
 */
#define MVPP2_N_ETHTOOL_STATS(ntxqs, nrxqs)	(ARRAY_SIZE(mvpp2_ethtool_mib_regs) + \
						 ARRAY_SIZE(mvpp2_ethtool_port_regs) + \
						 (ARRAY_SIZE(mvpp2_ethtool_txq_regs) * (ntxqs)) + \
						 (ARRAY_SIZE(mvpp2_ethtool_rxq_regs) * (nrxqs)) + \
						 ARRAY_SIZE(mvpp2_ethtool_xdp))
19719bea6897SMaxime Chevallier
/* ethtool ->get_strings(): emit the statistic names in the exact order
 * mvpp2_read_stats() fills the values - MIB regs, port regs, per-txq,
 * per-rxq, then the XDP counters.
 */
static void mvpp2_ethtool_get_strings(struct net_device *netdev, u32 sset,
				      u8 *data)
{
	struct mvpp2_port *port = netdev_priv(netdev);
	int n, q;

	if (sset != ETH_SS_STATS)
		return;

	for (n = 0; n < ARRAY_SIZE(mvpp2_ethtool_mib_regs); n++) {
		strscpy(data, mvpp2_ethtool_mib_regs[n].string,
			ETH_GSTRING_LEN);
		data += ETH_GSTRING_LEN;
	}

	for (n = 0; n < ARRAY_SIZE(mvpp2_ethtool_port_regs); n++) {
		strscpy(data, mvpp2_ethtool_port_regs[n].string,
			ETH_GSTRING_LEN);
		data += ETH_GSTRING_LEN;
	}

	/* Queue names carry the queue number via their %d placeholder */
	for (q = 0; q < port->ntxqs; q++) {
		for (n = 0; n < ARRAY_SIZE(mvpp2_ethtool_txq_regs); n++) {
			snprintf(data, ETH_GSTRING_LEN,
				 mvpp2_ethtool_txq_regs[n].string, q);
			data += ETH_GSTRING_LEN;
		}
	}

	for (q = 0; q < port->nrxqs; q++) {
		for (n = 0; n < ARRAY_SIZE(mvpp2_ethtool_rxq_regs); n++) {
			snprintf(data, ETH_GSTRING_LEN,
				 mvpp2_ethtool_rxq_regs[n].string, q);
			data += ETH_GSTRING_LEN;
		}
	}

	for (n = 0; n < ARRAY_SIZE(mvpp2_ethtool_xdp); n++) {
		strscpy(data, mvpp2_ethtool_xdp[n].string, ETH_GSTRING_LEN);
		data += ETH_GSTRING_LEN;
	}
}
201639b96315SSven Auhagen
/* Sum the per-cpu XDP counters of @port into @xdp_stats. Each cpu's set
 * is snapshotted under its u64_stats seqcount and retried on a concurrent
 * writer, so every per-cpu contribution is internally consistent.
 */
static void
mvpp2_get_xdp_stats(struct mvpp2_port *port, struct mvpp2_pcpu_stats *xdp_stats)
{
	unsigned int start;
	unsigned int cpu;

	/* Gather XDP Statistics */
	for_each_possible_cpu(cpu) {
		struct mvpp2_pcpu_stats *cpu_stats;
		u64 xdp_redirect;
		u64 xdp_pass;
		u64 xdp_drop;
		u64 xdp_xmit;
		u64 xdp_xmit_err;
		u64 xdp_tx;
		u64 xdp_tx_err;

		cpu_stats = per_cpu_ptr(port->stats, cpu);
		/* Retry the whole snapshot if a writer raced with us */
		do {
			start = u64_stats_fetch_begin(&cpu_stats->syncp);
			xdp_redirect = cpu_stats->xdp_redirect;
			xdp_pass   = cpu_stats->xdp_pass;
			xdp_drop = cpu_stats->xdp_drop;
			xdp_xmit = cpu_stats->xdp_xmit;
			xdp_xmit_err = cpu_stats->xdp_xmit_err;
			xdp_tx   = cpu_stats->xdp_tx;
			xdp_tx_err   = cpu_stats->xdp_tx_err;
		} while (u64_stats_fetch_retry(&cpu_stats->syncp, start));

		xdp_stats->xdp_redirect += xdp_redirect;
		xdp_stats->xdp_pass   += xdp_pass;
		xdp_stats->xdp_drop += xdp_drop;
		xdp_stats->xdp_xmit += xdp_xmit;
		xdp_stats->xdp_xmit_err += xdp_xmit_err;
		xdp_stats->xdp_tx   += xdp_tx;
		xdp_stats->xdp_tx_err   += xdp_tx_err;
	}
}
20559bea6897SMaxime Chevallier
/* Refresh port->ethtool_stats from hardware. The fill order must match
 * the string order emitted by mvpp2_ethtool_get_strings(): MIB regs,
 * port regs, per-txq, per-rxq, then XDP counters. Hardware counters are
 * accumulated (+=) because the MIB registers clear on read; the XDP
 * values are software totals and are assigned (=) instead.
 */
static void mvpp2_read_stats(struct mvpp2_port *port)
{
	struct mvpp2_pcpu_stats xdp_stats = {};
	const struct mvpp2_ethtool_counter *s;
	u64 *pstats;
	int i, q;

	pstats = port->ethtool_stats;

	for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_mib_regs); i++)
		*pstats++ += mvpp2_read_count(port, &mvpp2_ethtool_mib_regs[i]);

	for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_port_regs); i++)
		*pstats++ += mvpp2_read(port->priv,
					mvpp2_ethtool_port_regs[i].offset +
					4 * port->id);

	for (q = 0; q < port->ntxqs; q++)
		for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_txq_regs); i++)
			*pstats++ += mvpp2_read_index(port->priv,
						      MVPP22_CTRS_TX_CTR(port->id, q),
						      mvpp2_ethtool_txq_regs[i].offset);

	/* Rxqs are numbered from 0 from the user standpoint, but not from the
	 * driver's. We need to add the  port->first_rxq offset.
	 */
	for (q = 0; q < port->nrxqs; q++)
		for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_rxq_regs); i++)
			*pstats++ += mvpp2_read_index(port->priv,
						      port->first_rxq + q,
						      mvpp2_ethtool_rxq_regs[i].offset);

	/* Gather XDP Statistics */
	mvpp2_get_xdp_stats(port, &xdp_stats);

	for (i = 0, s = mvpp2_ethtool_xdp;
	     s < mvpp2_ethtool_xdp + ARRAY_SIZE(mvpp2_ethtool_xdp);
	     s++, i++) {
		/* ->offset is an ETHTOOL_XDP_* index for these entries */
		switch (s->offset) {
		case ETHTOOL_XDP_REDIRECT:
			*pstats++ = xdp_stats.xdp_redirect;
			break;
		case ETHTOOL_XDP_PASS:
			*pstats++ = xdp_stats.xdp_pass;
			break;
		case ETHTOOL_XDP_DROP:
			*pstats++ = xdp_stats.xdp_drop;
			break;
		case ETHTOOL_XDP_TX:
			*pstats++ = xdp_stats.xdp_tx;
			break;
		case ETHTOOL_XDP_TX_ERR:
			*pstats++ = xdp_stats.xdp_tx_err;
			break;
		case ETHTOOL_XDP_XMIT:
			*pstats++ = xdp_stats.xdp_xmit;
			break;
		case ETHTOOL_XDP_XMIT_ERR:
			*pstats++ = xdp_stats.xdp_xmit_err;
			break;
		}
	}
}
2119db9d7d36SMaxime Chevallier
mvpp2_gather_hw_statistics(struct work_struct * work)2120db9d7d36SMaxime Chevallier static void mvpp2_gather_hw_statistics(struct work_struct *work)
2121db9d7d36SMaxime Chevallier {
2122db9d7d36SMaxime Chevallier struct delayed_work *del_work = to_delayed_work(work);
2123db9d7d36SMaxime Chevallier struct mvpp2_port *port = container_of(del_work, struct mvpp2_port,
2124db9d7d36SMaxime Chevallier stats_work);
2125db9d7d36SMaxime Chevallier
2126db9d7d36SMaxime Chevallier mutex_lock(&port->gather_stats_lock);
2127db9d7d36SMaxime Chevallier
21289bea6897SMaxime Chevallier mvpp2_read_stats(port);
2129db9d7d36SMaxime Chevallier
2130db9d7d36SMaxime Chevallier /* No need to read again the counters right after this function if it
2131db9d7d36SMaxime Chevallier * was called asynchronously by the user (ie. use of ethtool).
2132db9d7d36SMaxime Chevallier */
2133db9d7d36SMaxime Chevallier cancel_delayed_work(&port->stats_work);
2134db9d7d36SMaxime Chevallier queue_delayed_work(port->priv->stats_queue, &port->stats_work,
2135db9d7d36SMaxime Chevallier MVPP2_MIB_COUNTERS_STATS_DELAY);
2136db9d7d36SMaxime Chevallier
2137db9d7d36SMaxime Chevallier mutex_unlock(&port->gather_stats_lock);
2138db9d7d36SMaxime Chevallier }
2139db9d7d36SMaxime Chevallier
/* ethtool ->get_ethtool_stats(): refresh the counters synchronously,
 * then copy the snapshot out under the lock so a concurrent refresh
 * can't tear it.
 */
static void mvpp2_ethtool_get_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *data)
{
	struct mvpp2_port *port = netdev_priv(dev);
	size_t len = sizeof(u64) *
		     MVPP2_N_ETHTOOL_STATS(port->ntxqs, port->nrxqs);

	mvpp2_gather_hw_statistics(&port->stats_work.work);

	mutex_lock(&port->gather_stats_lock);
	memcpy(data, port->ethtool_stats, len);
	mutex_unlock(&port->gather_stats_lock);
}
2155db9d7d36SMaxime Chevallier
mvpp2_ethtool_get_sset_count(struct net_device * dev,int sset)2156db9d7d36SMaxime Chevallier static int mvpp2_ethtool_get_sset_count(struct net_device *dev, int sset)
2157db9d7d36SMaxime Chevallier {
21589bea6897SMaxime Chevallier struct mvpp2_port *port = netdev_priv(dev);
21599bea6897SMaxime Chevallier
2160db9d7d36SMaxime Chevallier if (sset == ETH_SS_STATS)
21619bea6897SMaxime Chevallier return MVPP2_N_ETHTOOL_STATS(port->ntxqs, port->nrxqs);
2162db9d7d36SMaxime Chevallier
2163db9d7d36SMaxime Chevallier return -EOPNOTSUPP;
2164db9d7d36SMaxime Chevallier }
2165db9d7d36SMaxime Chevallier
mvpp2_mac_reset_assert(struct mvpp2_port * port)2166649e51d5SAntoine Tenart static void mvpp2_mac_reset_assert(struct mvpp2_port *port)
2167db9d7d36SMaxime Chevallier {
2168649e51d5SAntoine Tenart u32 val;
2169db9d7d36SMaxime Chevallier
2170316734fdSRussell King val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) |
2171316734fdSRussell King MVPP2_GMAC_PORT_RESET_MASK;
2172db9d7d36SMaxime Chevallier writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
2173649e51d5SAntoine Tenart
2174f704177eSStefan Chulski if (port->priv->hw_version >= MVPP22 && port->gop_id == 0) {
2175649e51d5SAntoine Tenart val = readl(port->base + MVPP22_XLG_CTRL0_REG) &
2176649e51d5SAntoine Tenart ~MVPP22_XLG_CTRL0_MAC_RESET_DIS;
2177649e51d5SAntoine Tenart writel(val, port->base + MVPP22_XLG_CTRL0_REG);
2178649e51d5SAntoine Tenart }
2179db9d7d36SMaxime Chevallier }
2180db9d7d36SMaxime Chevallier
/* Assert reset on the MPCS and XPCS blocks of the port.
 * Only GoP port 0 on PPv2.2+ has these PCS blocks; other ports are a no-op.
 */
static void mvpp22_pcs_reset_assert(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	void __iomem *mpcs, *xpcs;
	u32 val;

	if (port->priv->hw_version == MVPP21 || port->gop_id != 0)
		return;

	mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id);
	xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id);

	/* MPCS: clear the MAC and SerDes RX/TX clock-enable bits and set
	 * the clock divider bit, stopping the MPCS clocks.
	 */
	val = readl(mpcs + MVPP22_MPCS_CLK_RESET);
	val &= ~(MAC_CLK_RESET_MAC | MAC_CLK_RESET_SD_RX | MAC_CLK_RESET_SD_TX);
	val |= MVPP22_MPCS_CLK_RESET_DIV_SET;
	writel(val, mpcs + MVPP22_MPCS_CLK_RESET);

	/* XPCS reset is active-low: clearing "reset disable" asserts it */
	val = readl(xpcs + MVPP22_XPCS_CFG0);
	writel(val & ~MVPP22_XPCS_CFG0_RESET_DIS, xpcs + MVPP22_XPCS_CFG0);
}
22017409e66eSAntoine Tenart
/* Release the PCS block matching the given interface mode from reset:
 * the MPCS for 5G/10G base-R modes, the XPCS for (R)XAUI. Only GoP
 * port 0 on PPv2.2+ has these blocks; other ports/modes are a no-op.
 */
static void mvpp22_pcs_reset_deassert(struct mvpp2_port *port,
				      phy_interface_t interface)
{
	struct mvpp2 *priv = port->priv;
	void __iomem *mpcs, *xpcs;
	u32 val;

	if (port->priv->hw_version == MVPP21 || port->gop_id != 0)
		return;

	mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id);
	xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id);

	switch (interface) {
	case PHY_INTERFACE_MODE_5GBASER:
	case PHY_INTERFACE_MODE_10GBASER:
		/* Re-enable the MPCS MAC/SerDes clocks and clear the
		 * divider bit set by mvpp22_pcs_reset_assert().
		 */
		val = readl(mpcs + MVPP22_MPCS_CLK_RESET);
		val |= MAC_CLK_RESET_MAC | MAC_CLK_RESET_SD_RX |
		       MAC_CLK_RESET_SD_TX;
		val &= ~MVPP22_MPCS_CLK_RESET_DIV_SET;
		writel(val, mpcs + MVPP22_MPCS_CLK_RESET);
		break;
	case PHY_INTERFACE_MODE_XAUI:
	case PHY_INTERFACE_MODE_RXAUI:
		/* XPCS reset is active-low: set "reset disable" to release */
		val = readl(xpcs + MVPP22_XPCS_CFG0);
		writel(val | MVPP22_XPCS_CFG0_RESET_DIS, xpcs + MVPP22_XPCS_CFG0);
		break;
	default:
		break;
	}
}
22337409e66eSAntoine Tenart
/* Change maximum receive size of the port */
static inline void mvpp2_gmac_max_rx_size_set(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK;
	/* Programmed value is (pkt_size - Marvell header) / 2 — the
	 * register field appears to be in 2-byte units; TODO confirm
	 * against the PPv2 register spec.
	 */
	val |= (((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
		MVPP2_GMAC_MAX_RX_SIZE_OFFS);
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
}
2245db9d7d36SMaxime Chevallier
/* Change maximum receive size of the port (XLG MAC variant) */
static inline void mvpp2_xlg_max_rx_size_set(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP22_XLG_CTRL1_REG);
	val &= ~MVPP22_XLG_CTRL1_FRAMESIZELIMIT_MASK;
	/* Same (pkt_size - MH) / 2 encoding as the GMAC frame-size limit */
	val |= ((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
	       MVPP22_XLG_CTRL1_FRAMESIZELIMIT_OFFS;
	writel(val, port->base + MVPP22_XLG_CTRL1_REG);
}
2257db9d7d36SMaxime Chevallier
/* Set defaults to the MVPP2 port: egress scheduler, token buckets,
 * Rx snooping, and interrupt masking.
 */
static void mvpp2_defaults_set(struct mvpp2_port *port)
{
	int tx_port_num, val, queue, lrxq;

	if (port->priv->hw_version == MVPP21) {
		/* Update TX FIFO MIN Threshold */
		val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
		val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
		/* Min. TX threshold must be less than minimal packet length */
		val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2);
		writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
	}

	/* Disable Legacy WRR, Disable EJP, Release from reset.
	 * The PORT_INDEX write selects which egress port the following
	 * indirect TXP_SCHED accesses target.
	 */
	tx_port_num = mvpp2_egress_port(port);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG,
		    tx_port_num);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0);

	/* Set TXQ scheduling to Round-Robin */
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_FIXED_PRIO_REG, 0);

	/* Close bandwidth for all queues */
	for (queue = 0; queue < MVPP2_MAX_TXQ; queue++)
		mvpp2_write(port->priv,
			    MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(queue), 0);

	/* Set refill period to 1 usec, refill tokens
	 * and bucket size to maximum
	 */
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PERIOD_REG,
		    port->priv->tclk / USEC_PER_SEC);
	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_REFILL_REG);
	val &= ~MVPP2_TXP_REFILL_PERIOD_ALL_MASK;
	val |= MVPP2_TXP_REFILL_PERIOD_MASK(1);
	val |= MVPP2_TXP_REFILL_TOKENS_ALL_MASK;
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_REFILL_REG, val);
	val = MVPP2_TXP_TOKEN_SIZE_MAX;
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);

	/* Set MaximumLowLatencyPacketSize value to 256 */
	mvpp2_write(port->priv, MVPP2_RX_CTRL_REG(port->id),
		    MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK |
		    MVPP2_RX_LOW_LATENCY_PKT_SIZE(256));

	/* Enable Rx cache snoop on every RXQ of this port */
	for (lrxq = 0; lrxq < port->nrxqs; lrxq++) {
		queue = port->rxqs[lrxq]->id;
		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
		val |= MVPP2_SNOOP_PKT_SIZE_MASK |
		       MVPP2_SNOOP_BUF_HDR_MASK;
		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
	}

	/* At default, mask all interrupts to all present cpus */
	mvpp2_interrupts_disable(port);
}
2316db9d7d36SMaxime Chevallier
2317db9d7d36SMaxime Chevallier /* Enable/disable receiving packets */
mvpp2_ingress_enable(struct mvpp2_port * port)2318db9d7d36SMaxime Chevallier static void mvpp2_ingress_enable(struct mvpp2_port *port)
2319db9d7d36SMaxime Chevallier {
2320db9d7d36SMaxime Chevallier u32 val;
2321db9d7d36SMaxime Chevallier int lrxq, queue;
2322db9d7d36SMaxime Chevallier
2323db9d7d36SMaxime Chevallier for (lrxq = 0; lrxq < port->nrxqs; lrxq++) {
2324db9d7d36SMaxime Chevallier queue = port->rxqs[lrxq]->id;
2325db9d7d36SMaxime Chevallier val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
2326db9d7d36SMaxime Chevallier val &= ~MVPP2_RXQ_DISABLE_MASK;
2327db9d7d36SMaxime Chevallier mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
2328db9d7d36SMaxime Chevallier }
2329db9d7d36SMaxime Chevallier }
2330db9d7d36SMaxime Chevallier
mvpp2_ingress_disable(struct mvpp2_port * port)2331db9d7d36SMaxime Chevallier static void mvpp2_ingress_disable(struct mvpp2_port *port)
2332db9d7d36SMaxime Chevallier {
2333db9d7d36SMaxime Chevallier u32 val;
2334db9d7d36SMaxime Chevallier int lrxq, queue;
2335db9d7d36SMaxime Chevallier
2336db9d7d36SMaxime Chevallier for (lrxq = 0; lrxq < port->nrxqs; lrxq++) {
2337db9d7d36SMaxime Chevallier queue = port->rxqs[lrxq]->id;
2338db9d7d36SMaxime Chevallier val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
2339db9d7d36SMaxime Chevallier val |= MVPP2_RXQ_DISABLE_MASK;
2340db9d7d36SMaxime Chevallier mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
2341db9d7d36SMaxime Chevallier }
2342db9d7d36SMaxime Chevallier }
2343db9d7d36SMaxime Chevallier
2344db9d7d36SMaxime Chevallier /* Enable transmit via physical egress queue
2345db9d7d36SMaxime Chevallier * - HW starts take descriptors from DRAM
2346db9d7d36SMaxime Chevallier */
mvpp2_egress_enable(struct mvpp2_port * port)2347db9d7d36SMaxime Chevallier static void mvpp2_egress_enable(struct mvpp2_port *port)
2348db9d7d36SMaxime Chevallier {
2349db9d7d36SMaxime Chevallier u32 qmap;
2350db9d7d36SMaxime Chevallier int queue;
2351db9d7d36SMaxime Chevallier int tx_port_num = mvpp2_egress_port(port);
2352db9d7d36SMaxime Chevallier
2353db9d7d36SMaxime Chevallier /* Enable all initialized TXs. */
2354db9d7d36SMaxime Chevallier qmap = 0;
2355db9d7d36SMaxime Chevallier for (queue = 0; queue < port->ntxqs; queue++) {
2356db9d7d36SMaxime Chevallier struct mvpp2_tx_queue *txq = port->txqs[queue];
2357db9d7d36SMaxime Chevallier
2358db9d7d36SMaxime Chevallier if (txq->descs)
2359db9d7d36SMaxime Chevallier qmap |= (1 << queue);
2360db9d7d36SMaxime Chevallier }
2361db9d7d36SMaxime Chevallier
2362db9d7d36SMaxime Chevallier mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
2363db9d7d36SMaxime Chevallier mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, qmap);
2364db9d7d36SMaxime Chevallier }
2365db9d7d36SMaxime Chevallier
/* Disable transmit via physical egress queue
 * - HW doesn't take descriptors from DRAM
 */
static void mvpp2_egress_disable(struct mvpp2_port *port)
{
	u32 reg_data;
	int delay;
	int tx_port_num = mvpp2_egress_port(port);

	/* Issue stop command for active channels only: the currently
	 * enabled queue bits are shifted into the "disable" field.
	 */
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
	reg_data = (mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG)) &
		    MVPP2_TXP_SCHED_ENQ_MASK;
	if (reg_data != 0)
		mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG,
			    (reg_data << MVPP2_TXP_SCHED_DISQ_OFFSET));

	/* Wait for all Tx activity to terminate: busy-poll in 1 ms steps
	 * until the enable mask reads back clear or the timeout expires.
	 */
	delay = 0;
	do {
		if (delay >= MVPP2_TX_DISABLE_TIMEOUT_MSEC) {
			netdev_warn(port->dev,
				    "Tx stop timed out, status=0x%08x\n",
				    reg_data);
			break;
		}
		mdelay(1);
		delay++;

		/* Check port TX Command register that all
		 * Tx queues are stopped
		 */
		reg_data = mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG);
	} while (reg_data & MVPP2_TXP_SCHED_ENQ_MASK);
}
2401db9d7d36SMaxime Chevallier
2402db9d7d36SMaxime Chevallier /* Rx descriptors helper methods */
2403db9d7d36SMaxime Chevallier
2404db9d7d36SMaxime Chevallier /* Get number of Rx descriptors occupied by received packets */
2405db9d7d36SMaxime Chevallier static inline int
mvpp2_rxq_received(struct mvpp2_port * port,int rxq_id)2406db9d7d36SMaxime Chevallier mvpp2_rxq_received(struct mvpp2_port *port, int rxq_id)
2407db9d7d36SMaxime Chevallier {
2408db9d7d36SMaxime Chevallier u32 val = mvpp2_read(port->priv, MVPP2_RXQ_STATUS_REG(rxq_id));
2409db9d7d36SMaxime Chevallier
2410db9d7d36SMaxime Chevallier return val & MVPP2_RXQ_OCCUPIED_MASK;
2411db9d7d36SMaxime Chevallier }
2412db9d7d36SMaxime Chevallier
2413db9d7d36SMaxime Chevallier /* Update Rx queue status with the number of occupied and available
2414db9d7d36SMaxime Chevallier * Rx descriptor slots.
2415db9d7d36SMaxime Chevallier */
2416db9d7d36SMaxime Chevallier static inline void
mvpp2_rxq_status_update(struct mvpp2_port * port,int rxq_id,int used_count,int free_count)2417db9d7d36SMaxime Chevallier mvpp2_rxq_status_update(struct mvpp2_port *port, int rxq_id,
2418db9d7d36SMaxime Chevallier int used_count, int free_count)
2419db9d7d36SMaxime Chevallier {
2420db9d7d36SMaxime Chevallier /* Decrement the number of used descriptors and increment count
2421db9d7d36SMaxime Chevallier * increment the number of free descriptors.
2422db9d7d36SMaxime Chevallier */
2423db9d7d36SMaxime Chevallier u32 val = used_count | (free_count << MVPP2_RXQ_NUM_NEW_OFFSET);
2424db9d7d36SMaxime Chevallier
2425db9d7d36SMaxime Chevallier mvpp2_write(port->priv, MVPP2_RXQ_STATUS_UPDATE_REG(rxq_id), val);
2426db9d7d36SMaxime Chevallier }
2427db9d7d36SMaxime Chevallier
2428db9d7d36SMaxime Chevallier /* Get pointer to next RX descriptor to be processed by SW */
2429db9d7d36SMaxime Chevallier static inline struct mvpp2_rx_desc *
mvpp2_rxq_next_desc_get(struct mvpp2_rx_queue * rxq)2430db9d7d36SMaxime Chevallier mvpp2_rxq_next_desc_get(struct mvpp2_rx_queue *rxq)
2431db9d7d36SMaxime Chevallier {
2432db9d7d36SMaxime Chevallier int rx_desc = rxq->next_desc_to_proc;
2433db9d7d36SMaxime Chevallier
2434db9d7d36SMaxime Chevallier rxq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(rxq, rx_desc);
2435db9d7d36SMaxime Chevallier prefetch(rxq->descs + rxq->next_desc_to_proc);
2436db9d7d36SMaxime Chevallier return rxq->descs + rx_desc;
2437db9d7d36SMaxime Chevallier }
2438db9d7d36SMaxime Chevallier
2439db9d7d36SMaxime Chevallier /* Set rx queue offset */
mvpp2_rxq_offset_set(struct mvpp2_port * port,int prxq,int offset)2440db9d7d36SMaxime Chevallier static void mvpp2_rxq_offset_set(struct mvpp2_port *port,
2441db9d7d36SMaxime Chevallier int prxq, int offset)
2442db9d7d36SMaxime Chevallier {
2443db9d7d36SMaxime Chevallier u32 val;
2444db9d7d36SMaxime Chevallier
2445db9d7d36SMaxime Chevallier /* Convert offset from bytes to units of 32 bytes */
2446db9d7d36SMaxime Chevallier offset = offset >> 5;
2447db9d7d36SMaxime Chevallier
2448db9d7d36SMaxime Chevallier val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
2449db9d7d36SMaxime Chevallier val &= ~MVPP2_RXQ_PACKET_OFFSET_MASK;
2450db9d7d36SMaxime Chevallier
2451db9d7d36SMaxime Chevallier /* Offset is in */
2452db9d7d36SMaxime Chevallier val |= ((offset << MVPP2_RXQ_PACKET_OFFSET_OFFS) &
2453db9d7d36SMaxime Chevallier MVPP2_RXQ_PACKET_OFFSET_MASK);
2454db9d7d36SMaxime Chevallier
2455db9d7d36SMaxime Chevallier mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
2456db9d7d36SMaxime Chevallier }
2457db9d7d36SMaxime Chevallier
2458db9d7d36SMaxime Chevallier /* Tx descriptors helper methods */
2459db9d7d36SMaxime Chevallier
2460db9d7d36SMaxime Chevallier /* Get pointer to next Tx descriptor to be processed (send) by HW */
2461db9d7d36SMaxime Chevallier static struct mvpp2_tx_desc *
mvpp2_txq_next_desc_get(struct mvpp2_tx_queue * txq)2462db9d7d36SMaxime Chevallier mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq)
2463db9d7d36SMaxime Chevallier {
2464db9d7d36SMaxime Chevallier int tx_desc = txq->next_desc_to_proc;
2465db9d7d36SMaxime Chevallier
2466db9d7d36SMaxime Chevallier txq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(txq, tx_desc);
2467db9d7d36SMaxime Chevallier return txq->descs + tx_desc;
2468db9d7d36SMaxime Chevallier }
2469db9d7d36SMaxime Chevallier
/* Update HW with number of aggregated Tx descriptors to be sent
 *
 * Called only from mvpp2_tx(), so migration is disabled, using
 * smp_processor_id() is OK.
 */
static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending)
{
	/* aggregated access - relevant TXQ number is written in TX desc.
	 * The write targets the per-thread register bank of the current CPU.
	 */
	mvpp2_thread_write(port->priv,
			   mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
			   MVPP2_AGGR_TXQ_UPDATE_REG, pending);
}
2482db9d7d36SMaxime Chevallier
2483db9d7d36SMaxime Chevallier /* Check if there are enough free descriptors in aggregated txq.
2484db9d7d36SMaxime Chevallier * If not, update the number of occupied descriptors and repeat the check.
2485db9d7d36SMaxime Chevallier *
2486db9d7d36SMaxime Chevallier * Called only from mvpp2_tx(), so migration is disabled, using
2487db9d7d36SMaxime Chevallier * smp_processor_id() is OK.
2488db9d7d36SMaxime Chevallier */
mvpp2_aggr_desc_num_check(struct mvpp2_port * port,struct mvpp2_tx_queue * aggr_txq,int num)2489e531f767SAntoine Tenart static int mvpp2_aggr_desc_num_check(struct mvpp2_port *port,
2490db9d7d36SMaxime Chevallier struct mvpp2_tx_queue *aggr_txq, int num)
2491db9d7d36SMaxime Chevallier {
2492db9d7d36SMaxime Chevallier if ((aggr_txq->count + num) > MVPP2_AGGR_TXQ_SIZE) {
2493db9d7d36SMaxime Chevallier /* Update number of occupied aggregated Tx descriptors */
2494e531f767SAntoine Tenart unsigned int thread =
2495e531f767SAntoine Tenart mvpp2_cpu_to_thread(port->priv, smp_processor_id());
2496e531f767SAntoine Tenart u32 val = mvpp2_read_relaxed(port->priv,
2497543ec376SAntoine Tenart MVPP2_AGGR_TXQ_STATUS_REG(thread));
2498db9d7d36SMaxime Chevallier
2499db9d7d36SMaxime Chevallier aggr_txq->count = val & MVPP2_AGGR_TXQ_PENDING_MASK;
2500db9d7d36SMaxime Chevallier
2501db9d7d36SMaxime Chevallier if ((aggr_txq->count + num) > MVPP2_AGGR_TXQ_SIZE)
2502db9d7d36SMaxime Chevallier return -ENOMEM;
2503db9d7d36SMaxime Chevallier }
2504db9d7d36SMaxime Chevallier return 0;
2505db9d7d36SMaxime Chevallier }
2506db9d7d36SMaxime Chevallier
/* Reserved Tx descriptors allocation request
 *
 * Called only from mvpp2_txq_reserved_desc_num_proc(), itself called
 * only by mvpp2_tx(), so migration is disabled, using
 * smp_processor_id() is OK.
 */
static int mvpp2_txq_alloc_reserved_desc(struct mvpp2_port *port,
					 struct mvpp2_tx_queue *txq, int num)
{
	unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
	struct mvpp2 *priv = port->priv;
	u32 val;

	/* Ask HW to reserve 'num' descriptors on this TXQ... */
	val = (txq->id << MVPP2_TXQ_RSVD_REQ_Q_OFFSET) | num;
	mvpp2_thread_write_relaxed(priv, thread, MVPP2_TXQ_RSVD_REQ_REG, val);

	/* ...then read back how many were actually granted */
	val = mvpp2_thread_read_relaxed(priv, thread, MVPP2_TXQ_RSVD_RSLT_REG);

	return val & MVPP2_TXQ_RSVD_RSLT_MASK;
}
2527db9d7d36SMaxime Chevallier
/* Check if there are enough reserved descriptors for transmission.
 * If not, request chunk of reserved descriptors and check again.
 * Returns 0 on success, -ENOMEM when the reservation cannot be satisfied.
 */
static int mvpp2_txq_reserved_desc_num_proc(struct mvpp2_port *port,
					    struct mvpp2_tx_queue *txq,
					    struct mvpp2_txq_pcpu *txq_pcpu,
					    int num)
{
	int req, desc_count;
	unsigned int thread;

	if (txq_pcpu->reserved_num >= num)
		return 0;

	/* Not enough descriptors reserved! Update the reserved descriptor
	 * count and check again.
	 */

	desc_count = 0;
	/* Compute total of used descriptors across all threads */
	for (thread = 0; thread < port->priv->nthreads; thread++) {
		struct mvpp2_txq_pcpu *txq_pcpu_aux;

		txq_pcpu_aux = per_cpu_ptr(txq->pcpu, thread);
		desc_count += txq_pcpu_aux->count;
		desc_count += txq_pcpu_aux->reserved_num;
	}

	/* Request at least a full chunk to amortize the HW round-trip */
	req = max(MVPP2_CPU_DESC_CHUNK, num - txq_pcpu->reserved_num);
	desc_count += req;

	/* Refuse when the queue can't keep one spare chunk per thread */
	if (desc_count >
	   (txq->size - (MVPP2_MAX_THREADS * MVPP2_CPU_DESC_CHUNK)))
		return -ENOMEM;

	txq_pcpu->reserved_num += mvpp2_txq_alloc_reserved_desc(port, txq, req);

	/* OK, the descriptor could have been updated: check again. */
	if (txq_pcpu->reserved_num < num)
		return -ENOMEM;
	return 0;
}
2570db9d7d36SMaxime Chevallier
2571db9d7d36SMaxime Chevallier /* Release the last allocated Tx descriptor. Useful to handle DMA
2572db9d7d36SMaxime Chevallier * mapping failures in the Tx path.
2573db9d7d36SMaxime Chevallier */
mvpp2_txq_desc_put(struct mvpp2_tx_queue * txq)2574db9d7d36SMaxime Chevallier static void mvpp2_txq_desc_put(struct mvpp2_tx_queue *txq)
2575db9d7d36SMaxime Chevallier {
2576db9d7d36SMaxime Chevallier if (txq->next_desc_to_proc == 0)
2577db9d7d36SMaxime Chevallier txq->next_desc_to_proc = txq->last_desc - 1;
2578db9d7d36SMaxime Chevallier else
2579db9d7d36SMaxime Chevallier txq->next_desc_to_proc--;
2580db9d7d36SMaxime Chevallier }
2581db9d7d36SMaxime Chevallier
2582db9d7d36SMaxime Chevallier /* Set Tx descriptors fields relevant for CSUM calculation */
mvpp2_txq_desc_csum(int l3_offs,__be16 l3_proto,int ip_hdr_len,int l4_proto)258335f3625cSMaxime Chevallier static u32 mvpp2_txq_desc_csum(int l3_offs, __be16 l3_proto,
2584db9d7d36SMaxime Chevallier int ip_hdr_len, int l4_proto)
2585db9d7d36SMaxime Chevallier {
2586db9d7d36SMaxime Chevallier u32 command;
2587db9d7d36SMaxime Chevallier
2588db9d7d36SMaxime Chevallier /* fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
2589db9d7d36SMaxime Chevallier * G_L4_chk, L4_type required only for checksum calculation
2590db9d7d36SMaxime Chevallier */
2591db9d7d36SMaxime Chevallier command = (l3_offs << MVPP2_TXD_L3_OFF_SHIFT);
2592db9d7d36SMaxime Chevallier command |= (ip_hdr_len << MVPP2_TXD_IP_HLEN_SHIFT);
2593db9d7d36SMaxime Chevallier command |= MVPP2_TXD_IP_CSUM_DISABLE;
2594db9d7d36SMaxime Chevallier
2595dc734dbeSMaxime Chevallier if (l3_proto == htons(ETH_P_IP)) {
2596db9d7d36SMaxime Chevallier command &= ~MVPP2_TXD_IP_CSUM_DISABLE; /* enable IPv4 csum */
2597db9d7d36SMaxime Chevallier command &= ~MVPP2_TXD_L3_IP6; /* enable IPv4 */
2598db9d7d36SMaxime Chevallier } else {
2599db9d7d36SMaxime Chevallier command |= MVPP2_TXD_L3_IP6; /* enable IPv6 */
2600db9d7d36SMaxime Chevallier }
2601db9d7d36SMaxime Chevallier
2602db9d7d36SMaxime Chevallier if (l4_proto == IPPROTO_TCP) {
2603db9d7d36SMaxime Chevallier command &= ~MVPP2_TXD_L4_UDP; /* enable TCP */
2604db9d7d36SMaxime Chevallier command &= ~MVPP2_TXD_L4_CSUM_FRAG; /* generate L4 csum */
2605db9d7d36SMaxime Chevallier } else if (l4_proto == IPPROTO_UDP) {
2606db9d7d36SMaxime Chevallier command |= MVPP2_TXD_L4_UDP; /* enable UDP */
2607db9d7d36SMaxime Chevallier command &= ~MVPP2_TXD_L4_CSUM_FRAG; /* generate L4 csum */
2608db9d7d36SMaxime Chevallier } else {
2609db9d7d36SMaxime Chevallier command |= MVPP2_TXD_L4_CSUM_NOT;
2610db9d7d36SMaxime Chevallier }
2611db9d7d36SMaxime Chevallier
2612db9d7d36SMaxime Chevallier return command;
2613db9d7d36SMaxime Chevallier }
2614db9d7d36SMaxime Chevallier
/* Get number of sent descriptors and decrement counter.
 * The number of sent descriptors is returned.
 * Per-thread access
 *
 * Called only from mvpp2_txq_done(), called from mvpp2_tx()
 * (migration disabled) and from the TX completion tasklet (migration
 * disabled) so using smp_processor_id() is OK.
 */
static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port,
					   struct mvpp2_tx_queue *txq)
{
	u32 val;

	/* Reading status reg resets transmitted descriptor counter */
	val = mvpp2_thread_read_relaxed(port->priv,
			mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
			MVPP2_TXQ_SENT_REG(txq->id));

	/* Extract the transmitted-descriptor count field */
	return (val & MVPP2_TRANSMITTED_COUNT_MASK) >>
		MVPP2_TRANSMITTED_COUNT_OFFSET;
}
2636db9d7d36SMaxime Chevallier
2637db9d7d36SMaxime Chevallier /* Called through on_each_cpu(), so runs on all CPUs, with migration
2638db9d7d36SMaxime Chevallier * disabled, therefore using smp_processor_id() is OK.
2639db9d7d36SMaxime Chevallier */
mvpp2_txq_sent_counter_clear(void * arg)2640db9d7d36SMaxime Chevallier static void mvpp2_txq_sent_counter_clear(void *arg)
2641db9d7d36SMaxime Chevallier {
2642db9d7d36SMaxime Chevallier struct mvpp2_port *port = arg;
2643db9d7d36SMaxime Chevallier int queue;
2644db9d7d36SMaxime Chevallier
2645e531f767SAntoine Tenart /* If the thread isn't used, don't do anything */
26467867299cSStefan Chulski if (smp_processor_id() >= port->priv->nthreads)
2647e531f767SAntoine Tenart return;
2648e531f767SAntoine Tenart
2649db9d7d36SMaxime Chevallier for (queue = 0; queue < port->ntxqs; queue++) {
2650db9d7d36SMaxime Chevallier int id = port->txqs[queue]->id;
2651db9d7d36SMaxime Chevallier
26521068549cSAntoine Tenart mvpp2_thread_read(port->priv,
2653e531f767SAntoine Tenart mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
2654db9d7d36SMaxime Chevallier MVPP2_TXQ_SENT_REG(id));
2655db9d7d36SMaxime Chevallier }
2656db9d7d36SMaxime Chevallier }
2657db9d7d36SMaxime Chevallier
/* Set max sizes for Tx queues: program the egress scheduler MTU and
 * make sure the port-level and per-queue token bucket sizes cover it.
 */
static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port)
{
	u32 val, size, mtu;
	int txq, tx_port_num;

	/* pkt_size * 8 — the scheduler MTU field appears to be in bits;
	 * TODO confirm against the PPv2 register spec.
	 */
	mtu = port->pkt_size * 8;
	if (mtu > MVPP2_TXP_MTU_MAX)
		mtu = MVPP2_TXP_MTU_MAX;

	/* WA for wrong Token bucket update: Set MTU value = 3*real MTU value */
	mtu = 3 * mtu;

	/* Indirect access to registers: select this egress port first */
	tx_port_num = mvpp2_egress_port(port);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);

	/* Set MTU */
	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_MTU_REG);
	val &= ~MVPP2_TXP_MTU_MAX;
	val |= mtu;
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_MTU_REG, val);

	/* TXP token size and all TXQs token size must be larger that MTU */
	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG);
	size = val & MVPP2_TXP_TOKEN_SIZE_MAX;
	if (size < mtu) {
		size = mtu;
		val &= ~MVPP2_TXP_TOKEN_SIZE_MAX;
		val |= size;
		mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
	}

	/* Grow any per-queue token bucket that is below the new MTU */
	for (txq = 0; txq < port->ntxqs; txq++) {
		val = mvpp2_read(port->priv,
				 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq));
		size = val & MVPP2_TXQ_TOKEN_SIZE_MAX;

		if (size < mtu) {
			size = mtu;
			val &= ~MVPP2_TXQ_TOKEN_SIZE_MAX;
			val |= size;
			mvpp2_write(port->priv,
				    MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq),
				    val);
		}
	}
}
2706db9d7d36SMaxime Chevallier
2707bf270fa3SStefan Chulski /* Set the number of non-occupied descriptors threshold */
mvpp2_set_rxq_free_tresh(struct mvpp2_port * port,struct mvpp2_rx_queue * rxq)2708bf270fa3SStefan Chulski static void mvpp2_set_rxq_free_tresh(struct mvpp2_port *port,
2709bf270fa3SStefan Chulski struct mvpp2_rx_queue *rxq)
2710bf270fa3SStefan Chulski {
2711bf270fa3SStefan Chulski u32 val;
2712bf270fa3SStefan Chulski
 /* Indirect access: select @rxq before touching per-queue registers */
2713bf270fa3SStefan Chulski mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
2714bf270fa3SStefan Chulski
 /* Read-modify-write only the NON_OCCUPIED field of the threshold reg,
  * programming MSS_THRESHOLD_STOP as the free-descriptor watermark.
  */
2715bf270fa3SStefan Chulski val = mvpp2_read(port->priv, MVPP2_RXQ_THRESH_REG);
2716bf270fa3SStefan Chulski val &= ~MVPP2_RXQ_NON_OCCUPIED_MASK;
2717bf270fa3SStefan Chulski val |= MSS_THRESHOLD_STOP << MVPP2_RXQ_NON_OCCUPIED_OFFSET;
2718bf270fa3SStefan Chulski mvpp2_write(port->priv, MVPP2_RXQ_THRESH_REG, val);
2719bf270fa3SStefan Chulski }
2720bf270fa3SStefan Chulski
2721db9d7d36SMaxime Chevallier /* Set the number of packets that will be received before Rx interrupt
2722db9d7d36SMaxime Chevallier * will be generated by HW.
2723db9d7d36SMaxime Chevallier */
mvpp2_rx_pkts_coal_set(struct mvpp2_port * port,struct mvpp2_rx_queue * rxq)2724db9d7d36SMaxime Chevallier static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port,
2725db9d7d36SMaxime Chevallier struct mvpp2_rx_queue *rxq)
2726db9d7d36SMaxime Chevallier {
 /* get_cpu() disables preemption so the thread-local (indirect)
  * register window stays ours until put_cpu() below.
  */
2727e531f767SAntoine Tenart unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
2728db9d7d36SMaxime Chevallier
 /* Clamp the requested value to what the register field can hold */
2729db9d7d36SMaxime Chevallier if (rxq->pkts_coal > MVPP2_OCCUPIED_THRESH_MASK)
2730db9d7d36SMaxime Chevallier rxq->pkts_coal = MVPP2_OCCUPIED_THRESH_MASK;
2731db9d7d36SMaxime Chevallier
 /* Select the queue, then program its occupied-descriptor threshold */
2732db9d7d36SMaxime Chevallier mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id);
2733db9d7d36SMaxime Chevallier mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_THRESH_REG,
2734db9d7d36SMaxime Chevallier rxq->pkts_coal);
2735db9d7d36SMaxime Chevallier
2736db9d7d36SMaxime Chevallier put_cpu();
2737db9d7d36SMaxime Chevallier }
2738db9d7d36SMaxime Chevallier
2739db9d7d36SMaxime Chevallier /* For some reason in the LSP this is done on each CPU. Why ? */
 /* Set the Tx-done packet coalescing threshold for @txq.
  * Clamps txq->done_pkts_coal to the register field width, then writes
  * the threshold for every HW thread (the registers are banked
  * per-queue AND per-thread, so each thread bank must be programmed).
  */
mvpp2_tx_pkts_coal_set(struct mvpp2_port * port,struct mvpp2_tx_queue * txq)2740db9d7d36SMaxime Chevallier static void mvpp2_tx_pkts_coal_set(struct mvpp2_port *port,
2741db9d7d36SMaxime Chevallier struct mvpp2_tx_queue *txq)
2742db9d7d36SMaxime Chevallier {
2743db9d7d36SMaxime Chevallier unsigned int thread;
2744db9d7d36SMaxime Chevallier u32 val;
2745db9d7d36SMaxime Chevallier
2746db9d7d36SMaxime Chevallier if (txq->done_pkts_coal > MVPP2_TXQ_THRESH_MASK)
2747db9d7d36SMaxime Chevallier txq->done_pkts_coal = MVPP2_TXQ_THRESH_MASK;
2748db9d7d36SMaxime Chevallier
2749db9d7d36SMaxime Chevallier val = (txq->done_pkts_coal << MVPP2_TXQ_THRESH_OFFSET);
2750db9d7d36SMaxime Chevallier /* PKT-coalescing registers are per-queue + per-thread */
2751db9d7d36SMaxime Chevallier for (thread = 0; thread < MVPP2_MAX_THREADS; thread++) {
 /* Select the queue in this thread's bank, then set the threshold */
2752db9d7d36SMaxime Chevallier mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
2753db9d7d36SMaxime Chevallier mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_THRESH_REG, val);
2754db9d7d36SMaxime Chevallier }
2755db9d7d36SMaxime Chevallier }
2756db9d7d36SMaxime Chevallier
/* Convert a duration in microseconds to clock cycles at @clk_hz,
 * saturating at U32_MAX.  Uses do_div() so the 64-bit division is
 * valid on 32-bit platforms as well.
 */
static u32 mvpp2_usec_to_cycles(u32 usec, unsigned long clk_hz)
{
	u64 cycles = (u64)clk_hz * usec;

	do_div(cycles, USEC_PER_SEC);

	if (cycles > U32_MAX)
		return U32_MAX;

	return cycles;
}
2765db9d7d36SMaxime Chevallier
/* Convert a clock-cycle count at @clk_hz back to microseconds,
 * saturating at U32_MAX.  Inverse of mvpp2_usec_to_cycles().
 */
static u32 mvpp2_cycles_to_usec(u32 cycles, unsigned long clk_hz)
{
	u64 usecs = (u64)cycles * USEC_PER_SEC;

	do_div(usecs, clk_hz);

	if (usecs > U32_MAX)
		return U32_MAX;

	return usecs;
}
2774db9d7d36SMaxime Chevallier
2775db9d7d36SMaxime Chevallier /* Set the time delay in usec before Rx interrupt */
mvpp2_rx_time_coal_set(struct mvpp2_port * port,struct mvpp2_rx_queue * rxq)2776db9d7d36SMaxime Chevallier static void mvpp2_rx_time_coal_set(struct mvpp2_port *port,
2777db9d7d36SMaxime Chevallier struct mvpp2_rx_queue *rxq)
2778db9d7d36SMaxime Chevallier {
2779db9d7d36SMaxime Chevallier unsigned long freq = port->priv->tclk;
2780db9d7d36SMaxime Chevallier u32 val = mvpp2_usec_to_cycles(rxq->time_coal, freq);
2781db9d7d36SMaxime Chevallier
 /* If the request exceeds the HW maximum, clamp rxq->time_coal (so
  * ethtool reads back the value actually in effect) and recompute.
  */
2782db9d7d36SMaxime Chevallier if (val > MVPP2_MAX_ISR_RX_THRESHOLD) {
2783db9d7d36SMaxime Chevallier rxq->time_coal =
2784db9d7d36SMaxime Chevallier mvpp2_cycles_to_usec(MVPP2_MAX_ISR_RX_THRESHOLD, freq);
2785db9d7d36SMaxime Chevallier
2786db9d7d36SMaxime Chevallier /* re-evaluate to get actual register value */
2787db9d7d36SMaxime Chevallier val = mvpp2_usec_to_cycles(rxq->time_coal, freq);
2788db9d7d36SMaxime Chevallier }
2789db9d7d36SMaxime Chevallier
2790db9d7d36SMaxime Chevallier mvpp2_write(port->priv, MVPP2_ISR_RX_THRESHOLD_REG(rxq->id), val);
2791db9d7d36SMaxime Chevallier }
2792db9d7d36SMaxime Chevallier
 /* Set the time delay in usec before a Tx-done interrupt; per-port
  * (not per-queue) register, mirrors mvpp2_rx_time_coal_set().
  */
mvpp2_tx_time_coal_set(struct mvpp2_port * port)2793db9d7d36SMaxime Chevallier static void mvpp2_tx_time_coal_set(struct mvpp2_port *port)
2794db9d7d36SMaxime Chevallier {
2795db9d7d36SMaxime Chevallier unsigned long freq = port->priv->tclk;
2796db9d7d36SMaxime Chevallier u32 val = mvpp2_usec_to_cycles(port->tx_time_coal, freq);
2797db9d7d36SMaxime Chevallier
 /* Clamp to the HW maximum and keep tx_time_coal in sync with it */
2798db9d7d36SMaxime Chevallier if (val > MVPP2_MAX_ISR_TX_THRESHOLD) {
2799db9d7d36SMaxime Chevallier port->tx_time_coal =
2800db9d7d36SMaxime Chevallier mvpp2_cycles_to_usec(MVPP2_MAX_ISR_TX_THRESHOLD, freq);
2801db9d7d36SMaxime Chevallier
2802db9d7d36SMaxime Chevallier /* re-evaluate to get actual register value */
2803db9d7d36SMaxime Chevallier val = mvpp2_usec_to_cycles(port->tx_time_coal, freq);
2804db9d7d36SMaxime Chevallier }
2805db9d7d36SMaxime Chevallier
2806db9d7d36SMaxime Chevallier mvpp2_write(port->priv, MVPP2_ISR_TX_THRESHOLD_REG(port->id), val);
2807db9d7d36SMaxime Chevallier }
2808db9d7d36SMaxime Chevallier
2809db9d7d36SMaxime Chevallier /* Free Tx queue skbuffs */
 /* Release the @num oldest completed buffers of @txq on this thread:
  * DMA-unmap where needed, then free the skb or return the XDP frame.
  */
mvpp2_txq_bufs_free(struct mvpp2_port * port,struct mvpp2_tx_queue * txq,struct mvpp2_txq_pcpu * txq_pcpu,int num)2810db9d7d36SMaxime Chevallier static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
2811db9d7d36SMaxime Chevallier struct mvpp2_tx_queue *txq,
2812db9d7d36SMaxime Chevallier struct mvpp2_txq_pcpu *txq_pcpu, int num)
2813db9d7d36SMaxime Chevallier {
2814dbef19ccSLorenzo Bianconi struct xdp_frame_bulk bq;
2815db9d7d36SMaxime Chevallier int i;
2816db9d7d36SMaxime Chevallier
2817dbef19ccSLorenzo Bianconi xdp_frame_bulk_init(&bq);
2818dbef19ccSLorenzo Bianconi
2819dbef19ccSLorenzo Bianconi rcu_read_lock(); /* need for xdp_return_frame_bulk */
2820dbef19ccSLorenzo Bianconi
2821db9d7d36SMaxime Chevallier for (i = 0; i < num; i++) {
2822db9d7d36SMaxime Chevallier struct mvpp2_txq_pcpu_buf *tx_buf =
2823db9d7d36SMaxime Chevallier txq_pcpu->buffs + txq_pcpu->txq_get_index;
2824db9d7d36SMaxime Chevallier
 /* TSO headers live in a coherent buffer and XDP_TX buffers were
  * not mapped by us, so neither is unmapped here.
  */
2825c2d6fe61SMatteo Croce if (!IS_TSO_HEADER(txq_pcpu, tx_buf->dma) &&
2826c2d6fe61SMatteo Croce tx_buf->type != MVPP2_TYPE_XDP_TX)
2827db9d7d36SMaxime Chevallier dma_unmap_single(port->dev->dev.parent, tx_buf->dma,
2828db9d7d36SMaxime Chevallier tx_buf->size, DMA_TO_DEVICE);
2829c2d6fe61SMatteo Croce if (tx_buf->type == MVPP2_TYPE_SKB && tx_buf->skb)
2830db9d7d36SMaxime Chevallier dev_kfree_skb_any(tx_buf->skb);
2831c2d6fe61SMatteo Croce else if (tx_buf->type == MVPP2_TYPE_XDP_TX ||
2832c2d6fe61SMatteo Croce tx_buf->type == MVPP2_TYPE_XDP_NDO)
2833dbef19ccSLorenzo Bianconi xdp_return_frame_bulk(tx_buf->xdpf, &bq);
2834db9d7d36SMaxime Chevallier
2835db9d7d36SMaxime Chevallier mvpp2_txq_inc_get(txq_pcpu);
2836db9d7d36SMaxime Chevallier }
 /* Flush any frames still batched in @bq before leaving the RCU
  * read-side section they depend on.
  */
2837dbef19ccSLorenzo Bianconi xdp_flush_frame_bulk(&bq);
2838dbef19ccSLorenzo Bianconi
2839dbef19ccSLorenzo Bianconi rcu_read_unlock();
2840db9d7d36SMaxime Chevallier }
2841db9d7d36SMaxime Chevallier
/* Return the Rx queue matching the highest bit set in the interrupt
 * cause bitmap @cause (bit N <-> port->rxqs[N]).
 */
static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port,
							u32 cause)
{
	return port->rxqs[fls(cause) - 1];
}
2849db9d7d36SMaxime Chevallier
/* Return the Tx queue matching the highest bit set in the interrupt
 * cause bitmap @cause (bit N <-> port->txqs[N]).
 */
static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port,
							u32 cause)
{
	int log_id = fls(cause) - 1;

	return port->txqs[log_id];
}
2857db9d7d36SMaxime Chevallier
2858db9d7d36SMaxime Chevallier /* Handle end of transmission */
 /* Reclaim completed descriptors of @txq on the current thread and wake
  * the netdev queue if the per-cpu count drops below the wake threshold.
  */
mvpp2_txq_done(struct mvpp2_port * port,struct mvpp2_tx_queue * txq,struct mvpp2_txq_pcpu * txq_pcpu)2859db9d7d36SMaxime Chevallier static void mvpp2_txq_done(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
2860db9d7d36SMaxime Chevallier struct mvpp2_txq_pcpu *txq_pcpu)
2861db9d7d36SMaxime Chevallier {
2862db9d7d36SMaxime Chevallier struct netdev_queue *nq = netdev_get_tx_queue(port->dev, txq->log_id);
2863db9d7d36SMaxime Chevallier int tx_done;
2864db9d7d36SMaxime Chevallier
 /* Sanity check: per-cpu state must belong to the executing thread */
2865e531f767SAntoine Tenart if (txq_pcpu->thread != mvpp2_cpu_to_thread(port->priv, smp_processor_id()))
2866db9d7d36SMaxime Chevallier netdev_err(port->dev, "wrong cpu on the end of Tx processing\n");
2867db9d7d36SMaxime Chevallier
2868db9d7d36SMaxime Chevallier tx_done = mvpp2_txq_sent_desc_proc(port, txq);
2869db9d7d36SMaxime Chevallier if (!tx_done)
2870db9d7d36SMaxime Chevallier return;
2871db9d7d36SMaxime Chevallier mvpp2_txq_bufs_free(port, txq, txq_pcpu, tx_done);
2872db9d7d36SMaxime Chevallier
2873db9d7d36SMaxime Chevallier txq_pcpu->count -= tx_done;
2874db9d7d36SMaxime Chevallier
2875db9d7d36SMaxime Chevallier if (netif_tx_queue_stopped(nq))
2876db9d7d36SMaxime Chevallier if (txq_pcpu->count <= txq_pcpu->wake_threshold)
2877db9d7d36SMaxime Chevallier netif_tx_wake_queue(nq);
2878db9d7d36SMaxime Chevallier }
2879db9d7d36SMaxime Chevallier
 /* Process Tx-done for every queue flagged in @cause on @thread.
  * Returns the number of packets still pending after reclamation, so
  * the caller can decide whether to re-arm polling.
  */
mvpp2_tx_done(struct mvpp2_port * port,u32 cause,unsigned int thread)2880db9d7d36SMaxime Chevallier static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause,
2881543ec376SAntoine Tenart unsigned int thread)
2882db9d7d36SMaxime Chevallier {
2883db9d7d36SMaxime Chevallier struct mvpp2_tx_queue *txq;
2884db9d7d36SMaxime Chevallier struct mvpp2_txq_pcpu *txq_pcpu;
2885db9d7d36SMaxime Chevallier unsigned int tx_todo = 0;
2886db9d7d36SMaxime Chevallier
 /* Iterate highest-set-bit first, clearing each queue's bit as done */
2887db9d7d36SMaxime Chevallier while (cause) {
2888db9d7d36SMaxime Chevallier txq = mvpp2_get_tx_queue(port, cause);
2889db9d7d36SMaxime Chevallier if (!txq)
2890db9d7d36SMaxime Chevallier break;
2891db9d7d36SMaxime Chevallier
2892543ec376SAntoine Tenart txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
2893db9d7d36SMaxime Chevallier
2894db9d7d36SMaxime Chevallier if (txq_pcpu->count) {
2895db9d7d36SMaxime Chevallier mvpp2_txq_done(port, txq, txq_pcpu);
2896db9d7d36SMaxime Chevallier tx_todo += txq_pcpu->count;
2897db9d7d36SMaxime Chevallier }
2898db9d7d36SMaxime Chevallier
2899db9d7d36SMaxime Chevallier cause &= ~(1 << txq->log_id);
2900db9d7d36SMaxime Chevallier }
2901db9d7d36SMaxime Chevallier return tx_todo;
2902db9d7d36SMaxime Chevallier }
2903db9d7d36SMaxime Chevallier
2904db9d7d36SMaxime Chevallier /* Rx/Tx queue initialization/cleanup methods */
2905db9d7d36SMaxime Chevallier
2906db9d7d36SMaxime Chevallier /* Allocate and initialize descriptors for aggr TXQ */
 /* Returns 0 on success or -ENOMEM if the descriptor ring cannot be
  * allocated.  The coherent allocation is devm-free'd with the device.
  */
mvpp2_aggr_txq_init(struct platform_device * pdev,struct mvpp2_tx_queue * aggr_txq,unsigned int thread,struct mvpp2 * priv)2907db9d7d36SMaxime Chevallier static int mvpp2_aggr_txq_init(struct platform_device *pdev,
2908850623b3SAntoine Tenart struct mvpp2_tx_queue *aggr_txq,
2909543ec376SAntoine Tenart unsigned int thread, struct mvpp2 *priv)
2910db9d7d36SMaxime Chevallier {
2911db9d7d36SMaxime Chevallier u32 txq_dma;
2912db9d7d36SMaxime Chevallier
2913db9d7d36SMaxime Chevallier /* Allocate memory for TX descriptors */
2914750afb08SLuis Chamberlain aggr_txq->descs = dma_alloc_coherent(&pdev->dev,
2915db9d7d36SMaxime Chevallier MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
2916db9d7d36SMaxime Chevallier &aggr_txq->descs_dma, GFP_KERNEL);
2917db9d7d36SMaxime Chevallier if (!aggr_txq->descs)
2918db9d7d36SMaxime Chevallier return -ENOMEM;
2919db9d7d36SMaxime Chevallier
2920db9d7d36SMaxime Chevallier aggr_txq->last_desc = MVPP2_AGGR_TXQ_SIZE - 1;
2921db9d7d36SMaxime Chevallier
2922db9d7d36SMaxime Chevallier /* Aggr TXQ no reset WA */
 /* Workaround: the HW index is not reset, so resync our software
  * next-descriptor pointer from the register instead of assuming 0.
  */
2923db9d7d36SMaxime Chevallier aggr_txq->next_desc_to_proc = mvpp2_read(priv,
2924543ec376SAntoine Tenart MVPP2_AGGR_TXQ_INDEX_REG(thread));
2925db9d7d36SMaxime Chevallier
2926db9d7d36SMaxime Chevallier /* Set Tx descriptors queue starting address indirect
2927db9d7d36SMaxime Chevallier * access
2928db9d7d36SMaxime Chevallier */
 /* MVPP22+ registers hold a shifted (narrower) DMA address */
2929db9d7d36SMaxime Chevallier if (priv->hw_version == MVPP21)
2930db9d7d36SMaxime Chevallier txq_dma = aggr_txq->descs_dma;
2931db9d7d36SMaxime Chevallier else
2932db9d7d36SMaxime Chevallier txq_dma = aggr_txq->descs_dma >>
2933db9d7d36SMaxime Chevallier MVPP22_AGGR_TXQ_DESC_ADDR_OFFS;
2934db9d7d36SMaxime Chevallier
2935543ec376SAntoine Tenart mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(thread), txq_dma);
2936543ec376SAntoine Tenart mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(thread),
2937db9d7d36SMaxime Chevallier MVPP2_AGGR_TXQ_SIZE);
2938db9d7d36SMaxime Chevallier
2939db9d7d36SMaxime Chevallier return 0;
2940db9d7d36SMaxime Chevallier }
2941db9d7d36SMaxime Chevallier
2942db9d7d36SMaxime Chevallier /* Create a specified Rx queue */
 /* Allocates the descriptor ring, programs the per-queue registers and
  * (with per-cpu pools) registers the short/long XDP rxq infos.
  * Returns 0 or a negative errno; on failure everything acquired here
  * is unwound via the goto chain at the bottom.
  */
mvpp2_rxq_init(struct mvpp2_port * port,struct mvpp2_rx_queue * rxq)2943db9d7d36SMaxime Chevallier static int mvpp2_rxq_init(struct mvpp2_port *port,
2944db9d7d36SMaxime Chevallier struct mvpp2_rx_queue *rxq)
2945db9d7d36SMaxime Chevallier {
2946b27db227SMatteo Croce struct mvpp2 *priv = port->priv;
2947543ec376SAntoine Tenart unsigned int thread;
2948db9d7d36SMaxime Chevallier u32 rxq_dma;
2949b27db227SMatteo Croce int err;
2950db9d7d36SMaxime Chevallier
2951db9d7d36SMaxime Chevallier rxq->size = port->rx_ring_size;
2952db9d7d36SMaxime Chevallier
2953db9d7d36SMaxime Chevallier /* Allocate memory for RX descriptors */
2954db9d7d36SMaxime Chevallier rxq->descs = dma_alloc_coherent(port->dev->dev.parent,
2955db9d7d36SMaxime Chevallier rxq->size * MVPP2_DESC_ALIGNED_SIZE,
2956db9d7d36SMaxime Chevallier &rxq->descs_dma, GFP_KERNEL);
2957db9d7d36SMaxime Chevallier if (!rxq->descs)
2958db9d7d36SMaxime Chevallier return -ENOMEM;
2959db9d7d36SMaxime Chevallier
2960db9d7d36SMaxime Chevallier rxq->last_desc = rxq->size - 1;
2961db9d7d36SMaxime Chevallier
2962db9d7d36SMaxime Chevallier /* Zero occupied and non-occupied counters - direct access */
2963db9d7d36SMaxime Chevallier mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
2964db9d7d36SMaxime Chevallier
2965db9d7d36SMaxime Chevallier /* Set Rx descriptors queue starting address - indirect access */
 /* get_cpu() keeps us on one thread window until put_cpu() below */
2966e531f767SAntoine Tenart thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
2967db9d7d36SMaxime Chevallier mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id);
 /* MVPP22+ registers hold a shifted (narrower) DMA address */
2968db9d7d36SMaxime Chevallier if (port->priv->hw_version == MVPP21)
2969db9d7d36SMaxime Chevallier rxq_dma = rxq->descs_dma;
2970db9d7d36SMaxime Chevallier else
2971db9d7d36SMaxime Chevallier rxq_dma = rxq->descs_dma >> MVPP22_DESC_ADDR_OFFS;
2972db9d7d36SMaxime Chevallier mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma);
2973db9d7d36SMaxime Chevallier mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_SIZE_REG, rxq->size);
2974db9d7d36SMaxime Chevallier mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_INDEX_REG, 0);
2975db9d7d36SMaxime Chevallier put_cpu();
2976db9d7d36SMaxime Chevallier
2977db9d7d36SMaxime Chevallier /* Set Offset */
2978db9d7d36SMaxime Chevallier mvpp2_rxq_offset_set(port, rxq->id, MVPP2_SKB_HEADROOM);
2979db9d7d36SMaxime Chevallier
2980db9d7d36SMaxime Chevallier /* Set coalescing pkts and time */
2981db9d7d36SMaxime Chevallier mvpp2_rx_pkts_coal_set(port, rxq);
2982db9d7d36SMaxime Chevallier mvpp2_rx_time_coal_set(port, rxq);
2983db9d7d36SMaxime Chevallier
2984bf270fa3SStefan Chulski /* Set the number of non occupied descriptors threshold */
2985bf270fa3SStefan Chulski mvpp2_set_rxq_free_tresh(port, rxq);
2986bf270fa3SStefan Chulski
2987db9d7d36SMaxime Chevallier /* Add number of descriptors ready for receiving packets */
2988db9d7d36SMaxime Chevallier mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size);
2989db9d7d36SMaxime Chevallier
2990b27db227SMatteo Croce if (priv->percpu_pools) {
2991a50e659bSLouis Amas err = xdp_rxq_info_reg(&rxq->xdp_rxq_short, port->dev, rxq->logic_rxq, 0);
2992b27db227SMatteo Croce if (err < 0)
2993b27db227SMatteo Croce goto err_free_dma;
2994b27db227SMatteo Croce
2995a50e659bSLouis Amas err = xdp_rxq_info_reg(&rxq->xdp_rxq_long, port->dev, rxq->logic_rxq, 0);
2996b27db227SMatteo Croce if (err < 0)
2997b27db227SMatteo Croce goto err_unregister_rxq_short;
2998b27db227SMatteo Croce
2999b27db227SMatteo Croce /* Every RXQ has a pool for short and another for long packets */
 /* Long-packet pools are stored after the nrxqs short pools */
3000b27db227SMatteo Croce err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq_short,
3001b27db227SMatteo Croce MEM_TYPE_PAGE_POOL,
3002b27db227SMatteo Croce priv->page_pool[rxq->logic_rxq]);
3003b27db227SMatteo Croce if (err < 0)
3004b27db227SMatteo Croce goto err_unregister_rxq_long;
3005b27db227SMatteo Croce
3006b27db227SMatteo Croce err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq_long,
3007b27db227SMatteo Croce MEM_TYPE_PAGE_POOL,
3008b27db227SMatteo Croce priv->page_pool[rxq->logic_rxq +
3009b27db227SMatteo Croce port->nrxqs]);
3010b27db227SMatteo Croce if (err < 0)
3011b27db227SMatteo Croce goto err_unregister_mem_rxq_short;
3012b27db227SMatteo Croce }
3013b27db227SMatteo Croce
3014db9d7d36SMaxime Chevallier return 0;
3015b27db227SMatteo Croce
 /* Error unwinding: undo in the reverse order of acquisition */
3016b27db227SMatteo Croce err_unregister_mem_rxq_short:
3017b27db227SMatteo Croce xdp_rxq_info_unreg_mem_model(&rxq->xdp_rxq_short);
3018b27db227SMatteo Croce err_unregister_rxq_long:
3019b27db227SMatteo Croce xdp_rxq_info_unreg(&rxq->xdp_rxq_long);
3020b27db227SMatteo Croce err_unregister_rxq_short:
3021b27db227SMatteo Croce xdp_rxq_info_unreg(&rxq->xdp_rxq_short);
3022b27db227SMatteo Croce err_free_dma:
3023b27db227SMatteo Croce dma_free_coherent(port->dev->dev.parent,
3024b27db227SMatteo Croce rxq->size * MVPP2_DESC_ALIGNED_SIZE,
3025b27db227SMatteo Croce rxq->descs, rxq->descs_dma);
3026b27db227SMatteo Croce return err;
3027db9d7d36SMaxime Chevallier }
3028db9d7d36SMaxime Chevallier
3029db9d7d36SMaxime Chevallier /* Push packets received by the RXQ to BM pool */
 /* Drain every pending Rx descriptor of @rxq, returning each buffer to
  * the BM pool it was drawn from (pool id is encoded in the rx status).
  */
mvpp2_rxq_drop_pkts(struct mvpp2_port * port,struct mvpp2_rx_queue * rxq)3030db9d7d36SMaxime Chevallier static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port,
3031db9d7d36SMaxime Chevallier struct mvpp2_rx_queue *rxq)
3032db9d7d36SMaxime Chevallier {
3033db9d7d36SMaxime Chevallier int rx_received, i;
3034db9d7d36SMaxime Chevallier
3035db9d7d36SMaxime Chevallier rx_received = mvpp2_rxq_received(port, rxq->id);
3036db9d7d36SMaxime Chevallier if (!rx_received)
3037db9d7d36SMaxime Chevallier return;
3038db9d7d36SMaxime Chevallier
3039db9d7d36SMaxime Chevallier for (i = 0; i < rx_received; i++) {
3040db9d7d36SMaxime Chevallier struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
3041db9d7d36SMaxime Chevallier u32 status = mvpp2_rxdesc_status_get(port, rx_desc);
3042db9d7d36SMaxime Chevallier int pool;
3043db9d7d36SMaxime Chevallier
3044db9d7d36SMaxime Chevallier pool = (status & MVPP2_RXD_BM_POOL_ID_MASK) >>
3045db9d7d36SMaxime Chevallier MVPP2_RXD_BM_POOL_ID_OFFS;
3046db9d7d36SMaxime Chevallier
3047db9d7d36SMaxime Chevallier mvpp2_bm_pool_put(port, pool,
3048db9d7d36SMaxime Chevallier mvpp2_rxdesc_dma_addr_get(port, rx_desc),
3049db9d7d36SMaxime Chevallier mvpp2_rxdesc_cookie_get(port, rx_desc));
3050db9d7d36SMaxime Chevallier }
 /* Tell the HW all rx_received descriptors were processed/refilled */
3051db9d7d36SMaxime Chevallier mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received);
3052db9d7d36SMaxime Chevallier }
3053db9d7d36SMaxime Chevallier
3054db9d7d36SMaxime Chevallier /* Cleanup Rx queue */
 /* Reverse of mvpp2_rxq_init(): unregister XDP infos, drain pending
  * buffers, free the descriptor ring and clear the HW registers.
  * Safe on a partially initialized queue (descs/xdp regs are checked).
  */
mvpp2_rxq_deinit(struct mvpp2_port * port,struct mvpp2_rx_queue * rxq)3055db9d7d36SMaxime Chevallier static void mvpp2_rxq_deinit(struct mvpp2_port *port,
3056db9d7d36SMaxime Chevallier struct mvpp2_rx_queue *rxq)
3057db9d7d36SMaxime Chevallier {
3058543ec376SAntoine Tenart unsigned int thread;
3059db9d7d36SMaxime Chevallier
3060b27db227SMatteo Croce if (xdp_rxq_info_is_reg(&rxq->xdp_rxq_short))
3061b27db227SMatteo Croce xdp_rxq_info_unreg(&rxq->xdp_rxq_short);
3062b27db227SMatteo Croce
3063b27db227SMatteo Croce if (xdp_rxq_info_is_reg(&rxq->xdp_rxq_long))
3064b27db227SMatteo Croce xdp_rxq_info_unreg(&rxq->xdp_rxq_long);
3065b27db227SMatteo Croce
3066db9d7d36SMaxime Chevallier mvpp2_rxq_drop_pkts(port, rxq);
3067db9d7d36SMaxime Chevallier
3068db9d7d36SMaxime Chevallier if (rxq->descs)
3069db9d7d36SMaxime Chevallier dma_free_coherent(port->dev->dev.parent,
3070db9d7d36SMaxime Chevallier rxq->size * MVPP2_DESC_ALIGNED_SIZE,
3071db9d7d36SMaxime Chevallier rxq->descs,
3072db9d7d36SMaxime Chevallier rxq->descs_dma);
3073db9d7d36SMaxime Chevallier
3074db9d7d36SMaxime Chevallier rxq->descs = NULL;
3075db9d7d36SMaxime Chevallier rxq->last_desc = 0;
3076db9d7d36SMaxime Chevallier rxq->next_desc_to_proc = 0;
3077db9d7d36SMaxime Chevallier rxq->descs_dma = 0;
3078db9d7d36SMaxime Chevallier
3079db9d7d36SMaxime Chevallier /* Clear Rx descriptors queue starting address and size;
3080db9d7d36SMaxime Chevallier * free descriptor number
3081db9d7d36SMaxime Chevallier */
3082db9d7d36SMaxime Chevallier mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
 /* get_cpu()/put_cpu() bound the indirect register access window */
3083e531f767SAntoine Tenart thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
3084db9d7d36SMaxime Chevallier mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id);
3085db9d7d36SMaxime Chevallier mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_ADDR_REG, 0);
3086db9d7d36SMaxime Chevallier mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_SIZE_REG, 0);
3087db9d7d36SMaxime Chevallier put_cpu();
3088db9d7d36SMaxime Chevallier }
3089db9d7d36SMaxime Chevallier
3090db9d7d36SMaxime Chevallier /* Create and initialize a Tx queue */
 /* Allocates the descriptor ring, programs per-queue registers,
  * scheduler (WRR/EJP) defaults and per-thread state (buffer tracking
  * array + coherent TSO header area).  Returns 0 or -ENOMEM; on
  * failure the caller is expected to unwind via mvpp2_txq_deinit(),
  * which tolerates partially initialized per-thread state.
  */
mvpp2_txq_init(struct mvpp2_port * port,struct mvpp2_tx_queue * txq)3091db9d7d36SMaxime Chevallier static int mvpp2_txq_init(struct mvpp2_port *port,
3092db9d7d36SMaxime Chevallier struct mvpp2_tx_queue *txq)
3093db9d7d36SMaxime Chevallier {
3094db9d7d36SMaxime Chevallier u32 val;
3095074c74dfSAntoine Tenart unsigned int thread;
3096850623b3SAntoine Tenart int desc, desc_per_txq, tx_port_num;
3097db9d7d36SMaxime Chevallier struct mvpp2_txq_pcpu *txq_pcpu;
3098db9d7d36SMaxime Chevallier
3099db9d7d36SMaxime Chevallier txq->size = port->tx_ring_size;
3100db9d7d36SMaxime Chevallier
3101db9d7d36SMaxime Chevallier /* Allocate memory for Tx descriptors */
3102db9d7d36SMaxime Chevallier txq->descs = dma_alloc_coherent(port->dev->dev.parent,
3103db9d7d36SMaxime Chevallier txq->size * MVPP2_DESC_ALIGNED_SIZE,
3104db9d7d36SMaxime Chevallier &txq->descs_dma, GFP_KERNEL);
3105db9d7d36SMaxime Chevallier if (!txq->descs)
3106db9d7d36SMaxime Chevallier return -ENOMEM;
3107db9d7d36SMaxime Chevallier
3108db9d7d36SMaxime Chevallier txq->last_desc = txq->size - 1;
3109db9d7d36SMaxime Chevallier
3110db9d7d36SMaxime Chevallier /* Set Tx descriptors queue starting address - indirect access */
 /* get_cpu() pins us to one thread register window until put_cpu() */
3111e531f767SAntoine Tenart thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
3112db9d7d36SMaxime Chevallier mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
3113db9d7d36SMaxime Chevallier mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_ADDR_REG,
3114db9d7d36SMaxime Chevallier txq->descs_dma);
3115db9d7d36SMaxime Chevallier mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_SIZE_REG,
3116db9d7d36SMaxime Chevallier txq->size & MVPP2_TXQ_DESC_SIZE_MASK);
3117db9d7d36SMaxime Chevallier mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_INDEX_REG, 0);
3118db9d7d36SMaxime Chevallier mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_RSVD_CLR_REG,
3119db9d7d36SMaxime Chevallier txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET);
 /* Clear any stale pending-descriptor count */
3120db9d7d36SMaxime Chevallier val = mvpp2_thread_read(port->priv, thread, MVPP2_TXQ_PENDING_REG);
3121db9d7d36SMaxime Chevallier val &= ~MVPP2_TXQ_PENDING_MASK;
3122db9d7d36SMaxime Chevallier mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PENDING_REG, val);
3123db9d7d36SMaxime Chevallier
3124db9d7d36SMaxime Chevallier /* Calculate base address in prefetch buffer. We reserve 16 descriptors
3125db9d7d36SMaxime Chevallier * for each existing TXQ.
3126db9d7d36SMaxime Chevallier * TCONTS for PON port must be continuous from 0 to MVPP2_MAX_TCONT
3127db9d7d36SMaxime Chevallier * GBE ports assumed to be continuous from 0 to MVPP2_MAX_PORTS
3128db9d7d36SMaxime Chevallier */
3129db9d7d36SMaxime Chevallier desc_per_txq = 16;
3130db9d7d36SMaxime Chevallier desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) +
3131db9d7d36SMaxime Chevallier (txq->log_id * desc_per_txq);
3132db9d7d36SMaxime Chevallier
3133db9d7d36SMaxime Chevallier mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG,
3134db9d7d36SMaxime Chevallier MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 |
3135db9d7d36SMaxime Chevallier MVPP2_PREF_BUF_THRESH(desc_per_txq / 2));
3136db9d7d36SMaxime Chevallier put_cpu();
3137db9d7d36SMaxime Chevallier
3138db9d7d36SMaxime Chevallier /* WRR / EJP configuration - indirect access */
3139db9d7d36SMaxime Chevallier tx_port_num = mvpp2_egress_port(port);
3140db9d7d36SMaxime Chevallier mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
3141db9d7d36SMaxime Chevallier
3142db9d7d36SMaxime Chevallier val = mvpp2_read(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id));
3143db9d7d36SMaxime Chevallier val &= ~MVPP2_TXQ_REFILL_PERIOD_ALL_MASK;
3144db9d7d36SMaxime Chevallier val |= MVPP2_TXQ_REFILL_PERIOD_MASK(1);
3145db9d7d36SMaxime Chevallier val |= MVPP2_TXQ_REFILL_TOKENS_ALL_MASK;
3146db9d7d36SMaxime Chevallier mvpp2_write(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id), val);
3147db9d7d36SMaxime Chevallier
3148db9d7d36SMaxime Chevallier val = MVPP2_TXQ_TOKEN_SIZE_MAX;
3149db9d7d36SMaxime Chevallier mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id),
3150db9d7d36SMaxime Chevallier val);
3151db9d7d36SMaxime Chevallier
 /* Per-thread software state: buffer tracking + TSO header storage */
3152e531f767SAntoine Tenart for (thread = 0; thread < port->priv->nthreads; thread++) {
3153074c74dfSAntoine Tenart txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
3154db9d7d36SMaxime Chevallier txq_pcpu->size = txq->size;
3155db9d7d36SMaxime Chevallier txq_pcpu->buffs = kmalloc_array(txq_pcpu->size,
3156db9d7d36SMaxime Chevallier sizeof(*txq_pcpu->buffs),
3157db9d7d36SMaxime Chevallier GFP_KERNEL);
3158db9d7d36SMaxime Chevallier if (!txq_pcpu->buffs)
3159db9d7d36SMaxime Chevallier return -ENOMEM;
3160db9d7d36SMaxime Chevallier
3161db9d7d36SMaxime Chevallier txq_pcpu->count = 0;
3162db9d7d36SMaxime Chevallier txq_pcpu->reserved_num = 0;
3163db9d7d36SMaxime Chevallier txq_pcpu->txq_put_index = 0;
3164db9d7d36SMaxime Chevallier txq_pcpu->txq_get_index = 0;
3165db9d7d36SMaxime Chevallier txq_pcpu->tso_headers = NULL;
3166db9d7d36SMaxime Chevallier
 /* Leave headroom so the queue stops before it is completely full */
3167db9d7d36SMaxime Chevallier txq_pcpu->stop_threshold = txq->size - MVPP2_MAX_SKB_DESCS;
3168db9d7d36SMaxime Chevallier txq_pcpu->wake_threshold = txq_pcpu->stop_threshold / 2;
3169db9d7d36SMaxime Chevallier
3170db9d7d36SMaxime Chevallier txq_pcpu->tso_headers =
3171db9d7d36SMaxime Chevallier dma_alloc_coherent(port->dev->dev.parent,
3172db9d7d36SMaxime Chevallier txq_pcpu->size * TSO_HEADER_SIZE,
3173db9d7d36SMaxime Chevallier &txq_pcpu->tso_headers_dma,
3174db9d7d36SMaxime Chevallier GFP_KERNEL);
3175db9d7d36SMaxime Chevallier if (!txq_pcpu->tso_headers)
3176db9d7d36SMaxime Chevallier return -ENOMEM;
3177db9d7d36SMaxime Chevallier }
3178db9d7d36SMaxime Chevallier
3179db9d7d36SMaxime Chevallier return 0;
3180db9d7d36SMaxime Chevallier }
3181db9d7d36SMaxime Chevallier
3182db9d7d36SMaxime Chevallier /* Free allocated TXQ resources */
 /* Reverse of mvpp2_txq_init(); also used to unwind a partially
  * initialized queue (kfree(NULL) and the tso_headers check make the
  * per-thread loop safe in that case).
  */
mvpp2_txq_deinit(struct mvpp2_port * port,struct mvpp2_tx_queue * txq)3183db9d7d36SMaxime Chevallier static void mvpp2_txq_deinit(struct mvpp2_port *port,
3184db9d7d36SMaxime Chevallier struct mvpp2_tx_queue *txq)
3185db9d7d36SMaxime Chevallier {
3186db9d7d36SMaxime Chevallier struct mvpp2_txq_pcpu *txq_pcpu;
3187074c74dfSAntoine Tenart unsigned int thread;
3188db9d7d36SMaxime Chevallier
3189e531f767SAntoine Tenart for (thread = 0; thread < port->priv->nthreads; thread++) {
3190074c74dfSAntoine Tenart txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
3191db9d7d36SMaxime Chevallier kfree(txq_pcpu->buffs);
3192db9d7d36SMaxime Chevallier
3193db9d7d36SMaxime Chevallier if (txq_pcpu->tso_headers)
3194db9d7d36SMaxime Chevallier dma_free_coherent(port->dev->dev.parent,
3195db9d7d36SMaxime Chevallier txq_pcpu->size * TSO_HEADER_SIZE,
3196db9d7d36SMaxime Chevallier txq_pcpu->tso_headers,
3197db9d7d36SMaxime Chevallier txq_pcpu->tso_headers_dma);
3198db9d7d36SMaxime Chevallier
3199db9d7d36SMaxime Chevallier txq_pcpu->tso_headers = NULL;
3200db9d7d36SMaxime Chevallier }
3201db9d7d36SMaxime Chevallier
3202db9d7d36SMaxime Chevallier if (txq->descs)
3203db9d7d36SMaxime Chevallier dma_free_coherent(port->dev->dev.parent,
3204db9d7d36SMaxime Chevallier txq->size * MVPP2_DESC_ALIGNED_SIZE,
3205db9d7d36SMaxime Chevallier txq->descs, txq->descs_dma);
3206db9d7d36SMaxime Chevallier
3207db9d7d36SMaxime Chevallier txq->descs = NULL;
3208db9d7d36SMaxime Chevallier txq->last_desc = 0;
3209db9d7d36SMaxime Chevallier txq->next_desc_to_proc = 0;
3210db9d7d36SMaxime Chevallier txq->descs_dma = 0;
3211db9d7d36SMaxime Chevallier
3212db9d7d36SMaxime Chevallier /* Set minimum bandwidth for disabled TXQs */
3213db9d7d36SMaxime Chevallier mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->log_id), 0);
3214db9d7d36SMaxime Chevallier
3215db9d7d36SMaxime Chevallier /* Set Tx descriptors queue starting address and size */
 /* get_cpu()/put_cpu() bound the indirect register access window */
3216e531f767SAntoine Tenart thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
3217db9d7d36SMaxime Chevallier mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
3218db9d7d36SMaxime Chevallier mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_ADDR_REG, 0);
3219db9d7d36SMaxime Chevallier mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_SIZE_REG, 0);
3220db9d7d36SMaxime Chevallier put_cpu();
3221db9d7d36SMaxime Chevallier }
3222db9d7d36SMaxime Chevallier
3223db9d7d36SMaxime Chevallier /* Cleanup Tx ports */
 /* Drain @txq: enable the HW drain mode, busy-wait (with timeout) until
  * no descriptors are pending, then free all tracked buffers and reset
  * the per-thread software indices.  Caller has stopped the napi queue.
  */
mvpp2_txq_clean(struct mvpp2_port * port,struct mvpp2_tx_queue * txq)3224db9d7d36SMaxime Chevallier static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq)
3225db9d7d36SMaxime Chevallier {
3226db9d7d36SMaxime Chevallier struct mvpp2_txq_pcpu *txq_pcpu;
3227850623b3SAntoine Tenart int delay, pending;
3228e531f767SAntoine Tenart unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
3229db9d7d36SMaxime Chevallier u32 val;
3230db9d7d36SMaxime Chevallier
3231db9d7d36SMaxime Chevallier mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
3232db9d7d36SMaxime Chevallier val = mvpp2_thread_read(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG);
3233db9d7d36SMaxime Chevallier val |= MVPP2_TXQ_DRAIN_EN_MASK;
3234db9d7d36SMaxime Chevallier mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG, val);
3235db9d7d36SMaxime Chevallier
3236db9d7d36SMaxime Chevallier /* The napi queue has been stopped so wait for all packets
3237db9d7d36SMaxime Chevallier * to be transmitted.
3238db9d7d36SMaxime Chevallier */
3239db9d7d36SMaxime Chevallier delay = 0;
3240db9d7d36SMaxime Chevallier do {
3241db9d7d36SMaxime Chevallier if (delay >= MVPP2_TX_PENDING_TIMEOUT_MSEC) {
3242db9d7d36SMaxime Chevallier netdev_warn(port->dev,
3243db9d7d36SMaxime Chevallier "port %d: cleaning queue %d timed out\n",
3244db9d7d36SMaxime Chevallier port->id, txq->log_id);
3245db9d7d36SMaxime Chevallier break;
3246db9d7d36SMaxime Chevallier }
3247db9d7d36SMaxime Chevallier mdelay(1);
3248db9d7d36SMaxime Chevallier delay++;
3249db9d7d36SMaxime Chevallier
3250db9d7d36SMaxime Chevallier pending = mvpp2_thread_read(port->priv, thread,
3251db9d7d36SMaxime Chevallier MVPP2_TXQ_PENDING_REG);
3252db9d7d36SMaxime Chevallier pending &= MVPP2_TXQ_PENDING_MASK;
3253db9d7d36SMaxime Chevallier } while (pending);
3254db9d7d36SMaxime Chevallier
 /* Drain complete (or timed out): turn drain mode back off */
3255db9d7d36SMaxime Chevallier val &= ~MVPP2_TXQ_DRAIN_EN_MASK;
3256db9d7d36SMaxime Chevallier mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG, val);
3257db9d7d36SMaxime Chevallier put_cpu();
3258db9d7d36SMaxime Chevallier
 /* Note: 'thread' is reused below as a plain loop index, which is
  * safe only after the put_cpu() above.
  */
3259e531f767SAntoine Tenart for (thread = 0; thread < port->priv->nthreads; thread++) {
3260074c74dfSAntoine Tenart txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
3261db9d7d36SMaxime Chevallier
3262db9d7d36SMaxime Chevallier /* Release all packets */
3263db9d7d36SMaxime Chevallier mvpp2_txq_bufs_free(port, txq, txq_pcpu, txq_pcpu->count);
3264db9d7d36SMaxime Chevallier
3265db9d7d36SMaxime Chevallier /* Reset queue */
3266db9d7d36SMaxime Chevallier txq_pcpu->count = 0;
3267db9d7d36SMaxime Chevallier txq_pcpu->txq_put_index = 0;
3268db9d7d36SMaxime Chevallier txq_pcpu->txq_get_index = 0;
3269db9d7d36SMaxime Chevallier }
3270db9d7d36SMaxime Chevallier }
3271db9d7d36SMaxime Chevallier
/* Cleanup all Tx queues: flush the port in hardware, drain and deinit every
 * queue, then release the flush state.
 */
static void mvpp2_cleanup_txqs(struct mvpp2_port *port)
{
	struct mvpp2_tx_queue *txq;
	int queue;
	u32 val;

	val = mvpp2_read(port->priv, MVPP2_TX_PORT_FLUSH_REG);

	/* Reset Tx ports and delete Tx queues */
	val |= MVPP2_TX_PORT_FLUSH_MASK(port->id);
	mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);

	/* Tear down each queue while the port flush bit is set */
	for (queue = 0; queue < port->ntxqs; queue++) {
		txq = port->txqs[queue];
		mvpp2_txq_clean(port, txq);
		mvpp2_txq_deinit(port, txq);
	}

	/* Reset the sent-packet counters on every CPU */
	on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);

	/* Release the port from the flush state */
	val &= ~MVPP2_TX_PORT_FLUSH_MASK(port->id);
	mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
}
3296db9d7d36SMaxime Chevallier
3297db9d7d36SMaxime Chevallier /* Cleanup all Rx queues */
mvpp2_cleanup_rxqs(struct mvpp2_port * port)3298db9d7d36SMaxime Chevallier static void mvpp2_cleanup_rxqs(struct mvpp2_port *port)
3299db9d7d36SMaxime Chevallier {
3300db9d7d36SMaxime Chevallier int queue;
3301db9d7d36SMaxime Chevallier
3302db9d7d36SMaxime Chevallier for (queue = 0; queue < port->nrxqs; queue++)
3303db9d7d36SMaxime Chevallier mvpp2_rxq_deinit(port, port->rxqs[queue]);
33043bd17fdcSStefan Chulski
33053bd17fdcSStefan Chulski if (port->tx_fc)
33063bd17fdcSStefan Chulski mvpp2_rxq_disable_fc(port);
3307db9d7d36SMaxime Chevallier }
3308db9d7d36SMaxime Chevallier
3309db9d7d36SMaxime Chevallier /* Init all Rx queues for port */
mvpp2_setup_rxqs(struct mvpp2_port * port)3310db9d7d36SMaxime Chevallier static int mvpp2_setup_rxqs(struct mvpp2_port *port)
3311db9d7d36SMaxime Chevallier {
3312db9d7d36SMaxime Chevallier int queue, err;
3313db9d7d36SMaxime Chevallier
3314db9d7d36SMaxime Chevallier for (queue = 0; queue < port->nrxqs; queue++) {
3315db9d7d36SMaxime Chevallier err = mvpp2_rxq_init(port, port->rxqs[queue]);
3316db9d7d36SMaxime Chevallier if (err)
3317db9d7d36SMaxime Chevallier goto err_cleanup;
3318db9d7d36SMaxime Chevallier }
33193bd17fdcSStefan Chulski
33203bd17fdcSStefan Chulski if (port->tx_fc)
33213bd17fdcSStefan Chulski mvpp2_rxq_enable_fc(port);
33223bd17fdcSStefan Chulski
3323db9d7d36SMaxime Chevallier return 0;
3324db9d7d36SMaxime Chevallier
3325db9d7d36SMaxime Chevallier err_cleanup:
3326db9d7d36SMaxime Chevallier mvpp2_cleanup_rxqs(port);
3327db9d7d36SMaxime Chevallier return err;
3328db9d7d36SMaxime Chevallier }
3329db9d7d36SMaxime Chevallier
3330db9d7d36SMaxime Chevallier /* Init all tx queues for port */
mvpp2_setup_txqs(struct mvpp2_port * port)3331db9d7d36SMaxime Chevallier static int mvpp2_setup_txqs(struct mvpp2_port *port)
3332db9d7d36SMaxime Chevallier {
3333db9d7d36SMaxime Chevallier struct mvpp2_tx_queue *txq;
3334c2d6fe61SMatteo Croce int queue, err;
3335db9d7d36SMaxime Chevallier
3336db9d7d36SMaxime Chevallier for (queue = 0; queue < port->ntxqs; queue++) {
3337db9d7d36SMaxime Chevallier txq = port->txqs[queue];
3338db9d7d36SMaxime Chevallier err = mvpp2_txq_init(port, txq);
3339db9d7d36SMaxime Chevallier if (err)
3340db9d7d36SMaxime Chevallier goto err_cleanup;
33410d283ab5SMaxime Chevallier
33420d283ab5SMaxime Chevallier /* Assign this queue to a CPU */
3343c2d6fe61SMatteo Croce if (queue < num_possible_cpus())
3344c2d6fe61SMatteo Croce netif_set_xps_queue(port->dev, cpumask_of(queue), queue);
3345db9d7d36SMaxime Chevallier }
3346db9d7d36SMaxime Chevallier
3347db9d7d36SMaxime Chevallier if (port->has_tx_irqs) {
3348db9d7d36SMaxime Chevallier mvpp2_tx_time_coal_set(port);
3349db9d7d36SMaxime Chevallier for (queue = 0; queue < port->ntxqs; queue++) {
3350db9d7d36SMaxime Chevallier txq = port->txqs[queue];
3351db9d7d36SMaxime Chevallier mvpp2_tx_pkts_coal_set(port, txq);
3352db9d7d36SMaxime Chevallier }
3353db9d7d36SMaxime Chevallier }
3354db9d7d36SMaxime Chevallier
3355db9d7d36SMaxime Chevallier on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
3356db9d7d36SMaxime Chevallier return 0;
3357db9d7d36SMaxime Chevallier
3358db9d7d36SMaxime Chevallier err_cleanup:
3359db9d7d36SMaxime Chevallier mvpp2_cleanup_txqs(port);
3360db9d7d36SMaxime Chevallier return err;
3361db9d7d36SMaxime Chevallier }
3362db9d7d36SMaxime Chevallier
3363db9d7d36SMaxime Chevallier /* The callback for per-port interrupt */
mvpp2_isr(int irq,void * dev_id)3364db9d7d36SMaxime Chevallier static irqreturn_t mvpp2_isr(int irq, void *dev_id)
3365db9d7d36SMaxime Chevallier {
3366db9d7d36SMaxime Chevallier struct mvpp2_queue_vector *qv = dev_id;
3367db9d7d36SMaxime Chevallier
3368db9d7d36SMaxime Chevallier mvpp2_qvec_interrupt_disable(qv);
3369db9d7d36SMaxime Chevallier
3370db9d7d36SMaxime Chevallier napi_schedule(&qv->napi);
3371db9d7d36SMaxime Chevallier
3372db9d7d36SMaxime Chevallier return IRQ_HANDLED;
3373db9d7d36SMaxime Chevallier }
3374db9d7d36SMaxime Chevallier
/* Drain Tx hardware-timestamp queue @nq and deliver each timestamp to the
 * skb that was parked in the matching tx_hwtstamp_queue slot.
 */
static void mvpp2_isr_handle_ptp_queue(struct mvpp2_port *port, int nq)
{
	struct skb_shared_hwtstamps shhwtstamps;
	struct mvpp2_hwtstamp_queue *queue;
	struct sk_buff *skb;
	void __iomem *ptp_q;
	unsigned int id;
	u32 r0, r1, r2;

	ptp_q = port->priv->iface_base + MVPP22_PTP_BASE(port->gop_id);
	if (nq)
		/* Queue 1 registers sit at a fixed offset from queue 0 */
		ptp_q += MVPP22_PTP_TX_Q1_R0 - MVPP22_PTP_TX_Q0_R0;

	queue = &port->tx_hwtstamp_queue[nq];

	while (1) {
		/* A zero R0 terminates the drain: no more entries */
		r0 = readl_relaxed(ptp_q + MVPP22_PTP_TX_Q0_R0) & 0xffff;
		if (!r0)
			break;

		r1 = readl_relaxed(ptp_q + MVPP22_PTP_TX_Q0_R1) & 0xffff;
		r2 = readl_relaxed(ptp_q + MVPP22_PTP_TX_Q0_R2) & 0xffff;

		/* 5-bit skb slot index held in bits 5:1 of R0 */
		id = (r0 >> 1) & 31;

		/* Claim the skb; clearing the slot frees it for reuse */
		skb = queue->skb[id];
		queue->skb[id] = NULL;
		if (skb) {
			/* Reassemble the raw timestamp that is scattered
			 * across the three 16-bit registers.
			 */
			u32 ts = r2 << 19 | r1 << 3 | r0 >> 13;

			mvpp22_tai_tstamp(port->priv->tai, ts, &shhwtstamps);
			skb_tstamp_tx(skb, &shhwtstamps);
			dev_kfree_skb_any(skb);
		}
	}
}
3411f5015a59SRussell King
mvpp2_isr_handle_ptp(struct mvpp2_port * port)3412f5015a59SRussell King static void mvpp2_isr_handle_ptp(struct mvpp2_port *port)
3413f5015a59SRussell King {
3414f5015a59SRussell King void __iomem *ptp;
3415f5015a59SRussell King u32 val;
3416f5015a59SRussell King
3417f5015a59SRussell King ptp = port->priv->iface_base + MVPP22_PTP_BASE(port->gop_id);
3418f5015a59SRussell King val = readl(ptp + MVPP22_PTP_INT_CAUSE);
3419f5015a59SRussell King if (val & MVPP22_PTP_INT_CAUSE_QUEUE0)
3420f5015a59SRussell King mvpp2_isr_handle_ptp_queue(port, 0);
3421f5015a59SRussell King if (val & MVPP22_PTP_INT_CAUSE_QUEUE1)
3422f5015a59SRussell King mvpp2_isr_handle_ptp_queue(port, 1);
3423f5015a59SRussell King }
3424f5015a59SRussell King
/* React to a link state change reported by the MAC */
static void mvpp2_isr_handle_link(struct mvpp2_port *port, bool link)
{
	struct net_device *dev = port->dev;

	/* When phylink manages this MAC, just forward the event to it */
	if (port->phylink) {
		phylink_mac_change(port->phylink, link);
		return;
	}

	if (!netif_running(dev))
		return;

	if (!link) {
		/* Link lost: quiesce the datapath */
		netif_tx_stop_all_queues(dev);
		netif_carrier_off(dev);
		mvpp2_ingress_disable(port);
		mvpp2_egress_disable(port);
		mvpp2_interrupts_disable(port);
		return;
	}

	/* Link up: restart the datapath */
	mvpp2_interrupts_enable(port);
	mvpp2_egress_enable(port);
	mvpp2_ingress_enable(port);
	netif_carrier_on(dev);
	netif_tx_wake_all_queues(dev);
}
3453db9d7d36SMaxime Chevallier
mvpp2_isr_handle_xlg(struct mvpp2_port * port)345436cfd3a6SRussell King static void mvpp2_isr_handle_xlg(struct mvpp2_port *port)
345536cfd3a6SRussell King {
345636cfd3a6SRussell King bool link;
345736cfd3a6SRussell King u32 val;
345836cfd3a6SRussell King
345936cfd3a6SRussell King val = readl(port->base + MVPP22_XLG_INT_STAT);
346036cfd3a6SRussell King if (val & MVPP22_XLG_INT_STAT_LINK) {
346136cfd3a6SRussell King val = readl(port->base + MVPP22_XLG_STATUS);
3462cdd0a379SAlex Dewar link = (val & MVPP22_XLG_STATUS_LINK_UP);
346336cfd3a6SRussell King mvpp2_isr_handle_link(port, link);
346436cfd3a6SRussell King }
346536cfd3a6SRussell King }
346636cfd3a6SRussell King
mvpp2_isr_handle_gmac_internal(struct mvpp2_port * port)346736cfd3a6SRussell King static void mvpp2_isr_handle_gmac_internal(struct mvpp2_port *port)
346836cfd3a6SRussell King {
346936cfd3a6SRussell King bool link;
347036cfd3a6SRussell King u32 val;
347136cfd3a6SRussell King
347236cfd3a6SRussell King if (phy_interface_mode_is_rgmii(port->phy_interface) ||
347336cfd3a6SRussell King phy_interface_mode_is_8023z(port->phy_interface) ||
347436cfd3a6SRussell King port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
347536cfd3a6SRussell King val = readl(port->base + MVPP22_GMAC_INT_STAT);
347636cfd3a6SRussell King if (val & MVPP22_GMAC_INT_STAT_LINK) {
347736cfd3a6SRussell King val = readl(port->base + MVPP2_GMAC_STATUS0);
3478cdd0a379SAlex Dewar link = (val & MVPP2_GMAC_STATUS0_LINK_UP);
347936cfd3a6SRussell King mvpp2_isr_handle_link(port, link);
348036cfd3a6SRussell King }
348136cfd3a6SRussell King }
348236cfd3a6SRussell King }
348336cfd3a6SRussell King
/* Per-port interrupt for link status changes */
static irqreturn_t mvpp2_port_isr(int irq, void *dev_id)
{
	struct mvpp2_port *port = (struct mvpp2_port *)dev_id;
	u32 val;

	/* Mask GoP interrupts while the causes are serviced */
	mvpp22_gop_mask_irq(port);

	if (mvpp2_port_supports_xlg(port) &&
	    mvpp2_is_xlg(port->phy_interface)) {
		/* Check the external status register */
		val = readl(port->base + MVPP22_XLG_EXT_INT_STAT);
		if (val & MVPP22_XLG_EXT_INT_STAT_XLG)
			mvpp2_isr_handle_xlg(port);
		if (val & MVPP22_XLG_EXT_INT_STAT_PTP)
			mvpp2_isr_handle_ptp(port);
	} else {
		/* If it's not the XLG, we must be using the GMAC.
		 * Check the summary status.
		 */
		val = readl(port->base + MVPP22_GMAC_INT_SUM_STAT);
		if (val & MVPP22_GMAC_INT_SUM_STAT_INTERNAL)
			mvpp2_isr_handle_gmac_internal(port);
		if (val & MVPP22_GMAC_INT_SUM_STAT_PTP)
			mvpp2_isr_handle_ptp(port);
	}

	/* Re-enable GoP interrupts now that all causes are handled */
	mvpp22_gop_unmask_irq(port);
	return IRQ_HANDLED;
}
3514db9d7d36SMaxime Chevallier
/* hrtimer callback that reclaims completed Tx descriptors and re-arms
 * itself while work remains (presumably used when no tx-done IRQs are
 * available — see the has_tx_irqs checks elsewhere; confirm).
 */
static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer)
{
	struct net_device *dev;
	struct mvpp2_port *port;
	struct mvpp2_port_pcpu *port_pcpu;
	unsigned int tx_todo, cause;

	port_pcpu = container_of(timer, struct mvpp2_port_pcpu, tx_done_timer);
	dev = port_pcpu->dev;

	/* Nothing to reclaim on a stopped interface */
	if (!netif_running(dev))
		return HRTIMER_NORESTART;

	port_pcpu->timer_scheduled = false;
	port = netdev_priv(dev);

	/* Process all the Tx queues: one cause bit per queue */
	cause = (1 << port->ntxqs) - 1;
	tx_todo = mvpp2_tx_done(port, cause,
				mvpp2_cpu_to_thread(port->priv, smp_processor_id()));

	/* Set the timer in case not all the packets were processed */
	if (tx_todo && !port_pcpu->timer_scheduled) {
		port_pcpu->timer_scheduled = true;
		hrtimer_forward_now(&port_pcpu->tx_done_timer,
				    MVPP2_TXDONE_HRTIMER_PERIOD_NS);

		return HRTIMER_RESTART;
	}
	return HRTIMER_NORESTART;
}
3546db9d7d36SMaxime Chevallier
3547db9d7d36SMaxime Chevallier /* Main RX/TX processing routines */
3548db9d7d36SMaxime Chevallier
3549db9d7d36SMaxime Chevallier /* Display more error info */
mvpp2_rx_error(struct mvpp2_port * port,struct mvpp2_rx_desc * rx_desc)3550db9d7d36SMaxime Chevallier static void mvpp2_rx_error(struct mvpp2_port *port,
3551db9d7d36SMaxime Chevallier struct mvpp2_rx_desc *rx_desc)
3552db9d7d36SMaxime Chevallier {
3553db9d7d36SMaxime Chevallier u32 status = mvpp2_rxdesc_status_get(port, rx_desc);
3554db9d7d36SMaxime Chevallier size_t sz = mvpp2_rxdesc_size_get(port, rx_desc);
3555db9d7d36SMaxime Chevallier char *err_str = NULL;
3556db9d7d36SMaxime Chevallier
3557db9d7d36SMaxime Chevallier switch (status & MVPP2_RXD_ERR_CODE_MASK) {
3558db9d7d36SMaxime Chevallier case MVPP2_RXD_ERR_CRC:
3559db9d7d36SMaxime Chevallier err_str = "crc";
3560db9d7d36SMaxime Chevallier break;
3561db9d7d36SMaxime Chevallier case MVPP2_RXD_ERR_OVERRUN:
3562db9d7d36SMaxime Chevallier err_str = "overrun";
3563db9d7d36SMaxime Chevallier break;
3564db9d7d36SMaxime Chevallier case MVPP2_RXD_ERR_RESOURCE:
3565db9d7d36SMaxime Chevallier err_str = "resource";
3566db9d7d36SMaxime Chevallier break;
3567db9d7d36SMaxime Chevallier }
3568db9d7d36SMaxime Chevallier if (err_str && net_ratelimit())
3569db9d7d36SMaxime Chevallier netdev_err(port->dev,
3570db9d7d36SMaxime Chevallier "bad rx status %08x (%s error), size=%zu\n",
3571db9d7d36SMaxime Chevallier status, err_str, sz);
3572db9d7d36SMaxime Chevallier }
3573db9d7d36SMaxime Chevallier
3574db9d7d36SMaxime Chevallier /* Handle RX checksum offload */
mvpp2_rx_csum(struct mvpp2_port * port,u32 status)3575aff0824dSLorenzo Bianconi static int mvpp2_rx_csum(struct mvpp2_port *port, u32 status)
3576db9d7d36SMaxime Chevallier {
3577db9d7d36SMaxime Chevallier if (((status & MVPP2_RXD_L3_IP4) &&
3578db9d7d36SMaxime Chevallier !(status & MVPP2_RXD_IP4_HEADER_ERR)) ||
3579db9d7d36SMaxime Chevallier (status & MVPP2_RXD_L3_IP6))
3580db9d7d36SMaxime Chevallier if (((status & MVPP2_RXD_L4_UDP) ||
3581db9d7d36SMaxime Chevallier (status & MVPP2_RXD_L4_TCP)) &&
3582aff0824dSLorenzo Bianconi (status & MVPP2_RXD_L4_CSUM_OK))
3583aff0824dSLorenzo Bianconi return CHECKSUM_UNNECESSARY;
3584db9d7d36SMaxime Chevallier
3585aff0824dSLorenzo Bianconi return CHECKSUM_NONE;
3586db9d7d36SMaxime Chevallier }
3587db9d7d36SMaxime Chevallier
358880f60a91SMatteo Croce /* Allocate a new skb and add it to BM pool */
mvpp2_rx_refill(struct mvpp2_port * port,struct mvpp2_bm_pool * bm_pool,struct page_pool * page_pool,int pool)3589db9d7d36SMaxime Chevallier static int mvpp2_rx_refill(struct mvpp2_port *port,
3590b27db227SMatteo Croce struct mvpp2_bm_pool *bm_pool,
3591b27db227SMatteo Croce struct page_pool *page_pool, int pool)
3592db9d7d36SMaxime Chevallier {
3593db9d7d36SMaxime Chevallier dma_addr_t dma_addr;
3594db9d7d36SMaxime Chevallier phys_addr_t phys_addr;
3595db9d7d36SMaxime Chevallier void *buf;
3596db9d7d36SMaxime Chevallier
3597b27db227SMatteo Croce buf = mvpp2_buf_alloc(port, bm_pool, page_pool,
3598b27db227SMatteo Croce &dma_addr, &phys_addr, GFP_ATOMIC);
3599db9d7d36SMaxime Chevallier if (!buf)
3600db9d7d36SMaxime Chevallier return -ENOMEM;
3601db9d7d36SMaxime Chevallier
3602db9d7d36SMaxime Chevallier mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
3603db9d7d36SMaxime Chevallier
3604db9d7d36SMaxime Chevallier return 0;
3605db9d7d36SMaxime Chevallier }
3606db9d7d36SMaxime Chevallier
3607db9d7d36SMaxime Chevallier /* Handle tx checksum */
mvpp2_skb_tx_csum(struct mvpp2_port * port,struct sk_buff * skb)3608db9d7d36SMaxime Chevallier static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb)
3609db9d7d36SMaxime Chevallier {
3610db9d7d36SMaxime Chevallier if (skb->ip_summed == CHECKSUM_PARTIAL) {
3611db9d7d36SMaxime Chevallier int ip_hdr_len = 0;
3612db9d7d36SMaxime Chevallier u8 l4_proto;
361335f3625cSMaxime Chevallier __be16 l3_proto = vlan_get_protocol(skb);
3614db9d7d36SMaxime Chevallier
361535f3625cSMaxime Chevallier if (l3_proto == htons(ETH_P_IP)) {
3616db9d7d36SMaxime Chevallier struct iphdr *ip4h = ip_hdr(skb);
3617db9d7d36SMaxime Chevallier
3618db9d7d36SMaxime Chevallier /* Calculate IPv4 checksum and L4 checksum */
3619db9d7d36SMaxime Chevallier ip_hdr_len = ip4h->ihl;
3620db9d7d36SMaxime Chevallier l4_proto = ip4h->protocol;
362135f3625cSMaxime Chevallier } else if (l3_proto == htons(ETH_P_IPV6)) {
3622db9d7d36SMaxime Chevallier struct ipv6hdr *ip6h = ipv6_hdr(skb);
3623db9d7d36SMaxime Chevallier
3624db9d7d36SMaxime Chevallier /* Read l4_protocol from one of IPv6 extra headers */
3625db9d7d36SMaxime Chevallier if (skb_network_header_len(skb) > 0)
3626db9d7d36SMaxime Chevallier ip_hdr_len = (skb_network_header_len(skb) >> 2);
3627db9d7d36SMaxime Chevallier l4_proto = ip6h->nexthdr;
3628db9d7d36SMaxime Chevallier } else {
3629db9d7d36SMaxime Chevallier return MVPP2_TXD_L4_CSUM_NOT;
3630db9d7d36SMaxime Chevallier }
3631db9d7d36SMaxime Chevallier
3632db9d7d36SMaxime Chevallier return mvpp2_txq_desc_csum(skb_network_offset(skb),
363335f3625cSMaxime Chevallier l3_proto, ip_hdr_len, l4_proto);
3634db9d7d36SMaxime Chevallier }
3635db9d7d36SMaxime Chevallier
3636db9d7d36SMaxime Chevallier return MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE;
3637db9d7d36SMaxime Chevallier }
3638db9d7d36SMaxime Chevallier
/* Account @nxmit freshly queued XDP descriptors on Tx queue @txq_id and ring
 * the doorbell so the hardware starts transmitting them.
 */
static void mvpp2_xdp_finish_tx(struct mvpp2_port *port, u16 txq_id, int nxmit, int nxmit_byte)
{
	unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
	struct mvpp2_tx_queue *aggr_txq;
	struct mvpp2_txq_pcpu *txq_pcpu;
	struct mvpp2_tx_queue *txq;
	struct netdev_queue *nq;

	txq = port->txqs[txq_id];
	txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
	nq = netdev_get_tx_queue(port->dev, txq_id);
	aggr_txq = &port->priv->aggr_txqs[thread];

	/* Move the submitted descriptors from reserved to in-flight */
	txq_pcpu->reserved_num -= nxmit;
	txq_pcpu->count += nxmit;
	aggr_txq->count += nxmit;

	/* Enable transmit; the barrier orders descriptor writes before the
	 * doorbell below.
	 */
	wmb();
	mvpp2_aggr_txq_pend_desc_add(port, nxmit);

	/* Stop the netdev queue when the in-flight count hits the limit */
	if (txq_pcpu->count >= txq_pcpu->stop_threshold)
		netif_tx_stop_queue(nq);

	/* Finalize TX processing */
	if (!port->has_tx_irqs && txq_pcpu->count >= txq->done_pkts_coal)
		mvpp2_txq_done(port, txq, txq_pcpu);
}
3667c2d6fe61SMatteo Croce
/* Queue one xdp_frame on Tx queue @txq_id.
 *
 * @dma_map selects the buffer ownership model: true for frames arriving via
 * XDP_REDIRECT/ndo_xdp_xmit that must be freshly DMA-mapped, false for
 * XDP_TX frames whose backing page is already mapped by the page pool.
 * Returns MVPP2_XDP_TX on success, MVPP2_XDP_DROPPED when descriptors or
 * DMA mapping are unavailable.
 */
static int
mvpp2_xdp_submit_frame(struct mvpp2_port *port, u16 txq_id,
		       struct xdp_frame *xdpf, bool dma_map)
{
	unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
	u32 tx_cmd = MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE |
		     MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC;
	enum mvpp2_tx_buf_type buf_type;
	struct mvpp2_txq_pcpu *txq_pcpu;
	struct mvpp2_tx_queue *aggr_txq;
	struct mvpp2_tx_desc *tx_desc;
	struct mvpp2_tx_queue *txq;
	int ret = MVPP2_XDP_TX;
	dma_addr_t dma_addr;

	txq = port->txqs[txq_id];
	txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
	aggr_txq = &port->priv->aggr_txqs[thread];

	/* Check number of available descriptors */
	if (mvpp2_aggr_desc_num_check(port, aggr_txq, 1) ||
	    mvpp2_txq_reserved_desc_num_proc(port, txq, txq_pcpu, 1)) {
		ret = MVPP2_XDP_DROPPED;
		goto out;
	}

	/* Get a descriptor for the first part of the packet */
	tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
	mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
	mvpp2_txdesc_size_set(port, tx_desc, xdpf->len);

	if (dma_map) {
		/* XDP_REDIRECT or AF_XDP */
		dma_addr = dma_map_single(port->dev->dev.parent, xdpf->data,
					  xdpf->len, DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(port->dev->dev.parent, dma_addr))) {
			/* Roll back the descriptor taken above */
			mvpp2_txq_desc_put(txq);
			ret = MVPP2_XDP_DROPPED;
			goto out;
		}

		buf_type = MVPP2_TYPE_XDP_NDO;
	} else {
		/* XDP_TX: reuse the page pool mapping; the payload starts
		 * after the xdp_frame struct plus its headroom.
		 */
		struct page *page = virt_to_page(xdpf->data);

		dma_addr = page_pool_get_dma_addr(page) +
			   sizeof(*xdpf) + xdpf->headroom;
		dma_sync_single_for_device(port->dev->dev.parent, dma_addr,
					   xdpf->len, DMA_BIDIRECTIONAL);

		buf_type = MVPP2_TYPE_XDP_TX;
	}

	mvpp2_txdesc_dma_addr_set(port, tx_desc, dma_addr);

	mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
	/* Record the buffer so tx-done knows how to release it */
	mvpp2_txq_inc_put(port, txq_pcpu, xdpf, tx_desc, buf_type);

out:
	return ret;
}
3731c2d6fe61SMatteo Croce
/* Transmit an XDP_TX buffer back out of the port and update the per-CPU
 * XDP statistics.  Returns MVPP2_XDP_TX or MVPP2_XDP_DROPPED.
 */
static int
mvpp2_xdp_xmit_back(struct mvpp2_port *port, struct xdp_buff *xdp)
{
	struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);
	struct xdp_frame *xdpf;
	u16 txq_id;
	int ret;

	xdpf = xdp_convert_buff_to_frame(xdp);
	if (unlikely(!xdpf))
		return MVPP2_XDP_DROPPED;

	/* The first of the TX queues are used for XPS,
	 * the second half for XDP_TX
	 */
	txq_id = mvpp2_cpu_to_thread(port->priv, smp_processor_id()) + (port->ntxqs / 2);

	/* dma_map = false: the page-pool DMA mapping is reused */
	ret = mvpp2_xdp_submit_frame(port, txq_id, xdpf, false);
	if (ret == MVPP2_XDP_TX) {
		u64_stats_update_begin(&stats->syncp);
		stats->tx_bytes += xdpf->len;
		stats->tx_packets++;
		stats->xdp_tx++;
		u64_stats_update_end(&stats->syncp);

		/* Kick the hardware for the single queued frame */
		mvpp2_xdp_finish_tx(port, txq_id, 1, xdpf->len);
	} else {
		u64_stats_update_begin(&stats->syncp);
		stats->xdp_tx_err++;
		u64_stats_update_end(&stats->syncp);
	}

	return ret;
}
3766c2d6fe61SMatteo Croce
/* .ndo_xdp_xmit implementation: transmit up to @num_frame redirected XDP
 * frames.  Returns the number of frames queued; frames beyond the first
 * failure are not attempted and are accounted as errors.
 */
static int
mvpp2_xdp_xmit(struct net_device *dev, int num_frame,
	       struct xdp_frame **frames, u32 flags)
{
	struct mvpp2_port *port = netdev_priv(dev);
	int i, nxmit_byte = 0, nxmit = 0;
	struct mvpp2_pcpu_stats *stats;
	u16 txq_id;
	u32 ret;

	/* port->state bit 0 set means the interface is not usable */
	if (unlikely(test_bit(0, &port->state)))
		return -ENETDOWN;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	/* The first of the TX queues are used for XPS,
	 * the second half for XDP_TX
	 */
	txq_id = mvpp2_cpu_to_thread(port->priv, smp_processor_id()) + (port->ntxqs / 2);

	for (i = 0; i < num_frame; i++) {
		/* dma_map = true: redirected frames need a fresh mapping */
		ret = mvpp2_xdp_submit_frame(port, txq_id, frames[i], true);
		if (ret != MVPP2_XDP_TX)
			break;

		nxmit_byte += frames[i]->len;
		nxmit++;
	}

	/* Ring the doorbell once for the whole burst */
	if (likely(nxmit > 0))
		mvpp2_xdp_finish_tx(port, txq_id, nxmit, nxmit_byte);

	stats = this_cpu_ptr(port->stats);
	u64_stats_update_begin(&stats->syncp);
	stats->tx_bytes += nxmit_byte;
	stats->tx_packets += nxmit;
	stats->xdp_xmit += nxmit;
	stats->xdp_xmit_err += num_frame - nxmit;
	u64_stats_update_end(&stats->syncp);

	return nxmit;
}
3810c2d6fe61SMatteo Croce
/* Run the attached XDP program on one received buffer and act on its
 * verdict.  Returns one of the MVPP2_XDP_* codes; on every non-PASS,
 * non-successful outcome the backing page is returned to the page pool.
 */
static int
mvpp2_run_xdp(struct mvpp2_port *port, struct bpf_prog *prog,
	      struct xdp_buff *xdp, struct page_pool *pp,
	      struct mvpp2_pcpu_stats *stats)
{
	unsigned int len, sync, err;
	struct page *page;
	u32 ret, act;

	/* Payload length as seen before the program may adjust it */
	len = xdp->data_end - xdp->data_hard_start - MVPP2_SKB_HEADROOM;
	act = bpf_prog_run_xdp(prog, xdp);

	/* Due xdp_adjust_tail: DMA sync for_device cover max len CPU touch */
	sync = xdp->data_end - xdp->data_hard_start - MVPP2_SKB_HEADROOM;
	sync = max(sync, len);

	switch (act) {
	case XDP_PASS:
		stats->xdp_pass++;
		ret = MVPP2_XDP_PASS;
		break;
	case XDP_REDIRECT:
		err = xdp_do_redirect(port->dev, xdp, prog);
		if (unlikely(err)) {
			/* Redirect failed: recycle the page */
			ret = MVPP2_XDP_DROPPED;
			page = virt_to_head_page(xdp->data);
			page_pool_put_page(pp, page, sync, true);
		} else {
			ret = MVPP2_XDP_REDIR;
			stats->xdp_redirect++;
		}
		break;
	case XDP_TX:
		ret = mvpp2_xdp_xmit_back(port, xdp);
		if (ret != MVPP2_XDP_TX) {
			/* Transmit failed: recycle the page */
			page = virt_to_head_page(xdp->data);
			page_pool_put_page(pp, page, sync, true);
		}
		break;
	default:
		bpf_warn_invalid_xdp_action(port->dev, prog, act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(port->dev, prog, act);
		fallthrough;
	case XDP_DROP:
		page = virt_to_head_page(xdp->data);
		page_pool_put_page(pp, page, sync, true);
		ret = MVPP2_XDP_DROPPED;
		stats->xdp_drop++;
		break;
	}

	return ret;
}
386607dd0a7aSMatteo Croce
/* Return a multi-buffer ("buffer header") frame's chain to BM pool @pool.
 *
 * Frames the hardware split across several buffers are linked by a
 * struct mvpp2_buff_hdr stored at the start of each buffer; walk that
 * chain and hand every buffer back to the Buffer Manager.
 */
static void mvpp2_buff_hdr_pool_put(struct mvpp2_port *port, struct mvpp2_rx_desc *rx_desc,
				    int pool, u32 rx_status)
{
	phys_addr_t phys_addr, phys_addr_next;
	dma_addr_t dma_addr, dma_addr_next;
	struct mvpp2_buff_hdr *buff_hdr;

	/* NOTE(review): dma_addr_get() feeds phys_addr and cookie_get()
	 * feeds dma_addr here — the opposite of how mvpp2_rx() uses these
	 * accessors. phys_addr is then passed to phys_to_virt(); confirm
	 * this is intentional (addresses may coincide on this platform).
	 */
	phys_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc);
	dma_addr = mvpp2_rxdesc_cookie_get(port, rx_desc);

	do {
		buff_hdr = (struct mvpp2_buff_hdr *)phys_to_virt(phys_addr);

		/* Low 32 bits of the link to the next buffer in the chain */
		phys_addr_next = le32_to_cpu(buff_hdr->next_phys_addr);
		dma_addr_next = le32_to_cpu(buff_hdr->next_dma_addr);

		/* PPv2.2+ carries the high address bits in separate fields */
		if (port->priv->hw_version >= MVPP22) {
			phys_addr_next |= ((u64)buff_hdr->next_phys_addr_high << 32);
			dma_addr_next |= ((u64)buff_hdr->next_dma_addr_high << 32);
		}

		mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);

		phys_addr = phys_addr_next;
		dma_addr = dma_addr_next;

	/* 'info' of the buffer just returned says whether it was the last */
	} while (!MVPP2_B_HDR_INFO_IS_LAST(le16_to_cpu(buff_hdr->info)));
}
389517f9c1b6SStefan Chulski
3896db9d7d36SMaxime Chevallier /* Main rx processing */
mvpp2_rx(struct mvpp2_port * port,struct napi_struct * napi,int rx_todo,struct mvpp2_rx_queue * rxq)3897db9d7d36SMaxime Chevallier static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
3898db9d7d36SMaxime Chevallier int rx_todo, struct mvpp2_rx_queue *rxq)
3899db9d7d36SMaxime Chevallier {
3900db9d7d36SMaxime Chevallier struct net_device *dev = port->dev;
390139b96315SSven Auhagen struct mvpp2_pcpu_stats ps = {};
3902c2d6fe61SMatteo Croce enum dma_data_direction dma_dir;
390307dd0a7aSMatteo Croce struct bpf_prog *xdp_prog;
390407dd0a7aSMatteo Croce struct xdp_buff xdp;
3905db9d7d36SMaxime Chevallier int rx_received;
3906db9d7d36SMaxime Chevallier int rx_done = 0;
390707dd0a7aSMatteo Croce u32 xdp_ret = 0;
3908db9d7d36SMaxime Chevallier
390907dd0a7aSMatteo Croce xdp_prog = READ_ONCE(port->xdp_prog);
391007dd0a7aSMatteo Croce
3911db9d7d36SMaxime Chevallier /* Get number of received packets and clamp the to-do */
3912db9d7d36SMaxime Chevallier rx_received = mvpp2_rxq_received(port, rxq->id);
3913db9d7d36SMaxime Chevallier if (rx_todo > rx_received)
3914db9d7d36SMaxime Chevallier rx_todo = rx_received;
3915db9d7d36SMaxime Chevallier
3916db9d7d36SMaxime Chevallier while (rx_done < rx_todo) {
3917db9d7d36SMaxime Chevallier struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
3918db9d7d36SMaxime Chevallier struct mvpp2_bm_pool *bm_pool;
3919b27db227SMatteo Croce struct page_pool *pp = NULL;
3920db9d7d36SMaxime Chevallier struct sk_buff *skb;
3921db9d7d36SMaxime Chevallier unsigned int frag_size;
3922db9d7d36SMaxime Chevallier dma_addr_t dma_addr;
3923db9d7d36SMaxime Chevallier phys_addr_t phys_addr;
3924ce3497e2SRussell King u32 rx_status, timestamp;
392507dd0a7aSMatteo Croce int pool, rx_bytes, err, ret;
39262f128eb3SMatteo Croce struct page *page;
3927db9d7d36SMaxime Chevallier void *data;
3928db9d7d36SMaxime Chevallier
39292f128eb3SMatteo Croce phys_addr = mvpp2_rxdesc_cookie_get(port, rx_desc);
39302f128eb3SMatteo Croce data = (void *)phys_to_virt(phys_addr);
39312f128eb3SMatteo Croce page = virt_to_page(data);
39322f128eb3SMatteo Croce prefetch(page);
39332f128eb3SMatteo Croce
3934db9d7d36SMaxime Chevallier rx_done++;
3935db9d7d36SMaxime Chevallier rx_status = mvpp2_rxdesc_status_get(port, rx_desc);
3936db9d7d36SMaxime Chevallier rx_bytes = mvpp2_rxdesc_size_get(port, rx_desc);
3937db9d7d36SMaxime Chevallier rx_bytes -= MVPP2_MH_SIZE;
3938db9d7d36SMaxime Chevallier dma_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc);
3939db9d7d36SMaxime Chevallier
3940db9d7d36SMaxime Chevallier pool = (rx_status & MVPP2_RXD_BM_POOL_ID_MASK) >>
3941db9d7d36SMaxime Chevallier MVPP2_RXD_BM_POOL_ID_OFFS;
3942db9d7d36SMaxime Chevallier bm_pool = &port->priv->bm_pools[pool];
3943db9d7d36SMaxime Chevallier
3944c2d6fe61SMatteo Croce if (port->priv->percpu_pools) {
3945c2d6fe61SMatteo Croce pp = port->priv->page_pool[pool];
3946c2d6fe61SMatteo Croce dma_dir = page_pool_get_dma_dir(pp);
3947c2d6fe61SMatteo Croce } else {
3948c2d6fe61SMatteo Croce dma_dir = DMA_FROM_DEVICE;
3949c2d6fe61SMatteo Croce }
3950c2d6fe61SMatteo Croce
3951e1921168SMatteo Croce dma_sync_single_for_cpu(dev->dev.parent, dma_addr,
3952e1921168SMatteo Croce rx_bytes + MVPP2_MH_SIZE,
3953c2d6fe61SMatteo Croce dma_dir);
3954e1921168SMatteo Croce
395517f9c1b6SStefan Chulski /* Buffer header not supported */
395617f9c1b6SStefan Chulski if (rx_status & MVPP2_RXD_BUF_HDR)
395717f9c1b6SStefan Chulski goto err_drop_frame;
395817f9c1b6SStefan Chulski
395917f9c1b6SStefan Chulski /* In case of an error, release the requested buffer pointer
396017f9c1b6SStefan Chulski * to the Buffer Manager. This request process is controlled
396117f9c1b6SStefan Chulski * by the hardware, and the information about the buffer is
396217f9c1b6SStefan Chulski * comprised by the RX descriptor.
396317f9c1b6SStefan Chulski */
396417f9c1b6SStefan Chulski if (rx_status & MVPP2_RXD_ERR_SUMMARY)
396517f9c1b6SStefan Chulski goto err_drop_frame;
396617f9c1b6SStefan Chulski
3967c2d6fe61SMatteo Croce /* Prefetch header */
3968d8ea89feSMatteo Croce prefetch(data + MVPP2_MH_SIZE + MVPP2_SKB_HEADROOM);
3969b27db227SMatteo Croce
3970db9d7d36SMaxime Chevallier if (bm_pool->frag_size > PAGE_SIZE)
3971db9d7d36SMaxime Chevallier frag_size = 0;
3972db9d7d36SMaxime Chevallier else
3973db9d7d36SMaxime Chevallier frag_size = bm_pool->frag_size;
3974db9d7d36SMaxime Chevallier
397507dd0a7aSMatteo Croce if (xdp_prog) {
397643b5169dSLorenzo Bianconi struct xdp_rxq_info *xdp_rxq;
397707dd0a7aSMatteo Croce
397807dd0a7aSMatteo Croce if (bm_pool->pkt_size == MVPP2_BM_SHORT_PKT_SIZE)
397943b5169dSLorenzo Bianconi xdp_rxq = &rxq->xdp_rxq_short;
398007dd0a7aSMatteo Croce else
398143b5169dSLorenzo Bianconi xdp_rxq = &rxq->xdp_rxq_long;
398207dd0a7aSMatteo Croce
398343b5169dSLorenzo Bianconi xdp_init_buff(&xdp, PAGE_SIZE, xdp_rxq);
3984be9df4afSLorenzo Bianconi xdp_prepare_buff(&xdp, data,
3985be9df4afSLorenzo Bianconi MVPP2_MH_SIZE + MVPP2_SKB_HEADROOM,
3986be9df4afSLorenzo Bianconi rx_bytes, false);
398707dd0a7aSMatteo Croce
3988376d6892SMatteo Croce ret = mvpp2_run_xdp(port, xdp_prog, &xdp, pp, &ps);
398907dd0a7aSMatteo Croce
399007dd0a7aSMatteo Croce if (ret) {
399107dd0a7aSMatteo Croce xdp_ret |= ret;
399207dd0a7aSMatteo Croce err = mvpp2_rx_refill(port, bm_pool, pp, pool);
399307dd0a7aSMatteo Croce if (err) {
399407dd0a7aSMatteo Croce netdev_err(port->dev, "failed to refill BM pools\n");
399507dd0a7aSMatteo Croce goto err_drop_frame;
399607dd0a7aSMatteo Croce }
399707dd0a7aSMatteo Croce
399839b96315SSven Auhagen ps.rx_packets++;
399939b96315SSven Auhagen ps.rx_bytes += rx_bytes;
400007dd0a7aSMatteo Croce continue;
400107dd0a7aSMatteo Croce }
400207dd0a7aSMatteo Croce }
400307dd0a7aSMatteo Croce
400423a52ca6SAryan Srivastava if (frag_size)
4005db9d7d36SMaxime Chevallier skb = build_skb(data, frag_size);
400623a52ca6SAryan Srivastava else
400723a52ca6SAryan Srivastava skb = slab_build_skb(data);
4008db9d7d36SMaxime Chevallier if (!skb) {
4009db9d7d36SMaxime Chevallier netdev_warn(port->dev, "skb build failed\n");
4010db9d7d36SMaxime Chevallier goto err_drop_frame;
4011db9d7d36SMaxime Chevallier }
4012db9d7d36SMaxime Chevallier
4013ce3497e2SRussell King /* If we have RX hardware timestamping enabled, grab the
4014ce3497e2SRussell King * timestamp from the queue and convert.
4015ce3497e2SRussell King */
4016ce3497e2SRussell King if (mvpp22_rx_hwtstamping(port)) {
4017ce3497e2SRussell King timestamp = le32_to_cpu(rx_desc->pp22.timestamp);
4018ce3497e2SRussell King mvpp22_tai_tstamp(port->priv->tai, timestamp,
4019ce3497e2SRussell King skb_hwtstamps(skb));
4020ce3497e2SRussell King }
4021ce3497e2SRussell King
4022b27db227SMatteo Croce err = mvpp2_rx_refill(port, bm_pool, pp, pool);
4023db9d7d36SMaxime Chevallier if (err) {
4024db9d7d36SMaxime Chevallier netdev_err(port->dev, "failed to refill BM pools\n");
4025d6526926SLorenzo Bianconi dev_kfree_skb_any(skb);
4026db9d7d36SMaxime Chevallier goto err_drop_frame;
4027db9d7d36SMaxime Chevallier }
4028db9d7d36SMaxime Chevallier
4029b27db227SMatteo Croce if (pp)
403057f05bc2SYunsheng Lin skb_mark_for_recycle(skb);
4031b27db227SMatteo Croce else
4032e1921168SMatteo Croce dma_unmap_single_attrs(dev->dev.parent, dma_addr,
4033e1921168SMatteo Croce bm_pool->buf_size, DMA_FROM_DEVICE,
4034e1921168SMatteo Croce DMA_ATTR_SKIP_CPU_SYNC);
4035db9d7d36SMaxime Chevallier
403639b96315SSven Auhagen ps.rx_packets++;
403739b96315SSven Auhagen ps.rx_bytes += rx_bytes;
4038db9d7d36SMaxime Chevallier
403907dd0a7aSMatteo Croce skb_reserve(skb, MVPP2_MH_SIZE + MVPP2_SKB_HEADROOM);
4040db9d7d36SMaxime Chevallier skb_put(skb, rx_bytes);
4041aff0824dSLorenzo Bianconi skb->ip_summed = mvpp2_rx_csum(port, rx_status);
4042d8ea89feSMatteo Croce skb->protocol = eth_type_trans(skb, dev);
4043db9d7d36SMaxime Chevallier
4044db9d7d36SMaxime Chevallier napi_gro_receive(napi, skb);
40457f7183afSMatteo Croce continue;
40467f7183afSMatteo Croce
40477f7183afSMatteo Croce err_drop_frame:
40487f7183afSMatteo Croce dev->stats.rx_errors++;
40497f7183afSMatteo Croce mvpp2_rx_error(port, rx_desc);
40507f7183afSMatteo Croce /* Return the buffer to the pool */
405117f9c1b6SStefan Chulski if (rx_status & MVPP2_RXD_BUF_HDR)
405217f9c1b6SStefan Chulski mvpp2_buff_hdr_pool_put(port, rx_desc, pool, rx_status);
405317f9c1b6SStefan Chulski else
40547f7183afSMatteo Croce mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
4055db9d7d36SMaxime Chevallier }
4056db9d7d36SMaxime Chevallier
4057c2d6fe61SMatteo Croce if (xdp_ret & MVPP2_XDP_REDIR)
4058c2d6fe61SMatteo Croce xdp_do_flush_map();
4059c2d6fe61SMatteo Croce
406039b96315SSven Auhagen if (ps.rx_packets) {
4061db9d7d36SMaxime Chevallier struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);
4062db9d7d36SMaxime Chevallier
4063db9d7d36SMaxime Chevallier u64_stats_update_begin(&stats->syncp);
406439b96315SSven Auhagen stats->rx_packets += ps.rx_packets;
406539b96315SSven Auhagen stats->rx_bytes += ps.rx_bytes;
406639b96315SSven Auhagen /* xdp */
406739b96315SSven Auhagen stats->xdp_redirect += ps.xdp_redirect;
406839b96315SSven Auhagen stats->xdp_pass += ps.xdp_pass;
406939b96315SSven Auhagen stats->xdp_drop += ps.xdp_drop;
4070db9d7d36SMaxime Chevallier u64_stats_update_end(&stats->syncp);
4071db9d7d36SMaxime Chevallier }
4072db9d7d36SMaxime Chevallier
4073db9d7d36SMaxime Chevallier /* Update Rx queue management counters */
4074db9d7d36SMaxime Chevallier wmb();
4075db9d7d36SMaxime Chevallier mvpp2_rxq_status_update(port, rxq->id, rx_done, rx_done);
4076db9d7d36SMaxime Chevallier
4077db9d7d36SMaxime Chevallier return rx_todo;
4078db9d7d36SMaxime Chevallier }
4079db9d7d36SMaxime Chevallier
4080db9d7d36SMaxime Chevallier static inline void
tx_desc_unmap_put(struct mvpp2_port * port,struct mvpp2_tx_queue * txq,struct mvpp2_tx_desc * desc)4081db9d7d36SMaxime Chevallier tx_desc_unmap_put(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
4082db9d7d36SMaxime Chevallier struct mvpp2_tx_desc *desc)
4083db9d7d36SMaxime Chevallier {
4084e531f767SAntoine Tenart unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
4085074c74dfSAntoine Tenart struct mvpp2_txq_pcpu *txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
4086db9d7d36SMaxime Chevallier
4087db9d7d36SMaxime Chevallier dma_addr_t buf_dma_addr =
4088db9d7d36SMaxime Chevallier mvpp2_txdesc_dma_addr_get(port, desc);
4089db9d7d36SMaxime Chevallier size_t buf_sz =
4090db9d7d36SMaxime Chevallier mvpp2_txdesc_size_get(port, desc);
4091db9d7d36SMaxime Chevallier if (!IS_TSO_HEADER(txq_pcpu, buf_dma_addr))
4092db9d7d36SMaxime Chevallier dma_unmap_single(port->dev->dev.parent, buf_dma_addr,
4093db9d7d36SMaxime Chevallier buf_sz, DMA_TO_DEVICE);
4094db9d7d36SMaxime Chevallier mvpp2_txq_desc_put(txq);
4095db9d7d36SMaxime Chevallier }
4096db9d7d36SMaxime Chevallier
mvpp2_txdesc_clear_ptp(struct mvpp2_port * port,struct mvpp2_tx_desc * desc)4097f5015a59SRussell King static void mvpp2_txdesc_clear_ptp(struct mvpp2_port *port,
4098f5015a59SRussell King struct mvpp2_tx_desc *desc)
4099f5015a59SRussell King {
4100f5015a59SRussell King /* We only need to clear the low bits */
4101f704177eSStefan Chulski if (port->priv->hw_version >= MVPP22)
4102f5015a59SRussell King desc->pp22.ptp_descriptor &=
4103f5015a59SRussell King cpu_to_le32(~MVPP22_PTP_DESC_MASK_LOW);
4104f5015a59SRussell King }
4105f5015a59SRussell King
/* Arm hardware TX timestamping for a PTP event packet.
 *
 * If @skb is a recognised PTP frame and TX timestamping is enabled, fill
 * the PTP fields of @tx_desc so the MAC captures a transmit timestamp,
 * and stash an skb reference in the port's hwtstamp queue so the
 * timestamp interrupt can later match it by queue entry id.
 *
 * Returns true when the descriptor was armed (the caller must then leave
 * the PTP fields alone), false when no timestamping applies.
 */
static bool mvpp2_tx_hw_tstamp(struct mvpp2_port *port,
			       struct mvpp2_tx_desc *tx_desc,
			       struct sk_buff *skb)
{
	struct mvpp2_hwtstamp_queue *queue;
	unsigned int mtype, type, i;
	struct ptp_header *hdr;
	u64 ptpdesc;

	/* PPv2.1 has no PTP support; also bail out when disabled by user */
	if (port->priv->hw_version == MVPP21 ||
	    port->tx_hwtstamp_type == HWTSTAMP_TX_OFF)
		return false;

	type = ptp_classify_raw(skb);
	if (!type)
		return false;

	hdr = ptp_parse_header(skb, type);
	if (!hdr)
		return false;

	skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;

	ptpdesc = MVPP22_PTP_MACTIMESTAMPINGEN |
		  MVPP22_PTP_ACTION_CAPTURE;
	queue = &port->tx_hwtstamp_queue[0];

	switch (type & PTP_CLASS_VMASK) {
	case PTP_CLASS_V1:
		ptpdesc |= MVPP22_PTP_PACKETFORMAT(MVPP22_PTP_PKT_FMT_PTPV1);
		break;

	case PTP_CLASS_V2:
		ptpdesc |= MVPP22_PTP_PACKETFORMAT(MVPP22_PTP_PKT_FMT_PTPV2);
		/* Low nibble of tsmt is the PTPv2 messageType; 0 == Sync */
		mtype = hdr->tsmt & 15;
		/* Direct PTP Sync messages to queue 1 */
		if (mtype == 0) {
			ptpdesc |= MVPP22_PTP_TIMESTAMPQUEUESELECT;
			queue = &port->tx_hwtstamp_queue[1];
		}
		break;
	}

	/* Take a reference on the skb and insert into our queue.
	 * The queue is a 32-entry ring; on overrun the oldest pending
	 * skb is dropped (its timestamp will never be delivered).
	 */
	i = queue->next;
	queue->next = (i + 1) & 31;
	if (queue->skb[i])
		dev_kfree_skb_any(queue->skb[i]);
	queue->skb[i] = skb_get(skb);

	ptpdesc |= MVPP22_PTP_TIMESTAMPENTRYID(i);

	/*
	 * 3:0 - PTPAction
	 * 6:4 - PTPPacketFormat
	 * 7 - PTP_CF_WraparoundCheckEn
	 * 9:8 - IngressTimestampSeconds[1:0]
	 * 10 - Reserved
	 * 11 - MACTimestampingEn
	 * 17:12 - PTP_TimestampQueueEntryID[5:0]
	 * 18 - PTPTimestampQueueSelect
	 * 19 - UDPChecksumUpdateEn
	 * 27:20 - TimestampOffset
	 *			PTP, NTPTransmit, OWAMP/TWAMP - L3 to PTP header
	 *			NTPTs, Y.1731 - L3 to timestamp entry
	 * 35:28 - UDP Checksum Offset
	 *
	 * stored in tx descriptor bits 75:64 (11:0) and 191:168 (35:12)
	 */
	tx_desc->pp22.ptp_descriptor &=
		cpu_to_le32(~MVPP22_PTP_DESC_MASK_LOW);
	tx_desc->pp22.ptp_descriptor |=
		cpu_to_le32(ptpdesc & MVPP22_PTP_DESC_MASK_LOW);
	tx_desc->pp22.buf_dma_addr_ptp &= cpu_to_le64(~0xffffff0000000000ULL);
	tx_desc->pp22.buf_dma_addr_ptp |= cpu_to_le64((ptpdesc >> 12) << 40);

	return true;
}
4184f5015a59SRussell King
/* Handle tx fragmentation processing: emit one TX descriptor per skb
 * fragment (the linear head was already handled by the caller).
 *
 * Returns 0 on success, -ENOMEM when a fragment failed to DMA-map, in
 * which case the descriptors taken so far are released again.
 */
static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb,
				 struct mvpp2_tx_queue *aggr_txq,
				 struct mvpp2_tx_queue *txq)
{
	unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
	struct mvpp2_txq_pcpu *txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
	struct mvpp2_tx_desc *tx_desc;
	int i;
	dma_addr_t buf_dma_addr;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		void *addr = skb_frag_address(frag);

		tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
		mvpp2_txdesc_clear_ptp(port, tx_desc);
		mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
		mvpp2_txdesc_size_set(port, tx_desc, skb_frag_size(frag));

		buf_dma_addr = dma_map_single(port->dev->dev.parent, addr,
					      skb_frag_size(frag),
					      DMA_TO_DEVICE);
		if (dma_mapping_error(port->dev->dev.parent, buf_dma_addr)) {
			/* Give back the descriptor we just reserved */
			mvpp2_txq_desc_put(txq);
			goto cleanup;
		}

		mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr);

		if (i == (skb_shinfo(skb)->nr_frags - 1)) {
			/* Last descriptor: attach the skb so it is freed on
			 * TX completion.
			 */
			mvpp2_txdesc_cmd_set(port, tx_desc,
					     MVPP2_TXD_L_DESC);
			mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc, MVPP2_TYPE_SKB);
		} else {
			/* Descriptor in the middle: Not First, Not Last */
			mvpp2_txdesc_cmd_set(port, tx_desc, 0);
			mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc, MVPP2_TYPE_SKB);
		}
	}

	return 0;
cleanup:
	/* Release all descriptors that were used to map fragments of
	 * this packet, as well as the corresponding DMA mappings
	 *
	 * NOTE(review): the descriptors were taken from aggr_txq via
	 * mvpp2_txq_next_desc_get(aggr_txq), yet this walks txq->descs
	 * from index 0 — it looks like the wrong ring entries get
	 * unmapped. Confirm against the aggregated-queue layout.
	 */
	for (i = i - 1; i >= 0; i--) {
		tx_desc = txq->descs + i;
		tx_desc_unmap_put(port, txq, tx_desc);
	}

	return -ENOMEM;
}
4239db9d7d36SMaxime Chevallier
/* Emit the first descriptor of a TSO segment, pointing at the rebuilt
 * header that the caller placed in the per-cpu TSO header area at the
 * current put index. The header area is permanently DMA-mapped, so no
 * mapping is done here.
 */
static inline void mvpp2_tso_put_hdr(struct sk_buff *skb,
				     struct net_device *dev,
				     struct mvpp2_tx_queue *txq,
				     struct mvpp2_tx_queue *aggr_txq,
				     struct mvpp2_txq_pcpu *txq_pcpu,
				     int hdr_sz)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2_tx_desc *tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
	dma_addr_t addr;

	mvpp2_txdesc_clear_ptp(port, tx_desc);
	mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
	mvpp2_txdesc_size_set(port, tx_desc, hdr_sz);

	/* One TSO_HEADER_SIZE slot per put index; must be computed before
	 * mvpp2_txq_inc_put() advances txq_put_index below.
	 */
	addr = txq_pcpu->tso_headers_dma +
	       txq_pcpu->txq_put_index * TSO_HEADER_SIZE;
	mvpp2_txdesc_dma_addr_set(port, tx_desc, addr);

	/* First descriptor of the segment; padding disabled because data
	 * descriptors follow.
	 */
	mvpp2_txdesc_cmd_set(port, tx_desc, mvpp2_skb_tx_csum(port, skb) |
					    MVPP2_TXD_F_DESC |
					    MVPP2_TXD_PADDING_DISABLE);
	mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc, MVPP2_TYPE_SKB);
}
4264db9d7d36SMaxime Chevallier
/* Map and emit one data descriptor for a TSO segment.
 *
 * @left: true while more bytes of the current segment remain after this
 *        chunk; false when this descriptor closes the segment.
 * @last: true for the final chunk of the whole skb — the skb is attached
 *        to that descriptor so it is freed on TX completion.
 *
 * Returns 0 on success, or -ENOMEM if the DMA mapping failed (the
 * reserved descriptor is handed back to @txq).
 */
static inline int mvpp2_tso_put_data(struct sk_buff *skb,
				     struct net_device *dev, struct tso_t *tso,
				     struct mvpp2_tx_queue *txq,
				     struct mvpp2_tx_queue *aggr_txq,
				     struct mvpp2_txq_pcpu *txq_pcpu,
				     int sz, bool left, bool last)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2_tx_desc *tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
	dma_addr_t dma;

	mvpp2_txdesc_clear_ptp(port, tx_desc);
	mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
	mvpp2_txdesc_size_set(port, tx_desc, sz);

	dma = dma_map_single(dev->dev.parent, tso->data, sz, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dev.parent, dma))) {
		mvpp2_txq_desc_put(txq);
		return -ENOMEM;
	}

	mvpp2_txdesc_dma_addr_set(port, tx_desc, dma);

	if (left) {
		/* Middle of the segment: neither First nor Last */
		mvpp2_txdesc_cmd_set(port, tx_desc, 0);
	} else {
		/* Segment boundary */
		mvpp2_txdesc_cmd_set(port, tx_desc, MVPP2_TXD_L_DESC);
		if (last) {
			/* Final chunk of the skb: attach it for completion */
			mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc,
					  MVPP2_TYPE_SKB);
			return 0;
		}
	}

	mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc, MVPP2_TYPE_SKB);
	return 0;
}
4302db9d7d36SMaxime Chevallier
/* Software TSO transmit path: split @skb into MSS-sized segments, each
 * emitted as one header descriptor (rebuilt into the per-cpu TSO header
 * area) followed by data descriptors.
 *
 * Returns the number of descriptors consumed, or 0 when there is not
 * enough descriptor space or a DMA mapping failed (in which case the
 * descriptors taken are released again).
 */
static int mvpp2_tx_tso(struct sk_buff *skb, struct net_device *dev,
			struct mvpp2_tx_queue *txq,
			struct mvpp2_tx_queue *aggr_txq,
			struct mvpp2_txq_pcpu *txq_pcpu)
{
	struct mvpp2_port *port = netdev_priv(dev);
	int hdr_sz, i, len, descs = 0;
	struct tso_t tso;

	/* Check number of available descriptors */
	if (mvpp2_aggr_desc_num_check(port, aggr_txq, tso_count_descs(skb)) ||
	    mvpp2_txq_reserved_desc_num_proc(port, txq, txq_pcpu,
					     tso_count_descs(skb)))
		return 0;

	hdr_sz = tso_start(skb, &tso);

	/* Walk the payload, one gso_size segment per iteration */
	len = skb->len - hdr_sz;
	while (len > 0) {
		int left = min_t(int, skb_shinfo(skb)->gso_size, len);
		/* Per-put-index slot in the permanently mapped header area */
		char *hdr = txq_pcpu->tso_headers +
			    txq_pcpu->txq_put_index * TSO_HEADER_SIZE;

		len -= left;
		descs++;

		/* len == 0 marks the last segment (sets FIN/PSH handling) */
		tso_build_hdr(skb, hdr, &tso, left, len == 0);
		mvpp2_tso_put_hdr(skb, dev, txq, aggr_txq, txq_pcpu, hdr_sz);

		/* Emit data descriptors for this segment's payload */
		while (left > 0) {
			int sz = min_t(int, tso.size, left);
			left -= sz;
			descs++;

			if (mvpp2_tso_put_data(skb, dev, &tso, txq, aggr_txq,
					       txq_pcpu, sz, left, len == 0))
				goto release;
			tso_build_data(skb, &tso, sz);
		}
	}

	return descs;

release:
	/* NOTE(review): as in mvpp2_tx_frag_process(), the descriptors
	 * being released came from aggr_txq but this walks txq->descs
	 * from index 0 — confirm the right ring entries are unmapped.
	 */
	for (i = descs - 1; i >= 0; i--) {
		struct mvpp2_tx_desc *tx_desc = txq->descs + i;
		tx_desc_unmap_put(port, txq, tx_desc);
	}
	return 0;
}
4353db9d7d36SMaxime Chevallier
4354db9d7d36SMaxime Chevallier /* Main tx processing */
mvpp2_tx(struct sk_buff * skb,struct net_device * dev)4355f03508ceSYueHaibing static netdev_tx_t mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
4356db9d7d36SMaxime Chevallier {
4357db9d7d36SMaxime Chevallier struct mvpp2_port *port = netdev_priv(dev);
4358db9d7d36SMaxime Chevallier struct mvpp2_tx_queue *txq, *aggr_txq;
4359db9d7d36SMaxime Chevallier struct mvpp2_txq_pcpu *txq_pcpu;
4360db9d7d36SMaxime Chevallier struct mvpp2_tx_desc *tx_desc;
4361db9d7d36SMaxime Chevallier dma_addr_t buf_dma_addr;
4362e531f767SAntoine Tenart unsigned long flags = 0;
4363074c74dfSAntoine Tenart unsigned int thread;
4364db9d7d36SMaxime Chevallier int frags = 0;
4365db9d7d36SMaxime Chevallier u16 txq_id;
4366db9d7d36SMaxime Chevallier u32 tx_cmd;
4367db9d7d36SMaxime Chevallier
4368e531f767SAntoine Tenart thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
4369074c74dfSAntoine Tenart
4370db9d7d36SMaxime Chevallier txq_id = skb_get_queue_mapping(skb);
4371db9d7d36SMaxime Chevallier txq = port->txqs[txq_id];
4372074c74dfSAntoine Tenart txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
4373074c74dfSAntoine Tenart aggr_txq = &port->priv->aggr_txqs[thread];
4374db9d7d36SMaxime Chevallier
4375e531f767SAntoine Tenart if (test_bit(thread, &port->priv->lock_map))
4376e531f767SAntoine Tenart spin_lock_irqsave(&port->tx_lock[thread], flags);
4377e531f767SAntoine Tenart
4378db9d7d36SMaxime Chevallier if (skb_is_gso(skb)) {
4379db9d7d36SMaxime Chevallier frags = mvpp2_tx_tso(skb, dev, txq, aggr_txq, txq_pcpu);
4380db9d7d36SMaxime Chevallier goto out;
4381db9d7d36SMaxime Chevallier }
4382db9d7d36SMaxime Chevallier frags = skb_shinfo(skb)->nr_frags + 1;
4383db9d7d36SMaxime Chevallier
4384db9d7d36SMaxime Chevallier /* Check number of available descriptors */
4385e531f767SAntoine Tenart if (mvpp2_aggr_desc_num_check(port, aggr_txq, frags) ||
4386074c74dfSAntoine Tenart mvpp2_txq_reserved_desc_num_proc(port, txq, txq_pcpu, frags)) {
4387db9d7d36SMaxime Chevallier frags = 0;
4388db9d7d36SMaxime Chevallier goto out;
4389db9d7d36SMaxime Chevallier }
4390db9d7d36SMaxime Chevallier
4391db9d7d36SMaxime Chevallier /* Get a descriptor for the first part of the packet */
4392db9d7d36SMaxime Chevallier tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
4393f5015a59SRussell King if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) ||
4394f5015a59SRussell King !mvpp2_tx_hw_tstamp(port, tx_desc, skb))
4395f5015a59SRussell King mvpp2_txdesc_clear_ptp(port, tx_desc);
4396db9d7d36SMaxime Chevallier mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
4397db9d7d36SMaxime Chevallier mvpp2_txdesc_size_set(port, tx_desc, skb_headlen(skb));
4398db9d7d36SMaxime Chevallier
4399db9d7d36SMaxime Chevallier buf_dma_addr = dma_map_single(dev->dev.parent, skb->data,
4400db9d7d36SMaxime Chevallier skb_headlen(skb), DMA_TO_DEVICE);
4401db9d7d36SMaxime Chevallier if (unlikely(dma_mapping_error(dev->dev.parent, buf_dma_addr))) {
4402db9d7d36SMaxime Chevallier mvpp2_txq_desc_put(txq);
4403db9d7d36SMaxime Chevallier frags = 0;
4404db9d7d36SMaxime Chevallier goto out;
4405db9d7d36SMaxime Chevallier }
4406db9d7d36SMaxime Chevallier
4407db9d7d36SMaxime Chevallier mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr);
4408db9d7d36SMaxime Chevallier
4409db9d7d36SMaxime Chevallier tx_cmd = mvpp2_skb_tx_csum(port, skb);
4410db9d7d36SMaxime Chevallier
4411db9d7d36SMaxime Chevallier if (frags == 1) {
4412db9d7d36SMaxime Chevallier /* First and Last descriptor */
4413db9d7d36SMaxime Chevallier tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC;
4414db9d7d36SMaxime Chevallier mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
4415c2d6fe61SMatteo Croce mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc, MVPP2_TYPE_SKB);
4416db9d7d36SMaxime Chevallier } else {
4417db9d7d36SMaxime Chevallier /* First but not Last */
4418db9d7d36SMaxime Chevallier tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE;
4419db9d7d36SMaxime Chevallier mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
4420c2d6fe61SMatteo Croce mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc, MVPP2_TYPE_SKB);
4421db9d7d36SMaxime Chevallier
4422db9d7d36SMaxime Chevallier /* Continue with other skb fragments */
4423db9d7d36SMaxime Chevallier if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) {
4424db9d7d36SMaxime Chevallier tx_desc_unmap_put(port, txq, tx_desc);
4425db9d7d36SMaxime Chevallier frags = 0;
4426db9d7d36SMaxime Chevallier }
4427db9d7d36SMaxime Chevallier }
4428db9d7d36SMaxime Chevallier
4429db9d7d36SMaxime Chevallier out:
4430db9d7d36SMaxime Chevallier if (frags > 0) {
4431074c74dfSAntoine Tenart struct mvpp2_pcpu_stats *stats = per_cpu_ptr(port->stats, thread);
4432db9d7d36SMaxime Chevallier struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
4433db9d7d36SMaxime Chevallier
4434db9d7d36SMaxime Chevallier txq_pcpu->reserved_num -= frags;
4435db9d7d36SMaxime Chevallier txq_pcpu->count += frags;
4436db9d7d36SMaxime Chevallier aggr_txq->count += frags;
4437db9d7d36SMaxime Chevallier
4438db9d7d36SMaxime Chevallier /* Enable transmit */
4439db9d7d36SMaxime Chevallier wmb();
4440db9d7d36SMaxime Chevallier mvpp2_aggr_txq_pend_desc_add(port, frags);
4441db9d7d36SMaxime Chevallier
4442db9d7d36SMaxime Chevallier if (txq_pcpu->count >= txq_pcpu->stop_threshold)
4443db9d7d36SMaxime Chevallier netif_tx_stop_queue(nq);
4444db9d7d36SMaxime Chevallier
4445db9d7d36SMaxime Chevallier u64_stats_update_begin(&stats->syncp);
4446db9d7d36SMaxime Chevallier stats->tx_packets++;
4447db9d7d36SMaxime Chevallier stats->tx_bytes += skb->len;
4448db9d7d36SMaxime Chevallier u64_stats_update_end(&stats->syncp);
4449db9d7d36SMaxime Chevallier } else {
4450db9d7d36SMaxime Chevallier dev->stats.tx_dropped++;
4451db9d7d36SMaxime Chevallier dev_kfree_skb_any(skb);
4452db9d7d36SMaxime Chevallier }
4453db9d7d36SMaxime Chevallier
4454db9d7d36SMaxime Chevallier /* Finalize TX processing */
4455db9d7d36SMaxime Chevallier if (!port->has_tx_irqs && txq_pcpu->count >= txq->done_pkts_coal)
4456db9d7d36SMaxime Chevallier mvpp2_txq_done(port, txq, txq_pcpu);
4457db9d7d36SMaxime Chevallier
4458db9d7d36SMaxime Chevallier /* Set the timer in case not all frags were processed */
4459db9d7d36SMaxime Chevallier if (!port->has_tx_irqs && txq_pcpu->count <= frags &&
4460db9d7d36SMaxime Chevallier txq_pcpu->count > 0) {
4461074c74dfSAntoine Tenart struct mvpp2_port_pcpu *port_pcpu = per_cpu_ptr(port->pcpu, thread);
4462db9d7d36SMaxime Chevallier
4463ecb9f80dSThomas Gleixner if (!port_pcpu->timer_scheduled) {
4464ecb9f80dSThomas Gleixner port_pcpu->timer_scheduled = true;
4465ecb9f80dSThomas Gleixner hrtimer_start(&port_pcpu->tx_done_timer,
4466ecb9f80dSThomas Gleixner MVPP2_TXDONE_HRTIMER_PERIOD_NS,
4467ecb9f80dSThomas Gleixner HRTIMER_MODE_REL_PINNED_SOFT);
4468ecb9f80dSThomas Gleixner }
4469db9d7d36SMaxime Chevallier }
4470db9d7d36SMaxime Chevallier
4471e531f767SAntoine Tenart if (test_bit(thread, &port->priv->lock_map))
4472e531f767SAntoine Tenart spin_unlock_irqrestore(&port->tx_lock[thread], flags);
4473e531f767SAntoine Tenart
4474db9d7d36SMaxime Chevallier return NETDEV_TX_OK;
4475db9d7d36SMaxime Chevallier }
4476db9d7d36SMaxime Chevallier
mvpp2_cause_error(struct net_device * dev,int cause)4477db9d7d36SMaxime Chevallier static inline void mvpp2_cause_error(struct net_device *dev, int cause)
4478db9d7d36SMaxime Chevallier {
4479db9d7d36SMaxime Chevallier if (cause & MVPP2_CAUSE_FCS_ERR_MASK)
4480db9d7d36SMaxime Chevallier netdev_err(dev, "FCS error\n");
4481db9d7d36SMaxime Chevallier if (cause & MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK)
4482db9d7d36SMaxime Chevallier netdev_err(dev, "rx fifo overrun error\n");
4483db9d7d36SMaxime Chevallier if (cause & MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK)
4484db9d7d36SMaxime Chevallier netdev_err(dev, "tx fifo underrun error\n");
4485db9d7d36SMaxime Chevallier }
4486db9d7d36SMaxime Chevallier
/* NAPI poll callback: handle misc errors, Tx completions (when Tx IRQs are
 * available) and then Rx packets for this queue vector, up to @budget.
 * Returns the number of Rx packets processed; re-enables the vector's
 * interrupts only when the budget was not exhausted.
 */
static int mvpp2_poll(struct napi_struct *napi, int budget)
{
	u32 cause_rx_tx, cause_rx, cause_tx, cause_misc;
	int rx_done = 0;
	struct mvpp2_port *port = netdev_priv(napi->dev);
	struct mvpp2_queue_vector *qv;
	unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());

	qv = container_of(napi, struct mvpp2_queue_vector, napi);

	/* Rx/Tx cause register
	 *
	 * Bits 0-15: each bit indicates received packets on the Rx queue
	 * (bit 0 is for Rx queue 0).
	 *
	 * Bits 16-23: each bit indicates transmitted packets on the Tx queue
	 * (bit 16 is for Tx queue 0).
	 *
	 * Each CPU has its own Rx/Tx cause register
	 */
	cause_rx_tx = mvpp2_thread_read_relaxed(port->priv, qv->sw_thread_id,
						MVPP2_ISR_RX_TX_CAUSE_REG(port->id));

	cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK;
	if (cause_misc) {
		/* Log the error condition(s), then acknowledge them */
		mvpp2_cause_error(port->dev, cause_misc);

		/* Clear the cause register */
		mvpp2_write(port->priv, MVPP2_ISR_MISC_CAUSE_REG, 0);
		mvpp2_thread_write(port->priv, thread,
				   MVPP2_ISR_RX_TX_CAUSE_REG(port->id),
				   cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK);
	}

	/* Process Tx completions only when the port has dedicated Tx IRQs;
	 * otherwise Tx is reaped from the xmit path / hrtimer.
	 */
	if (port->has_tx_irqs) {
		cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
		if (cause_tx) {
			cause_tx >>= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET;
			mvpp2_tx_done(port, cause_tx, qv->sw_thread_id);
		}
	}

	/* Process RX packets */
	cause_rx = cause_rx_tx &
		   MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(port->priv->hw_version);
	/* Shift into this vector's rxq numbering and merge any Rx work left
	 * over from a previous budget-limited poll.
	 */
	cause_rx <<= qv->first_rxq;
	cause_rx |= qv->pending_cause_rx;
	while (cause_rx && budget > 0) {
		int count;
		struct mvpp2_rx_queue *rxq;

		rxq = mvpp2_get_rx_queue(port, cause_rx);
		if (!rxq)
			break;

		count = mvpp2_rx(port, napi, budget, rxq);
		rx_done += count;
		budget -= count;
		if (budget > 0) {
			/* Clear the bit associated to this Rx queue
			 * so that next iteration will continue from
			 * the next Rx queue.
			 */
			cause_rx &= ~(1 << rxq->logic_rxq);
		}
	}

	if (budget > 0) {
		/* All pending work done: complete NAPI and unmask the
		 * vector's interrupts.
		 */
		cause_rx = 0;
		napi_complete_done(napi, rx_done);

		mvpp2_qvec_interrupt_enable(qv);
	}
	/* Remember unprocessed Rx queues for the next poll iteration */
	qv->pending_cause_rx = cause_rx;
	return rx_done;
}
4563db9d7d36SMaxime Chevallier
/* Reconfigure the PPv2.2+ datapath (comphy, GoP, MAC mode select) for the
 * given PHY interface mode. The MACs and PCS blocks are first held in
 * reset; only the PCS reset is deasserted here — the MAC reset is released
 * elsewhere.
 */
static void mvpp22_mode_reconfigure(struct mvpp2_port *port,
				    phy_interface_t interface)
{
	u32 ctrl3;

	/* Set the GMAC & XLG MAC in reset */
	mvpp2_mac_reset_assert(port);

	/* Set the MPCS and XPCS in reset */
	mvpp22_pcs_reset_assert(port);

	/* comphy reconfiguration */
	mvpp22_comphy_init(port, interface);

	/* gop reconfiguration */
	mvpp22_gop_init(port, interface);

	mvpp22_pcs_reset_deassert(port, interface);

	/* Select which MAC (10G XLG vs GMAC) drives the port, when the
	 * port hardware supports the XLG MAC at all.
	 */
	if (mvpp2_port_supports_xlg(port)) {
		ctrl3 = readl(port->base + MVPP22_XLG_CTRL3_REG);
		ctrl3 &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK;

		if (mvpp2_is_xlg(interface))
			ctrl3 |= MVPP22_XLG_CTRL3_MACMODESELECT_10G;
		else
			ctrl3 |= MVPP22_XLG_CTRL3_MACMODESELECT_GMAC;

		writel(ctrl3, port->base + MVPP22_XLG_CTRL3_REG);
	}

	/* Program the max Rx size on whichever MAC was selected above */
	if (mvpp2_port_supports_xlg(port) && mvpp2_is_xlg(interface))
		mvpp2_xlg_max_rx_size_set(port);
	else
		mvpp2_gmac_max_rx_size_set(port);
}
4600db9d7d36SMaxime Chevallier
/* Set hw internals when starting port */
static void mvpp2_start_dev(struct mvpp2_port *port)
{
	int i;

	mvpp2_txp_max_tx_size_set(port);

	for (i = 0; i < port->nqvecs; i++)
		napi_enable(&port->qvecs[i].napi);

	/* Enable interrupts on all threads */
	mvpp2_interrupts_enable(port);

	/* PPv2.2 and later need the comphy/GoP/MAC path reconfigured */
	if (port->priv->hw_version >= MVPP22)
		mvpp22_mode_reconfigure(port, port->phy_interface);

	if (port->phylink) {
		phylink_start(port->phylink);
	} else {
		/* No phylink instance: start the MAC via the ACPI path */
		mvpp2_acpi_start(port);
	}

	netif_tx_start_all_queues(port->dev);

	/* Clear the "port stopped" flag (bit 0 of port->state) */
	clear_bit(0, &port->state);
}
4627db9d7d36SMaxime Chevallier
/* Set hw internals when stopping port */
static void mvpp2_stop_dev(struct mvpp2_port *port)
{
	int i;

	/* Mark the port as stopped (bit 0 of port->state) before tearing
	 * anything down.
	 */
	set_bit(0, &port->state);

	/* Disable interrupts on all threads */
	mvpp2_interrupts_disable(port);

	for (i = 0; i < port->nqvecs; i++)
		napi_disable(&port->qvecs[i].napi);

	if (port->phylink)
		phylink_stop(port->phylink);
	/* Power down the serdes lane; phy_power_off() tolerates a NULL phy */
	phy_power_off(port->comphy);
}
4645db9d7d36SMaxime Chevallier
mvpp2_check_ringparam_valid(struct net_device * dev,struct ethtool_ringparam * ring)4646db9d7d36SMaxime Chevallier static int mvpp2_check_ringparam_valid(struct net_device *dev,
4647db9d7d36SMaxime Chevallier struct ethtool_ringparam *ring)
4648db9d7d36SMaxime Chevallier {
4649db9d7d36SMaxime Chevallier u16 new_rx_pending = ring->rx_pending;
4650db9d7d36SMaxime Chevallier u16 new_tx_pending = ring->tx_pending;
4651db9d7d36SMaxime Chevallier
4652db9d7d36SMaxime Chevallier if (ring->rx_pending == 0 || ring->tx_pending == 0)
4653db9d7d36SMaxime Chevallier return -EINVAL;
4654db9d7d36SMaxime Chevallier
4655db9d7d36SMaxime Chevallier if (ring->rx_pending > MVPP2_MAX_RXD_MAX)
4656db9d7d36SMaxime Chevallier new_rx_pending = MVPP2_MAX_RXD_MAX;
46573bd17fdcSStefan Chulski else if (ring->rx_pending < MSS_THRESHOLD_START)
46583bd17fdcSStefan Chulski new_rx_pending = MSS_THRESHOLD_START;
4659db9d7d36SMaxime Chevallier else if (!IS_ALIGNED(ring->rx_pending, 16))
4660db9d7d36SMaxime Chevallier new_rx_pending = ALIGN(ring->rx_pending, 16);
4661db9d7d36SMaxime Chevallier
4662db9d7d36SMaxime Chevallier if (ring->tx_pending > MVPP2_MAX_TXD_MAX)
4663db9d7d36SMaxime Chevallier new_tx_pending = MVPP2_MAX_TXD_MAX;
4664db9d7d36SMaxime Chevallier else if (!IS_ALIGNED(ring->tx_pending, 32))
4665db9d7d36SMaxime Chevallier new_tx_pending = ALIGN(ring->tx_pending, 32);
4666db9d7d36SMaxime Chevallier
4667db9d7d36SMaxime Chevallier /* The Tx ring size cannot be smaller than the minimum number of
4668db9d7d36SMaxime Chevallier * descriptors needed for TSO.
4669db9d7d36SMaxime Chevallier */
4670db9d7d36SMaxime Chevallier if (new_tx_pending < MVPP2_MAX_SKB_DESCS)
4671db9d7d36SMaxime Chevallier new_tx_pending = ALIGN(MVPP2_MAX_SKB_DESCS, 32);
4672db9d7d36SMaxime Chevallier
4673db9d7d36SMaxime Chevallier if (ring->rx_pending != new_rx_pending) {
4674db9d7d36SMaxime Chevallier netdev_info(dev, "illegal Rx ring size value %d, round to %d\n",
4675db9d7d36SMaxime Chevallier ring->rx_pending, new_rx_pending);
4676db9d7d36SMaxime Chevallier ring->rx_pending = new_rx_pending;
4677db9d7d36SMaxime Chevallier }
4678db9d7d36SMaxime Chevallier
4679db9d7d36SMaxime Chevallier if (ring->tx_pending != new_tx_pending) {
4680db9d7d36SMaxime Chevallier netdev_info(dev, "illegal Tx ring size value %d, round to %d\n",
4681db9d7d36SMaxime Chevallier ring->tx_pending, new_tx_pending);
4682db9d7d36SMaxime Chevallier ring->tx_pending = new_tx_pending;
4683db9d7d36SMaxime Chevallier }
4684db9d7d36SMaxime Chevallier
4685db9d7d36SMaxime Chevallier return 0;
4686db9d7d36SMaxime Chevallier }
4687db9d7d36SMaxime Chevallier
mvpp21_get_mac_address(struct mvpp2_port * port,unsigned char * addr)4688db9d7d36SMaxime Chevallier static void mvpp21_get_mac_address(struct mvpp2_port *port, unsigned char *addr)
4689db9d7d36SMaxime Chevallier {
4690db9d7d36SMaxime Chevallier u32 mac_addr_l, mac_addr_m, mac_addr_h;
4691db9d7d36SMaxime Chevallier
4692db9d7d36SMaxime Chevallier mac_addr_l = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
4693db9d7d36SMaxime Chevallier mac_addr_m = readl(port->priv->lms_base + MVPP2_SRC_ADDR_MIDDLE);
4694db9d7d36SMaxime Chevallier mac_addr_h = readl(port->priv->lms_base + MVPP2_SRC_ADDR_HIGH);
4695db9d7d36SMaxime Chevallier addr[0] = (mac_addr_h >> 24) & 0xFF;
4696db9d7d36SMaxime Chevallier addr[1] = (mac_addr_h >> 16) & 0xFF;
4697db9d7d36SMaxime Chevallier addr[2] = (mac_addr_h >> 8) & 0xFF;
4698db9d7d36SMaxime Chevallier addr[3] = mac_addr_h & 0xFF;
4699db9d7d36SMaxime Chevallier addr[4] = mac_addr_m & 0xFF;
4700db9d7d36SMaxime Chevallier addr[5] = (mac_addr_l >> MVPP2_GMAC_SA_LOW_OFFS) & 0xFF;
4701db9d7d36SMaxime Chevallier }
4702db9d7d36SMaxime Chevallier
mvpp2_irqs_init(struct mvpp2_port * port)4703db9d7d36SMaxime Chevallier static int mvpp2_irqs_init(struct mvpp2_port *port)
4704db9d7d36SMaxime Chevallier {
4705db9d7d36SMaxime Chevallier int err, i;
4706db9d7d36SMaxime Chevallier
4707db9d7d36SMaxime Chevallier for (i = 0; i < port->nqvecs; i++) {
4708db9d7d36SMaxime Chevallier struct mvpp2_queue_vector *qv = port->qvecs + i;
4709db9d7d36SMaxime Chevallier
4710a6b3a3faSMarc Zyngier if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE) {
4711a6b3a3faSMarc Zyngier qv->mask = kzalloc(cpumask_size(), GFP_KERNEL);
4712a6b3a3faSMarc Zyngier if (!qv->mask) {
4713a6b3a3faSMarc Zyngier err = -ENOMEM;
4714a6b3a3faSMarc Zyngier goto err;
4715a6b3a3faSMarc Zyngier }
4716a6b3a3faSMarc Zyngier
4717db9d7d36SMaxime Chevallier irq_set_status_flags(qv->irq, IRQ_NO_BALANCING);
4718a6b3a3faSMarc Zyngier }
4719db9d7d36SMaxime Chevallier
4720db9d7d36SMaxime Chevallier err = request_irq(qv->irq, mvpp2_isr, 0, port->dev->name, qv);
4721db9d7d36SMaxime Chevallier if (err)
4722db9d7d36SMaxime Chevallier goto err;
4723db9d7d36SMaxime Chevallier
4724e531f767SAntoine Tenart if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE) {
4725e531f767SAntoine Tenart unsigned int cpu;
4726e531f767SAntoine Tenart
4727e531f767SAntoine Tenart for_each_present_cpu(cpu) {
4728e531f767SAntoine Tenart if (mvpp2_cpu_to_thread(port->priv, cpu) ==
4729e531f767SAntoine Tenart qv->sw_thread_id)
4730a6b3a3faSMarc Zyngier cpumask_set_cpu(cpu, qv->mask);
4731e531f767SAntoine Tenart }
4732e531f767SAntoine Tenart
4733a6b3a3faSMarc Zyngier irq_set_affinity_hint(qv->irq, qv->mask);
4734e531f767SAntoine Tenart }
4735db9d7d36SMaxime Chevallier }
4736db9d7d36SMaxime Chevallier
4737db9d7d36SMaxime Chevallier return 0;
4738db9d7d36SMaxime Chevallier err:
4739db9d7d36SMaxime Chevallier for (i = 0; i < port->nqvecs; i++) {
4740db9d7d36SMaxime Chevallier struct mvpp2_queue_vector *qv = port->qvecs + i;
4741db9d7d36SMaxime Chevallier
4742db9d7d36SMaxime Chevallier irq_set_affinity_hint(qv->irq, NULL);
4743a6b3a3faSMarc Zyngier kfree(qv->mask);
4744a6b3a3faSMarc Zyngier qv->mask = NULL;
4745db9d7d36SMaxime Chevallier free_irq(qv->irq, qv);
4746db9d7d36SMaxime Chevallier }
4747db9d7d36SMaxime Chevallier
4748db9d7d36SMaxime Chevallier return err;
4749db9d7d36SMaxime Chevallier }
4750db9d7d36SMaxime Chevallier
mvpp2_irqs_deinit(struct mvpp2_port * port)4751db9d7d36SMaxime Chevallier static void mvpp2_irqs_deinit(struct mvpp2_port *port)
4752db9d7d36SMaxime Chevallier {
4753db9d7d36SMaxime Chevallier int i;
4754db9d7d36SMaxime Chevallier
4755db9d7d36SMaxime Chevallier for (i = 0; i < port->nqvecs; i++) {
4756db9d7d36SMaxime Chevallier struct mvpp2_queue_vector *qv = port->qvecs + i;
4757db9d7d36SMaxime Chevallier
4758db9d7d36SMaxime Chevallier irq_set_affinity_hint(qv->irq, NULL);
4759a6b3a3faSMarc Zyngier kfree(qv->mask);
4760a6b3a3faSMarc Zyngier qv->mask = NULL;
4761db9d7d36SMaxime Chevallier irq_clear_status_flags(qv->irq, IRQ_NO_BALANCING);
4762db9d7d36SMaxime Chevallier free_irq(qv->irq, qv);
4763db9d7d36SMaxime Chevallier }
4764db9d7d36SMaxime Chevallier }
4765db9d7d36SMaxime Chevallier
mvpp22_rss_is_supported(struct mvpp2_port * port)47660a8a8000SStefan Chulski static bool mvpp22_rss_is_supported(struct mvpp2_port *port)
47674c4a5686SYan Markman {
47680a8a8000SStefan Chulski return (queue_mode == MVPP2_QDIST_MULTI_MODE) &&
47690a8a8000SStefan Chulski !(port->flags & MVPP2_F_LOOPBACK);
47704c4a5686SYan Markman }
47714c4a5686SYan Markman
/* ndo_open: program the parser, allocate Rx/Tx queues and IRQs, connect
 * phylink (or the link IRQ on the ACPI path), then start the port and the
 * periodic statistics work. All failures unwind via the goto chain below.
 */
static int mvpp2_open(struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2 *priv = port->priv;
	unsigned char mac_bcast[ETH_ALEN] = {
			0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	bool valid = false;
	int err;

	/* Accept broadcast and the port's own MAC in the parser, set the
	 * tag mode and install the default flow entry.
	 */
	err = mvpp2_prs_mac_da_accept(port, mac_bcast, true);
	if (err) {
		netdev_err(dev, "mvpp2_prs_mac_da_accept BC failed\n");
		return err;
	}
	err = mvpp2_prs_mac_da_accept(port, dev->dev_addr, true);
	if (err) {
		netdev_err(dev, "mvpp2_prs_mac_da_accept own addr failed\n");
		return err;
	}
	err = mvpp2_prs_tag_mode_set(port->priv, port->id, MVPP2_TAG_TYPE_MH);
	if (err) {
		netdev_err(dev, "mvpp2_prs_tag_mode_set failed\n");
		return err;
	}
	err = mvpp2_prs_def_flow(port);
	if (err) {
		netdev_err(dev, "mvpp2_prs_def_flow failed\n");
		return err;
	}

	/* Allocate the Rx/Tx queues */
	err = mvpp2_setup_rxqs(port);
	if (err) {
		netdev_err(port->dev, "cannot allocate Rx queues\n");
		return err;
	}

	err = mvpp2_setup_txqs(port);
	if (err) {
		netdev_err(port->dev, "cannot allocate Tx queues\n");
		goto err_cleanup_rxqs;
	}

	err = mvpp2_irqs_init(port);
	if (err) {
		netdev_err(port->dev, "cannot init IRQs\n");
		goto err_cleanup_txqs;
	}

	/* Attach the PHY through phylink when a phylink instance exists */
	if (port->phylink) {
		err = phylink_fwnode_phy_connect(port->phylink, port->fwnode, 0);
		if (err) {
			netdev_err(port->dev, "could not attach PHY (%d)\n",
				   err);
			goto err_free_irq;
		}

		valid = true;
	}

	/* On PPv2.2+ with a port IRQ, use it for link/PTP events */
	if (priv->hw_version >= MVPP22 && port->port_irq) {
		err = request_irq(port->port_irq, mvpp2_port_isr, 0,
				  dev->name, port);
		if (err) {
			netdev_err(port->dev,
				   "cannot request port link/ptp IRQ %d\n",
				   port->port_irq);
			goto err_free_irq;
		}

		mvpp22_gop_setup_irq(port);

		/* In default link is down */
		netif_carrier_off(port->dev);

		valid = true;
	} else {
		port->port_irq = 0;
	}

	/* Need at least one way to learn about the link: phylink or IRQ */
	if (!valid) {
		netdev_err(port->dev,
			   "invalid configuration: no dt or link IRQ");
		err = -ENOENT;
		goto err_free_irq;
	}

	/* Unmask interrupts on all CPUs */
	on_each_cpu(mvpp2_interrupts_unmask, port, 1);
	mvpp2_shared_interrupt_mask_unmask(port, false);

	mvpp2_start_dev(port);

	/* Start hardware statistics gathering */
	queue_delayed_work(priv->stats_queue, &port->stats_work,
			   MVPP2_MIB_COUNTERS_STATS_DELAY);

	return 0;

err_free_irq:
	mvpp2_irqs_deinit(port);
err_cleanup_txqs:
	mvpp2_cleanup_txqs(port);
err_cleanup_rxqs:
	mvpp2_cleanup_rxqs(port);
	return err;
}
4879db9d7d36SMaxime Chevallier
/* ndo_stop: reverse of mvpp2_open() — stop the datapath, mask and release
 * interrupts, cancel the tx-done timers (when no Tx IRQs), free the
 * queues, and leave the MAC/PCS in reset.
 */
static int mvpp2_stop(struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2_port_pcpu *port_pcpu;
	unsigned int thread;

	mvpp2_stop_dev(port);

	/* Mask interrupts on all threads */
	on_each_cpu(mvpp2_interrupts_mask, port, 1);
	mvpp2_shared_interrupt_mask_unmask(port, true);

	if (port->phylink)
		phylink_disconnect_phy(port->phylink);
	/* Release the link/PTP IRQ if one was requested in mvpp2_open() */
	if (port->port_irq)
		free_irq(port->port_irq, port);

	mvpp2_irqs_deinit(port);
	if (!port->has_tx_irqs) {
		/* Without Tx IRQs each thread runs a tx-done hrtimer;
		 * cancel them all before freeing the queues.
		 */
		for (thread = 0; thread < port->priv->nthreads; thread++) {
			port_pcpu = per_cpu_ptr(port->pcpu, thread);

			hrtimer_cancel(&port_pcpu->tx_done_timer);
			port_pcpu->timer_scheduled = false;
		}
	}
	mvpp2_cleanup_rxqs(port);
	mvpp2_cleanup_txqs(port);

	cancel_delayed_work_sync(&port->stats_work);

	/* Park the MAC and PCS in reset until the next open */
	mvpp2_mac_reset_assert(port);
	mvpp22_pcs_reset_assert(port);

	return 0;
}
4916db9d7d36SMaxime Chevallier
mvpp2_prs_mac_da_accept_list(struct mvpp2_port * port,struct netdev_hw_addr_list * list)4917db9d7d36SMaxime Chevallier static int mvpp2_prs_mac_da_accept_list(struct mvpp2_port *port,
4918db9d7d36SMaxime Chevallier struct netdev_hw_addr_list *list)
4919db9d7d36SMaxime Chevallier {
4920db9d7d36SMaxime Chevallier struct netdev_hw_addr *ha;
4921db9d7d36SMaxime Chevallier int ret;
4922db9d7d36SMaxime Chevallier
4923db9d7d36SMaxime Chevallier netdev_hw_addr_list_for_each(ha, list) {
4924db9d7d36SMaxime Chevallier ret = mvpp2_prs_mac_da_accept(port, ha->addr, true);
4925db9d7d36SMaxime Chevallier if (ret)
4926db9d7d36SMaxime Chevallier return ret;
4927db9d7d36SMaxime Chevallier }
4928db9d7d36SMaxime Chevallier
4929db9d7d36SMaxime Chevallier return 0;
4930db9d7d36SMaxime Chevallier }
4931db9d7d36SMaxime Chevallier
/* Toggle L2 unicast/multicast promiscuous mode in the parser. VID
 * filtering stays enabled only when leaving promiscuous mode while the
 * netdev has VLAN filtering turned on.
 */
static void mvpp2_set_rx_promisc(struct mvpp2_port *port, bool enable)
{
	bool vlan_filter = port->dev->features & NETIF_F_HW_VLAN_CTAG_FILTER;

	if (enable || !vlan_filter)
		mvpp2_prs_vid_disable_filtering(port);
	else
		mvpp2_prs_vid_enable_filtering(port);

	mvpp2_prs_mac_promisc_set(port->priv, port->id,
				  MVPP2_PRS_L2_UNI_CAST, enable);

	mvpp2_prs_mac_promisc_set(port->priv, port->id,
				  MVPP2_PRS_L2_MULTI_CAST, enable);
}
4945db9d7d36SMaxime Chevallier
/* ndo_set_rx_mode callback: reprogram the parser's L2 filtering to match
 * the netdev's promiscuity flags and UC/MC address lists.
 */
static void mvpp2_set_rx_mode(struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);

	/* Clear the whole UC and MC list */
	mvpp2_prs_mac_del_all(port);

	if (dev->flags & IFF_PROMISC) {
		mvpp2_set_rx_promisc(port, true);
		return;
	}

	mvpp2_set_rx_promisc(port, false);

	/* Fall back to unicast promiscuous mode when the UC list does not
	 * fit in the parser's per-port filter entries (or re-adding fails).
	 */
	if (netdev_uc_count(dev) > MVPP2_PRS_MAC_UC_FILT_MAX ||
	    mvpp2_prs_mac_da_accept_list(port, &dev->uc))
		mvpp2_prs_mac_promisc_set(port->priv, port->id,
					  MVPP2_PRS_L2_UNI_CAST, true);

	if (dev->flags & IFF_ALLMULTI) {
		mvpp2_prs_mac_promisc_set(port->priv, port->id,
					  MVPP2_PRS_L2_MULTI_CAST, true);
		return;
	}

	/* Likewise fall back to multicast promiscuous mode when the MC
	 * list overflows the filter.
	 */
	if (netdev_mc_count(dev) > MVPP2_PRS_MAC_MC_FILT_MAX ||
	    mvpp2_prs_mac_da_accept_list(port, &dev->mc))
		mvpp2_prs_mac_promisc_set(port->priv, port->id,
					  MVPP2_PRS_L2_MULTI_CAST, true);
}
4976db9d7d36SMaxime Chevallier
mvpp2_set_mac_address(struct net_device * dev,void * p)4977db9d7d36SMaxime Chevallier static int mvpp2_set_mac_address(struct net_device *dev, void *p)
4978db9d7d36SMaxime Chevallier {
4979db9d7d36SMaxime Chevallier const struct sockaddr *addr = p;
4980db9d7d36SMaxime Chevallier int err;
4981db9d7d36SMaxime Chevallier
4982db9d7d36SMaxime Chevallier if (!is_valid_ether_addr(addr->sa_data))
4983db9d7d36SMaxime Chevallier return -EADDRNOTAVAIL;
4984db9d7d36SMaxime Chevallier
4985db9d7d36SMaxime Chevallier err = mvpp2_prs_update_mac_da(dev, addr->sa_data);
4986db9d7d36SMaxime Chevallier if (err) {
4987db9d7d36SMaxime Chevallier /* Reconfigure parser accept the original MAC address */
4988db9d7d36SMaxime Chevallier mvpp2_prs_update_mac_da(dev, dev->dev_addr);
4989db9d7d36SMaxime Chevallier netdev_err(dev, "failed to change MAC address\n");
4990db9d7d36SMaxime Chevallier }
4991db9d7d36SMaxime Chevallier return err;
4992db9d7d36SMaxime Chevallier }
4993db9d7d36SMaxime Chevallier
/* Shut down all the ports, reconfigure the pools as percpu or shared,
 * then bring up again all ports.
 */
static int mvpp2_bm_switch_buffers(struct mvpp2 *priv, bool percpu)
{
	bool change_percpu = (percpu != priv->percpu_pools);
	int numbufs = MVPP2_BM_POOLS_NUM, i;
	struct mvpp2_port *port = NULL;
	bool status[MVPP2_MAX_PORTS];

	/* Remember which ports were up and stop them all so no traffic
	 * flows while the BM pools are torn down and rebuilt.
	 */
	for (i = 0; i < priv->port_count; i++) {
		port = priv->port_list[i];
		status[i] = netif_running(port->dev);
		if (status[i])
			mvpp2_stop(port->dev);
	}

	/* nrxqs is the same for all ports */
	if (priv->percpu_pools)
		numbufs = port->nrxqs * 2;

	/* Disable flow control while the pool layout changes; it is
	 * re-enabled below once the new pools are in place.
	 */
	if (change_percpu)
		mvpp2_bm_pool_update_priv_fc(priv, false);

	/* NOTE(review): 'port' here is the last entry from the loop above
	 * and would be NULL if port_count were 0 - presumably a probed
	 * mvpp2 always has at least one port; confirm before reusing this
	 * pattern elsewhere.
	 */
	for (i = 0; i < numbufs; i++)
		mvpp2_bm_pool_destroy(port->dev->dev.parent, priv, &priv->bm_pools[i]);

	devm_kfree(port->dev->dev.parent, priv->bm_pools);
	priv->percpu_pools = percpu;
	mvpp2_bm_init(port->dev->dev.parent, priv);

	for (i = 0; i < priv->port_count; i++) {
		port = priv->port_list[i];
		/* XDP_TX needs two TX queues per CPU, so only advertise
		 * native XDP on per-CPU pools with enough TX queues.
		 */
		if (percpu && port->ntxqs >= num_possible_cpus() * 2)
			xdp_set_features_flag(port->dev,
					      NETDEV_XDP_ACT_BASIC |
					      NETDEV_XDP_ACT_REDIRECT |
					      NETDEV_XDP_ACT_NDO_XMIT);
		else
			xdp_clear_features_flag(port->dev);

		mvpp2_swf_bm_pool_init(port);
		if (status[i])
			mvpp2_open(port->dev);
	}

	if (change_percpu)
		mvpp2_bm_pool_update_priv_fc(priv, true);

	return 0;
}
50457d04b0b1SMatteo Croce
/* ndo_change_mtu callback: switch the BM pools between per-CPU and shared
 * layouts as needed for the new size, update the buffer manager, and
 * restart the port if it was running.
 */
static int mvpp2_change_mtu(struct net_device *dev, int mtu)
{
	struct mvpp2_port *port = netdev_priv(dev);
	bool running = netif_running(dev);
	struct mvpp2 *priv = port->priv;
	int err;

	/* The RX packet size must stay 8-byte aligned.
	 * NOTE(review): the rounded value assigned to mtu is
	 * ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8), i.e. a packet size including
	 * overhead rather than a plain MTU - confirm this is intended.
	 */
	if (!IS_ALIGNED(MVPP2_RX_PKT_SIZE(mtu), 8)) {
		netdev_info(dev, "illegal MTU value %d, round to %d\n", mtu,
			    ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8));
		mtu = ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8);
	}

	/* XDP requires single-buffer frames; refuse MTUs that do not fit */
	if (port->xdp_prog && mtu > MVPP2_MAX_RX_BUF_SIZE) {
		netdev_err(dev, "Illegal MTU value %d (> %d) for XDP mode\n",
			   mtu, (int)MVPP2_MAX_RX_BUF_SIZE);
		return -EINVAL;
	}

	if (MVPP2_RX_PKT_SIZE(mtu) > MVPP2_BM_LONG_PKT_SIZE) {
		/* Jumbo frames need the larger shared pools */
		if (priv->percpu_pools) {
			netdev_warn(dev, "mtu %d too high, switching to shared buffers", mtu);
			mvpp2_bm_switch_buffers(priv, false);
		}
	} else {
		bool jumbo = false;
		int i;

		/* Check whether any other port still needs jumbo buffers */
		for (i = 0; i < priv->port_count; i++)
			if (priv->port_list[i] != port &&
			    MVPP2_RX_PKT_SIZE(priv->port_list[i]->dev->mtu) >
			    MVPP2_BM_LONG_PKT_SIZE) {
				jumbo = true;
				break;
			}

		/* No port is using jumbo frames */
		if (!jumbo) {
			dev_info(port->dev->dev.parent,
				 "all ports have a low MTU, switching to per-cpu buffers");
			mvpp2_bm_switch_buffers(priv, true);
		}
	}

	if (running)
		mvpp2_stop_dev(port);

	err = mvpp2_bm_update_mtu(dev, mtu);
	if (err) {
		netdev_err(dev, "failed to change MTU\n");
		/* Reconfigure BM to the original MTU */
		mvpp2_bm_update_mtu(dev, dev->mtu);
	} else {
		port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
	}

	if (running) {
		mvpp2_start_dev(port);
		mvpp2_egress_enable(port);
		mvpp2_ingress_enable(port);
	}

	return err;
}
5110db9d7d36SMaxime Chevallier
/* Make sure the page_pool DMA mapping direction matches the current XDP
 * state: as soon as any port runs an XDP program, buffers must be mapped
 * DMA_BIDIRECTIONAL (XDP_TX transmits out of RX buffers); otherwise
 * DMA_FROM_DEVICE suffices.  Switching direction is done by rebuilding
 * the per-CPU pools.
 */
static int mvpp2_check_pagepool_dma(struct mvpp2_port *port)
{
	enum dma_data_direction dma_dir = DMA_FROM_DEVICE;
	struct mvpp2 *priv = port->priv;
	int err = -1, i;

	/* Page pools only exist in per-CPU pool mode */
	if (!priv->percpu_pools)
		return err;

	if (!priv->page_pool[0])
		return -ENOMEM;

	/* Scan every port for an attached XDP program */
	for (i = 0; i < priv->port_count; i++) {
		port = priv->port_list[i];
		if (port->xdp_prog) {
			dma_dir = DMA_BIDIRECTIONAL;
			break;
		}
	}

	/* All pools are equal in terms of DMA direction */
	if (priv->page_pool[0]->p.dma_dir != dma_dir)
		err = mvpp2_bm_switch_buffers(priv, true);

	return err;
}
5137c2d6fe61SMatteo Croce
/* ndo_get_stats64 callback: aggregate per-CPU packet/byte counters into
 * the requested rtnl_link_stats64 structure.
 */
static void
mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct mvpp2_port *port = netdev_priv(dev);
	unsigned int start;
	unsigned int cpu;

	/* Sum the per-CPU counters; the u64_stats fetch/retry loop gives a
	 * consistent per-CPU snapshot even on 32-bit hosts.
	 */
	for_each_possible_cpu(cpu) {
		struct mvpp2_pcpu_stats *cpu_stats;
		u64 rx_packets;
		u64 rx_bytes;
		u64 tx_packets;
		u64 tx_bytes;

		cpu_stats = per_cpu_ptr(port->stats, cpu);
		do {
			start = u64_stats_fetch_begin(&cpu_stats->syncp);
			rx_packets = cpu_stats->rx_packets;
			rx_bytes = cpu_stats->rx_bytes;
			tx_packets = cpu_stats->tx_packets;
			tx_bytes = cpu_stats->tx_bytes;
		} while (u64_stats_fetch_retry(&cpu_stats->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;
	}

	/* Error/drop counters are only kept in the shared netdev stats */
	stats->rx_errors = dev->stats.rx_errors;
	stats->rx_dropped = dev->stats.rx_dropped;
	stats->tx_dropped = dev->stats.tx_dropped;
}
5171db9d7d36SMaxime Chevallier
/* SIOCSHWTSTAMP handler: program the port's Timestamping Unit (TSU)
 * according to the requested TX type and RX filter, and start or stop
 * the TAI clock to match.
 */
static int mvpp2_set_ts_config(struct mvpp2_port *port, struct ifreq *ifr)
{
	struct hwtstamp_config config;
	void __iomem *ptp;
	u32 gcr, int_mask;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	/* Only plain on/off TX timestamping is supported */
	if (config.tx_type != HWTSTAMP_TX_OFF &&
	    config.tx_type != HWTSTAMP_TX_ON)
		return -ERANGE;

	ptp = port->priv->iface_base + MVPP22_PTP_BASE(port->gop_id);

	/* Build the GCR value and interrupt mask for the requested mode */
	int_mask = gcr = 0;
	if (config.tx_type != HWTSTAMP_TX_OFF) {
		gcr |= MVPP22_PTP_GCR_TSU_ENABLE | MVPP22_PTP_GCR_TX_RESET;
		int_mask |= MVPP22_PTP_INT_MASK_QUEUE1 |
			    MVPP22_PTP_INT_MASK_QUEUE0;
	}

	/* It seems we must also release the TX reset when enabling the TSU */
	if (config.rx_filter != HWTSTAMP_FILTER_NONE)
		gcr |= MVPP22_PTP_GCR_TSU_ENABLE | MVPP22_PTP_GCR_RX_RESET |
		       MVPP22_PTP_GCR_TX_RESET;

	/* Start the TAI clock before enabling the TSU */
	if (gcr & MVPP22_PTP_GCR_TSU_ENABLE)
		mvpp22_tai_start(port->priv->tai);

	if (config.rx_filter != HWTSTAMP_FILTER_NONE) {
		/* Hardware timestamps every packet; report that back */
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		mvpp2_modify(ptp + MVPP22_PTP_GCR,
			     MVPP22_PTP_GCR_RX_RESET |
			     MVPP22_PTP_GCR_TX_RESET |
			     MVPP22_PTP_GCR_TSU_ENABLE, gcr);
		port->rx_hwtstamp = true;
	} else {
		port->rx_hwtstamp = false;
		mvpp2_modify(ptp + MVPP22_PTP_GCR,
			     MVPP22_PTP_GCR_RX_RESET |
			     MVPP22_PTP_GCR_TX_RESET |
			     MVPP22_PTP_GCR_TSU_ENABLE, gcr);
	}

	/* Unmask the TX timestamp queue interrupts only when TX
	 * timestamping is enabled.
	 */
	mvpp2_modify(ptp + MVPP22_PTP_INT_MASK,
		     MVPP22_PTP_INT_MASK_QUEUE1 |
		     MVPP22_PTP_INT_MASK_QUEUE0, int_mask);

	/* Stop the TAI clock once the TSU is fully disabled */
	if (!(gcr & MVPP22_PTP_GCR_TSU_ENABLE))
		mvpp22_tai_stop(port->priv->tai);

	port->tx_hwtstamp_type = config.tx_type;

	if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
		return -EFAULT;

	return 0;
}
5231ce3497e2SRussell King
mvpp2_get_ts_config(struct mvpp2_port * port,struct ifreq * ifr)5232ce3497e2SRussell King static int mvpp2_get_ts_config(struct mvpp2_port *port, struct ifreq *ifr)
5233ce3497e2SRussell King {
5234ce3497e2SRussell King struct hwtstamp_config config;
5235ce3497e2SRussell King
5236ce3497e2SRussell King memset(&config, 0, sizeof(config));
5237ce3497e2SRussell King
5238f5015a59SRussell King config.tx_type = port->tx_hwtstamp_type;
5239ce3497e2SRussell King config.rx_filter = port->rx_hwtstamp ?
5240ce3497e2SRussell King HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE;
5241ce3497e2SRussell King
5242ce3497e2SRussell King if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
5243ce3497e2SRussell King return -EFAULT;
5244ce3497e2SRussell King
5245ce3497e2SRussell King return 0;
5246ce3497e2SRussell King }
5247ce3497e2SRussell King
/* ethtool get_ts_info callback: report timestamping capabilities and the
 * PTP hardware clock index for ports with a TSU.
 */
static int mvpp2_ethtool_get_ts_info(struct net_device *dev,
				     struct ethtool_ts_info *info)
{
	struct mvpp2_port *port = netdev_priv(dev);

	/* Hardware timestamping is only available on ports with a TSU */
	if (!port->hwtstamp)
		return -EOPNOTSUPP;

	info->phc_index = mvpp22_tai_ptp_clock_index(port->priv->tai);
	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
				SOF_TIMESTAMPING_RX_SOFTWARE |
				SOF_TIMESTAMPING_SOFTWARE |
				SOF_TIMESTAMPING_TX_HARDWARE |
				SOF_TIMESTAMPING_RX_HARDWARE |
				SOF_TIMESTAMPING_RAW_HARDWARE;
	/* RX timestamping is all-or-nothing, see mvpp2_set_ts_config() */
	info->tx_types = BIT(HWTSTAMP_TX_OFF) |
			 BIT(HWTSTAMP_TX_ON);
	info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
			   BIT(HWTSTAMP_FILTER_ALL);

	return 0;
}
5270ce3497e2SRussell King
mvpp2_ioctl(struct net_device * dev,struct ifreq * ifr,int cmd)5271db9d7d36SMaxime Chevallier static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
5272db9d7d36SMaxime Chevallier {
5273db9d7d36SMaxime Chevallier struct mvpp2_port *port = netdev_priv(dev);
5274db9d7d36SMaxime Chevallier
5275ce3497e2SRussell King switch (cmd) {
5276ce3497e2SRussell King case SIOCSHWTSTAMP:
5277ce3497e2SRussell King if (port->hwtstamp)
5278ce3497e2SRussell King return mvpp2_set_ts_config(port, ifr);
5279ce3497e2SRussell King break;
5280ce3497e2SRussell King
5281ce3497e2SRussell King case SIOCGHWTSTAMP:
5282ce3497e2SRussell King if (port->hwtstamp)
5283ce3497e2SRussell King return mvpp2_get_ts_config(port, ifr);
5284ce3497e2SRussell King break;
5285ce3497e2SRussell King }
5286ce3497e2SRussell King
5287db9d7d36SMaxime Chevallier if (!port->phylink)
5288db9d7d36SMaxime Chevallier return -ENOTSUPP;
5289db9d7d36SMaxime Chevallier
5290db9d7d36SMaxime Chevallier return phylink_mii_ioctl(port->phylink, ifr, cmd);
5291db9d7d36SMaxime Chevallier }
5292db9d7d36SMaxime Chevallier
/* ndo_vlan_rx_add_vid callback: add a parser VID filter entry for this
 * port; fails once the per-port filter table is full.
 */
static int mvpp2_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct mvpp2_port *port = netdev_priv(dev);
	int err;

	err = mvpp2_prs_vid_entry_add(port, vid);
	if (!err)
		return 0;

	netdev_err(dev, "rx-vlan-filter offloading cannot accept more than %d VIDs per port\n",
		   MVPP2_PRS_VLAN_FILT_MAX - 1);
	return err;
}
5304db9d7d36SMaxime Chevallier
/* ndo_vlan_rx_kill_vid callback: drop the parser VID filter entry for
 * this port; removal cannot fail.
 */
static int mvpp2_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	mvpp2_prs_vid_entry_remove(netdev_priv(dev), vid);

	return 0;
}
5312db9d7d36SMaxime Chevallier
/* ndo_set_features callback: react to toggles of VLAN filtering and RX
 * hashing by reprogramming the parser / RSS accordingly.
 */
static int mvpp2_set_features(struct net_device *dev,
			      netdev_features_t features)
{
	struct mvpp2_port *port = netdev_priv(dev);
	netdev_features_t toggled = dev->features ^ features;

	if (toggled & NETIF_F_HW_VLAN_CTAG_FILTER) {
		if (features & NETIF_F_HW_VLAN_CTAG_FILTER) {
			mvpp2_prs_vid_enable_filtering(port);
		} else {
			/* Invalidate all registered VID filters for this
			 * port before turning filtering off.
			 */
			mvpp2_prs_vid_remove_all(port);
			mvpp2_prs_vid_disable_filtering(port);
		}
	}

	if (toggled & NETIF_F_RXHASH) {
		if (features & NETIF_F_RXHASH)
			mvpp22_port_rss_enable(port);
		else
			mvpp22_port_rss_disable(port);
	}

	return 0;
}
5341db9d7d36SMaxime Chevallier
/* Attach/detach an XDP program.  The RX queues only need a full restart
 * when the attached/detached state actually toggles ('reset'); a mere
 * program swap keeps them running.
 */
static int mvpp2_xdp_setup(struct mvpp2_port *port, struct netdev_bpf *bpf)
{
	struct bpf_prog *prog = bpf->prog, *old_prog;
	bool running = netif_running(port->dev);
	/* true when going from no-program to program or vice versa */
	bool reset = !prog != !port->xdp_prog;

	if (port->dev->mtu > MVPP2_MAX_RX_BUF_SIZE) {
		NL_SET_ERR_MSG_MOD(bpf->extack, "MTU too large for XDP");
		return -EOPNOTSUPP;
	}

	if (!port->priv->percpu_pools) {
		NL_SET_ERR_MSG_MOD(bpf->extack, "Per CPU Pools required for XDP");
		return -EOPNOTSUPP;
	}

	if (port->ntxqs < num_possible_cpus() * 2) {
		NL_SET_ERR_MSG_MOD(bpf->extack, "XDP_TX needs two TX queues per CPU");
		return -EOPNOTSUPP;
	}

	/* device is up and bpf is added/removed, must setup the RX queues */
	if (running && reset)
		mvpp2_stop(port->dev);

	/* Atomically swap in the new program and drop the old reference */
	old_prog = xchg(&port->xdp_prog, prog);
	if (old_prog)
		bpf_prog_put(old_prog);

	/* bpf is just replaced, RXQ and MTU are already setup */
	if (!reset)
		return 0;

	/* device was up, restore the link */
	if (running)
		mvpp2_open(port->dev);

	/* Check Page Pool DMA Direction */
	mvpp2_check_pagepool_dma(port);

	return 0;
}
538407dd0a7aSMatteo Croce
mvpp2_xdp(struct net_device * dev,struct netdev_bpf * xdp)538507dd0a7aSMatteo Croce static int mvpp2_xdp(struct net_device *dev, struct netdev_bpf *xdp)
538607dd0a7aSMatteo Croce {
538707dd0a7aSMatteo Croce struct mvpp2_port *port = netdev_priv(dev);
538807dd0a7aSMatteo Croce
538907dd0a7aSMatteo Croce switch (xdp->command) {
539007dd0a7aSMatteo Croce case XDP_SETUP_PROG:
539107dd0a7aSMatteo Croce return mvpp2_xdp_setup(port, xdp);
539207dd0a7aSMatteo Croce default:
539307dd0a7aSMatteo Croce return -EINVAL;
539407dd0a7aSMatteo Croce }
539507dd0a7aSMatteo Croce }
539607dd0a7aSMatteo Croce
5397db9d7d36SMaxime Chevallier /* Ethtool methods */
5398db9d7d36SMaxime Chevallier
mvpp2_ethtool_nway_reset(struct net_device * dev)5399db9d7d36SMaxime Chevallier static int mvpp2_ethtool_nway_reset(struct net_device *dev)
5400db9d7d36SMaxime Chevallier {
5401db9d7d36SMaxime Chevallier struct mvpp2_port *port = netdev_priv(dev);
5402db9d7d36SMaxime Chevallier
5403db9d7d36SMaxime Chevallier if (!port->phylink)
5404db9d7d36SMaxime Chevallier return -ENOTSUPP;
5405db9d7d36SMaxime Chevallier
5406db9d7d36SMaxime Chevallier return phylink_ethtool_nway_reset(port->phylink);
5407db9d7d36SMaxime Chevallier }
5408db9d7d36SMaxime Chevallier
/* Set interrupt coalescing for ethtools */
static int
mvpp2_ethtool_set_coalesce(struct net_device *dev,
			   struct ethtool_coalesce *c,
			   struct kernel_ethtool_coalesce *kernel_coal,
			   struct netlink_ext_ack *extack)
{
	struct mvpp2_port *port = netdev_priv(dev);
	int queue;

	/* Apply the RX time/packet thresholds to every RX queue */
	for (queue = 0; queue < port->nrxqs; queue++) {
		struct mvpp2_rx_queue *rxq = port->rxqs[queue];

		rxq->time_coal = c->rx_coalesce_usecs;
		rxq->pkts_coal = c->rx_max_coalesced_frames;
		mvpp2_rx_pkts_coal_set(port, rxq);
		mvpp2_rx_time_coal_set(port, rxq);
	}

	/* TX time coalescing is a single per-port setting; it only takes
	 * effect when TX completion is interrupt driven.
	 */
	if (port->has_tx_irqs) {
		port->tx_time_coal = c->tx_coalesce_usecs;
		mvpp2_tx_time_coal_set(port);
	}

	/* The per-queue TX packet threshold is always recorded, but only
	 * programmed into hardware when TX interrupts are in use.
	 */
	for (queue = 0; queue < port->ntxqs; queue++) {
		struct mvpp2_tx_queue *txq = port->txqs[queue];

		txq->done_pkts_coal = c->tx_max_coalesced_frames;

		if (port->has_tx_irqs)
			mvpp2_tx_pkts_coal_set(port, txq);
	}

	return 0;
}
5444db9d7d36SMaxime Chevallier
5445db9d7d36SMaxime Chevallier /* get coalescing for ethtools */
5446f3ccfda1SYufeng Mo static int
mvpp2_ethtool_get_coalesce(struct net_device * dev,struct ethtool_coalesce * c,struct kernel_ethtool_coalesce * kernel_coal,struct netlink_ext_ack * extack)5447f3ccfda1SYufeng Mo mvpp2_ethtool_get_coalesce(struct net_device *dev,
5448f3ccfda1SYufeng Mo struct ethtool_coalesce *c,
5449f3ccfda1SYufeng Mo struct kernel_ethtool_coalesce *kernel_coal,
5450f3ccfda1SYufeng Mo struct netlink_ext_ack *extack)
5451db9d7d36SMaxime Chevallier {
5452db9d7d36SMaxime Chevallier struct mvpp2_port *port = netdev_priv(dev);
5453db9d7d36SMaxime Chevallier
5454db9d7d36SMaxime Chevallier c->rx_coalesce_usecs = port->rxqs[0]->time_coal;
5455db9d7d36SMaxime Chevallier c->rx_max_coalesced_frames = port->rxqs[0]->pkts_coal;
5456db9d7d36SMaxime Chevallier c->tx_max_coalesced_frames = port->txqs[0]->done_pkts_coal;
5457db9d7d36SMaxime Chevallier c->tx_coalesce_usecs = port->tx_time_coal;
5458db9d7d36SMaxime Chevallier return 0;
5459db9d7d36SMaxime Chevallier }
5460db9d7d36SMaxime Chevallier
mvpp2_ethtool_get_drvinfo(struct net_device * dev,struct ethtool_drvinfo * drvinfo)5461db9d7d36SMaxime Chevallier static void mvpp2_ethtool_get_drvinfo(struct net_device *dev,
5462db9d7d36SMaxime Chevallier struct ethtool_drvinfo *drvinfo)
5463db9d7d36SMaxime Chevallier {
5464f029c781SWolfram Sang strscpy(drvinfo->driver, MVPP2_DRIVER_NAME,
5465db9d7d36SMaxime Chevallier sizeof(drvinfo->driver));
5466f029c781SWolfram Sang strscpy(drvinfo->version, MVPP2_DRIVER_VERSION,
5467db9d7d36SMaxime Chevallier sizeof(drvinfo->version));
5468f029c781SWolfram Sang strscpy(drvinfo->bus_info, dev_name(&dev->dev),
5469db9d7d36SMaxime Chevallier sizeof(drvinfo->bus_info));
5470db9d7d36SMaxime Chevallier }
5471db9d7d36SMaxime Chevallier
547274624944SHao Chen static void
mvpp2_ethtool_get_ringparam(struct net_device * dev,struct ethtool_ringparam * ring,struct kernel_ethtool_ringparam * kernel_ring,struct netlink_ext_ack * extack)547374624944SHao Chen mvpp2_ethtool_get_ringparam(struct net_device *dev,
547474624944SHao Chen struct ethtool_ringparam *ring,
547574624944SHao Chen struct kernel_ethtool_ringparam *kernel_ring,
547674624944SHao Chen struct netlink_ext_ack *extack)
5477db9d7d36SMaxime Chevallier {
5478db9d7d36SMaxime Chevallier struct mvpp2_port *port = netdev_priv(dev);
5479db9d7d36SMaxime Chevallier
5480db9d7d36SMaxime Chevallier ring->rx_max_pending = MVPP2_MAX_RXD_MAX;
5481db9d7d36SMaxime Chevallier ring->tx_max_pending = MVPP2_MAX_TXD_MAX;
5482db9d7d36SMaxime Chevallier ring->rx_pending = port->rx_ring_size;
5483db9d7d36SMaxime Chevallier ring->tx_pending = port->tx_ring_size;
5484db9d7d36SMaxime Chevallier }
5485db9d7d36SMaxime Chevallier
/* ethtool set_ringparam callback: resize the RX/TX rings.  If the
 * interface is running the queues must be torn down and reallocated; on
 * allocation failure the previous sizes are restored before giving up.
 */
static int
mvpp2_ethtool_set_ringparam(struct net_device *dev,
			    struct ethtool_ringparam *ring,
			    struct kernel_ethtool_ringparam *kernel_ring,
			    struct netlink_ext_ack *extack)
{
	struct mvpp2_port *port = netdev_priv(dev);
	u16 prev_rx_ring_size = port->rx_ring_size;
	u16 prev_tx_ring_size = port->tx_ring_size;
	int err;

	err = mvpp2_check_ringparam_valid(dev, ring);
	if (err)
		return err;

	/* Interface is down: just record the sizes for the next open */
	if (!netif_running(dev)) {
		port->rx_ring_size = ring->rx_pending;
		port->tx_ring_size = ring->tx_pending;
		return 0;
	}

	/* The interface is running, so we have to force a
	 * reallocation of the queues
	 */
	mvpp2_stop_dev(port);
	mvpp2_cleanup_rxqs(port);
	mvpp2_cleanup_txqs(port);

	port->rx_ring_size = ring->rx_pending;
	port->tx_ring_size = ring->tx_pending;

	err = mvpp2_setup_rxqs(port);
	if (err) {
		/* Reallocate Rx queues with the original ring size */
		port->rx_ring_size = prev_rx_ring_size;
		ring->rx_pending = prev_rx_ring_size;
		err = mvpp2_setup_rxqs(port);
		if (err)
			goto err_out;
	}
	err = mvpp2_setup_txqs(port);
	if (err) {
		/* Reallocate Tx queues with the original ring size */
		port->tx_ring_size = prev_tx_ring_size;
		ring->tx_pending = prev_tx_ring_size;
		err = mvpp2_setup_txqs(port);
		if (err)
			goto err_clean_rxqs;
	}

	mvpp2_start_dev(port);
	mvpp2_egress_enable(port);
	mvpp2_ingress_enable(port);

	return 0;

err_clean_rxqs:
	mvpp2_cleanup_rxqs(port);
err_out:
	netdev_err(dev, "failed to change ring parameters");
	return err;
}
5548db9d7d36SMaxime Chevallier
mvpp2_ethtool_get_pause_param(struct net_device * dev,struct ethtool_pauseparam * pause)5549db9d7d36SMaxime Chevallier static void mvpp2_ethtool_get_pause_param(struct net_device *dev,
5550db9d7d36SMaxime Chevallier struct ethtool_pauseparam *pause)
5551db9d7d36SMaxime Chevallier {
5552db9d7d36SMaxime Chevallier struct mvpp2_port *port = netdev_priv(dev);
5553db9d7d36SMaxime Chevallier
5554db9d7d36SMaxime Chevallier if (!port->phylink)
5555db9d7d36SMaxime Chevallier return;
5556db9d7d36SMaxime Chevallier
5557db9d7d36SMaxime Chevallier phylink_ethtool_get_pauseparam(port->phylink, pause);
5558db9d7d36SMaxime Chevallier }
5559db9d7d36SMaxime Chevallier
/* ethtool .set_pauseparam: apply pause/flow-control settings via phylink.
 * Returns 0 or a negative errno from phylink.
 */
mvpp2_ethtool_set_pause_param(struct net_device * dev,struct ethtool_pauseparam * pause)5560db9d7d36SMaxime Chevallier static int mvpp2_ethtool_set_pause_param(struct net_device *dev,
5561db9d7d36SMaxime Chevallier struct ethtool_pauseparam *pause)
5562db9d7d36SMaxime Chevallier {
5563db9d7d36SMaxime Chevallier struct mvpp2_port *port = netdev_priv(dev);
5564db9d7d36SMaxime Chevallier
5565db9d7d36SMaxime Chevallier if (!port->phylink)
/* NOTE(review): -ENOTSUPP is a kernel-internal errno (524) and leaks to
 * userspace from ethtool handlers; -EOPNOTSUPP is the usual convention
 * (checkpatch flags this).  Left as-is to avoid changing the uAPI value.
 */
5566db9d7d36SMaxime Chevallier return -ENOTSUPP;
5567db9d7d36SMaxime Chevallier
5568db9d7d36SMaxime Chevallier return phylink_ethtool_set_pauseparam(port->phylink, pause);
5569db9d7d36SMaxime Chevallier }
5570db9d7d36SMaxime Chevallier
/* ethtool .get_link_ksettings: fetch link speed/duplex/advertising state
 * through phylink.  Fails with a kernel-internal -ENOTSUPP (see note on
 * set_pause_param) when no phylink instance is attached.
 */
mvpp2_ethtool_get_link_ksettings(struct net_device * dev,struct ethtool_link_ksettings * cmd)5571db9d7d36SMaxime Chevallier static int mvpp2_ethtool_get_link_ksettings(struct net_device *dev,
5572db9d7d36SMaxime Chevallier struct ethtool_link_ksettings *cmd)
5573db9d7d36SMaxime Chevallier {
5574db9d7d36SMaxime Chevallier struct mvpp2_port *port = netdev_priv(dev);
5575db9d7d36SMaxime Chevallier
5576db9d7d36SMaxime Chevallier if (!port->phylink)
5577db9d7d36SMaxime Chevallier return -ENOTSUPP;
5578db9d7d36SMaxime Chevallier
5579db9d7d36SMaxime Chevallier return phylink_ethtool_ksettings_get(port->phylink, cmd);
5580db9d7d36SMaxime Chevallier }
5581db9d7d36SMaxime Chevallier
/* ethtool .set_link_ksettings: apply link settings through phylink.
 * Mirrors get_link_ksettings, including the -ENOTSUPP quirk when no
 * phylink instance exists.
 */
mvpp2_ethtool_set_link_ksettings(struct net_device * dev,const struct ethtool_link_ksettings * cmd)5582db9d7d36SMaxime Chevallier static int mvpp2_ethtool_set_link_ksettings(struct net_device *dev,
5583db9d7d36SMaxime Chevallier const struct ethtool_link_ksettings *cmd)
5584db9d7d36SMaxime Chevallier {
5585db9d7d36SMaxime Chevallier struct mvpp2_port *port = netdev_priv(dev);
5586db9d7d36SMaxime Chevallier
5587db9d7d36SMaxime Chevallier if (!port->phylink)
5588db9d7d36SMaxime Chevallier return -ENOTSUPP;
5589db9d7d36SMaxime Chevallier
5590db9d7d36SMaxime Chevallier return phylink_ethtool_ksettings_set(port->phylink, cmd);
5591db9d7d36SMaxime Chevallier }
5592db9d7d36SMaxime Chevallier
/* ethtool .get_rxnfc: query RX flow classification / RSS state.
 * Supported sub-commands: GRXFH (hash fields), GRXRINGS (ring count),
 * GRXCLSRLCNT / GRXCLSRULE / GRXCLSRLALL (classifier rule enumeration).
 * Requires PPv2.2+ RSS support; returns -EOPNOTSUPP otherwise.
 */
mvpp2_ethtool_get_rxnfc(struct net_device * dev,struct ethtool_rxnfc * info,u32 * rules)55938179642bSAntoine Tenart static int mvpp2_ethtool_get_rxnfc(struct net_device *dev,
55948179642bSAntoine Tenart struct ethtool_rxnfc *info, u32 *rules)
55958179642bSAntoine Tenart {
55968179642bSAntoine Tenart struct mvpp2_port *port = netdev_priv(dev);
559790b509b3SMaxime Chevallier int ret = 0, i, loc = 0;
55988179642bSAntoine Tenart
55990a8a8000SStefan Chulski if (!mvpp22_rss_is_supported(port))
56008179642bSAntoine Tenart return -EOPNOTSUPP;
56018179642bSAntoine Tenart
56028179642bSAntoine Tenart switch (info->cmd) {
5603436d4fdbSMaxime Chevallier case ETHTOOL_GRXFH:
5604436d4fdbSMaxime Chevallier ret = mvpp2_ethtool_rxfh_get(port, info);
5605436d4fdbSMaxime Chevallier break;
56068179642bSAntoine Tenart case ETHTOOL_GRXRINGS:
56078179642bSAntoine Tenart info->data = port->nrxqs;
56088179642bSAntoine Tenart break;
560990b509b3SMaxime Chevallier case ETHTOOL_GRXCLSRLCNT:
561090b509b3SMaxime Chevallier info->rule_cnt = port->n_rfs_rules;
561190b509b3SMaxime Chevallier break;
561290b509b3SMaxime Chevallier case ETHTOOL_GRXCLSRULE:
561390b509b3SMaxime Chevallier ret = mvpp2_ethtool_cls_rule_get(port, info);
561490b509b3SMaxime Chevallier break;
561590b509b3SMaxime Chevallier case ETHTOOL_GRXCLSRLALL:
/* Walk the per-flow rule slots, emitting the index of each populated
 * slot into the caller's rules[] array.  Bail out with -EMSGSIZE if
 * the caller's buffer (info->rule_cnt entries) fills up first.
 */
5616ae8e1d5eSMaxime Chevallier for (i = 0; i < MVPP2_N_RFS_ENTRIES_PER_FLOW; i++) {
561751fe0a47SHangyu Hua if (loc == info->rule_cnt) {
561851fe0a47SHangyu Hua ret = -EMSGSIZE;
561951fe0a47SHangyu Hua break;
562051fe0a47SHangyu Hua }
562151fe0a47SHangyu Hua
562290b509b3SMaxime Chevallier if (port->rfs_rules[i])
562390b509b3SMaxime Chevallier rules[loc++] = i;
562490b509b3SMaxime Chevallier }
562590b509b3SMaxime Chevallier break;
56268179642bSAntoine Tenart default:
/* NOTE(review): kernel-internal -ENOTSUPP here, while set_rxnfc below
 * uses -EOPNOTSUPP for the same case — inconsistent, but changing it
 * alters the value seen by userspace.
 */
56278179642bSAntoine Tenart return -ENOTSUPP;
56288179642bSAntoine Tenart }
56298179642bSAntoine Tenart
5630436d4fdbSMaxime Chevallier return ret;
5631436d4fdbSMaxime Chevallier }
5632436d4fdbSMaxime Chevallier
/* ethtool .set_rxnfc: configure RX flow classification.
 * Supported sub-commands: SRXFH (hash fields), SRXCLSRLINS (insert rule),
 * SRXCLSRLDEL (delete rule).  Requires PPv2.2+ RSS support.
 */
mvpp2_ethtool_set_rxnfc(struct net_device * dev,struct ethtool_rxnfc * info)5633436d4fdbSMaxime Chevallier static int mvpp2_ethtool_set_rxnfc(struct net_device *dev,
5634436d4fdbSMaxime Chevallier struct ethtool_rxnfc *info)
5635436d4fdbSMaxime Chevallier {
5636436d4fdbSMaxime Chevallier struct mvpp2_port *port = netdev_priv(dev);
5637436d4fdbSMaxime Chevallier int ret = 0;
5638436d4fdbSMaxime Chevallier
56390a8a8000SStefan Chulski if (!mvpp22_rss_is_supported(port))
5640436d4fdbSMaxime Chevallier return -EOPNOTSUPP;
5641436d4fdbSMaxime Chevallier
5642436d4fdbSMaxime Chevallier switch (info->cmd) {
5643436d4fdbSMaxime Chevallier case ETHTOOL_SRXFH:
5644436d4fdbSMaxime Chevallier ret = mvpp2_ethtool_rxfh_set(port, info);
5645436d4fdbSMaxime Chevallier break;
564690b509b3SMaxime Chevallier case ETHTOOL_SRXCLSRLINS:
564790b509b3SMaxime Chevallier ret = mvpp2_ethtool_cls_rule_ins(port, info);
564890b509b3SMaxime Chevallier break;
564990b509b3SMaxime Chevallier case ETHTOOL_SRXCLSRLDEL:
565090b509b3SMaxime Chevallier ret = mvpp2_ethtool_cls_rule_del(port, info);
565190b509b3SMaxime Chevallier break;
5652436d4fdbSMaxime Chevallier default:
5653436d4fdbSMaxime Chevallier return -EOPNOTSUPP;
5654436d4fdbSMaxime Chevallier }
5655436d4fdbSMaxime Chevallier return ret;
56568179642bSAntoine Tenart }
56578179642bSAntoine Tenart
/* ethtool .get_rxfh_indir_size: RSS indirection table size, or 0 when
 * the port has no RSS support (which disables the rxfh ethtool ops).
 */
mvpp2_ethtool_get_rxfh_indir_size(struct net_device * dev)56588179642bSAntoine Tenart static u32 mvpp2_ethtool_get_rxfh_indir_size(struct net_device *dev)
56598179642bSAntoine Tenart {
56600a8a8000SStefan Chulski struct mvpp2_port *port = netdev_priv(dev);
56610a8a8000SStefan Chulski
56620a8a8000SStefan Chulski return mvpp22_rss_is_supported(port) ? MVPP22_RSS_TABLE_ENTRIES : 0;
56638179642bSAntoine Tenart }
56648179642bSAntoine Tenart
/* ethtool .get_rxfh: read the default (context 0) RSS indirection table.
 * The hash function is fixed to CRC32; there is no configurable key.
 * Both output pointers are optional per the ethtool contract.
 */
mvpp2_ethtool_get_rxfh(struct net_device * dev,u32 * indir,u8 * key,u8 * hfunc)56658179642bSAntoine Tenart static int mvpp2_ethtool_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
56668179642bSAntoine Tenart u8 *hfunc)
56678179642bSAntoine Tenart {
56688179642bSAntoine Tenart struct mvpp2_port *port = netdev_priv(dev);
5669895586d5SMaxime Chevallier int ret = 0;
56708179642bSAntoine Tenart
56710a8a8000SStefan Chulski if (!mvpp22_rss_is_supported(port))
56728179642bSAntoine Tenart return -EOPNOTSUPP;
56738179642bSAntoine Tenart
56748179642bSAntoine Tenart if (indir)
5675895586d5SMaxime Chevallier ret = mvpp22_port_rss_ctx_indir_get(port, 0, indir);
56768179642bSAntoine Tenart
56778179642bSAntoine Tenart if (hfunc)
56788179642bSAntoine Tenart *hfunc = ETH_RSS_HASH_CRC32;
56798179642bSAntoine Tenart
5680895586d5SMaxime Chevallier return ret;
56818179642bSAntoine Tenart }
56828179642bSAntoine Tenart
/* ethtool .set_rxfh: write the default (context 0) RSS indirection table.
 * Only the CRC32 hash function (or "no change") is accepted, and a hash
 * key cannot be set — the hardware has none.
 */
mvpp2_ethtool_set_rxfh(struct net_device * dev,const u32 * indir,const u8 * key,const u8 hfunc)56838179642bSAntoine Tenart static int mvpp2_ethtool_set_rxfh(struct net_device *dev, const u32 *indir,
56848179642bSAntoine Tenart const u8 *key, const u8 hfunc)
56858179642bSAntoine Tenart {
56868179642bSAntoine Tenart struct mvpp2_port *port = netdev_priv(dev);
5687895586d5SMaxime Chevallier int ret = 0;
56888179642bSAntoine Tenart
56890a8a8000SStefan Chulski if (!mvpp22_rss_is_supported(port))
56908179642bSAntoine Tenart return -EOPNOTSUPP;
56918179642bSAntoine Tenart
56928179642bSAntoine Tenart if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_CRC32)
56938179642bSAntoine Tenart return -EOPNOTSUPP;
56948179642bSAntoine Tenart
56958179642bSAntoine Tenart if (key)
56968179642bSAntoine Tenart return -EOPNOTSUPP;
56978179642bSAntoine Tenart
5698895586d5SMaxime Chevallier if (indir)
5699895586d5SMaxime Chevallier ret = mvpp22_port_rss_ctx_indir_set(port, 0, indir);
5700895586d5SMaxime Chevallier
5701895586d5SMaxime Chevallier return ret;
57028179642bSAntoine Tenart }
57038179642bSAntoine Tenart
/* ethtool .get_rxfh_context: like get_rxfh but for an explicit RSS
 * context id.  The id is bounds-checked against MVPP22_N_RSS_TABLES
 * before being used to index the hardware tables.
 */
mvpp2_ethtool_get_rxfh_context(struct net_device * dev,u32 * indir,u8 * key,u8 * hfunc,u32 rss_context)5704895586d5SMaxime Chevallier static int mvpp2_ethtool_get_rxfh_context(struct net_device *dev, u32 *indir,
5705895586d5SMaxime Chevallier u8 *key, u8 *hfunc, u32 rss_context)
5706895586d5SMaxime Chevallier {
5707895586d5SMaxime Chevallier struct mvpp2_port *port = netdev_priv(dev);
5708895586d5SMaxime Chevallier int ret = 0;
5709895586d5SMaxime Chevallier
57100a8a8000SStefan Chulski if (!mvpp22_rss_is_supported(port))
5711895586d5SMaxime Chevallier return -EOPNOTSUPP;
571239bd16dfSDan Carpenter if (rss_context >= MVPP22_N_RSS_TABLES)
571339bd16dfSDan Carpenter return -EINVAL;
5714895586d5SMaxime Chevallier
5715895586d5SMaxime Chevallier if (hfunc)
5716895586d5SMaxime Chevallier *hfunc = ETH_RSS_HASH_CRC32;
5717895586d5SMaxime Chevallier
5718895586d5SMaxime Chevallier if (indir)
5719895586d5SMaxime Chevallier ret = mvpp22_port_rss_ctx_indir_get(port, rss_context, indir);
5720895586d5SMaxime Chevallier
5721895586d5SMaxime Chevallier return ret;
57228179642bSAntoine Tenart }
57238179642bSAntoine Tenart
/* ethtool .set_rxfh_context: create, delete, or reprogram an RSS context.
 * - delete: tear down *rss_context and return.
 * - *rss_context == ETH_RXFH_CONTEXT_ALLOC: allocate a new context first,
 *   writing its id back through rss_context.
 * - finally program the context's indirection table.
 * Same hfunc/key restrictions as set_rxfh (CRC32 only, no key).
 */
mvpp2_ethtool_set_rxfh_context(struct net_device * dev,const u32 * indir,const u8 * key,const u8 hfunc,u32 * rss_context,bool delete)5724895586d5SMaxime Chevallier static int mvpp2_ethtool_set_rxfh_context(struct net_device *dev,
5725895586d5SMaxime Chevallier const u32 *indir, const u8 *key,
5726895586d5SMaxime Chevallier const u8 hfunc, u32 *rss_context,
5727895586d5SMaxime Chevallier bool delete)
5728895586d5SMaxime Chevallier {
5729895586d5SMaxime Chevallier struct mvpp2_port *port = netdev_priv(dev);
5730895586d5SMaxime Chevallier int ret;
5731895586d5SMaxime Chevallier
57320a8a8000SStefan Chulski if (!mvpp22_rss_is_supported(port))
5733895586d5SMaxime Chevallier return -EOPNOTSUPP;
5734895586d5SMaxime Chevallier
5735895586d5SMaxime Chevallier if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_CRC32)
5736895586d5SMaxime Chevallier return -EOPNOTSUPP;
5737895586d5SMaxime Chevallier
5738895586d5SMaxime Chevallier if (key)
5739895586d5SMaxime Chevallier return -EOPNOTSUPP;
5740895586d5SMaxime Chevallier
5741895586d5SMaxime Chevallier if (delete)
5742895586d5SMaxime Chevallier return mvpp22_port_rss_ctx_delete(port, *rss_context);
5743895586d5SMaxime Chevallier
5744895586d5SMaxime Chevallier if (*rss_context == ETH_RXFH_CONTEXT_ALLOC) {
5745895586d5SMaxime Chevallier ret = mvpp22_port_rss_ctx_create(port, rss_context);
5746895586d5SMaxime Chevallier if (ret)
5747895586d5SMaxime Chevallier return ret;
5748895586d5SMaxime Chevallier }
5749895586d5SMaxime Chevallier
5750895586d5SMaxime Chevallier return mvpp22_port_rss_ctx_indir_set(port, *rss_context, indir);
5751895586d5SMaxime Chevallier }
5752db9d7d36SMaxime Chevallier /* Device ops */
5753db9d7d36SMaxime Chevallier
/* net_device_ops table: wires the stack's netdev callbacks to the mvpp2_*
 * handlers defined earlier in this file, including the XDP setup (.ndo_bpf)
 * and XDP transmit (.ndo_xdp_xmit) hooks.
 */
5754db9d7d36SMaxime Chevallier static const struct net_device_ops mvpp2_netdev_ops = {
5755db9d7d36SMaxime Chevallier .ndo_open = mvpp2_open,
5756db9d7d36SMaxime Chevallier .ndo_stop = mvpp2_stop,
5757db9d7d36SMaxime Chevallier .ndo_start_xmit = mvpp2_tx,
5758db9d7d36SMaxime Chevallier .ndo_set_rx_mode = mvpp2_set_rx_mode,
5759db9d7d36SMaxime Chevallier .ndo_set_mac_address = mvpp2_set_mac_address,
5760db9d7d36SMaxime Chevallier .ndo_change_mtu = mvpp2_change_mtu,
5761db9d7d36SMaxime Chevallier .ndo_get_stats64 = mvpp2_get_stats64,
5762a7605370SArnd Bergmann .ndo_eth_ioctl = mvpp2_ioctl,
5763db9d7d36SMaxime Chevallier .ndo_vlan_rx_add_vid = mvpp2_vlan_rx_add_vid,
5764db9d7d36SMaxime Chevallier .ndo_vlan_rx_kill_vid = mvpp2_vlan_rx_kill_vid,
5765db9d7d36SMaxime Chevallier .ndo_set_features = mvpp2_set_features,
576607dd0a7aSMatteo Croce .ndo_bpf = mvpp2_xdp,
5767c2d6fe61SMatteo Croce .ndo_xdp_xmit = mvpp2_xdp_xmit,
5768db9d7d36SMaxime Chevallier };
5769db9d7d36SMaxime Chevallier
/* ethtool_ops table.  supported_coalesce_params tells the ethtool core
 * which coalescing fields set_coalesce accepts (usecs + max frames); the
 * core rejects anything else before calling the driver.
 */
5770db9d7d36SMaxime Chevallier static const struct ethtool_ops mvpp2_eth_tool_ops = {
5771078db9a3SJakub Kicinski .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
5772078db9a3SJakub Kicinski ETHTOOL_COALESCE_MAX_FRAMES,
5773db9d7d36SMaxime Chevallier .nway_reset = mvpp2_ethtool_nway_reset,
5774db9d7d36SMaxime Chevallier .get_link = ethtool_op_get_link,
5775ce3497e2SRussell King .get_ts_info = mvpp2_ethtool_get_ts_info,
5776db9d7d36SMaxime Chevallier .set_coalesce = mvpp2_ethtool_set_coalesce,
5777db9d7d36SMaxime Chevallier .get_coalesce = mvpp2_ethtool_get_coalesce,
5778db9d7d36SMaxime Chevallier .get_drvinfo = mvpp2_ethtool_get_drvinfo,
5779db9d7d36SMaxime Chevallier .get_ringparam = mvpp2_ethtool_get_ringparam,
5780db9d7d36SMaxime Chevallier .set_ringparam = mvpp2_ethtool_set_ringparam,
5781db9d7d36SMaxime Chevallier .get_strings = mvpp2_ethtool_get_strings,
5782db9d7d36SMaxime Chevallier .get_ethtool_stats = mvpp2_ethtool_get_stats,
5783db9d7d36SMaxime Chevallier .get_sset_count = mvpp2_ethtool_get_sset_count,
5784db9d7d36SMaxime Chevallier .get_pauseparam = mvpp2_ethtool_get_pause_param,
5785db9d7d36SMaxime Chevallier .set_pauseparam = mvpp2_ethtool_set_pause_param,
5786db9d7d36SMaxime Chevallier .get_link_ksettings = mvpp2_ethtool_get_link_ksettings,
5787db9d7d36SMaxime Chevallier .set_link_ksettings = mvpp2_ethtool_set_link_ksettings,
57888179642bSAntoine Tenart .get_rxnfc = mvpp2_ethtool_get_rxnfc,
5789436d4fdbSMaxime Chevallier .set_rxnfc = mvpp2_ethtool_set_rxnfc,
57908179642bSAntoine Tenart .get_rxfh_indir_size = mvpp2_ethtool_get_rxfh_indir_size,
57918179642bSAntoine Tenart .get_rxfh = mvpp2_ethtool_get_rxfh,
57928179642bSAntoine Tenart .set_rxfh = mvpp2_ethtool_set_rxfh,
5793895586d5SMaxime Chevallier .get_rxfh_context = mvpp2_ethtool_get_rxfh_context,
5794895586d5SMaxime Chevallier .set_rxfh_context = mvpp2_ethtool_set_rxfh_context,
5795db9d7d36SMaxime Chevallier };
5796db9d7d36SMaxime Chevallier
5797db9d7d36SMaxime Chevallier /* Used for PPv2.1, or PPv2.2 with the old Device Tree binding that
5798db9d7d36SMaxime Chevallier * had a single IRQ defined per-port.
5799db9d7d36SMaxime Chevallier */
/* Set up a single shared queue vector covering all RX queues, mapped to
 * the port's one IRQ (legacy single-IRQ binding — see comment above).
 * Returns 0 on success, -EINVAL when the IRQ cannot be mapped.
 */
mvpp2_simple_queue_vectors_init(struct mvpp2_port * port,struct device_node * port_node)5800db9d7d36SMaxime Chevallier static int mvpp2_simple_queue_vectors_init(struct mvpp2_port *port,
5801db9d7d36SMaxime Chevallier struct device_node *port_node)
5802db9d7d36SMaxime Chevallier {
5803db9d7d36SMaxime Chevallier struct mvpp2_queue_vector *v = &port->qvecs[0];
5804db9d7d36SMaxime Chevallier
5805db9d7d36SMaxime Chevallier v->first_rxq = 0;
5806db9d7d36SMaxime Chevallier v->nrxqs = port->nrxqs;
5807db9d7d36SMaxime Chevallier v->type = MVPP2_QUEUE_VECTOR_SHARED;
5808db9d7d36SMaxime Chevallier v->sw_thread_id = 0;
/* The shared vector may be serviced by any online CPU. */
5809db9d7d36SMaxime Chevallier v->sw_thread_mask = *cpumask_bits(cpu_online_mask);
5810db9d7d36SMaxime Chevallier v->port = port;
5811db9d7d36SMaxime Chevallier v->irq = irq_of_parse_and_map(port_node, 0);
5812db9d7d36SMaxime Chevallier if (v->irq <= 0)
5813db9d7d36SMaxime Chevallier return -EINVAL;
5814b48b89f9SJakub Kicinski netif_napi_add(port->dev, &v->napi, mvpp2_poll);
5815db9d7d36SMaxime Chevallier
5816db9d7d36SMaxime Chevallier port->nqvecs = 1;
5817db9d7d36SMaxime Chevallier
5818db9d7d36SMaxime Chevallier return 0;
5819db9d7d36SMaxime Chevallier }
5820db9d7d36SMaxime Chevallier
/* Set up one queue vector per HW thread (multi-IRQ binding).  In
 * single-queue-distribution mode an extra, last vector is shared across
 * all RX queues; in multi mode each vector owns exactly one RX queue.
 * IRQ names follow either the legacy DT scheme ("tx-cpuX"/"rx-shared")
 * or the current one ("hifX"), selected by the MVPP2_F_DT_COMPAT flag.
 * On failure, disposes any IRQ mappings made so far and returns -EINVAL.
 */
mvpp2_multi_queue_vectors_init(struct mvpp2_port * port,struct device_node * port_node)5821db9d7d36SMaxime Chevallier static int mvpp2_multi_queue_vectors_init(struct mvpp2_port *port,
5822db9d7d36SMaxime Chevallier struct device_node *port_node)
5823db9d7d36SMaxime Chevallier {
5824e531f767SAntoine Tenart struct mvpp2 *priv = port->priv;
5825db9d7d36SMaxime Chevallier struct mvpp2_queue_vector *v;
5826db9d7d36SMaxime Chevallier int i, ret;
5827db9d7d36SMaxime Chevallier
/* NOTE(review): no default case — if queue_mode held any other value,
 * port->nqvecs would be left unset.  Presumably queue_mode is validated
 * at module init; confirm before relying on it.
 */
5828e531f767SAntoine Tenart switch (queue_mode) {
5829e531f767SAntoine Tenart case MVPP2_QDIST_SINGLE_MODE:
5830e531f767SAntoine Tenart port->nqvecs = priv->nthreads + 1;
5831e531f767SAntoine Tenart break;
5832e531f767SAntoine Tenart case MVPP2_QDIST_MULTI_MODE:
5833e531f767SAntoine Tenart port->nqvecs = priv->nthreads;
5834e531f767SAntoine Tenart break;
5835e531f767SAntoine Tenart }
5836db9d7d36SMaxime Chevallier
5837db9d7d36SMaxime Chevallier for (i = 0; i < port->nqvecs; i++) {
5838db9d7d36SMaxime Chevallier char irqname[16];
5839db9d7d36SMaxime Chevallier
5840db9d7d36SMaxime Chevallier v = port->qvecs + i;
5841db9d7d36SMaxime Chevallier
5842db9d7d36SMaxime Chevallier v->port = port;
5843db9d7d36SMaxime Chevallier v->type = MVPP2_QUEUE_VECTOR_PRIVATE;
5844db9d7d36SMaxime Chevallier v->sw_thread_id = i;
5845db9d7d36SMaxime Chevallier v->sw_thread_mask = BIT(i);
5846db9d7d36SMaxime Chevallier
5847a9aac385SAntoine Tenart if (port->flags & MVPP2_F_DT_COMPAT)
5848db9d7d36SMaxime Chevallier snprintf(irqname, sizeof(irqname), "tx-cpu%d", i);
5849a9aac385SAntoine Tenart else
5850a9aac385SAntoine Tenart snprintf(irqname, sizeof(irqname), "hif%d", i);
5851db9d7d36SMaxime Chevallier
5852db9d7d36SMaxime Chevallier if (queue_mode == MVPP2_QDIST_MULTI_MODE) {
58533f136849SAntoine Tenart v->first_rxq = i;
58543f136849SAntoine Tenart v->nrxqs = 1;
5855db9d7d36SMaxime Chevallier } else if (queue_mode == MVPP2_QDIST_SINGLE_MODE &&
5856db9d7d36SMaxime Chevallier i == (port->nqvecs - 1)) {
/* Last vector in single mode: shared handler for all RX queues. */
5857db9d7d36SMaxime Chevallier v->first_rxq = 0;
5858db9d7d36SMaxime Chevallier v->nrxqs = port->nrxqs;
5859db9d7d36SMaxime Chevallier v->type = MVPP2_QUEUE_VECTOR_SHARED;
5860a9aac385SAntoine Tenart
5861a9aac385SAntoine Tenart if (port->flags & MVPP2_F_DT_COMPAT)
/* NOTE(review): strncpy does not guarantee NUL-termination in
 * general; safe here only because "rx-shared" (10 bytes with NUL)
 * fits in irqname[16].
 */
5862db9d7d36SMaxime Chevallier strncpy(irqname, "rx-shared", sizeof(irqname));
5863db9d7d36SMaxime Chevallier }
5864db9d7d36SMaxime Chevallier
5865db9d7d36SMaxime Chevallier if (port_node)
5866db9d7d36SMaxime Chevallier v->irq = of_irq_get_byname(port_node, irqname);
5867db9d7d36SMaxime Chevallier else
5868db9d7d36SMaxime Chevallier v->irq = fwnode_irq_get(port->fwnode, i);
5869db9d7d36SMaxime Chevallier if (v->irq <= 0) {
5870db9d7d36SMaxime Chevallier ret = -EINVAL;
5871db9d7d36SMaxime Chevallier goto err;
5872db9d7d36SMaxime Chevallier }
5873db9d7d36SMaxime Chevallier
5874b48b89f9SJakub Kicinski netif_napi_add(port->dev, &v->napi, mvpp2_poll);
5875db9d7d36SMaxime Chevallier }
5876db9d7d36SMaxime Chevallier
5877db9d7d36SMaxime Chevallier return 0;
5878db9d7d36SMaxime Chevallier
5879db9d7d36SMaxime Chevallier err:
/* Unwind: dispose every vector's mapping (unmapped entries are harmless). */
5880db9d7d36SMaxime Chevallier for (i = 0; i < port->nqvecs; i++)
5881db9d7d36SMaxime Chevallier irq_dispose_mapping(port->qvecs[i].irq);
5882db9d7d36SMaxime Chevallier return ret;
5883db9d7d36SMaxime Chevallier }
5884db9d7d36SMaxime Chevallier
/* Dispatch to the multi- or single-IRQ queue-vector setup depending on
 * whether per-CPU TX interrupts are available for this port.
 */
mvpp2_queue_vectors_init(struct mvpp2_port * port,struct device_node * port_node)5885db9d7d36SMaxime Chevallier static int mvpp2_queue_vectors_init(struct mvpp2_port *port,
5886db9d7d36SMaxime Chevallier struct device_node *port_node)
5887db9d7d36SMaxime Chevallier {
5888db9d7d36SMaxime Chevallier if (port->has_tx_irqs)
5889db9d7d36SMaxime Chevallier return mvpp2_multi_queue_vectors_init(port, port_node);
5890db9d7d36SMaxime Chevallier else
5891db9d7d36SMaxime Chevallier return mvpp2_simple_queue_vectors_init(port, port_node);
5892db9d7d36SMaxime Chevallier }
5893db9d7d36SMaxime Chevallier
/* Release the IRQ mappings created by mvpp2_queue_vectors_init(). */
mvpp2_queue_vectors_deinit(struct mvpp2_port * port)5894db9d7d36SMaxime Chevallier static void mvpp2_queue_vectors_deinit(struct mvpp2_port *port)
5895db9d7d36SMaxime Chevallier {
5896db9d7d36SMaxime Chevallier int i;
5897db9d7d36SMaxime Chevallier
5898db9d7d36SMaxime Chevallier for (i = 0; i < port->nqvecs; i++)
5899db9d7d36SMaxime Chevallier irq_dispose_mapping(port->qvecs[i].irq);
5900db9d7d36SMaxime Chevallier }
5901db9d7d36SMaxime Chevallier
5902db9d7d36SMaxime Chevallier /* Configure Rx queue group interrupt for this port */
mvpp2_rx_irqs_setup(struct mvpp2_port * port)5903db9d7d36SMaxime Chevallier static void mvpp2_rx_irqs_setup(struct mvpp2_port *port)
5904db9d7d36SMaxime Chevallier {
5905db9d7d36SMaxime Chevallier struct mvpp2 *priv = port->priv;
5906db9d7d36SMaxime Chevallier u32 val;
5907db9d7d36SMaxime Chevallier int i;
5908db9d7d36SMaxime Chevallier
/* PPv2.1: a single register maps the whole port's RX queue group. */
5909db9d7d36SMaxime Chevallier if (priv->hw_version == MVPP21) {
5910db9d7d36SMaxime Chevallier mvpp2_write(priv, MVPP21_ISR_RXQ_GROUP_REG(port->id),
5911db9d7d36SMaxime Chevallier port->nrxqs);
5912db9d7d36SMaxime Chevallier return;
5913db9d7d36SMaxime Chevallier }
5914db9d7d36SMaxime Chevallier
59156af27a1dSStefan Chulski /* Handle the more complicated PPv2.2 and PPv2.3 case */
5916db9d7d36SMaxime Chevallier for (i = 0; i < port->nqvecs; i++) {
5917db9d7d36SMaxime Chevallier struct mvpp2_queue_vector *qv = port->qvecs + i;
5918db9d7d36SMaxime Chevallier
/* Skip TX-only vectors — they own no RX queues. */
5919db9d7d36SMaxime Chevallier if (!qv->nrxqs)
5920db9d7d36SMaxime Chevallier continue;
5921db9d7d36SMaxime Chevallier
/* Select the (thread, port) group, then program its RX queue range
 * via the indexed INDEX/SUB_GROUP_CONFIG register pair.
 */
5922db9d7d36SMaxime Chevallier val = qv->sw_thread_id;
5923db9d7d36SMaxime Chevallier val |= port->id << MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET;
5924db9d7d36SMaxime Chevallier mvpp2_write(priv, MVPP22_ISR_RXQ_GROUP_INDEX_REG, val);
5925db9d7d36SMaxime Chevallier
5926db9d7d36SMaxime Chevallier val = qv->first_rxq;
5927db9d7d36SMaxime Chevallier val |= qv->nrxqs << MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET;
5928db9d7d36SMaxime Chevallier mvpp2_write(priv, MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG, val);
5929db9d7d36SMaxime Chevallier }
5930db9d7d36SMaxime Chevallier }
5931db9d7d36SMaxime Chevallier
5932db9d7d36SMaxime Chevallier /* Initialize port HW */
/* One-time hardware initialization for a port: validates queue counts,
 * forces the link down, allocates and initializes TX/RX queue structures
 * (plus the TX per-thread state), programs RX IRQ groups and classifier
 * defaults, initializes RSS and BM pools, and zeroes the ethtool stats.
 * Returns 0 on success or a negative errno; on failure, frees any percpu
 * TX state already allocated (devm_* allocations are auto-released).
 */
mvpp2_port_init(struct mvpp2_port * port)5933db9d7d36SMaxime Chevallier static int mvpp2_port_init(struct mvpp2_port *port)
5934db9d7d36SMaxime Chevallier {
5935db9d7d36SMaxime Chevallier struct device *dev = port->dev->dev.parent;
5936db9d7d36SMaxime Chevallier struct mvpp2 *priv = port->priv;
5937db9d7d36SMaxime Chevallier struct mvpp2_txq_pcpu *txq_pcpu;
5938074c74dfSAntoine Tenart unsigned int thread;
593987508224SStefan Chulski int queue, err, val;
5940db9d7d36SMaxime Chevallier
5941db9d7d36SMaxime Chevallier /* Checks for hardware constraints */
5942db9d7d36SMaxime Chevallier if (port->first_rxq + port->nrxqs >
5943db9d7d36SMaxime Chevallier MVPP2_MAX_PORTS * priv->max_port_rxqs)
5944db9d7d36SMaxime Chevallier return -EINVAL;
5945db9d7d36SMaxime Chevallier
59463f136849SAntoine Tenart if (port->nrxqs > priv->max_port_rxqs || port->ntxqs > MVPP2_MAX_TXQ)
5947db9d7d36SMaxime Chevallier return -EINVAL;
5948db9d7d36SMaxime Chevallier
5949db9d7d36SMaxime Chevallier /* Disable port */
5950db9d7d36SMaxime Chevallier mvpp2_egress_disable(port);
5951db9d7d36SMaxime Chevallier mvpp2_port_disable(port);
5952db9d7d36SMaxime Chevallier
/* Force the MAC link down while (re)initializing, using the XLG or
 * GMAC control register depending on the configured interface mode.
 */
595387508224SStefan Chulski if (mvpp2_is_xlg(port->phy_interface)) {
595487508224SStefan Chulski val = readl(port->base + MVPP22_XLG_CTRL0_REG);
595587508224SStefan Chulski val &= ~MVPP22_XLG_CTRL0_FORCE_LINK_PASS;
595687508224SStefan Chulski val |= MVPP22_XLG_CTRL0_FORCE_LINK_DOWN;
595787508224SStefan Chulski writel(val, port->base + MVPP22_XLG_CTRL0_REG);
595887508224SStefan Chulski } else {
595987508224SStefan Chulski val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
596087508224SStefan Chulski val &= ~MVPP2_GMAC_FORCE_LINK_PASS;
596187508224SStefan Chulski val |= MVPP2_GMAC_FORCE_LINK_DOWN;
596287508224SStefan Chulski writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
596387508224SStefan Chulski }
596487508224SStefan Chulski
5965db9d7d36SMaxime Chevallier port->tx_time_coal = MVPP2_TXDONE_COAL_USEC;
5966db9d7d36SMaxime Chevallier
5967db9d7d36SMaxime Chevallier port->txqs = devm_kcalloc(dev, port->ntxqs, sizeof(*port->txqs),
5968db9d7d36SMaxime Chevallier GFP_KERNEL);
5969db9d7d36SMaxime Chevallier if (!port->txqs)
5970db9d7d36SMaxime Chevallier return -ENOMEM;
5971db9d7d36SMaxime Chevallier
5972db9d7d36SMaxime Chevallier /* Associate physical Tx queues to this port and initialize.
5973db9d7d36SMaxime Chevallier * The mapping is predefined.
5974db9d7d36SMaxime Chevallier */
5975db9d7d36SMaxime Chevallier for (queue = 0; queue < port->ntxqs; queue++) {
5976db9d7d36SMaxime Chevallier int queue_phy_id = mvpp2_txq_phys(port->id, queue);
5977db9d7d36SMaxime Chevallier struct mvpp2_tx_queue *txq;
5978db9d7d36SMaxime Chevallier
5979db9d7d36SMaxime Chevallier txq = devm_kzalloc(dev, sizeof(*txq), GFP_KERNEL);
5980db9d7d36SMaxime Chevallier if (!txq) {
5981db9d7d36SMaxime Chevallier err = -ENOMEM;
5982db9d7d36SMaxime Chevallier goto err_free_percpu;
5983db9d7d36SMaxime Chevallier }
5984db9d7d36SMaxime Chevallier
5985db9d7d36SMaxime Chevallier txq->pcpu = alloc_percpu(struct mvpp2_txq_pcpu);
5986db9d7d36SMaxime Chevallier if (!txq->pcpu) {
5987db9d7d36SMaxime Chevallier err = -ENOMEM;
5988db9d7d36SMaxime Chevallier goto err_free_percpu;
5989db9d7d36SMaxime Chevallier }
5990db9d7d36SMaxime Chevallier
5991db9d7d36SMaxime Chevallier txq->id = queue_phy_id;
5992db9d7d36SMaxime Chevallier txq->log_id = queue;
5993db9d7d36SMaxime Chevallier txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH;
/* Record each HW thread's id in its per-thread TX state. */
5994e531f767SAntoine Tenart for (thread = 0; thread < priv->nthreads; thread++) {
5995074c74dfSAntoine Tenart txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
5996074c74dfSAntoine Tenart txq_pcpu->thread = thread;
5997db9d7d36SMaxime Chevallier }
5998db9d7d36SMaxime Chevallier
5999db9d7d36SMaxime Chevallier port->txqs[queue] = txq;
6000db9d7d36SMaxime Chevallier }
6001db9d7d36SMaxime Chevallier
6002db9d7d36SMaxime Chevallier port->rxqs = devm_kcalloc(dev, port->nrxqs, sizeof(*port->rxqs),
6003db9d7d36SMaxime Chevallier GFP_KERNEL);
6004db9d7d36SMaxime Chevallier if (!port->rxqs) {
6005db9d7d36SMaxime Chevallier err = -ENOMEM;
6006db9d7d36SMaxime Chevallier goto err_free_percpu;
6007db9d7d36SMaxime Chevallier }
6008db9d7d36SMaxime Chevallier
6009db9d7d36SMaxime Chevallier /* Allocate and initialize Rx queue for this port */
6010db9d7d36SMaxime Chevallier for (queue = 0; queue < port->nrxqs; queue++) {
6011db9d7d36SMaxime Chevallier struct mvpp2_rx_queue *rxq;
6012db9d7d36SMaxime Chevallier
6013db9d7d36SMaxime Chevallier /* Map physical Rx queue to port's logical Rx queue */
6014db9d7d36SMaxime Chevallier rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL);
6015db9d7d36SMaxime Chevallier if (!rxq) {
6016db9d7d36SMaxime Chevallier err = -ENOMEM;
6017db9d7d36SMaxime Chevallier goto err_free_percpu;
6018db9d7d36SMaxime Chevallier }
6019db9d7d36SMaxime Chevallier /* Map this Rx queue to a physical queue */
6020db9d7d36SMaxime Chevallier rxq->id = port->first_rxq + queue;
6021db9d7d36SMaxime Chevallier rxq->port = port->id;
6022db9d7d36SMaxime Chevallier rxq->logic_rxq = queue;
6023db9d7d36SMaxime Chevallier
6024db9d7d36SMaxime Chevallier port->rxqs[queue] = rxq;
6025db9d7d36SMaxime Chevallier }
6026db9d7d36SMaxime Chevallier
6027db9d7d36SMaxime Chevallier mvpp2_rx_irqs_setup(port);
6028db9d7d36SMaxime Chevallier
6029db9d7d36SMaxime Chevallier /* Create Rx descriptor rings */
6030db9d7d36SMaxime Chevallier for (queue = 0; queue < port->nrxqs; queue++) {
6031db9d7d36SMaxime Chevallier struct mvpp2_rx_queue *rxq = port->rxqs[queue];
6032db9d7d36SMaxime Chevallier
6033db9d7d36SMaxime Chevallier rxq->size = port->rx_ring_size;
6034db9d7d36SMaxime Chevallier rxq->pkts_coal = MVPP2_RX_COAL_PKTS;
6035db9d7d36SMaxime Chevallier rxq->time_coal = MVPP2_RX_COAL_USEC;
6036db9d7d36SMaxime Chevallier }
6037db9d7d36SMaxime Chevallier
6038db9d7d36SMaxime Chevallier mvpp2_ingress_disable(port);
6039db9d7d36SMaxime Chevallier
6040db9d7d36SMaxime Chevallier /* Port default configuration */
6041db9d7d36SMaxime Chevallier mvpp2_defaults_set(port);
6042db9d7d36SMaxime Chevallier
6043db9d7d36SMaxime Chevallier /* Port's classifier configuration */
6044db9d7d36SMaxime Chevallier mvpp2_cls_oversize_rxq_set(port);
6045db9d7d36SMaxime Chevallier mvpp2_cls_port_config(port);
6046db9d7d36SMaxime Chevallier
60470a8a8000SStefan Chulski if (mvpp22_rss_is_supported(port))
60486310f77dSMaxime Chevallier mvpp22_port_rss_init(port);
6049e6e21c02SMaxime Chevallier
6050db9d7d36SMaxime Chevallier /* Provide an initial Rx packet size */
6051db9d7d36SMaxime Chevallier port->pkt_size = MVPP2_RX_PKT_SIZE(port->dev->mtu);
6052db9d7d36SMaxime Chevallier
6053db9d7d36SMaxime Chevallier /* Initialize pools for swf */
6054db9d7d36SMaxime Chevallier err = mvpp2_swf_bm_pool_init(port);
6055db9d7d36SMaxime Chevallier if (err)
6056db9d7d36SMaxime Chevallier goto err_free_percpu;
6057db9d7d36SMaxime Chevallier
60589bea6897SMaxime Chevallier /* Clear all port stats */
60599bea6897SMaxime Chevallier mvpp2_read_stats(port);
60609bea6897SMaxime Chevallier memset(port->ethtool_stats, 0,
60619bea6897SMaxime Chevallier MVPP2_N_ETHTOOL_STATS(port->ntxqs, port->nrxqs) * sizeof(u64));
60626410c139SMaxime Chevallier
6063db9d7d36SMaxime Chevallier return 0;
6064db9d7d36SMaxime Chevallier
/* Free only the percpu TX state; devm allocations are released by the
 * driver core when probe fails or the device is removed.
 */
6065db9d7d36SMaxime Chevallier err_free_percpu:
6066db9d7d36SMaxime Chevallier for (queue = 0; queue < port->ntxqs; queue++) {
6067db9d7d36SMaxime Chevallier if (!port->txqs[queue])
6068db9d7d36SMaxime Chevallier continue;
6069db9d7d36SMaxime Chevallier free_percpu(port->txqs[queue]->pcpu);
6070db9d7d36SMaxime Chevallier }
6071db9d7d36SMaxime Chevallier return err;
6072db9d7d36SMaxime Chevallier }
6073db9d7d36SMaxime Chevallier
/* Check whether the port's DT node uses the legacy interrupt naming
 * ("rx-shared" plus "tx-cpu0".."tx-cpu3").  All five names must be
 * present; if so, set MVPP2_F_DT_COMPAT in *flags and return true.
 */
mvpp22_port_has_legacy_tx_irqs(struct device_node * port_node,unsigned long * flags)6074a9aac385SAntoine Tenart static bool mvpp22_port_has_legacy_tx_irqs(struct device_node *port_node,
6075a9aac385SAntoine Tenart unsigned long *flags)
6076db9d7d36SMaxime Chevallier {
6077a9aac385SAntoine Tenart char *irqs[5] = { "rx-shared", "tx-cpu0", "tx-cpu1", "tx-cpu2",
6078a9aac385SAntoine Tenart "tx-cpu3" };
6079a9aac385SAntoine Tenart int i;
6080a9aac385SAntoine Tenart
6081a9aac385SAntoine Tenart for (i = 0; i < 5; i++)
6082a9aac385SAntoine Tenart if (of_property_match_string(port_node, "interrupt-names",
6083a9aac385SAntoine Tenart irqs[i]) < 0)
6084a9aac385SAntoine Tenart return false;
6085a9aac385SAntoine Tenart
6086a9aac385SAntoine Tenart *flags |= MVPP2_F_DT_COMPAT;
6087a9aac385SAntoine Tenart return true;
6088a9aac385SAntoine Tenart }
6089a9aac385SAntoine Tenart
6090a9aac385SAntoine Tenart /* Checks if the port dt description has the required Tx interrupts:
6091a9aac385SAntoine Tenart * - PPv2.1: there are no such interrupts.
60926af27a1dSStefan Chulski * - PPv2.2 and PPv2.3:
6093a9aac385SAntoine Tenart * - The old DTs have: "rx-shared", "tx-cpuX" with X in [0...3]
6094a9aac385SAntoine Tenart * - The new ones have: "hifX" with X in [0..8]
6095a9aac385SAntoine Tenart *
6096a9aac385SAntoine Tenart * All those variants are supported to keep the backward compatibility.
6097a9aac385SAntoine Tenart */
mvpp2_port_has_irqs(struct mvpp2 * priv,struct device_node * port_node,unsigned long * flags)6098a9aac385SAntoine Tenart static bool mvpp2_port_has_irqs(struct mvpp2 *priv,
6099a9aac385SAntoine Tenart struct device_node *port_node,
6100a9aac385SAntoine Tenart unsigned long *flags)
6101a9aac385SAntoine Tenart {
/* Buffer sized for "hifX" + NUL; X is a single digit here. */
6102a9aac385SAntoine Tenart char name[5];
6103a9aac385SAntoine Tenart int i;
6104db9d7d36SMaxime Chevallier
6105fd4a1056SAntoine Tenart /* ACPI */
6106fd4a1056SAntoine Tenart if (!port_node)
6107fd4a1056SAntoine Tenart return true;
6108fd4a1056SAntoine Tenart
6109db9d7d36SMaxime Chevallier if (priv->hw_version == MVPP21)
6110db9d7d36SMaxime Chevallier return false;
6111db9d7d36SMaxime Chevallier
/* Legacy naming takes precedence; it also sets MVPP2_F_DT_COMPAT. */
6112a9aac385SAntoine Tenart if (mvpp22_port_has_legacy_tx_irqs(port_node, flags))
6113a9aac385SAntoine Tenart return true;
6114a9aac385SAntoine Tenart
/* Otherwise require the full set of "hifX" interrupts. */
6115a9aac385SAntoine Tenart for (i = 0; i < MVPP2_MAX_THREADS; i++) {
6116a9aac385SAntoine Tenart snprintf(name, 5, "hif%d", i);
6117a9aac385SAntoine Tenart if (of_property_match_string(port_node, "interrupt-names",
6118a9aac385SAntoine Tenart name) < 0)
6119db9d7d36SMaxime Chevallier return false;
6120db9d7d36SMaxime Chevallier }
6121db9d7d36SMaxime Chevallier
6122db9d7d36SMaxime Chevallier return true;
6123db9d7d36SMaxime Chevallier }
6124db9d7d36SMaxime Chevallier
/* Pick the port's MAC address, trying sources in priority order:
 * firmware node property, then (PPv2.1 only) the hardware registers,
 * then an nvmem cell (OF platforms), then a random address.
 * *mac_from is set to a string naming the source used.  Returns 0, or
 * -EPROBE_DEFER when the nvmem cell is not yet available.
 */
mvpp2_port_copy_mac_addr(struct net_device * dev,struct mvpp2 * priv,struct fwnode_handle * fwnode,char ** mac_from)6125cc4342f6SMiquel Raynal static int mvpp2_port_copy_mac_addr(struct net_device *dev, struct mvpp2 *priv,
6126db9d7d36SMaxime Chevallier struct fwnode_handle *fwnode,
6127db9d7d36SMaxime Chevallier char **mac_from)
6128db9d7d36SMaxime Chevallier {
6129db9d7d36SMaxime Chevallier struct mvpp2_port *port = netdev_priv(dev);
6130db9d7d36SMaxime Chevallier char hw_mac_addr[ETH_ALEN] = {0};
6131db9d7d36SMaxime Chevallier char fw_mac_addr[ETH_ALEN];
6132cc4342f6SMiquel Raynal int ret;
6133db9d7d36SMaxime Chevallier
61340a14501eSJakub Kicinski if (!fwnode_get_mac_address(fwnode, fw_mac_addr)) {
6135db9d7d36SMaxime Chevallier *mac_from = "firmware node";
6136f3956ebbSJakub Kicinski eth_hw_addr_set(dev, fw_mac_addr);
6137cc4342f6SMiquel Raynal return 0;
6138db9d7d36SMaxime Chevallier }
6139db9d7d36SMaxime Chevallier
6140db9d7d36SMaxime Chevallier if (priv->hw_version == MVPP21) {
6141db9d7d36SMaxime Chevallier mvpp21_get_mac_address(port, hw_mac_addr);
6142db9d7d36SMaxime Chevallier if (is_valid_ether_addr(hw_mac_addr)) {
6143db9d7d36SMaxime Chevallier *mac_from = "hardware";
6144f3956ebbSJakub Kicinski eth_hw_addr_set(dev, hw_mac_addr);
6145cc4342f6SMiquel Raynal return 0;
6146db9d7d36SMaxime Chevallier }
6147db9d7d36SMaxime Chevallier }
6148db9d7d36SMaxime Chevallier
61497a74c126SMiquel Raynal /* Only valid on OF enabled platforms */
6150cc4342f6SMiquel Raynal ret = of_get_mac_address_nvmem(to_of_node(fwnode), fw_mac_addr);
/* Propagate -EPROBE_DEFER so probe retries once the nvmem provider
 * appears; any other nvmem failure just falls through to random.
 */
6151cc4342f6SMiquel Raynal if (ret == -EPROBE_DEFER)
6152cc4342f6SMiquel Raynal return ret;
6153cc4342f6SMiquel Raynal if (!ret) {
61547a74c126SMiquel Raynal *mac_from = "nvmem cell";
61557a74c126SMiquel Raynal eth_hw_addr_set(dev, fw_mac_addr);
6156cc4342f6SMiquel Raynal return 0;
61577a74c126SMiquel Raynal }
61587a74c126SMiquel Raynal
6159db9d7d36SMaxime Chevallier *mac_from = "random";
6160db9d7d36SMaxime Chevallier eth_hw_addr_random(dev);
6161cc4342f6SMiquel Raynal
6162cc4342f6SMiquel Raynal return 0;
6163db9d7d36SMaxime Chevallier }
6164db9d7d36SMaxime Chevallier
/* Map a phylink_config back to the mvpp2_port embedding it */
static struct mvpp2_port *mvpp2_phylink_to_port(struct phylink_config *config)
{
	return container_of(config, struct mvpp2_port, phylink_config);
}
61696c2b49ebSRussell King
/* Map the XLG phylink_pcs back to the mvpp2_port embedding it */
static struct mvpp2_port *mvpp2_pcs_xlg_to_port(struct phylink_pcs *pcs)
{
	return container_of(pcs, struct mvpp2_port, pcs_xlg);
}
6174cff05632SRussell King (Oracle)
/* Map the GMAC phylink_pcs back to the mvpp2_port embedding it */
static struct mvpp2_port *mvpp2_pcs_gmac_to_port(struct phylink_pcs *pcs)
{
	return container_of(pcs, struct mvpp2_port, pcs_gmac);
}
617994bfe438SRussell King
/* Report the XLG PCS link state to phylink.
 *
 * Speed and duplex are fixed by the interface mode (5GBASER -> 5G,
 * otherwise 10G, always full duplex); only link and pause status are
 * read back from the XLG MAC registers.
 */
static void mvpp2_xlg_pcs_get_state(struct phylink_pcs *pcs,
				    struct phylink_link_state *state)
{
	struct mvpp2_port *port = mvpp2_pcs_xlg_to_port(pcs);
	u32 val;

	if (port->phy_interface == PHY_INTERFACE_MODE_5GBASER)
		state->speed = SPEED_5000;
	else
		state->speed = SPEED_10000;
	state->duplex = 1;	/* always full duplex in XLG modes */
	state->an_complete = 1;

	val = readl(port->base + MVPP22_XLG_STATUS);
	state->link = !!(val & MVPP22_XLG_STATUS_LINK_UP);

	/* Pause state comes from the flow-control enable bits in CTRL0 */
	state->pause = 0;
	val = readl(port->base + MVPP22_XLG_CTRL0_REG);
	if (val & MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN)
		state->pause |= MLO_PAUSE_TX;
	if (val & MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN)
		state->pause |= MLO_PAUSE_RX;
}
620394bfe438SRussell King
/* The XLG PCS has no negotiation controls to program; this callback is
 * a deliberate no-op that reports "no change" (0) to phylink.
 */
static int mvpp2_xlg_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode,
				phy_interface_t interface,
				const unsigned long *advertising,
				bool permit_pause_to_mac)
{
	return 0;
}
6211c596d2cdSRussell King
/* PCS operations for the XLG (10G/5G) MAC */
static const struct phylink_pcs_ops mvpp2_phylink_xlg_pcs_ops = {
	.pcs_get_state = mvpp2_xlg_pcs_get_state,
	.pcs_config = mvpp2_xlg_pcs_config,
};
6216c596d2cdSRussell King
mvpp2_gmac_pcs_validate(struct phylink_pcs * pcs,unsigned long * supported,const struct phylink_link_state * state)621785e3e0ebSRussell King (Oracle) static int mvpp2_gmac_pcs_validate(struct phylink_pcs *pcs,
621885e3e0ebSRussell King (Oracle) unsigned long *supported,
621985e3e0ebSRussell King (Oracle) const struct phylink_link_state *state)
622085e3e0ebSRussell King (Oracle) {
622185e3e0ebSRussell King (Oracle) /* When in 802.3z mode, we must have AN enabled:
622285e3e0ebSRussell King (Oracle) * Bit 2 Field InBandAnEn In-band Auto-Negotiation enable. ...
622385e3e0ebSRussell King (Oracle) * When <PortType> = 1 (1000BASE-X) this field must be set to 1.
622485e3e0ebSRussell King (Oracle) */
622585e3e0ebSRussell King (Oracle) if (phy_interface_mode_is_8023z(state->interface) &&
622685e3e0ebSRussell King (Oracle) !phylink_test(state->advertising, Autoneg))
622785e3e0ebSRussell King (Oracle) return -EINVAL;
622885e3e0ebSRussell King (Oracle)
622985e3e0ebSRussell King (Oracle) return 0;
623085e3e0ebSRussell King (Oracle) }
623185e3e0ebSRussell King (Oracle)
/* Report the GMAC PCS link state to phylink by decoding GMAC_STATUS0.
 *
 * For 802.3z modes the speed is fixed by the interface mode; otherwise
 * it is decoded from the GMII/MII speed status bits.
 */
static void mvpp2_gmac_pcs_get_state(struct phylink_pcs *pcs,
				     struct phylink_link_state *state)
{
	struct mvpp2_port *port = mvpp2_pcs_gmac_to_port(pcs);
	u32 val;

	val = readl(port->base + MVPP2_GMAC_STATUS0);

	state->an_complete = !!(val & MVPP2_GMAC_STATUS0_AN_COMPLETE);
	state->link = !!(val & MVPP2_GMAC_STATUS0_LINK_UP);
	state->duplex = !!(val & MVPP2_GMAC_STATUS0_FULL_DUPLEX);

	switch (port->phy_interface) {
	case PHY_INTERFACE_MODE_1000BASEX:
		state->speed = SPEED_1000;
		break;
	case PHY_INTERFACE_MODE_2500BASEX:
		state->speed = SPEED_2500;
		break;
	default:
		/* SGMII/RGMII: speed is negotiated and read from status */
		if (val & MVPP2_GMAC_STATUS0_GMII_SPEED)
			state->speed = SPEED_1000;
		else if (val & MVPP2_GMAC_STATUS0_MII_SPEED)
			state->speed = SPEED_100;
		else
			state->speed = SPEED_10;
	}

	state->pause = 0;
	if (val & MVPP2_GMAC_STATUS0_RX_PAUSE)
		state->pause |= MLO_PAUSE_RX;
	if (val & MVPP2_GMAC_STATUS0_TX_PAUSE)
		state->pause |= MLO_PAUSE_TX;
}
626694bfe438SRussell King
/* Program the GMAC in-band autoneg controls for the requested mode.
 *
 * Builds a mask of the AN-related bits this function owns and the value
 * to set them to, then performs a read-modify-write of
 * MVPP2_GMAC_AUTONEG_CONFIG (skipping the write when nothing changed).
 * Returns nonzero when the pause advertisement bits changed, telling
 * phylink that an autoneg restart is needed.
 */
static int mvpp2_gmac_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode,
				 phy_interface_t interface,
				 const unsigned long *advertising,
				 bool permit_pause_to_mac)
{
	struct mvpp2_port *port = mvpp2_pcs_gmac_to_port(pcs);
	u32 mask, val, an, old_an, changed;

	/* Bits in MVPP2_GMAC_AUTONEG_CONFIG owned by this function */
	mask = MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS |
	       MVPP2_GMAC_IN_BAND_AUTONEG |
	       MVPP2_GMAC_AN_SPEED_EN |
	       MVPP2_GMAC_FLOW_CTRL_AUTONEG |
	       MVPP2_GMAC_AN_DUPLEX_EN;

	if (neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED) {
		mask |= MVPP2_GMAC_CONFIG_MII_SPEED |
			MVPP2_GMAC_CONFIG_GMII_SPEED |
			MVPP2_GMAC_CONFIG_FULL_DUPLEX;
		val = MVPP2_GMAC_IN_BAND_AUTONEG;

		if (interface == PHY_INTERFACE_MODE_SGMII) {
			/* SGMII mode receives the speed and duplex from PHY */
			val |= MVPP2_GMAC_AN_SPEED_EN |
			       MVPP2_GMAC_AN_DUPLEX_EN;
		} else {
			/* 802.3z mode has fixed speed and duplex */
			val |= MVPP2_GMAC_CONFIG_GMII_SPEED |
			       MVPP2_GMAC_CONFIG_FULL_DUPLEX;

			/* The FLOW_CTRL_AUTONEG bit selects either the hardware
			 * automatically or the bits in MVPP22_GMAC_CTRL_4_REG
			 * manually controls the GMAC pause modes.
			 */
			if (permit_pause_to_mac)
				val |= MVPP2_GMAC_FLOW_CTRL_AUTONEG;

			/* Configure advertisement bits */
			mask |= MVPP2_GMAC_FC_ADV_EN | MVPP2_GMAC_FC_ADV_ASM_EN;
			if (phylink_test(advertising, Pause))
				val |= MVPP2_GMAC_FC_ADV_EN;
			if (phylink_test(advertising, Asym_Pause))
				val |= MVPP2_GMAC_FC_ADV_ASM_EN;
		}
	} else {
		/* Out-of-band negotiation: clear every AN control bit */
		val = 0;
	}

	old_an = an = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
	an = (an & ~mask) | val;
	changed = an ^ old_an;
	if (changed)
		writel(an, port->base + MVPP2_GMAC_AUTONEG_CONFIG);

	/* We are only interested in the advertisement bits changing */
	return changed & (MVPP2_GMAC_FC_ADV_EN | MVPP2_GMAC_FC_ADV_ASM_EN);
}
632394bfe438SRussell King
/* Restart in-band autoneg by pulsing the RESTART_AN bit high then low */
static void mvpp2_gmac_pcs_an_restart(struct phylink_pcs *pcs)
{
	struct mvpp2_port *port = mvpp2_pcs_gmac_to_port(pcs);
	u32 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);

	writel(val | MVPP2_GMAC_IN_BAND_RESTART_AN,
	       port->base + MVPP2_GMAC_AUTONEG_CONFIG);
	writel(val & ~MVPP2_GMAC_IN_BAND_RESTART_AN,
	       port->base + MVPP2_GMAC_AUTONEG_CONFIG);
}
633494bfe438SRussell King
/* PCS operations for the GMAC (SGMII/802.3z/RGMII) MAC */
static const struct phylink_pcs_ops mvpp2_phylink_gmac_pcs_ops = {
	.pcs_validate = mvpp2_gmac_pcs_validate,
	.pcs_get_state = mvpp2_gmac_pcs_get_state,
	.pcs_config = mvpp2_gmac_pcs_config,
	.pcs_an_restart = mvpp2_gmac_pcs_an_restart,
};
634194bfe438SRussell King
/* Configure the XLG MAC: deassert the MAC reset, select XLG MAC mode
 * (clearing GMAC mode select and idle check) with flow-control
 * forwarding enabled, then wait for the reset deassertion to take
 * effect.
 */
static void mvpp2_xlg_config(struct mvpp2_port *port, unsigned int mode,
			     const struct phylink_link_state *state)
{
	u32 val;

	mvpp2_modify(port->base + MVPP22_XLG_CTRL0_REG,
		     MVPP22_XLG_CTRL0_MAC_RESET_DIS,
		     MVPP22_XLG_CTRL0_MAC_RESET_DIS);
	mvpp2_modify(port->base + MVPP22_XLG_CTRL4_REG,
		     MVPP22_XLG_CTRL4_MACMODSELECT_GMAC |
		     MVPP22_XLG_CTRL4_EN_IDLE_CHECK |
		     MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC,
		     MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC);

	/* Wait for reset to deassert */
	/* NOTE(review): busy-wait without a timeout - assumes the MAC
	 * always reflects RESET_DIS promptly; confirm against hardware.
	 */
	do {
		val = readl(port->base + MVPP22_XLG_CTRL0_REG);
	} while (!(val & MVPP22_XLG_CTRL0_MAC_RESET_DIS));
}
6361db9d7d36SMaxime Chevallier
/* Configure the GMAC for the requested interface and negotiation mode.
 *
 * Performs a read-modify-write of CTRL0/CTRL2/CTRL4, writing each
 * register back only when its value actually changed.
 */
static void mvpp2_gmac_config(struct mvpp2_port *port, unsigned int mode,
			      const struct phylink_link_state *state)
{
	u32 old_ctrl0, ctrl0;
	u32 old_ctrl2, ctrl2;
	u32 old_ctrl4, ctrl4;

	old_ctrl0 = ctrl0 = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	old_ctrl2 = ctrl2 = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
	old_ctrl4 = ctrl4 = readl(port->base + MVPP22_GMAC_CTRL_4_REG);

	ctrl0 &= ~MVPP2_GMAC_PORT_TYPE_MASK;
	ctrl2 &= ~(MVPP2_GMAC_INBAND_AN_MASK | MVPP2_GMAC_PCS_ENABLE_MASK | MVPP2_GMAC_FLOW_CTRL_MASK);

	/* Configure port type */
	if (phy_interface_mode_is_8023z(state->interface)) {
		/* 1000BASE-X/2500BASE-X: PCS on, serdes clocking */
		ctrl2 |= MVPP2_GMAC_PCS_ENABLE_MASK;
		ctrl4 &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL;
		ctrl4 |= MVPP22_CTRL4_SYNC_BYPASS_DIS |
			 MVPP22_CTRL4_DP_CLK_SEL |
			 MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
	} else if (state->interface == PHY_INTERFACE_MODE_SGMII) {
		/* SGMII: PCS on, with in-band AN enabled */
		ctrl2 |= MVPP2_GMAC_PCS_ENABLE_MASK | MVPP2_GMAC_INBAND_AN_MASK;
		ctrl4 &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL;
		ctrl4 |= MVPP22_CTRL4_SYNC_BYPASS_DIS |
			 MVPP22_CTRL4_DP_CLK_SEL |
			 MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
	} else if (phy_interface_mode_is_rgmii(state->interface)) {
		/* RGMII: external GMII pins, no PCS */
		ctrl4 &= ~MVPP22_CTRL4_DP_CLK_SEL;
		ctrl4 |= MVPP22_CTRL4_EXT_PIN_GMII_SEL |
			 MVPP22_CTRL4_SYNC_BYPASS_DIS |
			 MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
	}

	/* Configure negotiation style */
	if (!phylink_autoneg_inband(mode)) {
		/* Phy or fixed speed - no in-band AN, nothing to do, leave the
		 * configured speed, duplex and flow control as-is.
		 */
	} else if (state->interface == PHY_INTERFACE_MODE_SGMII) {
		/* SGMII in-band mode receives the speed and duplex from
		 * the PHY. Flow control information is not received. */
	} else if (phy_interface_mode_is_8023z(state->interface)) {
		/* 1000BaseX and 2500BaseX ports cannot negotiate speed nor can
		 * they negotiate duplex: they are always operating with a fixed
		 * speed of 1000/2500Mbps in full duplex, so force 1000/2500
		 * speed and full duplex here.
		 */
		ctrl0 |= MVPP2_GMAC_PORT_TYPE_MASK;
	}

	/* Only touch the hardware when a register actually changed */
	if (old_ctrl0 != ctrl0)
		writel(ctrl0, port->base + MVPP2_GMAC_CTRL_0_REG);
	if (old_ctrl2 != ctrl2)
		writel(ctrl2, port->base + MVPP2_GMAC_CTRL_2_REG);
	if (old_ctrl4 != ctrl4)
		writel(ctrl4, port->base + MVPP22_GMAC_CTRL_4_REG);
}
6420db9d7d36SMaxime Chevallier
/* phylink mac_select_pcs: return the PCS matching the interface mode.
 * Only modes that already passed the validate() checks can reach this
 * point, so a simple XLG/GMAC split is sufficient.
 */
static struct phylink_pcs *mvpp2_select_pcs(struct phylink_config *config,
					    phy_interface_t interface)
{
	struct mvpp2_port *port = mvpp2_phylink_to_port(config);

	return mvpp2_is_xlg(interface) ? &port->pcs_xlg : &port->pcs_gmac;
}
6435cff05632SRussell King (Oracle)
/* phylink mac_prepare: called before a mode or interface change.
 *
 * Rejects XLG modes on GOP instances other than 0, forces the link down
 * while reconfiguring, disables the port, and - when the interface is
 * actually changing - resets the GMAC and (on PPv2.2+) reconfigures the
 * serdes lanes. Returns 0 or -EINVAL.
 */
static int mvpp2_mac_prepare(struct phylink_config *config, unsigned int mode,
			     phy_interface_t interface)
{
	struct mvpp2_port *port = mvpp2_phylink_to_port(config);

	/* Check for invalid configuration */
	if (mvpp2_is_xlg(interface) && port->gop_id != 0) {
		netdev_err(port->dev, "Invalid mode on %s\n", port->dev->name);
		return -EINVAL;
	}

	if (port->phy_interface != interface ||
	    phylink_autoneg_inband(mode)) {
		/* Force the link down when changing the interface or if in
		 * in-band mode to ensure we do not change the configuration
		 * while the hardware is indicating link is up. We force both
		 * XLG and GMAC down to ensure that they're both in a known
		 * state.
		 */
		mvpp2_modify(port->base + MVPP2_GMAC_AUTONEG_CONFIG,
			     MVPP2_GMAC_FORCE_LINK_PASS |
			     MVPP2_GMAC_FORCE_LINK_DOWN,
			     MVPP2_GMAC_FORCE_LINK_DOWN);

		if (mvpp2_port_supports_xlg(port))
			mvpp2_modify(port->base + MVPP22_XLG_CTRL0_REG,
				     MVPP22_XLG_CTRL0_FORCE_LINK_PASS |
				     MVPP22_XLG_CTRL0_FORCE_LINK_DOWN,
				     MVPP22_XLG_CTRL0_FORCE_LINK_DOWN);
	}

	/* Make sure the port is disabled when reconfiguring the mode */
	mvpp2_port_disable(port);

	if (port->phy_interface != interface) {
		/* Place GMAC into reset */
		mvpp2_modify(port->base + MVPP2_GMAC_CTRL_2_REG,
			     MVPP2_GMAC_PORT_RESET_MASK,
			     MVPP2_GMAC_PORT_RESET_MASK);

		if (port->priv->hw_version >= MVPP22) {
			/* Quiesce GOP interrupts while the lanes change */
			mvpp22_gop_mask_irq(port);

			phy_power_off(port->comphy);

			/* Reconfigure the serdes lanes */
			mvpp22_mode_reconfigure(port, interface);
		}
	}

	return 0;
}
6488bfe301ebSRussell King
mvpp2_mac_config(struct phylink_config * config,unsigned int mode,const struct phylink_link_state * state)6489bfe301ebSRussell King static void mvpp2_mac_config(struct phylink_config *config, unsigned int mode,
6490bfe301ebSRussell King const struct phylink_link_state *state)
6491bfe301ebSRussell King {
6492bfe301ebSRussell King struct mvpp2_port *port = mvpp2_phylink_to_port(config);
6493bfe301ebSRussell King
6494db9d7d36SMaxime Chevallier /* mac (re)configuration */
64951d9b041eSRussell King if (mvpp2_is_xlg(state->interface))
6496db9d7d36SMaxime Chevallier mvpp2_xlg_config(port, mode, state);
6497db9d7d36SMaxime Chevallier else if (phy_interface_mode_is_rgmii(state->interface) ||
64984a4cec72SRussell King phy_interface_mode_is_8023z(state->interface) ||
64994a4cec72SRussell King state->interface == PHY_INTERFACE_MODE_SGMII)
6500db9d7d36SMaxime Chevallier mvpp2_gmac_config(port, mode, state);
6501db9d7d36SMaxime Chevallier
6502db9d7d36SMaxime Chevallier if (port->priv->hw_version == MVPP21 && port->flags & MVPP2_F_LOOPBACK)
6503db9d7d36SMaxime Chevallier mvpp2_port_loopback_set(port, state);
6504bfe301ebSRussell King }
6505db9d7d36SMaxime Chevallier
/* phylink mac_finish: complete a mode/interface change started by
 * mac_prepare().
 *
 * Records the new interface and re-unmasks GOP interrupts (PPv2.2+),
 * releases the GMAC from reset for non-XLG modes, re-enables the port,
 * and - for in-band modes - removes the forced-link-down state so the
 * link may come up. Always returns 0.
 */
static int mvpp2_mac_finish(struct phylink_config *config, unsigned int mode,
			    phy_interface_t interface)
{
	struct mvpp2_port *port = mvpp2_phylink_to_port(config);

	if (port->priv->hw_version >= MVPP22 &&
	    port->phy_interface != interface) {
		port->phy_interface = interface;

		/* Unmask interrupts */
		mvpp22_gop_unmask_irq(port);
	}

	if (!mvpp2_is_xlg(interface)) {
		/* Release GMAC reset and wait */
		mvpp2_modify(port->base + MVPP2_GMAC_CTRL_2_REG,
			     MVPP2_GMAC_PORT_RESET_MASK, 0);

		/* NOTE(review): busy-wait without a timeout - assumes the
		 * reset bit always clears; confirm against hardware.
		 */
		while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
		       MVPP2_GMAC_PORT_RESET_MASK)
			continue;
	}

	mvpp2_port_enable(port);

	/* Allow the link to come up if in in-band mode, otherwise the
	 * link is forced via mac_link_down()/mac_link_up()
	 */
	if (phylink_autoneg_inband(mode)) {
		if (mvpp2_is_xlg(interface))
			mvpp2_modify(port->base + MVPP22_XLG_CTRL0_REG,
				     MVPP22_XLG_CTRL0_FORCE_LINK_PASS |
				     MVPP22_XLG_CTRL0_FORCE_LINK_DOWN, 0);
		else
			mvpp2_modify(port->base + MVPP2_GMAC_AUTONEG_CONFIG,
				     MVPP2_GMAC_FORCE_LINK_PASS |
				     MVPP2_GMAC_FORCE_LINK_DOWN, 0);
	}

	return 0;
}
6547db9d7d36SMaxime Chevallier
/* phylink mac_link_up: bring the link up with the resolved parameters.
 *
 * For out-of-band (non-in-band) modes, forces the link up and programs
 * speed/duplex/pause into the MAC; for in-band modes the hardware has
 * already negotiated them. Also updates the driver's global flow
 * control state, then enables the port, egress/ingress, and the TX
 * queues.
 */
static void mvpp2_mac_link_up(struct phylink_config *config,
			      struct phy_device *phy,
			      unsigned int mode, phy_interface_t interface,
			      int speed, int duplex,
			      bool tx_pause, bool rx_pause)
{
	struct mvpp2_port *port = mvpp2_phylink_to_port(config);
	u32 val;
	int i;

	if (mvpp2_is_xlg(interface)) {
		if (!phylink_autoneg_inband(mode)) {
			/* Force link up and set the pause enables in CTRL0 */
			val = MVPP22_XLG_CTRL0_FORCE_LINK_PASS;
			if (tx_pause)
				val |= MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN;
			if (rx_pause)
				val |= MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN;

			mvpp2_modify(port->base + MVPP22_XLG_CTRL0_REG,
				     MVPP22_XLG_CTRL0_FORCE_LINK_DOWN |
				     MVPP22_XLG_CTRL0_FORCE_LINK_PASS |
				     MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN |
				     MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN, val);
		}
	} else {
		if (!phylink_autoneg_inband(mode)) {
			/* Force link up with the resolved speed/duplex */
			val = MVPP2_GMAC_FORCE_LINK_PASS;

			if (speed == SPEED_1000 || speed == SPEED_2500)
				val |= MVPP2_GMAC_CONFIG_GMII_SPEED;
			else if (speed == SPEED_100)
				val |= MVPP2_GMAC_CONFIG_MII_SPEED;

			if (duplex == DUPLEX_FULL)
				val |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;

			mvpp2_modify(port->base + MVPP2_GMAC_AUTONEG_CONFIG,
				     MVPP2_GMAC_FORCE_LINK_DOWN |
				     MVPP2_GMAC_FORCE_LINK_PASS |
				     MVPP2_GMAC_CONFIG_MII_SPEED |
				     MVPP2_GMAC_CONFIG_GMII_SPEED |
				     MVPP2_GMAC_CONFIG_FULL_DUPLEX, val);
		}

		/* We can always update the flow control enable bits;
		 * these will only be effective if flow control AN
		 * (MVPP2_GMAC_FLOW_CTRL_AUTONEG) is disabled.
		 */
		val = 0;
		if (tx_pause)
			val |= MVPP22_CTRL4_TX_FC_EN;
		if (rx_pause)
			val |= MVPP22_CTRL4_RX_FC_EN;

		mvpp2_modify(port->base + MVPP22_GMAC_CTRL_4_REG,
			     MVPP22_CTRL4_RX_FC_EN | MVPP22_CTRL4_TX_FC_EN,
			     val);
	}

	/* Propagate the TX pause state to the RX queues and BM pools */
	if (port->priv->global_tx_fc) {
		port->tx_fc = tx_pause;
		if (tx_pause)
			mvpp2_rxq_enable_fc(port);
		else
			mvpp2_rxq_disable_fc(port);
		if (port->priv->percpu_pools) {
			for (i = 0; i < port->nrxqs; i++)
				mvpp2_bm_pool_update_fc(port, &port->priv->bm_pools[i], tx_pause);
		} else {
			mvpp2_bm_pool_update_fc(port, port->pool_long, tx_pause);
			mvpp2_bm_pool_update_fc(port, port->pool_short, tx_pause);
		}
		if (port->priv->hw_version == MVPP23)
			mvpp23_rx_fifo_fc_en(port->priv, port->id, tx_pause);
	}

	mvpp2_port_enable(port);

	mvpp2_egress_enable(port);
	mvpp2_ingress_enable(port);
	netif_tx_wake_all_queues(port->dev);
}
6630db9d7d36SMaxime Chevallier
/* phylink mac_link_down: take the link down.
 *
 * For out-of-band modes the link is explicitly forced down in the XLG
 * or GMAC registers; for in-band modes the hardware drops it on its
 * own. Then stop the TX queues and disable egress/ingress and the port.
 */
static void mvpp2_mac_link_down(struct phylink_config *config,
				unsigned int mode, phy_interface_t interface)
{
	struct mvpp2_port *port = mvpp2_phylink_to_port(config);
	u32 val;

	if (!phylink_autoneg_inband(mode)) {
		if (mvpp2_is_xlg(interface)) {
			val = readl(port->base + MVPP22_XLG_CTRL0_REG);
			val &= ~MVPP22_XLG_CTRL0_FORCE_LINK_PASS;
			val |= MVPP22_XLG_CTRL0_FORCE_LINK_DOWN;
			writel(val, port->base + MVPP22_XLG_CTRL0_REG);
		} else {
			val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
			val &= ~MVPP2_GMAC_FORCE_LINK_PASS;
			val |= MVPP2_GMAC_FORCE_LINK_DOWN;
			writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
		}
	}

	netif_tx_stop_all_queues(port->dev);
	mvpp2_egress_disable(port);
	mvpp2_ingress_disable(port);

	mvpp2_port_disable(port);
}
6657db9d7d36SMaxime Chevallier
/* phylink MAC operations for mvpp2 ports */
static const struct phylink_mac_ops mvpp2_phylink_ops = {
	.mac_select_pcs = mvpp2_select_pcs,
	.mac_prepare = mvpp2_mac_prepare,
	.mac_config = mvpp2_mac_config,
	.mac_finish = mvpp2_mac_finish,
	.mac_link_up = mvpp2_mac_link_up,
	.mac_link_down = mvpp2_mac_link_down,
};
6666db9d7d36SMaxime Chevallier
666787745c74SRussell King /* Work-around for ACPI */
mvpp2_acpi_start(struct mvpp2_port * port)666887745c74SRussell King static void mvpp2_acpi_start(struct mvpp2_port *port)
666987745c74SRussell King {
667087745c74SRussell King /* Phylink isn't used as of now for ACPI, so the MAC has to be
667187745c74SRussell King * configured manually when the interface is started. This will
667287745c74SRussell King * be removed as soon as the phylink ACPI support lands in.
667387745c74SRussell King */
667487745c74SRussell King struct phylink_link_state state = {
667587745c74SRussell King .interface = port->phy_interface,
667687745c74SRussell King };
6677cff05632SRussell King (Oracle) struct phylink_pcs *pcs;
6678cff05632SRussell King (Oracle)
6679cff05632SRussell King (Oracle) pcs = mvpp2_select_pcs(&port->phylink_config, port->phy_interface);
6680cff05632SRussell King (Oracle)
6681cff05632SRussell King (Oracle) mvpp2_mac_prepare(&port->phylink_config, MLO_AN_INBAND,
6682bfe301ebSRussell King port->phy_interface);
668387745c74SRussell King mvpp2_mac_config(&port->phylink_config, MLO_AN_INBAND, &state);
6684d5b16264SRussell King (Oracle) pcs->ops->pcs_config(pcs, PHYLINK_PCS_NEG_INBAND_ENABLED,
6685d5b16264SRussell King (Oracle) port->phy_interface, state.advertising,
6686d5b16264SRussell King (Oracle) false);
6687bfe301ebSRussell King mvpp2_mac_finish(&port->phylink_config, MLO_AN_INBAND,
6688bfe301ebSRussell King port->phy_interface);
668987745c74SRussell King mvpp2_mac_link_up(&port->phylink_config, NULL,
669087745c74SRussell King MLO_AN_INBAND, port->phy_interface,
669187745c74SRussell King SPEED_UNKNOWN, DUPLEX_UNKNOWN, false, false);
669287745c74SRussell King }
669387745c74SRussell King
6694dfce1babSMarcin Wojtas /* In order to ensure backward compatibility for ACPI, check if the port
6695dfce1babSMarcin Wojtas * firmware node comprises the necessary description allowing to use phylink.
6696dfce1babSMarcin Wojtas */
mvpp2_use_acpi_compat_mode(struct fwnode_handle * port_fwnode)6697dfce1babSMarcin Wojtas static bool mvpp2_use_acpi_compat_mode(struct fwnode_handle *port_fwnode)
6698dfce1babSMarcin Wojtas {
6699dfce1babSMarcin Wojtas if (!is_acpi_node(port_fwnode))
6700dfce1babSMarcin Wojtas return false;
6701dfce1babSMarcin Wojtas
6702dfce1babSMarcin Wojtas return (!fwnode_property_present(port_fwnode, "phy-handle") &&
6703dfce1babSMarcin Wojtas !fwnode_property_present(port_fwnode, "managed") &&
6704dfce1babSMarcin Wojtas !fwnode_get_named_child_node(port_fwnode, "fixed-link"));
6705dfce1babSMarcin Wojtas }
6706dfce1babSMarcin Wojtas
/* Ports initialization
 *
 * Probe one port described by @port_fwnode (DT node or ACPI object),
 * allocate its net_device, map its registers, set up queue vectors,
 * per-CPU state and phylink, and finally register the netdev.
 * Returns 0 on success or a negative errno; on failure everything
 * acquired so far is unwound through the goto ladder at the bottom.
 */
static int mvpp2_port_probe(struct platform_device *pdev,
			    struct fwnode_handle *port_fwnode,
			    struct mvpp2 *priv)
{
	struct phy *comphy = NULL;
	struct mvpp2_port *port;
	struct mvpp2_port_pcpu *port_pcpu;
	/* NULL when probing from ACPI (no OF node behind the fwnode) */
	struct device_node *port_node = to_of_node(port_fwnode);
	netdev_features_t features;
	struct net_device *dev;
	struct phylink *phylink;
	char *mac_from = "";
	unsigned int ntxqs, nrxqs, thread;
	unsigned long flags = 0;
	bool has_tx_irqs;
	u32 id;
	int phy_mode;
	int err, i;

	has_tx_irqs = mvpp2_port_has_irqs(priv, port_node, &flags);
	if (!has_tx_irqs && queue_mode == MVPP2_QDIST_MULTI_MODE) {
		dev_err(&pdev->dev,
			"not enough IRQs to support multi queue mode\n");
		return -EINVAL;
	}

	ntxqs = MVPP2_MAX_TXQ;
	nrxqs = mvpp2_get_nrxqs(priv);

	dev = alloc_etherdev_mqs(sizeof(*port), ntxqs, nrxqs);
	if (!dev)
		return -ENOMEM;

	phy_mode = fwnode_get_phy_mode(port_fwnode);
	if (phy_mode < 0) {
		dev_err(&pdev->dev, "incorrect phy mode\n");
		err = phy_mode;
		goto err_free_netdev;
	}

	/*
	 * Rewrite 10GBASE-KR to 10GBASE-R for compatibility with existing DT.
	 * Existing usage of 10GBASE-KR is not correct; no backplane
	 * negotiation is done, and this driver does not actually support
	 * 10GBASE-KR.
	 */
	if (phy_mode == PHY_INTERFACE_MODE_10GKR)
		phy_mode = PHY_INTERFACE_MODE_10GBASER;

	if (port_node) {
		/* The serdes COMPHY is optional: anything but a probe
		 * deferral is treated as "no comphy".
		 */
		comphy = devm_of_phy_get(&pdev->dev, port_node, NULL);
		if (IS_ERR(comphy)) {
			if (PTR_ERR(comphy) == -EPROBE_DEFER) {
				err = -EPROBE_DEFER;
				goto err_free_netdev;
			}
			comphy = NULL;
		}
	}

	if (fwnode_property_read_u32(port_fwnode, "port-id", &id)) {
		err = -EINVAL;
		dev_err(&pdev->dev, "missing port-id value\n");
		goto err_free_netdev;
	}

	dev->tx_queue_len = MVPP2_MAX_TXD_MAX;
	dev->watchdog_timeo = 5 * HZ;
	dev->netdev_ops = &mvpp2_netdev_ops;
	dev->ethtool_ops = &mvpp2_eth_tool_ops;

	port = netdev_priv(dev);
	port->dev = dev;
	port->fwnode = port_fwnode;
	port->ntxqs = ntxqs;
	port->nrxqs = nrxqs;
	port->priv = priv;
	port->has_tx_irqs = has_tx_irqs;
	port->flags = flags;

	err = mvpp2_queue_vectors_init(port, port_node);
	if (err)
		goto err_free_netdev;

	if (port_node)
		port->port_irq = of_irq_get_byname(port_node, "link");
	else
		port->port_irq = fwnode_irq_get(port_fwnode, port->nqvecs + 1);
	if (port->port_irq == -EPROBE_DEFER) {
		err = -EPROBE_DEFER;
		goto err_deinit_qvecs;
	}
	if (port->port_irq <= 0)
		/* the link irq is optional */
		port->port_irq = 0;

	if (fwnode_property_read_bool(port_fwnode, "marvell,loopback"))
		port->flags |= MVPP2_F_LOOPBACK;

	port->id = id;
	if (priv->hw_version == MVPP21)
		port->first_rxq = port->id * port->nrxqs;
	else
		port->first_rxq = port->id * priv->max_port_rxqs;

	port->of_node = port_node;
	port->phy_interface = phy_mode;
	port->comphy = comphy;

	if (priv->hw_version == MVPP21) {
		/* PPv2.1: each port has its own register resource */
		port->base = devm_platform_ioremap_resource(pdev, 2 + id);
		if (IS_ERR(port->base)) {
			err = PTR_ERR(port->base);
			goto err_free_irq;
		}

		port->stats_base = port->priv->lms_base +
				   MVPP21_MIB_COUNTERS_OFFSET +
				   port->gop_id * MVPP21_MIB_COUNTERS_PORT_SZ;
	} else {
		/* PPv2.2/2.3: ports live at fixed offsets inside the shared
		 * iface region, selected by the GOP port id.
		 */
		if (fwnode_property_read_u32(port_fwnode, "gop-port-id",
					     &port->gop_id)) {
			err = -EINVAL;
			dev_err(&pdev->dev, "missing gop-port-id value\n");
			goto err_deinit_qvecs;
		}

		port->base = priv->iface_base + MVPP22_GMAC_BASE(port->gop_id);
		port->stats_base = port->priv->iface_base +
				   MVPP22_MIB_COUNTERS_OFFSET +
				   port->gop_id * MVPP22_MIB_COUNTERS_PORT_SZ;

		/* We may want a property to describe whether we should use
		 * MAC hardware timestamping.
		 */
		if (priv->tai)
			port->hwtstamp = true;
	}

	/* Alloc per-cpu and ethtool stats */
	port->stats = netdev_alloc_pcpu_stats(struct mvpp2_pcpu_stats);
	if (!port->stats) {
		err = -ENOMEM;
		goto err_free_irq;
	}

	port->ethtool_stats = devm_kcalloc(&pdev->dev,
					   MVPP2_N_ETHTOOL_STATS(ntxqs, nrxqs),
					   sizeof(u64), GFP_KERNEL);
	if (!port->ethtool_stats) {
		err = -ENOMEM;
		goto err_free_stats;
	}

	mutex_init(&port->gather_stats_lock);
	INIT_DELAYED_WORK(&port->stats_work, mvpp2_gather_hw_statistics);

	err = mvpp2_port_copy_mac_addr(dev, priv, port_fwnode, &mac_from);
	if (err < 0)
		goto err_free_stats;

	port->tx_ring_size = MVPP2_MAX_TXD_DFLT;
	port->rx_ring_size = MVPP2_MAX_RXD_DFLT;
	SET_NETDEV_DEV(dev, &pdev->dev);

	err = mvpp2_port_init(port);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to init port %d\n", id);
		goto err_free_stats;
	}

	mvpp2_port_periodic_xon_disable(port);

	/* Hold MAC and PCS in reset until the port is brought up */
	mvpp2_mac_reset_assert(port);
	mvpp22_pcs_reset_assert(port);

	port->pcpu = alloc_percpu(struct mvpp2_port_pcpu);
	if (!port->pcpu) {
		err = -ENOMEM;
		goto err_free_txq_pcpu;
	}

	if (!port->has_tx_irqs) {
		/* No TX completion interrupts: poll TX-done from a per-thread
		 * pinned hrtimer instead.
		 */
		for (thread = 0; thread < priv->nthreads; thread++) {
			port_pcpu = per_cpu_ptr(port->pcpu, thread);

			hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC,
				     HRTIMER_MODE_REL_PINNED_SOFT);
			port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb;
			port_pcpu->timer_scheduled = false;
			port_pcpu->dev = dev;
		}
	}

	features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		   NETIF_F_TSO;
	dev->features = features | NETIF_F_RXCSUM;
	dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO |
			    NETIF_F_HW_VLAN_CTAG_FILTER;

	if (mvpp22_rss_is_supported(port)) {
		dev->hw_features |= NETIF_F_RXHASH;
		dev->features |= NETIF_F_NTUPLE;
	}

	if (!port->priv->percpu_pools)
		mvpp2_set_hw_csum(port, port->pool_long->id);
	else if (port->ntxqs >= num_possible_cpus() * 2)
		/* XDP needs enough TX queues for dedicated XDP_TX queues */
		dev->xdp_features = NETDEV_XDP_ACT_BASIC |
				    NETDEV_XDP_ACT_REDIRECT |
				    NETDEV_XDP_ACT_NDO_XMIT;

	dev->vlan_features |= features;
	netif_set_tso_max_segs(dev, MVPP2_MAX_TSO_SEGS);

	dev->priv_flags |= IFF_UNICAST_FLT;

	/* MTU range: 68 - 9704 */
	dev->min_mtu = ETH_MIN_MTU;
	/* 9704 == 9728 - 20 and rounding to 8 */
	dev->max_mtu = MVPP2_BM_JUMBO_PKT_SIZE;
	dev->dev.of_node = port_node;

	port->pcs_gmac.ops = &mvpp2_phylink_gmac_pcs_ops;
	port->pcs_gmac.neg_mode = true;
	port->pcs_xlg.ops = &mvpp2_phylink_xlg_pcs_ops;
	port->pcs_xlg.neg_mode = true;

	if (!mvpp2_use_acpi_compat_mode(port_fwnode)) {
		port->phylink_config.dev = &dev->dev;
		port->phylink_config.type = PHYLINK_NETDEV;
		port->phylink_config.mac_capabilities =
			MAC_2500FD | MAC_1000FD | MAC_100 | MAC_10;

		if (port->priv->global_tx_fc)
			port->phylink_config.mac_capabilities |=
				MAC_SYM_PAUSE | MAC_ASYM_PAUSE;

		if (mvpp2_port_supports_xlg(port)) {
			/* If a COMPHY is present, we can support any of
			 * the serdes modes and switch between them.
			 */
			if (comphy) {
				__set_bit(PHY_INTERFACE_MODE_5GBASER,
					  port->phylink_config.supported_interfaces);
				__set_bit(PHY_INTERFACE_MODE_10GBASER,
					  port->phylink_config.supported_interfaces);
				__set_bit(PHY_INTERFACE_MODE_XAUI,
					  port->phylink_config.supported_interfaces);
			} else if (phy_mode == PHY_INTERFACE_MODE_5GBASER) {
				__set_bit(PHY_INTERFACE_MODE_5GBASER,
					  port->phylink_config.supported_interfaces);
			} else if (phy_mode == PHY_INTERFACE_MODE_10GBASER) {
				__set_bit(PHY_INTERFACE_MODE_10GBASER,
					  port->phylink_config.supported_interfaces);
			} else if (phy_mode == PHY_INTERFACE_MODE_XAUI) {
				__set_bit(PHY_INTERFACE_MODE_XAUI,
					  port->phylink_config.supported_interfaces);
			}

			if (comphy)
				port->phylink_config.mac_capabilities |=
					MAC_10000FD | MAC_5000FD;
			else if (phy_mode == PHY_INTERFACE_MODE_5GBASER)
				port->phylink_config.mac_capabilities |=
					MAC_5000FD;
			else
				port->phylink_config.mac_capabilities |=
					MAC_10000FD;
		}

		if (mvpp2_port_supports_rgmii(port))
			phy_interface_set_rgmii(port->phylink_config.supported_interfaces);

		if (comphy) {
			/* If a COMPHY is present, we can support any of the
			 * serdes modes and switch between them.
			 */
			__set_bit(PHY_INTERFACE_MODE_SGMII,
				  port->phylink_config.supported_interfaces);
			__set_bit(PHY_INTERFACE_MODE_1000BASEX,
				  port->phylink_config.supported_interfaces);
			__set_bit(PHY_INTERFACE_MODE_2500BASEX,
				  port->phylink_config.supported_interfaces);
		} else if (phy_mode == PHY_INTERFACE_MODE_2500BASEX) {
			/* No COMPHY, with only 2500BASE-X mode supported */
			__set_bit(PHY_INTERFACE_MODE_2500BASEX,
				  port->phylink_config.supported_interfaces);
		} else if (phy_mode == PHY_INTERFACE_MODE_1000BASEX ||
			   phy_mode == PHY_INTERFACE_MODE_SGMII) {
			/* No COMPHY, we can switch between 1000BASE-X and SGMII
			 */
			__set_bit(PHY_INTERFACE_MODE_1000BASEX,
				  port->phylink_config.supported_interfaces);
			__set_bit(PHY_INTERFACE_MODE_SGMII,
				  port->phylink_config.supported_interfaces);
		}

		phylink = phylink_create(&port->phylink_config, port_fwnode,
					 phy_mode, &mvpp2_phylink_ops);
		if (IS_ERR(phylink)) {
			err = PTR_ERR(phylink);
			goto err_free_port_pcpu;
		}
		port->phylink = phylink;
	} else {
		/* ACPI compat mode: no phylink, the link irq drives the MAC */
		dev_warn(&pdev->dev, "Use link irqs for port#%d. FW update required\n", port->id);
		port->phylink = NULL;
	}

	/* Cycle the comphy to power it down, saving 270mW per port -
	 * don't worry about an error powering it up. When the comphy
	 * driver does this, we can remove this code.
	 */
	if (port->comphy) {
		err = mvpp22_comphy_init(port, port->phy_interface);
		if (err == 0)
			phy_power_off(port->comphy);
	}

	err = register_netdev(dev);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to register netdev\n");
		goto err_phylink;
	}
	netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr);

	priv->port_list[priv->port_count++] = port;

	return 0;

err_phylink:
	if (port->phylink)
		phylink_destroy(port->phylink);
err_free_port_pcpu:
	free_percpu(port->pcpu);
err_free_txq_pcpu:
	for (i = 0; i < port->ntxqs; i++)
		free_percpu(port->txqs[i]->pcpu);
err_free_stats:
	free_percpu(port->stats);
err_free_irq:
	if (port->port_irq)
		irq_dispose_mapping(port->port_irq);
err_deinit_qvecs:
	mvpp2_queue_vectors_deinit(port);
err_free_netdev:
	free_netdev(dev);
	return err;
}
7058db9d7d36SMaxime Chevallier
/* Ports removal routine: undo mvpp2_port_probe() in reverse order */
static void mvpp2_port_remove(struct mvpp2_port *port)
{
	int i;

	/* Unregister first so no new traffic/ioctls reach the port */
	unregister_netdev(port->dev);
	if (port->phylink)
		phylink_destroy(port->phylink);
	free_percpu(port->pcpu);
	free_percpu(port->stats);
	for (i = 0; i < port->ntxqs; i++)
		free_percpu(port->txqs[i]->pcpu);
	mvpp2_queue_vectors_deinit(port);
	if (port->port_irq)
		irq_dispose_mapping(port->port_irq);
	free_netdev(port->dev);
}
7076db9d7d36SMaxime Chevallier
7077db9d7d36SMaxime Chevallier /* Initialize decoding windows */
mvpp2_conf_mbus_windows(const struct mbus_dram_target_info * dram,struct mvpp2 * priv)7078db9d7d36SMaxime Chevallier static void mvpp2_conf_mbus_windows(const struct mbus_dram_target_info *dram,
7079db9d7d36SMaxime Chevallier struct mvpp2 *priv)
7080db9d7d36SMaxime Chevallier {
7081db9d7d36SMaxime Chevallier u32 win_enable;
7082db9d7d36SMaxime Chevallier int i;
7083db9d7d36SMaxime Chevallier
7084db9d7d36SMaxime Chevallier for (i = 0; i < 6; i++) {
7085db9d7d36SMaxime Chevallier mvpp2_write(priv, MVPP2_WIN_BASE(i), 0);
7086db9d7d36SMaxime Chevallier mvpp2_write(priv, MVPP2_WIN_SIZE(i), 0);
7087db9d7d36SMaxime Chevallier
7088db9d7d36SMaxime Chevallier if (i < 4)
7089db9d7d36SMaxime Chevallier mvpp2_write(priv, MVPP2_WIN_REMAP(i), 0);
7090db9d7d36SMaxime Chevallier }
7091db9d7d36SMaxime Chevallier
7092db9d7d36SMaxime Chevallier win_enable = 0;
7093db9d7d36SMaxime Chevallier
7094db9d7d36SMaxime Chevallier for (i = 0; i < dram->num_cs; i++) {
7095db9d7d36SMaxime Chevallier const struct mbus_dram_window *cs = dram->cs + i;
7096db9d7d36SMaxime Chevallier
7097db9d7d36SMaxime Chevallier mvpp2_write(priv, MVPP2_WIN_BASE(i),
7098db9d7d36SMaxime Chevallier (cs->base & 0xffff0000) | (cs->mbus_attr << 8) |
7099db9d7d36SMaxime Chevallier dram->mbus_dram_target_id);
7100db9d7d36SMaxime Chevallier
7101db9d7d36SMaxime Chevallier mvpp2_write(priv, MVPP2_WIN_SIZE(i),
7102db9d7d36SMaxime Chevallier (cs->size - 1) & 0xffff0000);
7103db9d7d36SMaxime Chevallier
7104db9d7d36SMaxime Chevallier win_enable |= (1 << i);
7105db9d7d36SMaxime Chevallier }
7106db9d7d36SMaxime Chevallier
7107db9d7d36SMaxime Chevallier mvpp2_write(priv, MVPP2_BASE_ADDR_ENABLE, win_enable);
7108db9d7d36SMaxime Chevallier }
7109db9d7d36SMaxime Chevallier
7110db9d7d36SMaxime Chevallier /* Initialize Rx FIFO's */
mvpp2_rx_fifo_init(struct mvpp2 * priv)7111db9d7d36SMaxime Chevallier static void mvpp2_rx_fifo_init(struct mvpp2 *priv)
7112db9d7d36SMaxime Chevallier {
7113db9d7d36SMaxime Chevallier int port;
7114db9d7d36SMaxime Chevallier
7115db9d7d36SMaxime Chevallier for (port = 0; port < MVPP2_MAX_PORTS; port++) {
7116db9d7d36SMaxime Chevallier mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port),
7117db9d7d36SMaxime Chevallier MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB);
7118db9d7d36SMaxime Chevallier mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
7119db9d7d36SMaxime Chevallier MVPP2_RX_FIFO_PORT_ATTR_SIZE_4KB);
7120db9d7d36SMaxime Chevallier }
7121db9d7d36SMaxime Chevallier
7122db9d7d36SMaxime Chevallier mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
7123db9d7d36SMaxime Chevallier MVPP2_RX_FIFO_PORT_MIN_PKT);
7124db9d7d36SMaxime Chevallier mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
7125db9d7d36SMaxime Chevallier }
7126db9d7d36SMaxime Chevallier
mvpp22_rx_fifo_set_hw(struct mvpp2 * priv,int port,int data_size)71279a71baf7SStefan Chulski static void mvpp22_rx_fifo_set_hw(struct mvpp2 *priv, int port, int data_size)
71289a71baf7SStefan Chulski {
71299a71baf7SStefan Chulski int attr_size = MVPP2_RX_FIFO_PORT_ATTR_SIZE(data_size);
71309a71baf7SStefan Chulski
71319a71baf7SStefan Chulski mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port), data_size);
71329a71baf7SStefan Chulski mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port), attr_size);
71339a71baf7SStefan Chulski }
71349a71baf7SStefan Chulski
/* Initialize Rx FIFO's: the total FIFO size is 48kB on PPv2.2 and PPv2.3.
 * 4kB fixed space must be assigned for the loopback port.
 * Redistribute remaining available 44kB space among all active ports.
 * Guarantee minimum 32kB for 10G port and 8kB for port 1, capable of 2.5G
 * SGMII link.
 */
static void mvpp22_rx_fifo_init(struct mvpp2 *priv)
{
	int remaining_ports_count;
	unsigned long port_map;
	int size_remainder;
	int port, size;

	/* The loopback requires fixed 4kB of the FIFO space assignment. */
	mvpp22_rx_fifo_set_hw(priv, MVPP2_LOOPBACK_PORT_INDEX,
			      MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB);
	port_map = priv->port_map & ~BIT(MVPP2_LOOPBACK_PORT_INDEX);

	/* Set RX FIFO size to 0 for inactive ports. */
	for_each_clear_bit(port, &port_map, MVPP2_LOOPBACK_PORT_INDEX)
		mvpp22_rx_fifo_set_hw(priv, port, 0);

	/* Assign remaining RX FIFO space among all active ports. */
	size_remainder = MVPP2_RX_FIFO_PORT_DATA_SIZE_44KB;
	remaining_ports_count = hweight_long(port_map);

	/* Split the remainder evenly, but enforce per-port minimums;
	 * the last active port absorbs whatever is left.
	 */
	for_each_set_bit(port, &port_map, MVPP2_LOOPBACK_PORT_INDEX) {
		if (remaining_ports_count == 1)
			size = size_remainder;
		else if (port == 0)
			size = max(size_remainder / remaining_ports_count,
				   MVPP2_RX_FIFO_PORT_DATA_SIZE_32KB);
		else if (port == 1)
			size = max(size_remainder / remaining_ports_count,
				   MVPP2_RX_FIFO_PORT_DATA_SIZE_8KB);
		else
			size = size_remainder / remaining_ports_count;

		size_remainder -= size;
		remaining_ports_count--;

		mvpp22_rx_fifo_set_hw(priv, port, size);
	}

	mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
		    MVPP2_RX_FIFO_PORT_MIN_PKT);
	mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
}
7183db9d7d36SMaxime Chevallier
7184aca0e235SStefan Chulski /* Configure Rx FIFO Flow control thresholds */
mvpp23_rx_fifo_fc_set_tresh(struct mvpp2 * priv)7185aca0e235SStefan Chulski static void mvpp23_rx_fifo_fc_set_tresh(struct mvpp2 *priv)
7186aca0e235SStefan Chulski {
7187aca0e235SStefan Chulski int port, val;
7188aca0e235SStefan Chulski
7189aca0e235SStefan Chulski /* Port 0: maximum speed -10Gb/s port
7190aca0e235SStefan Chulski * required by spec RX FIFO threshold 9KB
7191aca0e235SStefan Chulski * Port 1: maximum speed -5Gb/s port
7192aca0e235SStefan Chulski * required by spec RX FIFO threshold 4KB
7193aca0e235SStefan Chulski * Port 2: maximum speed -1Gb/s port
7194aca0e235SStefan Chulski * required by spec RX FIFO threshold 2KB
7195aca0e235SStefan Chulski */
7196aca0e235SStefan Chulski
7197aca0e235SStefan Chulski /* Without loopback port */
7198aca0e235SStefan Chulski for (port = 0; port < (MVPP2_MAX_PORTS - 1); port++) {
7199aca0e235SStefan Chulski if (port == 0) {
7200aca0e235SStefan Chulski val = (MVPP23_PORT0_FIFO_TRSH / MVPP2_RX_FC_TRSH_UNIT)
7201aca0e235SStefan Chulski << MVPP2_RX_FC_TRSH_OFFS;
7202aca0e235SStefan Chulski val &= MVPP2_RX_FC_TRSH_MASK;
7203aca0e235SStefan Chulski mvpp2_write(priv, MVPP2_RX_FC_REG(port), val);
7204aca0e235SStefan Chulski } else if (port == 1) {
7205aca0e235SStefan Chulski val = (MVPP23_PORT1_FIFO_TRSH / MVPP2_RX_FC_TRSH_UNIT)
7206aca0e235SStefan Chulski << MVPP2_RX_FC_TRSH_OFFS;
7207aca0e235SStefan Chulski val &= MVPP2_RX_FC_TRSH_MASK;
7208aca0e235SStefan Chulski mvpp2_write(priv, MVPP2_RX_FC_REG(port), val);
7209aca0e235SStefan Chulski } else {
7210aca0e235SStefan Chulski val = (MVPP23_PORT2_FIFO_TRSH / MVPP2_RX_FC_TRSH_UNIT)
7211aca0e235SStefan Chulski << MVPP2_RX_FC_TRSH_OFFS;
7212aca0e235SStefan Chulski val &= MVPP2_RX_FC_TRSH_MASK;
7213aca0e235SStefan Chulski mvpp2_write(priv, MVPP2_RX_FC_REG(port), val);
7214aca0e235SStefan Chulski }
7215aca0e235SStefan Chulski }
7216aca0e235SStefan Chulski }
7217aca0e235SStefan Chulski
7218aca0e235SStefan Chulski /* Configure Rx FIFO Flow control thresholds */
mvpp23_rx_fifo_fc_en(struct mvpp2 * priv,int port,bool en)7219aca0e235SStefan Chulski void mvpp23_rx_fifo_fc_en(struct mvpp2 *priv, int port, bool en)
7220aca0e235SStefan Chulski {
7221aca0e235SStefan Chulski int val;
7222aca0e235SStefan Chulski
7223aca0e235SStefan Chulski val = mvpp2_read(priv, MVPP2_RX_FC_REG(port));
7224aca0e235SStefan Chulski
7225aca0e235SStefan Chulski if (en)
7226aca0e235SStefan Chulski val |= MVPP2_RX_FC_EN;
7227aca0e235SStefan Chulski else
7228aca0e235SStefan Chulski val &= ~MVPP2_RX_FC_EN;
7229aca0e235SStefan Chulski
7230aca0e235SStefan Chulski mvpp2_write(priv, MVPP2_RX_FC_REG(port), val);
7231aca0e235SStefan Chulski }
7232aca0e235SStefan Chulski
mvpp22_tx_fifo_set_hw(struct mvpp2 * priv,int port,int size)72339a71baf7SStefan Chulski static void mvpp22_tx_fifo_set_hw(struct mvpp2 *priv, int port, int size)
72349a71baf7SStefan Chulski {
72359a71baf7SStefan Chulski int threshold = MVPP2_TX_FIFO_THRESHOLD(size);
72369a71baf7SStefan Chulski
72379a71baf7SStefan Chulski mvpp2_write(priv, MVPP22_TX_FIFO_SIZE_REG(port), size);
72389a71baf7SStefan Chulski mvpp2_write(priv, MVPP22_TX_FIFO_THRESH_REG(port), threshold);
72399a71baf7SStefan Chulski }
72409a71baf7SStefan Chulski
72416af27a1dSStefan Chulski /* Initialize TX FIFO's: the total FIFO size is 19kB on PPv2.2 and PPv2.3.
72427c294515SStefan Chulski * 1kB fixed space must be assigned for the loopback port.
72437c294515SStefan Chulski * Redistribute remaining avialable 18kB space among all active ports.
72449a71baf7SStefan Chulski * The 10G interface should use 10kB (which is maximum possible size
72459a71baf7SStefan Chulski * per single port).
7246db9d7d36SMaxime Chevallier */
mvpp22_tx_fifo_init(struct mvpp2 * priv)7247db9d7d36SMaxime Chevallier static void mvpp22_tx_fifo_init(struct mvpp2 *priv)
7248db9d7d36SMaxime Chevallier {
72499a71baf7SStefan Chulski int remaining_ports_count;
72509a71baf7SStefan Chulski unsigned long port_map;
72519a71baf7SStefan Chulski int size_remainder;
72529a71baf7SStefan Chulski int port, size;
7253db9d7d36SMaxime Chevallier
72547c294515SStefan Chulski /* The loopback requires fixed 1kB of the FIFO space assignment. */
72559a71baf7SStefan Chulski mvpp22_tx_fifo_set_hw(priv, MVPP2_LOOPBACK_PORT_INDEX,
72567c294515SStefan Chulski MVPP22_TX_FIFO_DATA_SIZE_1KB);
72579a71baf7SStefan Chulski port_map = priv->port_map & ~BIT(MVPP2_LOOPBACK_PORT_INDEX);
72589a71baf7SStefan Chulski
72599a71baf7SStefan Chulski /* Set TX FIFO size to 0 for inactive ports. */
72609a71baf7SStefan Chulski for_each_clear_bit(port, &port_map, MVPP2_LOOPBACK_PORT_INDEX)
72619a71baf7SStefan Chulski mvpp22_tx_fifo_set_hw(priv, port, 0);
72629a71baf7SStefan Chulski
72639a71baf7SStefan Chulski /* Assign remaining TX FIFO space among all active ports. */
72647c294515SStefan Chulski size_remainder = MVPP22_TX_FIFO_DATA_SIZE_18KB;
72659a71baf7SStefan Chulski remaining_ports_count = hweight_long(port_map);
72669a71baf7SStefan Chulski
72679a71baf7SStefan Chulski for_each_set_bit(port, &port_map, MVPP2_LOOPBACK_PORT_INDEX) {
72689a71baf7SStefan Chulski if (remaining_ports_count == 1)
72699a71baf7SStefan Chulski size = min(size_remainder,
72709a71baf7SStefan Chulski MVPP22_TX_FIFO_DATA_SIZE_10KB);
72719a71baf7SStefan Chulski else if (port == 0)
7272db9d7d36SMaxime Chevallier size = MVPP22_TX_FIFO_DATA_SIZE_10KB;
72739a71baf7SStefan Chulski else
72749a71baf7SStefan Chulski size = size_remainder / remaining_ports_count;
72759a71baf7SStefan Chulski
72769a71baf7SStefan Chulski size_remainder -= size;
72779a71baf7SStefan Chulski remaining_ports_count--;
72789a71baf7SStefan Chulski
72799a71baf7SStefan Chulski mvpp22_tx_fifo_set_hw(priv, port, size);
7280db9d7d36SMaxime Chevallier }
7281db9d7d36SMaxime Chevallier }
7282db9d7d36SMaxime Chevallier
mvpp2_axi_init(struct mvpp2 * priv)7283db9d7d36SMaxime Chevallier static void mvpp2_axi_init(struct mvpp2 *priv)
7284db9d7d36SMaxime Chevallier {
7285db9d7d36SMaxime Chevallier u32 val, rdval, wrval;
7286db9d7d36SMaxime Chevallier
7287db9d7d36SMaxime Chevallier mvpp2_write(priv, MVPP22_BM_ADDR_HIGH_RLS_REG, 0x0);
7288db9d7d36SMaxime Chevallier
7289db9d7d36SMaxime Chevallier /* AXI Bridge Configuration */
7290db9d7d36SMaxime Chevallier
7291db9d7d36SMaxime Chevallier rdval = MVPP22_AXI_CODE_CACHE_RD_CACHE
7292db9d7d36SMaxime Chevallier << MVPP22_AXI_ATTR_CACHE_OFFS;
7293db9d7d36SMaxime Chevallier rdval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
7294db9d7d36SMaxime Chevallier << MVPP22_AXI_ATTR_DOMAIN_OFFS;
7295db9d7d36SMaxime Chevallier
7296db9d7d36SMaxime Chevallier wrval = MVPP22_AXI_CODE_CACHE_WR_CACHE
7297db9d7d36SMaxime Chevallier << MVPP22_AXI_ATTR_CACHE_OFFS;
7298db9d7d36SMaxime Chevallier wrval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
7299db9d7d36SMaxime Chevallier << MVPP22_AXI_ATTR_DOMAIN_OFFS;
7300db9d7d36SMaxime Chevallier
7301db9d7d36SMaxime Chevallier /* BM */
7302db9d7d36SMaxime Chevallier mvpp2_write(priv, MVPP22_AXI_BM_WR_ATTR_REG, wrval);
7303db9d7d36SMaxime Chevallier mvpp2_write(priv, MVPP22_AXI_BM_RD_ATTR_REG, rdval);
7304db9d7d36SMaxime Chevallier
7305db9d7d36SMaxime Chevallier /* Descriptors */
7306db9d7d36SMaxime Chevallier mvpp2_write(priv, MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG, rdval);
7307db9d7d36SMaxime Chevallier mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG, wrval);
7308db9d7d36SMaxime Chevallier mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG, rdval);
7309db9d7d36SMaxime Chevallier mvpp2_write(priv, MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG, wrval);
7310db9d7d36SMaxime Chevallier
7311db9d7d36SMaxime Chevallier /* Buffer Data */
7312db9d7d36SMaxime Chevallier mvpp2_write(priv, MVPP22_AXI_TX_DATA_RD_ATTR_REG, rdval);
7313db9d7d36SMaxime Chevallier mvpp2_write(priv, MVPP22_AXI_RX_DATA_WR_ATTR_REG, wrval);
7314db9d7d36SMaxime Chevallier
7315db9d7d36SMaxime Chevallier val = MVPP22_AXI_CODE_CACHE_NON_CACHE
7316db9d7d36SMaxime Chevallier << MVPP22_AXI_CODE_CACHE_OFFS;
7317db9d7d36SMaxime Chevallier val |= MVPP22_AXI_CODE_DOMAIN_SYSTEM
7318db9d7d36SMaxime Chevallier << MVPP22_AXI_CODE_DOMAIN_OFFS;
7319db9d7d36SMaxime Chevallier mvpp2_write(priv, MVPP22_AXI_RD_NORMAL_CODE_REG, val);
7320db9d7d36SMaxime Chevallier mvpp2_write(priv, MVPP22_AXI_WR_NORMAL_CODE_REG, val);
7321db9d7d36SMaxime Chevallier
7322db9d7d36SMaxime Chevallier val = MVPP22_AXI_CODE_CACHE_RD_CACHE
7323db9d7d36SMaxime Chevallier << MVPP22_AXI_CODE_CACHE_OFFS;
7324db9d7d36SMaxime Chevallier val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
7325db9d7d36SMaxime Chevallier << MVPP22_AXI_CODE_DOMAIN_OFFS;
7326db9d7d36SMaxime Chevallier
7327db9d7d36SMaxime Chevallier mvpp2_write(priv, MVPP22_AXI_RD_SNOOP_CODE_REG, val);
7328db9d7d36SMaxime Chevallier
7329db9d7d36SMaxime Chevallier val = MVPP22_AXI_CODE_CACHE_WR_CACHE
7330db9d7d36SMaxime Chevallier << MVPP22_AXI_CODE_CACHE_OFFS;
7331db9d7d36SMaxime Chevallier val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
7332db9d7d36SMaxime Chevallier << MVPP22_AXI_CODE_DOMAIN_OFFS;
7333db9d7d36SMaxime Chevallier
7334db9d7d36SMaxime Chevallier mvpp2_write(priv, MVPP22_AXI_WR_SNOOP_CODE_REG, val);
7335db9d7d36SMaxime Chevallier }
7336db9d7d36SMaxime Chevallier
/* Initialize network controller common part HW.
 *
 * Performs the one-time, port-independent bring-up: MBUS windows, AXI
 * attributes (PPv2.2+), PHY-polling disable, aggregated TXQ allocation,
 * FIFO sizing, Buffer Manager, parser and classifier defaults.
 * Returns 0 on success or a negative errno.
 */
static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
{
	const struct mbus_dram_target_info *dram_target_info;
	int err, i;
	u32 val;

	/* MBUS windows configuration */
	dram_target_info = mv_mbus_dram_info();
	if (dram_target_info)
		mvpp2_conf_mbus_windows(dram_target_info, priv);

	/* AXI attribute registers only exist on PPv2.2 and later. */
	if (priv->hw_version >= MVPP22)
		mvpp2_axi_init(priv);

	/* Disable HW PHY polling; the register location differs per
	 * generation (LMS block on PPv2.1, interface block otherwise).
	 */
	if (priv->hw_version == MVPP21) {
		val = readl(priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
		val |= MVPP2_PHY_AN_STOP_SMI0_MASK;
		writel(val, priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
	} else {
		val = readl(priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
		val &= ~MVPP22_SMI_POLLING_EN;
		writel(val, priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
	}

	/* Allocate and initialize aggregated TXQs, one per thread. */
	priv->aggr_txqs = devm_kcalloc(&pdev->dev, MVPP2_MAX_THREADS,
				       sizeof(*priv->aggr_txqs),
				       GFP_KERNEL);
	if (!priv->aggr_txqs)
		return -ENOMEM;

	for (i = 0; i < MVPP2_MAX_THREADS; i++) {
		priv->aggr_txqs[i].id = i;
		priv->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE;
		err = mvpp2_aggr_txq_init(pdev, &priv->aggr_txqs[i], i, priv);
		if (err < 0)
			return err;
	}

	/* Fifo Init */
	if (priv->hw_version == MVPP21) {
		mvpp2_rx_fifo_init(priv);
	} else {
		mvpp22_rx_fifo_init(priv);
		mvpp22_tx_fifo_init(priv);
		/* Rx FIFO flow-control thresholds are a PPv2.3 feature. */
		if (priv->hw_version == MVPP23)
			mvpp23_rx_fifo_fc_set_tresh(priv);
	}

	if (priv->hw_version == MVPP21)
		writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT,
		       priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG);

	/* Allow cache snoop when transmitting packets */
	mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1);

	/* Buffer Manager initialization */
	err = mvpp2_bm_init(&pdev->dev, priv);
	if (err < 0)
		return err;

	/* Parser default initialization */
	err = mvpp2_prs_default_init(pdev, priv);
	if (err < 0)
		return err;

	/* Classifier default initialization */
	mvpp2_cls_init(priv);

	return 0;
}
7410db9d7d36SMaxime Chevallier
mvpp2_get_sram(struct platform_device * pdev,struct mvpp2 * priv)7411e54ad1e0SStefan Chulski static int mvpp2_get_sram(struct platform_device *pdev,
7412e54ad1e0SStefan Chulski struct mvpp2 *priv)
7413e54ad1e0SStefan Chulski {
7414e54ad1e0SStefan Chulski struct resource *res;
7415cbe86768SHui Tang void __iomem *base;
7416e54ad1e0SStefan Chulski
7417e54ad1e0SStefan Chulski res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
7418e54ad1e0SStefan Chulski if (!res) {
7419e54ad1e0SStefan Chulski if (has_acpi_companion(&pdev->dev))
7420e54ad1e0SStefan Chulski dev_warn(&pdev->dev, "ACPI is too old, Flow control not supported\n");
7421e54ad1e0SStefan Chulski else
7422e54ad1e0SStefan Chulski dev_warn(&pdev->dev, "DT is too old, Flow control not supported\n");
7423e54ad1e0SStefan Chulski return 0;
7424e54ad1e0SStefan Chulski }
7425e54ad1e0SStefan Chulski
7426cbe86768SHui Tang base = devm_ioremap_resource(&pdev->dev, res);
7427cbe86768SHui Tang if (IS_ERR(base))
7428cbe86768SHui Tang return PTR_ERR(base);
7429e54ad1e0SStefan Chulski
7430cbe86768SHui Tang priv->cm3_base = base;
7431cbe86768SHui Tang return 0;
7432e54ad1e0SStefan Chulski }
7433e54ad1e0SStefan Chulski
mvpp2_probe(struct platform_device * pdev)7434db9d7d36SMaxime Chevallier static int mvpp2_probe(struct platform_device *pdev)
7435db9d7d36SMaxime Chevallier {
7436db9d7d36SMaxime Chevallier struct fwnode_handle *fwnode = pdev->dev.fwnode;
7437db9d7d36SMaxime Chevallier struct fwnode_handle *port_fwnode;
7438db9d7d36SMaxime Chevallier struct mvpp2 *priv;
7439db9d7d36SMaxime Chevallier struct resource *res;
7440db9d7d36SMaxime Chevallier void __iomem *base;
7441e531f767SAntoine Tenart int i, shared;
74429ca5e767SStefan Chulski int err;
7443db9d7d36SMaxime Chevallier
7444db9d7d36SMaxime Chevallier priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
7445db9d7d36SMaxime Chevallier if (!priv)
7446db9d7d36SMaxime Chevallier return -ENOMEM;
7447db9d7d36SMaxime Chevallier
7448692b82c5SAndy Shevchenko priv->hw_version = (unsigned long)device_get_match_data(&pdev->dev);
7449db9d7d36SMaxime Chevallier
74501e27a628SMaxime Chevallier /* multi queue mode isn't supported on PPV2.1, fallback to single
74511e27a628SMaxime Chevallier * mode
74521e27a628SMaxime Chevallier */
74531e27a628SMaxime Chevallier if (priv->hw_version == MVPP21)
74541e27a628SMaxime Chevallier queue_mode = MVPP2_QDIST_SINGLE_MODE;
74551e27a628SMaxime Chevallier
74563230a55bSYueHaibing base = devm_platform_ioremap_resource(pdev, 0);
7457db9d7d36SMaxime Chevallier if (IS_ERR(base))
7458db9d7d36SMaxime Chevallier return PTR_ERR(base);
7459db9d7d36SMaxime Chevallier
7460db9d7d36SMaxime Chevallier if (priv->hw_version == MVPP21) {
74613230a55bSYueHaibing priv->lms_base = devm_platform_ioremap_resource(pdev, 1);
7462db9d7d36SMaxime Chevallier if (IS_ERR(priv->lms_base))
7463db9d7d36SMaxime Chevallier return PTR_ERR(priv->lms_base);
7464db9d7d36SMaxime Chevallier } else {
7465db9d7d36SMaxime Chevallier res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
74660bb51a3aSYang Yingliang if (!res) {
74670bb51a3aSYang Yingliang dev_err(&pdev->dev, "Invalid resource\n");
74680bb51a3aSYang Yingliang return -EINVAL;
74690bb51a3aSYang Yingliang }
7470db9d7d36SMaxime Chevallier if (has_acpi_companion(&pdev->dev)) {
7471db9d7d36SMaxime Chevallier /* In case the MDIO memory region is declared in
7472db9d7d36SMaxime Chevallier * the ACPI, it can already appear as 'in-use'
7473db9d7d36SMaxime Chevallier * in the OS. Because it is overlapped by second
7474db9d7d36SMaxime Chevallier * region of the network controller, make
7475db9d7d36SMaxime Chevallier * sure it is released, before requesting it again.
7476db9d7d36SMaxime Chevallier * The care is taken by mvpp2 driver to avoid
7477db9d7d36SMaxime Chevallier * concurrent access to this memory region.
7478db9d7d36SMaxime Chevallier */
7479db9d7d36SMaxime Chevallier release_resource(res);
7480db9d7d36SMaxime Chevallier }
7481db9d7d36SMaxime Chevallier priv->iface_base = devm_ioremap_resource(&pdev->dev, res);
7482db9d7d36SMaxime Chevallier if (IS_ERR(priv->iface_base))
7483db9d7d36SMaxime Chevallier return PTR_ERR(priv->iface_base);
7484e54ad1e0SStefan Chulski
7485e54ad1e0SStefan Chulski /* Map CM3 SRAM */
7486e54ad1e0SStefan Chulski err = mvpp2_get_sram(pdev, priv);
7487e54ad1e0SStefan Chulski if (err)
7488e54ad1e0SStefan Chulski dev_warn(&pdev->dev, "Fail to alloc CM3 SRAM\n");
7489a59d3542SStefan Chulski
7490a59d3542SStefan Chulski /* Enable global Flow Control only if handler to SRAM not NULL */
7491a59d3542SStefan Chulski if (priv->cm3_base)
7492a59d3542SStefan Chulski priv->global_tx_fc = true;
7493db9d7d36SMaxime Chevallier }
7494db9d7d36SMaxime Chevallier
7495f704177eSStefan Chulski if (priv->hw_version >= MVPP22 && dev_of_node(&pdev->dev)) {
7496db9d7d36SMaxime Chevallier priv->sysctrl_base =
7497db9d7d36SMaxime Chevallier syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
7498db9d7d36SMaxime Chevallier "marvell,system-controller");
7499db9d7d36SMaxime Chevallier if (IS_ERR(priv->sysctrl_base))
7500db9d7d36SMaxime Chevallier /* The system controller regmap is optional for dt
7501db9d7d36SMaxime Chevallier * compatibility reasons. When not provided, the
7502db9d7d36SMaxime Chevallier * configuration of the GoP relies on the
7503db9d7d36SMaxime Chevallier * firmware/bootloader.
7504db9d7d36SMaxime Chevallier */
7505db9d7d36SMaxime Chevallier priv->sysctrl_base = NULL;
7506db9d7d36SMaxime Chevallier }
7507db9d7d36SMaxime Chevallier
7508f704177eSStefan Chulski if (priv->hw_version >= MVPP22 &&
75097d04b0b1SMatteo Croce mvpp2_get_nrxqs(priv) * 2 <= MVPP2_BM_MAX_POOLS)
75107d04b0b1SMatteo Croce priv->percpu_pools = 1;
75117d04b0b1SMatteo Croce
7512db9d7d36SMaxime Chevallier mvpp2_setup_bm_pool();
7513db9d7d36SMaxime Chevallier
7514e531f767SAntoine Tenart
7515e531f767SAntoine Tenart priv->nthreads = min_t(unsigned int, num_present_cpus(),
7516e531f767SAntoine Tenart MVPP2_MAX_THREADS);
7517e531f767SAntoine Tenart
7518e531f767SAntoine Tenart shared = num_present_cpus() - priv->nthreads;
7519e531f767SAntoine Tenart if (shared > 0)
7520b83f5ac7SChristophe JAILLET bitmap_set(&priv->lock_map, 0,
7521e531f767SAntoine Tenart min_t(int, shared, MVPP2_MAX_THREADS));
7522e531f767SAntoine Tenart
7523db9d7d36SMaxime Chevallier for (i = 0; i < MVPP2_MAX_THREADS; i++) {
7524db9d7d36SMaxime Chevallier u32 addr_space_sz;
7525db9d7d36SMaxime Chevallier
7526db9d7d36SMaxime Chevallier addr_space_sz = (priv->hw_version == MVPP21 ?
7527db9d7d36SMaxime Chevallier MVPP21_ADDR_SPACE_SZ : MVPP22_ADDR_SPACE_SZ);
7528db9d7d36SMaxime Chevallier priv->swth_base[i] = base + i * addr_space_sz;
7529db9d7d36SMaxime Chevallier }
7530db9d7d36SMaxime Chevallier
7531db9d7d36SMaxime Chevallier if (priv->hw_version == MVPP21)
7532db9d7d36SMaxime Chevallier priv->max_port_rxqs = 8;
7533db9d7d36SMaxime Chevallier else
7534db9d7d36SMaxime Chevallier priv->max_port_rxqs = 32;
7535db9d7d36SMaxime Chevallier
7536db9d7d36SMaxime Chevallier if (dev_of_node(&pdev->dev)) {
7537db9d7d36SMaxime Chevallier priv->pp_clk = devm_clk_get(&pdev->dev, "pp_clk");
7538db9d7d36SMaxime Chevallier if (IS_ERR(priv->pp_clk))
7539db9d7d36SMaxime Chevallier return PTR_ERR(priv->pp_clk);
7540db9d7d36SMaxime Chevallier err = clk_prepare_enable(priv->pp_clk);
7541db9d7d36SMaxime Chevallier if (err < 0)
7542db9d7d36SMaxime Chevallier return err;
7543db9d7d36SMaxime Chevallier
7544db9d7d36SMaxime Chevallier priv->gop_clk = devm_clk_get(&pdev->dev, "gop_clk");
7545db9d7d36SMaxime Chevallier if (IS_ERR(priv->gop_clk)) {
7546db9d7d36SMaxime Chevallier err = PTR_ERR(priv->gop_clk);
7547db9d7d36SMaxime Chevallier goto err_pp_clk;
7548db9d7d36SMaxime Chevallier }
7549db9d7d36SMaxime Chevallier err = clk_prepare_enable(priv->gop_clk);
7550db9d7d36SMaxime Chevallier if (err < 0)
7551db9d7d36SMaxime Chevallier goto err_pp_clk;
7552db9d7d36SMaxime Chevallier
7553f704177eSStefan Chulski if (priv->hw_version >= MVPP22) {
7554db9d7d36SMaxime Chevallier priv->mg_clk = devm_clk_get(&pdev->dev, "mg_clk");
7555db9d7d36SMaxime Chevallier if (IS_ERR(priv->mg_clk)) {
7556db9d7d36SMaxime Chevallier err = PTR_ERR(priv->mg_clk);
7557db9d7d36SMaxime Chevallier goto err_gop_clk;
7558db9d7d36SMaxime Chevallier }
7559db9d7d36SMaxime Chevallier
7560db9d7d36SMaxime Chevallier err = clk_prepare_enable(priv->mg_clk);
7561db9d7d36SMaxime Chevallier if (err < 0)
7562db9d7d36SMaxime Chevallier goto err_gop_clk;
7563db9d7d36SMaxime Chevallier
7564cf3399b7SAndy Shevchenko priv->mg_core_clk = devm_clk_get_optional(&pdev->dev, "mg_core_clk");
7565db9d7d36SMaxime Chevallier if (IS_ERR(priv->mg_core_clk)) {
7566cf3399b7SAndy Shevchenko err = PTR_ERR(priv->mg_core_clk);
7567cf3399b7SAndy Shevchenko goto err_mg_clk;
7568cf3399b7SAndy Shevchenko }
7569cf3399b7SAndy Shevchenko
7570db9d7d36SMaxime Chevallier err = clk_prepare_enable(priv->mg_core_clk);
7571db9d7d36SMaxime Chevallier if (err < 0)
7572db9d7d36SMaxime Chevallier goto err_mg_clk;
7573db9d7d36SMaxime Chevallier }
7574db9d7d36SMaxime Chevallier
7575cf3399b7SAndy Shevchenko priv->axi_clk = devm_clk_get_optional(&pdev->dev, "axi_clk");
7576db9d7d36SMaxime Chevallier if (IS_ERR(priv->axi_clk)) {
7577db9d7d36SMaxime Chevallier err = PTR_ERR(priv->axi_clk);
7578db9d7d36SMaxime Chevallier goto err_mg_core_clk;
7579cf3399b7SAndy Shevchenko }
7580cf3399b7SAndy Shevchenko
7581db9d7d36SMaxime Chevallier err = clk_prepare_enable(priv->axi_clk);
7582db9d7d36SMaxime Chevallier if (err < 0)
7583db9d7d36SMaxime Chevallier goto err_mg_core_clk;
7584db9d7d36SMaxime Chevallier
7585db9d7d36SMaxime Chevallier /* Get system's tclk rate */
7586db9d7d36SMaxime Chevallier priv->tclk = clk_get_rate(priv->pp_clk);
758758452555SAndy Shevchenko } else {
758858452555SAndy Shevchenko err = device_property_read_u32(&pdev->dev, "clock-frequency", &priv->tclk);
758958452555SAndy Shevchenko if (err) {
7590db9d7d36SMaxime Chevallier dev_err(&pdev->dev, "missing clock-frequency value\n");
759158452555SAndy Shevchenko return err;
759258452555SAndy Shevchenko }
7593db9d7d36SMaxime Chevallier }
7594db9d7d36SMaxime Chevallier
7595f704177eSStefan Chulski if (priv->hw_version >= MVPP22) {
7596db9d7d36SMaxime Chevallier err = dma_set_mask(&pdev->dev, MVPP2_DESC_DMA_MASK);
7597db9d7d36SMaxime Chevallier if (err)
7598db9d7d36SMaxime Chevallier goto err_axi_clk;
7599db9d7d36SMaxime Chevallier /* Sadly, the BM pools all share the same register to
7600db9d7d36SMaxime Chevallier * store the high 32 bits of their address. So they
7601db9d7d36SMaxime Chevallier * must all have the same high 32 bits, which forces
7602db9d7d36SMaxime Chevallier * us to restrict coherent memory to DMA_BIT_MASK(32).
7603db9d7d36SMaxime Chevallier */
7604db9d7d36SMaxime Chevallier err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
7605db9d7d36SMaxime Chevallier if (err)
7606db9d7d36SMaxime Chevallier goto err_axi_clk;
7607db9d7d36SMaxime Chevallier }
7608db9d7d36SMaxime Chevallier
76099a71baf7SStefan Chulski /* Map DTS-active ports. Should be done before FIFO mvpp2_init */
76109a71baf7SStefan Chulski fwnode_for_each_available_child_node(fwnode, port_fwnode) {
76119a71baf7SStefan Chulski if (!fwnode_property_read_u32(port_fwnode, "port-id", &i))
76129a71baf7SStefan Chulski priv->port_map |= BIT(i);
76139a71baf7SStefan Chulski }
76149a71baf7SStefan Chulski
76156af27a1dSStefan Chulski if (mvpp2_read(priv, MVPP2_VER_ID_REG) == MVPP2_VER_PP23)
76166af27a1dSStefan Chulski priv->hw_version = MVPP23;
76176af27a1dSStefan Chulski
7618*5b0ae172STobias Waldekranz /* Init locks for shared packet processor resources */
76193bd17fdcSStefan Chulski spin_lock_init(&priv->mss_spinlock);
7620*5b0ae172STobias Waldekranz spin_lock_init(&priv->prs_spinlock);
76213bd17fdcSStefan Chulski
7622db9d7d36SMaxime Chevallier /* Initialize network controller */
7623db9d7d36SMaxime Chevallier err = mvpp2_init(pdev, priv);
7624db9d7d36SMaxime Chevallier if (err < 0) {
7625db9d7d36SMaxime Chevallier dev_err(&pdev->dev, "failed to initialize controller\n");
7626db9d7d36SMaxime Chevallier goto err_axi_clk;
7627db9d7d36SMaxime Chevallier }
7628db9d7d36SMaxime Chevallier
762991dd7195SRussell King err = mvpp22_tai_probe(&pdev->dev, priv);
763091dd7195SRussell King if (err < 0)
763191dd7195SRussell King goto err_axi_clk;
763291dd7195SRussell King
7633db9d7d36SMaxime Chevallier /* Initialize ports */
7634db9d7d36SMaxime Chevallier fwnode_for_each_available_child_node(fwnode, port_fwnode) {
7635db9d7d36SMaxime Chevallier err = mvpp2_port_probe(pdev, port_fwnode, priv);
7636db9d7d36SMaxime Chevallier if (err < 0)
7637db9d7d36SMaxime Chevallier goto err_port_probe;
7638db9d7d36SMaxime Chevallier }
7639db9d7d36SMaxime Chevallier
7640db9d7d36SMaxime Chevallier if (priv->port_count == 0) {
7641db9d7d36SMaxime Chevallier dev_err(&pdev->dev, "no ports enabled\n");
7642db9d7d36SMaxime Chevallier err = -ENODEV;
7643db9d7d36SMaxime Chevallier goto err_axi_clk;
7644db9d7d36SMaxime Chevallier }
7645db9d7d36SMaxime Chevallier
7646db9d7d36SMaxime Chevallier /* Statistics must be gathered regularly because some of them (like
7647db9d7d36SMaxime Chevallier * packets counters) are 32-bit registers and could overflow quite
7648db9d7d36SMaxime Chevallier * quickly. For instance, a 10Gb link used at full bandwidth with the
7649db9d7d36SMaxime Chevallier * smallest packets (64B) will overflow a 32-bit counter in less than
7650db9d7d36SMaxime Chevallier * 30 seconds. Then, use a workqueue to fill 64-bit counters.
7651db9d7d36SMaxime Chevallier */
7652db9d7d36SMaxime Chevallier snprintf(priv->queue_name, sizeof(priv->queue_name),
7653db9d7d36SMaxime Chevallier "stats-wq-%s%s", netdev_name(priv->port_list[0]->dev),
7654db9d7d36SMaxime Chevallier priv->port_count > 1 ? "+" : "");
7655db9d7d36SMaxime Chevallier priv->stats_queue = create_singlethread_workqueue(priv->queue_name);
7656db9d7d36SMaxime Chevallier if (!priv->stats_queue) {
7657db9d7d36SMaxime Chevallier err = -ENOMEM;
7658db9d7d36SMaxime Chevallier goto err_port_probe;
7659db9d7d36SMaxime Chevallier }
7660db9d7d36SMaxime Chevallier
7661f704177eSStefan Chulski if (priv->global_tx_fc && priv->hw_version >= MVPP22) {
76629ca5e767SStefan Chulski err = mvpp2_enable_global_fc(priv);
76639ca5e767SStefan Chulski if (err)
76649ca5e767SStefan Chulski dev_warn(&pdev->dev, "Minimum of CM3 firmware 18.09 and chip revision B0 required for flow control\n");
7665a59d3542SStefan Chulski }
7666a59d3542SStefan Chulski
766721da57a2SMaxime Chevallier mvpp2_dbgfs_init(priv, pdev->name);
766821da57a2SMaxime Chevallier
7669db9d7d36SMaxime Chevallier platform_set_drvdata(pdev, priv);
7670db9d7d36SMaxime Chevallier return 0;
7671db9d7d36SMaxime Chevallier
7672db9d7d36SMaxime Chevallier err_port_probe:
767371f0891cSAndy Shevchenko fwnode_handle_put(port_fwnode);
767471f0891cSAndy Shevchenko
7675db9d7d36SMaxime Chevallier i = 0;
7676db9d7d36SMaxime Chevallier fwnode_for_each_available_child_node(fwnode, port_fwnode) {
7677db9d7d36SMaxime Chevallier if (priv->port_list[i])
7678db9d7d36SMaxime Chevallier mvpp2_port_remove(priv->port_list[i]);
7679db9d7d36SMaxime Chevallier i++;
7680db9d7d36SMaxime Chevallier }
7681db9d7d36SMaxime Chevallier err_axi_clk:
7682db9d7d36SMaxime Chevallier clk_disable_unprepare(priv->axi_clk);
7683db9d7d36SMaxime Chevallier err_mg_core_clk:
7684db9d7d36SMaxime Chevallier clk_disable_unprepare(priv->mg_core_clk);
7685db9d7d36SMaxime Chevallier err_mg_clk:
7686db9d7d36SMaxime Chevallier clk_disable_unprepare(priv->mg_clk);
7687db9d7d36SMaxime Chevallier err_gop_clk:
7688db9d7d36SMaxime Chevallier clk_disable_unprepare(priv->gop_clk);
7689db9d7d36SMaxime Chevallier err_pp_clk:
7690db9d7d36SMaxime Chevallier clk_disable_unprepare(priv->pp_clk);
7691db9d7d36SMaxime Chevallier return err;
7692db9d7d36SMaxime Chevallier }
7693db9d7d36SMaxime Chevallier
mvpp2_remove(struct platform_device * pdev)7694db9d7d36SMaxime Chevallier static int mvpp2_remove(struct platform_device *pdev)
7695db9d7d36SMaxime Chevallier {
7696db9d7d36SMaxime Chevallier struct mvpp2 *priv = platform_get_drvdata(pdev);
7697db9d7d36SMaxime Chevallier struct fwnode_handle *fwnode = pdev->dev.fwnode;
7698807eaf99SSven Auhagen int i = 0, poolnum = MVPP2_BM_POOLS_NUM;
7699db9d7d36SMaxime Chevallier struct fwnode_handle *port_fwnode;
7700db9d7d36SMaxime Chevallier
770121da57a2SMaxime Chevallier mvpp2_dbgfs_cleanup(priv);
770221da57a2SMaxime Chevallier
7703db9d7d36SMaxime Chevallier fwnode_for_each_available_child_node(fwnode, port_fwnode) {
7704db9d7d36SMaxime Chevallier if (priv->port_list[i]) {
7705db9d7d36SMaxime Chevallier mutex_destroy(&priv->port_list[i]->gather_stats_lock);
7706db9d7d36SMaxime Chevallier mvpp2_port_remove(priv->port_list[i]);
7707db9d7d36SMaxime Chevallier }
7708db9d7d36SMaxime Chevallier i++;
7709db9d7d36SMaxime Chevallier }
7710db9d7d36SMaxime Chevallier
7711944a83a2SMatteo Croce destroy_workqueue(priv->stats_queue);
7712944a83a2SMatteo Croce
7713807eaf99SSven Auhagen if (priv->percpu_pools)
7714807eaf99SSven Auhagen poolnum = mvpp2_get_nrxqs(priv) * 2;
7715807eaf99SSven Auhagen
7716807eaf99SSven Auhagen for (i = 0; i < poolnum; i++) {
7717db9d7d36SMaxime Chevallier struct mvpp2_bm_pool *bm_pool = &priv->bm_pools[i];
7718db9d7d36SMaxime Chevallier
771913616361SMatteo Croce mvpp2_bm_pool_destroy(&pdev->dev, priv, bm_pool);
7720db9d7d36SMaxime Chevallier }
7721db9d7d36SMaxime Chevallier
7722074c74dfSAntoine Tenart for (i = 0; i < MVPP2_MAX_THREADS; i++) {
7723db9d7d36SMaxime Chevallier struct mvpp2_tx_queue *aggr_txq = &priv->aggr_txqs[i];
7724db9d7d36SMaxime Chevallier
7725db9d7d36SMaxime Chevallier dma_free_coherent(&pdev->dev,
7726db9d7d36SMaxime Chevallier MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
7727db9d7d36SMaxime Chevallier aggr_txq->descs,
7728db9d7d36SMaxime Chevallier aggr_txq->descs_dma);
7729db9d7d36SMaxime Chevallier }
7730db9d7d36SMaxime Chevallier
7731db9d7d36SMaxime Chevallier if (is_acpi_node(port_fwnode))
7732db9d7d36SMaxime Chevallier return 0;
7733db9d7d36SMaxime Chevallier
7734db9d7d36SMaxime Chevallier clk_disable_unprepare(priv->axi_clk);
7735db9d7d36SMaxime Chevallier clk_disable_unprepare(priv->mg_core_clk);
7736db9d7d36SMaxime Chevallier clk_disable_unprepare(priv->mg_clk);
7737db9d7d36SMaxime Chevallier clk_disable_unprepare(priv->pp_clk);
7738db9d7d36SMaxime Chevallier clk_disable_unprepare(priv->gop_clk);
7739db9d7d36SMaxime Chevallier
7740db9d7d36SMaxime Chevallier return 0;
7741db9d7d36SMaxime Chevallier }
7742db9d7d36SMaxime Chevallier
/* Device-tree match table; .data carries the HW generation used by
 * device_get_match_data() in mvpp2_probe().
 */
static const struct of_device_id mvpp2_match[] = {
	{
		.compatible = "marvell,armada-375-pp2",
		.data = (void *)MVPP21,
	},
	{
		.compatible = "marvell,armada-7k-pp22",
		.data = (void *)MVPP22,
	},
	{ }
};
MODULE_DEVICE_TABLE(of, mvpp2_match);
7755db9d7d36SMaxime Chevallier
/* ACPI match table (PPv2.2 only); compiled out when ACPI is disabled. */
#ifdef CONFIG_ACPI
static const struct acpi_device_id mvpp2_acpi_match[] = {
	{ "MRVL0110", MVPP22 },
	{ },
};
MODULE_DEVICE_TABLE(acpi, mvpp2_acpi_match);
#endif
7763db9d7d36SMaxime Chevallier
/* Platform driver glue; registered/unregistered by the module
 * init/exit functions below.
 */
static struct platform_driver mvpp2_driver = {
	.probe = mvpp2_probe,
	.remove = mvpp2_remove,
	.driver = {
		.name = MVPP2_DRIVER_NAME,
		.of_match_table = mvpp2_match,
		.acpi_match_table = ACPI_PTR(mvpp2_acpi_match),
	},
};
7773db9d7d36SMaxime Chevallier
mvpp2_driver_init(void)77740152dfeeSRussell King (Oracle) static int __init mvpp2_driver_init(void)
77750152dfeeSRussell King (Oracle) {
77760152dfeeSRussell King (Oracle) return platform_driver_register(&mvpp2_driver);
77770152dfeeSRussell King (Oracle) }
77780152dfeeSRussell King (Oracle) module_init(mvpp2_driver_init);
77790152dfeeSRussell King (Oracle)
/* Module exit point: unregister the driver, then tear down debugfs. */
static void __exit mvpp2_driver_exit(void)
{
	platform_driver_unregister(&mvpp2_driver);
	mvpp2_dbgfs_exit();
}
module_exit(mvpp2_driver_exit);
7786db9d7d36SMaxime Chevallier
/* Module metadata */
MODULE_DESCRIPTION("Marvell PPv2 Ethernet Driver - www.marvell.com");
MODULE_AUTHOR("Marcin Wojtas <mw@semihalf.com>");
MODULE_LICENSE("GPL v2");
7790