// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTx2 CGX driver
 *
 * Copyright (C) 2018 Marvell.
 *
 */

#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/phy.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>

#include "cgx.h"
#include "rvu.h"
#include "lmac_common.h"

#define DRV_NAME	"Marvell-CGX/RPM"
#define DRV_STRING      "Marvell CGX/RPM Driver"

#define CGX_RX_STAT_GLOBAL_INDEX	9

static LIST_HEAD(cgx_list);

/* Convert firmware speed encoding to user format (Mbps) */
static const u32 cgx_speed_mbps[CGX_LINK_SPEED_MAX] = {
	[CGX_LINK_NONE] = 0,
	[CGX_LINK_10M] = 10,
	[CGX_LINK_100M] = 100,
	[CGX_LINK_1G] = 1000,
	[CGX_LINK_2HG] = 2500,
	[CGX_LINK_5G] = 5000,
	[CGX_LINK_10G] = 10000,
	[CGX_LINK_20G] = 20000,
	[CGX_LINK_25G] = 25000,
	[CGX_LINK_40G] = 40000,
	[CGX_LINK_50G] = 50000,
	[CGX_LINK_80G] = 80000,
	[CGX_LINK_100G] = 100000,
};

/* Convert firmware lmac type encoding to string */
static const char *cgx_lmactype_string[LMAC_MODE_MAX] = {
	[LMAC_MODE_SGMII] = "SGMII",
	[LMAC_MODE_XAUI] = "XAUI",
	[LMAC_MODE_RXAUI] = "RXAUI",
	[LMAC_MODE_10G_R] = "10G_R",
	[LMAC_MODE_40G_R] = "40G_R",
	[LMAC_MODE_QSGMII] = "QSGMII",
	[LMAC_MODE_25G_R] = "25G_R",
	[LMAC_MODE_50G_R] = "50G_R",
	[LMAC_MODE_100G_R] = "100G_R",
	[LMAC_MODE_USXGMII] = "USXGMII",
	[LMAC_MODE_USGMII] = "USGMII",
};

/* CGX PHY management internal APIs */
static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool en);

/* Supported devices */
static const struct pci_device_id cgx_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_CGX) },
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_RPM) },
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10KB_RPM) },
	{ 0, }  /* end of table */
};

MODULE_DEVICE_TABLE(pci, cgx_id_table);

static bool is_dev_rpm(void *cgxd)
{
	struct cgx *cgx = cgxd;

	return (cgx->pdev->device == PCI_DEVID_CN10K_RPM) ||
	       (cgx->pdev->device == PCI_DEVID_CN10KB_RPM);
}

bool is_lmac_valid(struct cgx *cgx, int lmac_id)
{
	if (!cgx || lmac_id < 0 || lmac_id >= cgx->max_lmac_per_mac)
		return false;
	return test_bit(lmac_id, &cgx->lmac_bmap);
}

/* Helper function to get the sequential index of an enabled
 * LMAC within a CGX
 */
static int get_sequence_id_of_lmac(struct cgx *cgx, int lmac_id)
{
	int tmp, id = 0;

	for_each_set_bit(tmp, &cgx->lmac_bmap, cgx->max_lmac_per_mac) {
		if (tmp == lmac_id)
			break;
		id++;
	}

	return id;
}

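/* Illustrative sketch (not part of the driver flow): with a sparse
 * lmac_bmap of 0b1010, only LMACs 1 and 3 are enabled, so
 * get_sequence_id_of_lmac() maps lmac_id 1 -> 0 and lmac_id 3 -> 1.
 * This sequential id is what indexes per-LMAC slices of shared
 * resources such as the DMAC filter CAM used below.
 */
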
struct mac_ops *get_mac_ops(void *cgxd)
{
	if (!cgxd)
		return cgxd;

	return ((struct cgx *)cgxd)->mac_ops;
}

u32 cgx_get_fifo_len(void *cgxd)
{
	return ((struct cgx *)cgxd)->fifo_len;
}

void cgx_write(struct cgx *cgx, u64 lmac, u64 offset, u64 val)
{
	writeq(val, cgx->reg_base + (lmac << cgx->mac_ops->lmac_offset) +
	       offset);
}

u64 cgx_read(struct cgx *cgx, u64 lmac, u64 offset)
{
	return readq(cgx->reg_base + (lmac << cgx->mac_ops->lmac_offset) +
		     offset);
}

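/* Address computation sketch: per-LMAC CSRs live in a window of
 * 2^lmac_offset bytes per LMAC. For example, with lmac_offset = 18
 * (the CGX value set in cgx_mac_ops at the end of this file),
 * LMAC 2's copy of a register at 'offset' is accessed at
 * reg_base + (2 << 18) + offset.
 */
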
struct lmac *lmac_pdata(u8 lmac_id, struct cgx *cgx)
{
	if (!cgx || lmac_id >= cgx->max_lmac_per_mac)
		return NULL;

	return cgx->lmac_idmap[lmac_id];
}

int cgx_get_cgxcnt_max(void)
{
	struct cgx *cgx_dev;
	int idmax = -ENODEV;

	list_for_each_entry(cgx_dev, &cgx_list, cgx_list)
		if (cgx_dev->cgx_id > idmax)
			idmax = cgx_dev->cgx_id;

	if (idmax < 0)
		return 0;

	return idmax + 1;
}

int cgx_get_lmac_cnt(void *cgxd)
{
	struct cgx *cgx = cgxd;

	if (!cgx)
		return -ENODEV;

	return cgx->lmac_count;
}

void *cgx_get_pdata(int cgx_id)
{
	struct cgx *cgx_dev;

	list_for_each_entry(cgx_dev, &cgx_list, cgx_list) {
		if (cgx_dev->cgx_id == cgx_id)
			return cgx_dev;
	}
	return NULL;
}

void cgx_lmac_write(int cgx_id, int lmac_id, u64 offset, u64 val)
{
	struct cgx *cgx_dev = cgx_get_pdata(cgx_id);

	/* Software must not access disabled LMAC registers */
	if (!is_lmac_valid(cgx_dev, lmac_id))
		return;
	cgx_write(cgx_dev, lmac_id, offset, val);
}

u64 cgx_lmac_read(int cgx_id, int lmac_id, u64 offset)
{
	struct cgx *cgx_dev = cgx_get_pdata(cgx_id);

	/* Software must not access disabled LMAC registers */
	if (!is_lmac_valid(cgx_dev, lmac_id))
		return 0;

	return cgx_read(cgx_dev, lmac_id, offset);
}

int cgx_get_cgxid(void *cgxd)
{
	struct cgx *cgx = cgxd;

	if (!cgx)
		return -EINVAL;

	return cgx->cgx_id;
}

u8 cgx_lmac_get_p2x(int cgx_id, int lmac_id)
{
	struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
	u64 cfg;

	cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_CFG);

	return (cfg & CMR_P2X_SEL_MASK) >> CMR_P2X_SEL_SHIFT;
}

static u8 cgx_get_nix_resetbit(struct cgx *cgx)
{
	int first_lmac;
	u8 p2x;

	/* Silicons other than 98XX support only the NIX0 block */
	if (cgx->pdev->subsystem_device != PCI_SUBSYS_DEVID_98XX)
		return CGX_NIX0_RESET;

	first_lmac = find_first_bit(&cgx->lmac_bmap, cgx->max_lmac_per_mac);
	p2x = cgx_lmac_get_p2x(cgx->cgx_id, first_lmac);

	if (p2x == CMR_P2X_SEL_NIX1)
		return CGX_NIX1_RESET;
	else
		return CGX_NIX0_RESET;
}

/* Ensure the required lock for the event queue (where asynchronous events
 * are posted) is acquired before calling this API. Otherwise an asynchronous
 * event (with the latest link status) can reach the destination before this
 * function returns and make the link status appear wrong.
 */
int cgx_get_link_info(void *cgxd, int lmac_id,
		      struct cgx_link_user_info *linfo)
{
	struct lmac *lmac = lmac_pdata(lmac_id, cgxd);

	if (!lmac)
		return -ENODEV;

	*linfo = lmac->link_info;
	return 0;
}

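/* Hypothetical caller-side sketch (the lock name is an assumption;
 * the actual serialization lives with whoever owns the event queue):
 *
 *	mutex_lock(&event_queue_lock);
 *	err = cgx_get_link_info(cgxd, lmac_id, &linfo);
 *	mutex_unlock(&event_queue_lock);
 *
 * Holding the lock across the copy keeps a concurrent link-change
 * event from being observed out of order with the returned snapshot.
 */
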
int cgx_lmac_addr_set(u8 cgx_id, u8 lmac_id, u8 *mac_addr)
{
	struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
	struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
	struct mac_ops *mac_ops;
	int index, id;
	u64 cfg;

	if (!lmac)
		return -ENODEV;

	/* access mac_ops to know csr_offset */
	mac_ops = cgx_dev->mac_ops;

	/* copy 6 bytes from macaddr */
	cfg = ether_addr_to_u64(mac_addr);

	id = get_sequence_id_of_lmac(cgx_dev, lmac_id);

	index = id * lmac->mac_to_index_bmap.max;

	cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)),
		  cfg | CGX_DMAC_CAM_ADDR_ENABLE | ((u64)lmac_id << 49));

	cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
	cfg |= (CGX_DMAC_CTL0_CAM_ENABLE | CGX_DMAC_BCAST_MODE |
		CGX_DMAC_MCAST_MODE);
	cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);

	return 0;
}

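/* CAM layout sketch: the DMAC filter CAM is one global table accessed
 * through LMAC 0's register space; each enabled LMAC owns a contiguous
 * slice of mac_to_index_bmap.max entries. E.g. assuming 32 entries per
 * LMAC, sequence id 1 writes its default address at CAM index 32
 * (byte offset 32 * 0x8). Bit 49 of an entry records which LMAC the
 * address belongs to.
 */
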
u64 cgx_read_dmac_ctrl(void *cgxd, int lmac_id)
{
	struct mac_ops *mac_ops;
	struct cgx *cgx = cgxd;

	if (!cgxd || !is_lmac_valid(cgxd, lmac_id))
		return 0;

	/* Get mac_ops to know csr offset */
	mac_ops = cgx->mac_ops;

	return cgx_read(cgxd, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
}

u64 cgx_read_dmac_entry(void *cgxd, int index)
{
	struct mac_ops *mac_ops;
	struct cgx *cgx;

	if (!cgxd)
		return 0;

	cgx = cgxd;
	mac_ops = cgx->mac_ops;
	return cgx_read(cgx, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 8)));
}

int cgx_lmac_addr_add(u8 cgx_id, u8 lmac_id, u8 *mac_addr)
{
	struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
	struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
	struct mac_ops *mac_ops;
	int index, idx;
	u64 cfg = 0;
	int id;

	if (!lmac)
		return -ENODEV;

	mac_ops = cgx_dev->mac_ops;
	/* Get available index where entry is to be installed */
	idx = rvu_alloc_rsrc(&lmac->mac_to_index_bmap);
	if (idx < 0)
		return idx;

	id = get_sequence_id_of_lmac(cgx_dev, lmac_id);

	index = id * lmac->mac_to_index_bmap.max + idx;

	cfg = ether_addr_to_u64(mac_addr);
	cfg |= CGX_DMAC_CAM_ADDR_ENABLE;
	cfg |= ((u64)lmac_id << 49);
	cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), cfg);

	cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
	cfg |= (CGX_DMAC_BCAST_MODE | CGX_DMAC_CAM_ACCEPT);

	if (is_multicast_ether_addr(mac_addr)) {
		cfg &= ~GENMASK_ULL(2, 1);
		cfg |= CGX_DMAC_MCAST_MODE_CAM;
		lmac->mcast_filters_count++;
	} else if (!lmac->mcast_filters_count) {
		cfg |= CGX_DMAC_MCAST_MODE;
	}

	cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);

	return idx;
}

int cgx_lmac_addr_reset(u8 cgx_id, u8 lmac_id)
{
	struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
	struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
	struct mac_ops *mac_ops;
	u8 index = 0, id;
	u64 cfg;

	if (!lmac)
		return -ENODEV;

	mac_ops = cgx_dev->mac_ops;
	/* Restore index 0 to its default init value as done during
	 * cgx_lmac_init
	 */
	set_bit(0, lmac->mac_to_index_bmap.bmap);

	id = get_sequence_id_of_lmac(cgx_dev, lmac_id);

	index = id * lmac->mac_to_index_bmap.max + index;
	cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), 0);

	/* Reset CGXX_CMRX_RX_DMAC_CTL0 register to default state */
	cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
	cfg &= ~CGX_DMAC_CAM_ACCEPT;
	cfg |= (CGX_DMAC_BCAST_MODE | CGX_DMAC_MCAST_MODE);
	cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);

	return 0;
}

/* Allows the caller to change the MAC address associated with an index
 * in the DMAC filter table, including index 0, which is reserved for
 * the interface MAC address
 */
int cgx_lmac_addr_update(u8 cgx_id, u8 lmac_id, u8 *mac_addr, u8 index)
{
	struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
	struct mac_ops *mac_ops;
	struct lmac *lmac;
	u64 cfg;
	int id;

	lmac = lmac_pdata(lmac_id, cgx_dev);
	if (!lmac)
		return -ENODEV;

	mac_ops = cgx_dev->mac_ops;
	/* Validate the index */
	if (index >= lmac->mac_to_index_bmap.max)
		return -EINVAL;

	/* ensure index is already set */
	if (!test_bit(index, lmac->mac_to_index_bmap.bmap))
		return -EINVAL;

	id = get_sequence_id_of_lmac(cgx_dev, lmac_id);

	index = id * lmac->mac_to_index_bmap.max + index;

	cfg = cgx_read(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)));
	cfg &= ~CGX_RX_DMAC_ADR_MASK;
	cfg |= ether_addr_to_u64(mac_addr);

	cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), cfg);
	return 0;
}

int cgx_lmac_addr_del(u8 cgx_id, u8 lmac_id, u8 index)
{
	struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
	struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
	struct mac_ops *mac_ops;
	u8 mac[ETH_ALEN];
	u64 cfg;
	int id;

	if (!lmac)
		return -ENODEV;

	mac_ops = cgx_dev->mac_ops;
	/* Validate the index */
	if (index >= lmac->mac_to_index_bmap.max)
		return -EINVAL;

	/* Skip deletion for reserved index i.e. index 0 */
	if (index == 0)
		return 0;

	rvu_free_rsrc(&lmac->mac_to_index_bmap, index);

	id = get_sequence_id_of_lmac(cgx_dev, lmac_id);

	index = id * lmac->mac_to_index_bmap.max + index;

	/* Read MAC address to check whether it is ucast or mcast */
	cfg = cgx_read(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)));

	u64_to_ether_addr(cfg, mac);
	if (is_multicast_ether_addr(mac))
		lmac->mcast_filters_count--;

	if (!lmac->mcast_filters_count) {
		cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
		cfg &= ~GENMASK_ULL(2, 1);
		cfg |= CGX_DMAC_MCAST_MODE;
		cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
	}

	cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), 0);

	return 0;
}

int cgx_lmac_addr_max_entries_get(u8 cgx_id, u8 lmac_id)
{
	struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
	struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);

	if (lmac)
		return lmac->mac_to_index_bmap.max;

	return 0;
}

u64 cgx_lmac_addr_get(u8 cgx_id, u8 lmac_id)
{
	struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
	struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
	struct mac_ops *mac_ops;
	int index;
	u64 cfg;
	int id;

	mac_ops = cgx_dev->mac_ops;

	id = get_sequence_id_of_lmac(cgx_dev, lmac_id);

	index = id * lmac->mac_to_index_bmap.max;

	cfg = cgx_read(cgx_dev, 0, CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8);
	return cfg & CGX_RX_DMAC_ADR_MASK;
}

int cgx_set_pkind(void *cgxd, u8 lmac_id, int pkind)
{
	struct cgx *cgx = cgxd;

	if (!is_lmac_valid(cgx, lmac_id))
		return -ENODEV;

	cgx_write(cgx, lmac_id, cgx->mac_ops->rxid_map_offset, (pkind & 0x3F));
	return 0;
}

static u8 cgx_get_lmac_type(void *cgxd, int lmac_id)
{
	struct cgx *cgx = cgxd;
	u64 cfg;

	cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG);
	return (cfg >> CGX_LMAC_TYPE_SHIFT) & CGX_LMAC_TYPE_MASK;
}

static u32 cgx_get_lmac_fifo_len(void *cgxd, int lmac_id)
{
	struct cgx *cgx = cgxd;
	u8 num_lmacs;
	u32 fifo_len;

	fifo_len = cgx->fifo_len;
	num_lmacs = cgx->mac_ops->get_nr_lmacs(cgx);

	switch (num_lmacs) {
	case 1:
		return fifo_len;
	case 2:
		return fifo_len / 2;
	case 3:
		/* LMAC0 gets half of the FIFO, the rest get a quarter each */
		if (lmac_id == 0)
			return fifo_len / 2;
		return fifo_len / 4;
	case 4:
	default:
		return fifo_len / 4;
	}
	return 0;
}

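/* Worked example (assuming an illustrative 64 KB RX FIFO): with three
 * active LMACs, LMAC0 is sized 32 KB and LMAC1/LMAC2 get 16 KB each;
 * with four LMACs all four get 16 KB. Note the trailing return 0 is
 * unreachable since every switch arm returns.
 */
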
/* Configure CGX LMAC in internal loopback mode */
int cgx_lmac_internal_loopback(void *cgxd, int lmac_id, bool enable)
{
	struct cgx *cgx = cgxd;
	struct lmac *lmac;
	u64 cfg;

	if (!is_lmac_valid(cgx, lmac_id))
		return -ENODEV;

	lmac = lmac_pdata(lmac_id, cgx);
	if (lmac->lmac_type == LMAC_MODE_SGMII ||
	    lmac->lmac_type == LMAC_MODE_QSGMII) {
		cfg = cgx_read(cgx, lmac_id, CGXX_GMP_PCS_MRX_CTL);
		if (enable)
			cfg |= CGXX_GMP_PCS_MRX_CTL_LBK;
		else
			cfg &= ~CGXX_GMP_PCS_MRX_CTL_LBK;
		cgx_write(cgx, lmac_id, CGXX_GMP_PCS_MRX_CTL, cfg);
	} else {
		cfg = cgx_read(cgx, lmac_id, CGXX_SPUX_CONTROL1);
		if (enable)
			cfg |= CGXX_SPUX_CONTROL1_LBK;
		else
			cfg &= ~CGXX_SPUX_CONTROL1_LBK;
		cgx_write(cgx, lmac_id, CGXX_SPUX_CONTROL1, cfg);
	}
	return 0;
}

void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable)
{
	struct cgx *cgx = cgx_get_pdata(cgx_id);
	struct lmac *lmac = lmac_pdata(lmac_id, cgx);
	struct mac_ops *mac_ops;
	u16 max_dmac;
	int index, i;
	u64 cfg = 0;
	int id;

	if (!cgx || !lmac)
		return;

	max_dmac = lmac->mac_to_index_bmap.max;
	id = get_sequence_id_of_lmac(cgx, lmac_id);

	mac_ops = cgx->mac_ops;
	if (enable) {
		/* Enable promiscuous mode on LMAC */
		cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
		cfg &= ~CGX_DMAC_CAM_ACCEPT;
		cfg |= (CGX_DMAC_BCAST_MODE | CGX_DMAC_MCAST_MODE);
		cgx_write(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);

		for (i = 0; i < max_dmac; i++) {
			index = id * max_dmac + i;
			cfg = cgx_read(cgx, 0,
				       (CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8));
			cfg &= ~CGX_DMAC_CAM_ADDR_ENABLE;
			cgx_write(cgx, 0,
				  (CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8), cfg);
		}
	} else {
		/* Disable promiscuous mode */
		cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
		cfg |= CGX_DMAC_CAM_ACCEPT | CGX_DMAC_MCAST_MODE;
		cgx_write(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
		for (i = 0; i < max_dmac; i++) {
			index = id * max_dmac + i;
			cfg = cgx_read(cgx, 0,
				       (CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8));
			if ((cfg & CGX_RX_DMAC_ADR_MASK) != 0) {
				cfg |= CGX_DMAC_CAM_ADDR_ENABLE;
				cgx_write(cgx, 0,
					  (CGXX_CMRX_RX_DMAC_CAM0 +
					   index * 0x8),
					  cfg);
			}
		}
	}
}

static int cgx_lmac_get_pause_frm_status(void *cgxd, int lmac_id,
					 u8 *tx_pause, u8 *rx_pause)
{
	struct cgx *cgx = cgxd;
	u64 cfg;

	if (is_dev_rpm(cgx))
		return 0;

	if (!is_lmac_valid(cgx, lmac_id))
		return -ENODEV;

	cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
	*rx_pause = !!(cfg & CGX_SMUX_RX_FRM_CTL_CTL_BCK);

	cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
	*tx_pause = !!(cfg & CGX_SMUX_TX_CTL_L2P_BP_CONV);
	return 0;
}

/* Enable or disable forwarding received pause frames to Tx block */
void cgx_lmac_enadis_rx_pause_fwding(void *cgxd, int lmac_id, bool enable)
{
	struct cgx *cgx = cgxd;
	u8 rx_pause, tx_pause;
	bool is_pfc_enabled;
	struct lmac *lmac;
	u64 cfg;

	if (!cgx)
		return;

	lmac = lmac_pdata(lmac_id, cgx);
	if (!lmac)
		return;

	/* Pause frames are not enabled, just return */
	if (!bitmap_weight(lmac->rx_fc_pfvf_bmap.bmap, lmac->rx_fc_pfvf_bmap.max))
		return;

	cgx_lmac_get_pause_frm_status(cgx, lmac_id, &rx_pause, &tx_pause);
	is_pfc_enabled = rx_pause ? false : true;

	if (enable) {
		if (!is_pfc_enabled) {
			cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
			cfg |= CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
			cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);

			cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
			cfg |= CGX_SMUX_RX_FRM_CTL_CTL_BCK;
			cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
		} else {
			cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_CBFC_CTL);
			cfg |= CGXX_SMUX_CBFC_CTL_BCK_EN;
			cgx_write(cgx, lmac_id, CGXX_SMUX_CBFC_CTL, cfg);
		}
	} else {
		if (!is_pfc_enabled) {
			cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
			cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
			cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);

			cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
			cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
			cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
		} else {
			cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_CBFC_CTL);
			cfg &= ~CGXX_SMUX_CBFC_CTL_BCK_EN;
			cgx_write(cgx, lmac_id, CGXX_SMUX_CBFC_CTL, cfg);
		}
	}
}

int cgx_get_rx_stats(void *cgxd, int lmac_id, int idx, u64 *rx_stat)
{
	struct cgx *cgx = cgxd;

	if (!is_lmac_valid(cgx, lmac_id))
		return -ENODEV;
	*rx_stat = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_STAT0 + (idx * 8));
	return 0;
}

int cgx_get_tx_stats(void *cgxd, int lmac_id, int idx, u64 *tx_stat)
{
	struct cgx *cgx = cgxd;

	if (!is_lmac_valid(cgx, lmac_id))
		return -ENODEV;
	*tx_stat = cgx_read(cgx, lmac_id, CGXX_CMRX_TX_STAT0 + (idx * 8));
	return 0;
}

u64 cgx_features_get(void *cgxd)
{
	return ((struct cgx *)cgxd)->hw_features;
}

int cgx_stats_reset(void *cgxd, int lmac_id)
{
	struct cgx *cgx = cgxd;
	int stat_id;

	if (!is_lmac_valid(cgx, lmac_id))
		return -ENODEV;

	for (stat_id = 0; stat_id < CGX_RX_STATS_COUNT; stat_id++) {
		if (stat_id >= CGX_RX_STAT_GLOBAL_INDEX)
			/* pass lmac as 0 for CGX_CMR_RX_STAT9-12 */
			cgx_write(cgx, 0,
				  (CGXX_CMRX_RX_STAT0 + (stat_id * 8)), 0);
		else
			cgx_write(cgx, lmac_id,
				  (CGXX_CMRX_RX_STAT0 + (stat_id * 8)), 0);
	}

	for (stat_id = 0; stat_id < CGX_TX_STATS_COUNT; stat_id++)
		cgx_write(cgx, lmac_id, CGXX_CMRX_TX_STAT0 + (stat_id * 8), 0);

	return 0;
}

static int cgx_set_fec_stats_count(struct cgx_link_user_info *linfo)
{
	if (!linfo->fec)
		return 0;

	switch (linfo->lmac_type_id) {
	case LMAC_MODE_SGMII:
	case LMAC_MODE_XAUI:
	case LMAC_MODE_RXAUI:
	case LMAC_MODE_QSGMII:
		return 0;
	case LMAC_MODE_10G_R:
	case LMAC_MODE_25G_R:
	case LMAC_MODE_100G_R:
	case LMAC_MODE_USXGMII:
		return 1;
	case LMAC_MODE_40G_R:
		return 4;
	case LMAC_MODE_50G_R:
		if (linfo->fec == OTX2_FEC_BASER)
			return 2;
		else
			return 1;
	default:
		return 0;
	}
}

int cgx_get_fec_stats(void *cgxd, int lmac_id, struct cgx_fec_stats_rsp *rsp)
{
	int stats, fec_stats_count = 0;
	int corr_reg, uncorr_reg;
	struct cgx *cgx = cgxd;

	if (!is_lmac_valid(cgx, lmac_id))
		return -ENODEV;

	if (cgx->lmac_idmap[lmac_id]->link_info.fec == OTX2_FEC_NONE)
		return 0;

	fec_stats_count =
		cgx_set_fec_stats_count(&cgx->lmac_idmap[lmac_id]->link_info);
	if (cgx->lmac_idmap[lmac_id]->link_info.fec == OTX2_FEC_BASER) {
		corr_reg = CGXX_SPUX_LNX_FEC_CORR_BLOCKS;
		uncorr_reg = CGXX_SPUX_LNX_FEC_UNCORR_BLOCKS;
	} else {
		corr_reg = CGXX_SPUX_RSFEC_CORR;
		uncorr_reg = CGXX_SPUX_RSFEC_UNCORR;
	}
	for (stats = 0; stats < fec_stats_count; stats++) {
		rsp->fec_corr_blks +=
			cgx_read(cgx, lmac_id, corr_reg + (stats * 8));
		rsp->fec_uncorr_blks +=
			cgx_read(cgx, lmac_id, uncorr_reg + (stats * 8));
	}
	return 0;
}

int cgx_lmac_rx_tx_enable(void *cgxd, int lmac_id, bool enable)
{
	struct cgx *cgx = cgxd;
	u64 cfg;

	if (!is_lmac_valid(cgx, lmac_id))
		return -ENODEV;

	cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG);
	if (enable)
		cfg |= DATA_PKT_RX_EN | DATA_PKT_TX_EN;
	else
		cfg &= ~(DATA_PKT_RX_EN | DATA_PKT_TX_EN);
	cgx_write(cgx, lmac_id, CGXX_CMRX_CFG, cfg);
	return 0;
}

int cgx_lmac_tx_enable(void *cgxd, int lmac_id, bool enable)
{
	struct cgx *cgx = cgxd;
	u64 cfg, last;

	if (!is_lmac_valid(cgx, lmac_id))
		return -ENODEV;

	cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG);
	last = cfg;
	if (enable)
		cfg |= DATA_PKT_TX_EN;
	else
		cfg &= ~DATA_PKT_TX_EN;

	if (cfg != last)
		cgx_write(cgx, lmac_id, CGXX_CMRX_CFG, cfg);
	return !!(last & DATA_PKT_TX_EN);
}

static int cgx_lmac_enadis_pause_frm(void *cgxd, int lmac_id,
				     u8 tx_pause, u8 rx_pause)
{
	struct cgx *cgx = cgxd;
	u64 cfg;

	if (is_dev_rpm(cgx))
		return 0;

	if (!is_lmac_valid(cgx, lmac_id))
		return -ENODEV;

	cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
	cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
	cfg |= rx_pause ? CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK : 0x0;
	cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);

	cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
	cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
	cfg |= rx_pause ? CGX_SMUX_RX_FRM_CTL_CTL_BCK : 0x0;
	cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);

	cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
	cfg &= ~CGX_SMUX_TX_CTL_L2P_BP_CONV;
	cfg |= tx_pause ? CGX_SMUX_TX_CTL_L2P_BP_CONV : 0x0;
	cgx_write(cgx, lmac_id, CGXX_SMUX_TX_CTL, cfg);

	cfg = cgx_read(cgx, 0, CGXX_CMR_RX_OVR_BP);
	if (tx_pause) {
		cfg &= ~CGX_CMR_RX_OVR_BP_EN(lmac_id);
	} else {
		cfg |= CGX_CMR_RX_OVR_BP_EN(lmac_id);
		cfg &= ~CGX_CMR_RX_OVR_BP_BP(lmac_id);
	}
	cgx_write(cgx, 0, CGXX_CMR_RX_OVR_BP, cfg);
	return 0;
}

static void cgx_lmac_pause_frm_config(void *cgxd, int lmac_id, bool enable)
{
	struct cgx *cgx = cgxd;
	u64 cfg;

	if (!is_lmac_valid(cgx, lmac_id))
		return;

	if (enable) {
		/* Set pause time and interval */
		cgx_write(cgx, lmac_id, CGXX_SMUX_TX_PAUSE_PKT_TIME,
			  DEFAULT_PAUSE_TIME);
		cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_PAUSE_PKT_INTERVAL);
		cfg &= ~0xFFFFULL;
		cgx_write(cgx, lmac_id, CGXX_SMUX_TX_PAUSE_PKT_INTERVAL,
			  cfg | (DEFAULT_PAUSE_TIME / 2));

		cgx_write(cgx, lmac_id, CGXX_GMP_GMI_TX_PAUSE_PKT_TIME,
			  DEFAULT_PAUSE_TIME);

		cfg = cgx_read(cgx, lmac_id,
			       CGXX_GMP_GMI_TX_PAUSE_PKT_INTERVAL);
		cfg &= ~0xFFFFULL;
		cgx_write(cgx, lmac_id, CGXX_GMP_GMI_TX_PAUSE_PKT_INTERVAL,
			  cfg | (DEFAULT_PAUSE_TIME / 2));
	}

	/* ALL pause frames received are completely ignored */
	cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
	cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
	cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);

	cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
	cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
	cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);

	/* Disable pause frames transmission */
	cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
	cfg &= ~CGX_SMUX_TX_CTL_L2P_BP_CONV;
	cgx_write(cgx, lmac_id, CGXX_SMUX_TX_CTL, cfg);

	cfg = cgx_read(cgx, 0, CGXX_CMR_RX_OVR_BP);
	cfg |= CGX_CMR_RX_OVR_BP_EN(lmac_id);
	cfg &= ~CGX_CMR_RX_OVR_BP_BP(lmac_id);
	cgx_write(cgx, 0, CGXX_CMR_RX_OVR_BP, cfg);

	/* Disable all PFC classes by default */
	cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_CBFC_CTL);
	cfg = FIELD_SET(CGX_PFC_CLASS_MASK, 0, cfg);
	cgx_write(cgx, lmac_id, CGXX_SMUX_CBFC_CTL, cfg);
}

int verify_lmac_fc_cfg(void *cgxd, int lmac_id, u8 tx_pause, u8 rx_pause,
		       int pfvf_idx)
{
	struct cgx *cgx = cgxd;
	struct lmac *lmac;

	lmac = lmac_pdata(lmac_id, cgx);
	if (!lmac)
		return -ENODEV;

	if (!rx_pause)
		clear_bit(pfvf_idx, lmac->rx_fc_pfvf_bmap.bmap);
	else
		set_bit(pfvf_idx, lmac->rx_fc_pfvf_bmap.bmap);

	if (!tx_pause)
		clear_bit(pfvf_idx, lmac->tx_fc_pfvf_bmap.bmap);
	else
		set_bit(pfvf_idx, lmac->tx_fc_pfvf_bmap.bmap);

	/* check if other pfvfs are using flow control */
	if (!rx_pause && bitmap_weight(lmac->rx_fc_pfvf_bmap.bmap, lmac->rx_fc_pfvf_bmap.max)) {
		dev_warn(&cgx->pdev->dev,
			 "Receive Flow control disable not permitted as it's used by other PFVFs\n");
		return -EPERM;
	}

	if (!tx_pause && bitmap_weight(lmac->tx_fc_pfvf_bmap.bmap, lmac->tx_fc_pfvf_bmap.max)) {
		dev_warn(&cgx->pdev->dev,
			 "Transmit Flow control disable not permitted as it's used by other PFVFs\n");
		return -EPERM;
	}

	return 0;
}

int cgx_lmac_pfc_config(void *cgxd, int lmac_id, u8 tx_pause,
			u8 rx_pause, u16 pfc_en)
{
	struct cgx *cgx = cgxd;
	u64 cfg;

	if (!is_lmac_valid(cgx, lmac_id))
		return -ENODEV;

	/* Return as no traffic classes are requested */
	if (tx_pause && !pfc_en)
		return 0;

	cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_CBFC_CTL);
	pfc_en |= FIELD_GET(CGX_PFC_CLASS_MASK, cfg);

	if (rx_pause) {
		cfg |= (CGXX_SMUX_CBFC_CTL_RX_EN |
			CGXX_SMUX_CBFC_CTL_BCK_EN |
			CGXX_SMUX_CBFC_CTL_DRP_EN);
	} else {
		cfg &= ~(CGXX_SMUX_CBFC_CTL_RX_EN |
			CGXX_SMUX_CBFC_CTL_BCK_EN |
			CGXX_SMUX_CBFC_CTL_DRP_EN);
	}

	if (tx_pause) {
		cfg |= CGXX_SMUX_CBFC_CTL_TX_EN;
		cfg = FIELD_SET(CGX_PFC_CLASS_MASK, pfc_en, cfg);
	} else {
		cfg &= ~CGXX_SMUX_CBFC_CTL_TX_EN;
		cfg = FIELD_SET(CGX_PFC_CLASS_MASK, 0, cfg);
	}

	cgx_write(cgx, lmac_id, CGXX_SMUX_CBFC_CTL, cfg);

	/* Write source MAC address which will be filled into PFC packet */
	cfg = cgx_lmac_addr_get(cgx->cgx_id, lmac_id);
	cgx_write(cgx, lmac_id, CGXX_SMUX_SMAC, cfg);

	return 0;
}

int cgx_lmac_get_pfc_frm_cfg(void *cgxd, int lmac_id, u8 *tx_pause,
			     u8 *rx_pause)
{
	struct cgx *cgx = cgxd;
	u64 cfg;

	if (!is_lmac_valid(cgx, lmac_id))
		return -ENODEV;

	cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_CBFC_CTL);

	*rx_pause = !!(cfg & CGXX_SMUX_CBFC_CTL_RX_EN);
	*tx_pause = !!(cfg & CGXX_SMUX_CBFC_CTL_TX_EN);

	return 0;
}

void cgx_lmac_ptp_config(void *cgxd, int lmac_id, bool enable)
{
	struct cgx *cgx = cgxd;
	u64 cfg;

	if (!cgx)
		return;

	if (enable) {
		/* Enable inbound PTP timestamping */
		cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
		cfg |= CGX_GMP_GMI_RXX_FRM_CTL_PTP_MODE;
		cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);

		cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
		cfg |= CGX_SMUX_RX_FRM_CTL_PTP_MODE;
		cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
	} else {
		/* Disable inbound PTP stamping */
		cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
		cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_PTP_MODE;
		cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);

		cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
		cfg &= ~CGX_SMUX_RX_FRM_CTL_PTP_MODE;
		cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
	}
}

/* CGX Firmware interface low level support */
int cgx_fwi_cmd_send(u64 req, u64 *resp, struct lmac *lmac)
{
	struct cgx *cgx = lmac->cgx;
	struct device *dev;
	int err = 0;
	u64 cmd;

	/* Ensure no other command is in progress */
	err = mutex_lock_interruptible(&lmac->cmd_lock);
	if (err)
		return err;

	/* Ensure command register is free */
	cmd = cgx_read(cgx, lmac->lmac_id, CGX_COMMAND_REG);
	if (FIELD_GET(CMDREG_OWN, cmd) != CGX_CMD_OWN_NS) {
		err = -EBUSY;
		goto unlock;
	}

	/* Update ownership in command request */
	req = FIELD_SET(CMDREG_OWN, CGX_CMD_OWN_FIRMWARE, req);

	/* Mark this lmac as pending, before we start */
	lmac->cmd_pend = true;

	/* Start command in hardware */
	cgx_write(cgx, lmac->lmac_id, CGX_COMMAND_REG, req);

	/* Ensure command is completed without errors */
	if (!wait_event_timeout(lmac->wq_cmd_cmplt, !lmac->cmd_pend,
				msecs_to_jiffies(CGX_CMD_TIMEOUT))) {
		dev = &cgx->pdev->dev;
		dev_err(dev, "cgx port %d:%d cmd %lld timeout\n",
			cgx->cgx_id, lmac->lmac_id, FIELD_GET(CMDREG_ID, req));
		err = LMAC_AF_ERR_CMD_TIMEOUT;
		goto unlock;
	}

	/* we have a valid command response */
	smp_rmb(); /* Ensure the latest updates are visible */
	*resp = lmac->resp;

unlock:
	mutex_unlock(&lmac->cmd_lock);

	return err;
}

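/* Firmware-command flow sketch: a request word is composed with
 * FIELD_SET() and handed to the firmware through CGX_COMMAND_REG;
 * completion arrives via cgx_fwi_event_handler() waking wq_cmd_cmplt.
 * A minimal caller, mirroring cgx_fwi_read_version() below:
 *
 *	u64 req = 0, resp;
 *
 *	req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_FW_VER, req);
 *	err = cgx_fwi_cmd_generic(req, &resp, cgx, first_lmac);
 */
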
int cgx_fwi_cmd_generic(u64 req, u64 *resp, struct cgx *cgx, int lmac_id)
{
	struct lmac *lmac;
	int err;

	lmac = lmac_pdata(lmac_id, cgx);
	if (!lmac)
		return -ENODEV;

	err = cgx_fwi_cmd_send(req, resp, lmac);

	/* Check for valid response */
	if (!err) {
		if (FIELD_GET(EVTREG_STAT, *resp) == CGX_STAT_FAIL)
			return -EIO;
		else
			return 0;
	}

	return err;
}

static int cgx_link_usertable_index_map(int speed)
{
	switch (speed) {
	case SPEED_10:
		return CGX_LINK_10M;
	case SPEED_100:
		return CGX_LINK_100M;
	case SPEED_1000:
		return CGX_LINK_1G;
	case SPEED_2500:
		return CGX_LINK_2HG;
	case SPEED_5000:
		return CGX_LINK_5G;
	case SPEED_10000:
		return CGX_LINK_10G;
	case SPEED_20000:
		return CGX_LINK_20G;
	case SPEED_25000:
		return CGX_LINK_25G;
	case SPEED_40000:
		return CGX_LINK_40G;
	case SPEED_50000:
		return CGX_LINK_50G;
	case 80000: /* no SPEED_80000 macro in the ethtool UAPI */
		return CGX_LINK_80G;
	case SPEED_100000:
		return CGX_LINK_100G;
	case SPEED_UNKNOWN:
		return CGX_LINK_NONE;
	}
	return CGX_LINK_NONE;
}

static void set_mod_args(struct cgx_set_link_mode_args *args,
			 u32 speed, u8 duplex, u8 autoneg, u64 mode)
{
	/* Fill default values in case the user did not pass
	 * valid parameters
	 */
	if (args->duplex == DUPLEX_UNKNOWN)
		args->duplex = duplex;
	if (args->speed == SPEED_UNKNOWN)
		args->speed = speed;
	if (args->an == AUTONEG_UNKNOWN)
		args->an = autoneg;
	args->mode = mode;
	args->ports = 0;
}

static void otx2_map_ethtool_link_modes(u64 bitmask,
					struct cgx_set_link_mode_args *args)
{
	switch (bitmask) {
	case ETHTOOL_LINK_MODE_10baseT_Half_BIT:
		set_mod_args(args, 10, 1, 1, BIT_ULL(CGX_MODE_SGMII));
		break;
	case ETHTOOL_LINK_MODE_10baseT_Full_BIT:
		set_mod_args(args, 10, 0, 1, BIT_ULL(CGX_MODE_SGMII));
		break;
	case ETHTOOL_LINK_MODE_100baseT_Half_BIT:
		set_mod_args(args, 100, 1, 1, BIT_ULL(CGX_MODE_SGMII));
		break;
	case ETHTOOL_LINK_MODE_100baseT_Full_BIT:
		set_mod_args(args, 100, 0, 1, BIT_ULL(CGX_MODE_SGMII));
		break;
	case ETHTOOL_LINK_MODE_1000baseT_Half_BIT:
		set_mod_args(args, 1000, 1, 1, BIT_ULL(CGX_MODE_SGMII));
		break;
	case ETHTOOL_LINK_MODE_1000baseT_Full_BIT:
		set_mod_args(args, 1000, 0, 1, BIT_ULL(CGX_MODE_SGMII));
		break;
	case ETHTOOL_LINK_MODE_1000baseX_Full_BIT:
		set_mod_args(args, 1000, 0, 0, BIT_ULL(CGX_MODE_1000_BASEX));
		break;
	case ETHTOOL_LINK_MODE_10000baseT_Full_BIT:
		set_mod_args(args, 1000, 0, 1, BIT_ULL(CGX_MODE_QSGMII));
		break;
	case ETHTOOL_LINK_MODE_10000baseSR_Full_BIT:
		set_mod_args(args, 10000, 0, 0, BIT_ULL(CGX_MODE_10G_C2C));
		break;
	case ETHTOOL_LINK_MODE_10000baseLR_Full_BIT:
		set_mod_args(args, 10000, 0, 0, BIT_ULL(CGX_MODE_10G_C2M));
		break;
	case ETHTOOL_LINK_MODE_10000baseKR_Full_BIT:
		set_mod_args(args, 10000, 0, 1, BIT_ULL(CGX_MODE_10G_KR));
		break;
	case ETHTOOL_LINK_MODE_25000baseSR_Full_BIT:
		set_mod_args(args, 25000, 0, 0, BIT_ULL(CGX_MODE_25G_C2C));
		break;
	case ETHTOOL_LINK_MODE_25000baseCR_Full_BIT:
		set_mod_args(args, 25000, 0, 1, BIT_ULL(CGX_MODE_25G_CR));
		break;
	case ETHTOOL_LINK_MODE_25000baseKR_Full_BIT:
		set_mod_args(args, 25000, 0, 1, BIT_ULL(CGX_MODE_25G_KR));
		break;
	case ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT:
		set_mod_args(args, 40000, 0, 0, BIT_ULL(CGX_MODE_40G_C2C));
		break;
	case ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT:
		set_mod_args(args, 40000, 0, 0, BIT_ULL(CGX_MODE_40G_C2M));
		break;
	case ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT:
		set_mod_args(args, 40000, 0, 1, BIT_ULL(CGX_MODE_40G_CR4));
		break;
	case ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT:
		set_mod_args(args, 40000, 0, 1, BIT_ULL(CGX_MODE_40G_KR4));
		break;
	case ETHTOOL_LINK_MODE_50000baseSR_Full_BIT:
		set_mod_args(args, 50000, 0, 0, BIT_ULL(CGX_MODE_50G_C2C));
		break;
	case ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT:
		set_mod_args(args, 50000, 0, 0, BIT_ULL(CGX_MODE_50G_C2M));
		break;
	case ETHTOOL_LINK_MODE_50000baseCR_Full_BIT:
		set_mod_args(args, 50000, 0, 1, BIT_ULL(CGX_MODE_50G_CR));
		break;
	case ETHTOOL_LINK_MODE_50000baseKR_Full_BIT:
		set_mod_args(args, 50000, 0, 1, BIT_ULL(CGX_MODE_50G_KR));
		break;
	case ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT:
		set_mod_args(args, 100000, 0, 0, BIT_ULL(CGX_MODE_100G_C2C));
		break;
	case ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT:
		set_mod_args(args, 100000, 0, 0, BIT_ULL(CGX_MODE_100G_C2M));
		break;
	case ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT:
		set_mod_args(args, 100000, 0, 1, BIT_ULL(CGX_MODE_100G_CR4));
		break;
	case ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT:
		set_mod_args(args, 100000, 0, 1, BIT_ULL(CGX_MODE_100G_KR4));
		break;
	default:
		set_mod_args(args, 0, 1, 0, BIT_ULL(CGX_MODE_MAX));
		break;
	}
}

static inline void link_status_user_format(u64 lstat,
					   struct cgx_link_user_info *linfo,
					   struct cgx *cgx, u8 lmac_id)
{
	const char *lmac_string;

	linfo->link_up = FIELD_GET(RESP_LINKSTAT_UP, lstat);
	linfo->full_duplex = FIELD_GET(RESP_LINKSTAT_FDUPLEX, lstat);
	linfo->speed = cgx_speed_mbps[FIELD_GET(RESP_LINKSTAT_SPEED, lstat)];
	linfo->an = FIELD_GET(RESP_LINKSTAT_AN, lstat);
	linfo->fec = FIELD_GET(RESP_LINKSTAT_FEC, lstat);
	linfo->lmac_type_id = FIELD_GET(RESP_LINKSTAT_LMAC_TYPE, lstat);

	if (linfo->lmac_type_id >= LMAC_MODE_MAX) {
		dev_err(&cgx->pdev->dev, "Unknown lmac_type_id %d reported by firmware on cgx port%d:%d",
			linfo->lmac_type_id, cgx->cgx_id, lmac_id);
		strncpy(linfo->lmac_type, "Unknown", LMACTYPE_STR_LEN - 1);
		return;
	}

	lmac_string = cgx_lmactype_string[linfo->lmac_type_id];
	strncpy(linfo->lmac_type, lmac_string, LMACTYPE_STR_LEN - 1);
}

/* Hardware event handlers */
static inline void cgx_link_change_handler(u64 lstat,
					   struct lmac *lmac)
{
	struct cgx_link_user_info *linfo;
	struct cgx *cgx = lmac->cgx;
	struct cgx_link_event event;
	struct device *dev;
	int err_type;

	dev = &cgx->pdev->dev;

	link_status_user_format(lstat, &event.link_uinfo, cgx, lmac->lmac_id);
	err_type = FIELD_GET(RESP_LINKSTAT_ERRTYPE, lstat);

	event.cgx_id = cgx->cgx_id;
	event.lmac_id = lmac->lmac_id;

	/* update the local copy of link status */
	lmac->link_info = event.link_uinfo;
	linfo = &lmac->link_info;

	if (err_type == CGX_ERR_SPEED_CHANGE_INVALID)
		return;

	/* Ensure callback doesn't get unregistered until we finish it */
	spin_lock(&lmac->event_cb_lock);

	if (!lmac->event_cb.notify_link_chg) {
		dev_dbg(dev, "cgx port %d:%d Link change handler null",
			cgx->cgx_id, lmac->lmac_id);
		if (err_type != CGX_ERR_NONE) {
			dev_err(dev, "cgx port %d:%d Link error %d\n",
				cgx->cgx_id, lmac->lmac_id, err_type);
		}
		dev_info(dev, "cgx port %d:%d Link is %s %d Mbps\n",
			 cgx->cgx_id, lmac->lmac_id,
			 linfo->link_up ? "UP" : "DOWN", linfo->speed);
		goto err;
	}

	if (lmac->event_cb.notify_link_chg(&event, lmac->event_cb.data))
		dev_err(dev, "event notification failure\n");
err:
	spin_unlock(&lmac->event_cb_lock);
}

static inline bool cgx_cmdresp_is_linkevent(u64 event)
{
	u8 id;

	id = FIELD_GET(EVTREG_ID, event);
	if (id == CGX_CMD_LINK_BRING_UP ||
	    id == CGX_CMD_LINK_BRING_DOWN ||
	    id == CGX_CMD_MODE_CHANGE)
		return true;
	else
		return false;
}

static inline bool cgx_event_is_linkevent(u64 event)
{
	if (FIELD_GET(EVTREG_ID, event) == CGX_EVT_LINK_CHANGE)
		return true;
	else
		return false;
}

static irqreturn_t cgx_fwi_event_handler(int irq, void *data)
{
	u64 event, offset, clear_bit;
	struct lmac *lmac = data;
	struct cgx *cgx;

	cgx = lmac->cgx;

	/* Clear SW_INT for RPM and CMR_INT for CGX */
	offset     = cgx->mac_ops->int_register;
	clear_bit  = cgx->mac_ops->int_ena_bit;

	event = cgx_read(cgx, lmac->lmac_id, CGX_EVENT_REG);

	if (!FIELD_GET(EVTREG_ACK, event))
		return IRQ_NONE;

	switch (FIELD_GET(EVTREG_EVT_TYPE, event)) {
	case CGX_EVT_CMD_RESP:
		/* Copy the response. Since only one command is active at a
		 * time, there is no way a response can get overwritten
		 */
		lmac->resp = event;
		/* Ensure response is updated before thread context starts */
		smp_wmb();

		/* There won't be separate events for link change initiated
		 * from software; hence report the command responses as events
		 */
		if (cgx_cmdresp_is_linkevent(event))
			cgx_link_change_handler(event, lmac);

		/* Release thread waiting for completion */
		lmac->cmd_pend = false;
		wake_up(&lmac->wq_cmd_cmplt);
		break;
	case CGX_EVT_ASYNC:
		if (cgx_event_is_linkevent(event))
			cgx_link_change_handler(event, lmac);
		break;
	}

	/* Any new event or command response will be posted by firmware
	 * only after the current status is acked.
	 * Ack the interrupt register as well.
	 */
	cgx_write(lmac->cgx, lmac->lmac_id, CGX_EVENT_REG, 0);
	cgx_write(lmac->cgx, lmac->lmac_id, offset, clear_bit);

	return IRQ_HANDLED;
}

/* APIs for PHY management using CGX firmware interface */

/* callback registration for hardware events like link change */
int cgx_lmac_evh_register(struct cgx_event_cb *cb, void *cgxd, int lmac_id)
{
	struct cgx *cgx = cgxd;
	struct lmac *lmac;

	lmac = lmac_pdata(lmac_id, cgx);
	if (!lmac)
		return -ENODEV;

	lmac->event_cb = *cb;

	return 0;
}

int cgx_lmac_evh_unregister(void *cgxd, int lmac_id)
{
	struct lmac *lmac;
	unsigned long flags;
	struct cgx *cgx = cgxd;

	lmac = lmac_pdata(lmac_id, cgx);
	if (!lmac)
		return -ENODEV;

	spin_lock_irqsave(&lmac->event_cb_lock, flags);
	lmac->event_cb.notify_link_chg = NULL;
	lmac->event_cb.data = NULL;
	spin_unlock_irqrestore(&lmac->event_cb_lock, flags);

	return 0;
}

int cgx_get_fwdata_base(u64 *base)
{
	u64 req = 0, resp;
	struct cgx *cgx;
	int first_lmac;
	int err;

	cgx = list_first_entry_or_null(&cgx_list, struct cgx, cgx_list);
	if (!cgx)
		return -ENXIO;

	first_lmac = find_first_bit(&cgx->lmac_bmap, cgx->max_lmac_per_mac);
	req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_FWD_BASE, req);
	err = cgx_fwi_cmd_generic(req, &resp, cgx, first_lmac);
	if (!err)
		*base = FIELD_GET(RESP_FWD_BASE, resp);

	return err;
}

int cgx_set_link_mode(void *cgxd, struct cgx_set_link_mode_args args,
		      int cgx_id, int lmac_id)
{
	struct cgx *cgx = cgxd;
	u64 req = 0, resp;

	if (!cgx)
		return -ENODEV;

	if (args.mode)
		otx2_map_ethtool_link_modes(args.mode, &args);
	if (!args.speed && args.duplex && !args.an)
		return -EINVAL;

	req = FIELD_SET(CMDREG_ID, CGX_CMD_MODE_CHANGE, req);
	req = FIELD_SET(CMDMODECHANGE_SPEED,
			cgx_link_usertable_index_map(args.speed), req);
	req = FIELD_SET(CMDMODECHANGE_DUPLEX, args.duplex, req);
	req = FIELD_SET(CMDMODECHANGE_AN, args.an, req);
	req = FIELD_SET(CMDMODECHANGE_PORT, args.ports, req);
	req = FIELD_SET(CMDMODECHANGE_FLAGS, args.mode, req);

	return cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id);
}

int cgx_set_fec(u64 fec, int cgx_id, int lmac_id)
{
	u64 req = 0, resp;
	struct cgx *cgx;
	int err = 0;

	cgx = cgx_get_pdata(cgx_id);
	if (!cgx)
		return -ENXIO;

	req = FIELD_SET(CMDREG_ID, CGX_CMD_SET_FEC, req);
	req = FIELD_SET(CMDSETFEC, fec, req);
	err = cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id);
	if (err)
		return err;

	cgx->lmac_idmap[lmac_id]->link_info.fec =
			FIELD_GET(RESP_LINKSTAT_FEC, resp);
	return cgx->lmac_idmap[lmac_id]->link_info.fec;
}

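/* Usage sketch: cgx_set_fec() both requests the FEC change and caches
 * what the firmware actually granted, so a caller can compare the
 * return value against the requested mode. A hypothetical caller:
 *
 *	fec = cgx_set_fec(OTX2_FEC_BASER, cgx_id, lmac_id);
 *	if (fec < 0)
 *		return fec;	// command failed
 *	if (!(fec & OTX2_FEC_BASER))
 *		...		// firmware granted a different FEC mode
 */
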
cgx_get_phy_fec_stats(void * cgxd,int lmac_id)1515  int cgx_get_phy_fec_stats(void *cgxd, int lmac_id)
1516  {
1517  	struct cgx *cgx = cgxd;
1518  	u64 req = 0, resp;
1519  
1520  	if (!cgx)
1521  		return -ENODEV;
1522  
1523  	req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_PHY_FEC_STATS, req);
1524  	return cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id);
1525  }
1526  
cgx_fwi_link_change(struct cgx * cgx,int lmac_id,bool enable)1527  static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool enable)
1528  {
1529  	u64 req = 0;
1530  	u64 resp;
1531  
1532  	if (enable) {
1533  		req = FIELD_SET(CMDREG_ID, CGX_CMD_LINK_BRING_UP, req);
1534  		/* On CN10K firmware offloads link bring up/down operations to ECP
1535  		 * On Octeontx2 link operations are handled by firmware itself
1536  		 * which can cause mbox errors so configure maximum time firmware
1537  		 * poll for Link as 1000 ms
1538  		 */
1539  		if (!is_dev_rpm(cgx))
1540  			req = FIELD_SET(LINKCFG_TIMEOUT, 1000, req);
1541  
1542  	} else {
1543  		req = FIELD_SET(CMDREG_ID, CGX_CMD_LINK_BRING_DOWN, req);
1544  	}
1545  	return cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id);
1546  }
1547  
cgx_fwi_read_version(u64 * resp,struct cgx * cgx)1548  static inline int cgx_fwi_read_version(u64 *resp, struct cgx *cgx)
1549  {
1550  	int first_lmac = find_first_bit(&cgx->lmac_bmap, cgx->max_lmac_per_mac);
1551  	u64 req = 0;
1552  
1553  	req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_FW_VER, req);
1554  	return cgx_fwi_cmd_generic(req, resp, cgx, first_lmac);
1555  }
1556  
cgx_lmac_verify_fwi_version(struct cgx * cgx)1557  static int cgx_lmac_verify_fwi_version(struct cgx *cgx)
1558  {
1559  	struct device *dev = &cgx->pdev->dev;
1560  	int major_ver, minor_ver;
1561  	u64 resp;
1562  	int err;
1563  
1564  	if (!cgx->lmac_count)
1565  		return 0;
1566  
1567  	err = cgx_fwi_read_version(&resp, cgx);
1568  	if (err)
1569  		return err;
1570  
1571  	major_ver = FIELD_GET(RESP_MAJOR_VER, resp);
1572  	minor_ver = FIELD_GET(RESP_MINOR_VER, resp);
1573  	dev_dbg(dev, "Firmware command interface version = %d.%d\n",
1574  		major_ver, minor_ver);
1575  	if (major_ver != CGX_FIRMWARE_MAJOR_VER)
1576  		return -EIO;
1577  	else
1578  		return 0;
1579  }
1580  
cgx_lmac_linkup_work(struct work_struct * work)1581  static void cgx_lmac_linkup_work(struct work_struct *work)
1582  {
1583  	struct cgx *cgx = container_of(work, struct cgx, cgx_cmd_work);
1584  	struct device *dev = &cgx->pdev->dev;
1585  	int i, err;
1586  
1587  	/* Do Link up for all the enabled lmacs */
1588  	for_each_set_bit(i, &cgx->lmac_bmap, cgx->max_lmac_per_mac) {
1589  		err = cgx_fwi_link_change(cgx, i, true);
1590  		if (err)
1591  			dev_info(dev, "cgx port %d:%d Link up command failed\n",
1592  				 cgx->cgx_id, i);
1593  	}
1594  }
1595  
cgx_lmac_linkup_start(void * cgxd)1596  int cgx_lmac_linkup_start(void *cgxd)
1597  {
1598  	struct cgx *cgx = cgxd;
1599  
1600  	if (!cgx)
1601  		return -ENODEV;
1602  
1603  	queue_work(cgx->cgx_cmd_workq, &cgx->cgx_cmd_work);
1604  
1605  	return 0;
1606  }
1607  
cgx_lmac_reset(void * cgxd,int lmac_id,u8 pf_req_flr)1608  int cgx_lmac_reset(void *cgxd, int lmac_id, u8 pf_req_flr)
1609  {
1610  	struct cgx *cgx = cgxd;
1611  	u64 cfg;
1612  
1613  	if (!is_lmac_valid(cgx, lmac_id))
1614  		return -ENODEV;
1615  
1616  	/* Resetting PFC related CSRs */
1617  	cfg = 0xff;
1618  	cgx_write(cgxd, lmac_id, CGXX_CMRX_RX_LOGL_XON, cfg);
1619  
1620  	if (pf_req_flr)
1621  		cgx_lmac_internal_loopback(cgxd, lmac_id, false);
1622  	return 0;
1623  }
1624  
cgx_configure_interrupt(struct cgx * cgx,struct lmac * lmac,int cnt,bool req_free)1625  static int cgx_configure_interrupt(struct cgx *cgx, struct lmac *lmac,
1626  				   int cnt, bool req_free)
1627  {
1628  	struct mac_ops *mac_ops = cgx->mac_ops;
1629  	u64 offset, ena_bit;
1630  	unsigned int irq;
1631  	int err;
1632  
1633  	irq      = pci_irq_vector(cgx->pdev, mac_ops->lmac_fwi +
1634  				  cnt * mac_ops->irq_offset);
1635  	offset   = mac_ops->int_set_reg;
1636  	ena_bit  = mac_ops->int_ena_bit;
1637  
1638  	if (req_free) {
1639  		free_irq(irq, lmac);
1640  		return 0;
1641  	}
1642  
1643  	err = request_irq(irq, cgx_fwi_event_handler, 0, lmac->name, lmac);
1644  	if (err)
1645  		return err;
1646  
1647  	/* Enable interrupt */
1648  	cgx_write(cgx, lmac->lmac_id, offset, ena_bit);
1649  	return 0;
1650  }
1651  
cgx_get_nr_lmacs(void * cgxd)1652  int cgx_get_nr_lmacs(void *cgxd)
1653  {
1654  	struct cgx *cgx = cgxd;
1655  
1656  	return cgx_read(cgx, 0, CGXX_CMRX_RX_LMACS) & 0x7ULL;
1657  }
1658  
cgx_get_lmacid(void * cgxd,u8 lmac_index)1659  u8 cgx_get_lmacid(void *cgxd, u8 lmac_index)
1660  {
1661  	struct cgx *cgx = cgxd;
1662  
1663  	return cgx->lmac_idmap[lmac_index]->lmac_id;
1664  }
1665  
cgx_get_lmac_bmap(void * cgxd)1666  unsigned long cgx_get_lmac_bmap(void *cgxd)
1667  {
1668  	struct cgx *cgx = cgxd;
1669  
1670  	return cgx->lmac_bmap;
1671  }
1672  
cgx_lmac_init(struct cgx * cgx)1673  static int cgx_lmac_init(struct cgx *cgx)
1674  {
1675  	struct lmac *lmac;
1676  	u64 lmac_list;
1677  	int i, err;
1678  
1679  	/* lmac_list specifies which lmacs are enabled
1680  	 * when bit n is set to 1, LMAC[n] is enabled
1681  	 */
1682  	if (cgx->mac_ops->non_contiguous_serdes_lane) {
1683  		if (is_dev_rpm2(cgx))
1684  			lmac_list =
1685  				cgx_read(cgx, 0, RPM2_CMRX_RX_LMACS) & 0xFFULL;
1686  		else
1687  			lmac_list =
1688  				cgx_read(cgx, 0, CGXX_CMRX_RX_LMACS) & 0xFULL;
1689  	}
1690  
1691  	if (cgx->lmac_count > cgx->max_lmac_per_mac)
1692  		cgx->lmac_count = cgx->max_lmac_per_mac;
1693  
1694  	for (i = 0; i < cgx->lmac_count; i++) {
1695  		lmac = kzalloc(sizeof(struct lmac), GFP_KERNEL);
1696  		if (!lmac)
1697  			return -ENOMEM;
1698  		lmac->name = kcalloc(1, sizeof("cgx_fwi_xxx_yyy"), GFP_KERNEL);
1699  		if (!lmac->name) {
1700  			err = -ENOMEM;
1701  			goto err_lmac_free;
1702  		}
1703  		sprintf(lmac->name, "cgx_fwi_%d_%d", cgx->cgx_id, i);
		if (cgx->mac_ops->non_contiguous_serdes_lane) {
			lmac->lmac_id = __ffs64(lmac_list);
			lmac_list   &= ~BIT_ULL(lmac->lmac_id);
		} else {
			lmac->lmac_id = i;
		}

		lmac->cgx = cgx;
		lmac->mac_to_index_bmap.max =
				cgx->mac_ops->dmac_filter_count /
				cgx->lmac_count;

		err = rvu_alloc_bitmap(&lmac->mac_to_index_bmap);
		if (err)
			goto err_name_free;

		/* Reserve first entry for default MAC address */
		set_bit(0, lmac->mac_to_index_bmap.bmap);

		lmac->rx_fc_pfvf_bmap.max = 128;
		err = rvu_alloc_bitmap(&lmac->rx_fc_pfvf_bmap);
		if (err)
			goto err_dmac_bmap_free;

		lmac->tx_fc_pfvf_bmap.max = 128;
		err = rvu_alloc_bitmap(&lmac->tx_fc_pfvf_bmap);
		if (err)
			goto err_rx_fc_bmap_free;

		init_waitqueue_head(&lmac->wq_cmd_cmplt);
		mutex_init(&lmac->cmd_lock);
		spin_lock_init(&lmac->event_cb_lock);
		err = cgx_configure_interrupt(cgx, lmac, lmac->lmac_id, false);
		if (err)
			goto err_bitmap_free;

		/* Add reference */
		cgx->lmac_idmap[lmac->lmac_id] = lmac;
		set_bit(lmac->lmac_id, &cgx->lmac_bmap);
		cgx->mac_ops->mac_pause_frm_config(cgx, lmac->lmac_id, true);
		lmac->lmac_type = cgx->mac_ops->get_lmac_type(cgx, lmac->lmac_id);
	}

	/* Start X2P reset on given MAC block */
	cgx->mac_ops->mac_x2p_reset(cgx, true);
	return cgx_lmac_verify_fwi_version(cgx);

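	/* Unwind only the lmac that failed mid-setup; lmacs from earlier
	 * iterations are already in lmac_bmap and are released by
	 * cgx_lmac_exit() on the probe error path.
	 */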
err_bitmap_free:
	rvu_free_bitmap(&lmac->tx_fc_pfvf_bmap);
err_rx_fc_bmap_free:
	rvu_free_bitmap(&lmac->rx_fc_pfvf_bmap);
err_dmac_bmap_free:
	rvu_free_bitmap(&lmac->mac_to_index_bmap);
err_name_free:
	kfree(lmac->name);
err_lmac_free:
	kfree(lmac);
	return err;
}

static int cgx_lmac_exit(struct cgx *cgx)
{
	struct lmac *lmac;
	int i;

	if (cgx->cgx_cmd_workq) {
		destroy_workqueue(cgx->cgx_cmd_workq);
		cgx->cgx_cmd_workq = NULL;
	}

	/* Free all lmac related resources */
	for_each_set_bit(i, &cgx->lmac_bmap, cgx->max_lmac_per_mac) {
		lmac = cgx->lmac_idmap[i];
		if (!lmac)
			continue;
		cgx->mac_ops->mac_pause_frm_config(cgx, lmac->lmac_id, false);
		cgx_configure_interrupt(cgx, lmac, lmac->lmac_id, true);
		kfree(lmac->mac_to_index_bmap.bmap);
		kfree(lmac->name);
		kfree(lmac);
	}

	return 0;
}

static void cgx_populate_features(struct cgx *cgx)
{
	u64 cfg;

	cfg = cgx_read(cgx, 0, CGX_CONST);
	cgx->fifo_len = FIELD_GET(CGX_CONST_RXFIFO_SIZE, cfg);
	cgx->max_lmac_per_mac = FIELD_GET(CGX_CONST_MAX_LMACS, cfg);

	if (is_dev_rpm(cgx))
		cgx->hw_features = (RVU_LMAC_FEAT_DMACF | RVU_MAC_RPM |
				    RVU_LMAC_FEAT_FC | RVU_LMAC_FEAT_PTP);
	else
		cgx->hw_features = (RVU_LMAC_FEAT_FC  | RVU_LMAC_FEAT_HIGIG2 |
				    RVU_LMAC_FEAT_PTP | RVU_LMAC_FEAT_DMACF);
}

static u8 cgx_get_rxid_mapoffset(struct cgx *cgx)
{
	if (cgx->pdev->subsystem_device == PCI_SUBSYS_DEVID_CNF10KB_RPM ||
	    is_dev_rpm2(cgx))
		return 0x80;
	else
		return 0x60;
}

static void cgx_x2p_reset(void *cgxd, bool enable)
{
	struct cgx *cgx = cgxd;
	int lmac_id;
	u64 cfg;

	if (enable) {
		for_each_set_bit(lmac_id, &cgx->lmac_bmap, cgx->max_lmac_per_mac)
			cgx->mac_ops->mac_enadis_rx(cgx, lmac_id, false);

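		/* Presumably lets in-flight RX traffic drain before the
		 * reset bits below are set.
		 */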
		usleep_range(1000, 2000);

		cfg = cgx_read(cgx, 0, CGXX_CMR_GLOBAL_CONFIG);
		cfg |= cgx_get_nix_resetbit(cgx) | CGX_NSCI_DROP;
		cgx_write(cgx, 0, CGXX_CMR_GLOBAL_CONFIG, cfg);
	} else {
		cfg = cgx_read(cgx, 0, CGXX_CMR_GLOBAL_CONFIG);
		cfg &= ~(cgx_get_nix_resetbit(cgx) | CGX_NSCI_DROP);
		cgx_write(cgx, 0, CGXX_CMR_GLOBAL_CONFIG, cfg);
	}
}

static int cgx_enadis_rx(void *cgxd, int lmac_id, bool enable)
{
	struct cgx *cgx = cgxd;
	u64 cfg;

	if (!is_lmac_valid(cgx, lmac_id))
		return -ENODEV;

	cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG);
	if (enable)
		cfg |= DATA_PKT_RX_EN;
	else
		cfg &= ~DATA_PKT_RX_EN;
	cgx_write(cgx, lmac_id, CGXX_CMRX_CFG, cfg);
	return 0;
}

static struct mac_ops cgx_mac_ops = {
	.name		=	"cgx",
	.csr_offset	=	0,
	.lmac_offset	=	18,
	.int_register	=	CGXX_CMRX_INT,
	.int_set_reg	=	CGXX_CMRX_INT_ENA_W1S,
	.irq_offset	=	9,
	.int_ena_bit	=	FW_CGX_INT,
	.lmac_fwi	=	CGX_LMAC_FWI,
	.non_contiguous_serdes_lane = false,
	.rx_stats_cnt	=	9,
	.tx_stats_cnt	=	18,
	.dmac_filter_count =	32,
	.get_nr_lmacs	=	cgx_get_nr_lmacs,
	.get_lmac_type	=	cgx_get_lmac_type,
	.lmac_fifo_len	=	cgx_get_lmac_fifo_len,
	.mac_lmac_intl_lbk =	cgx_lmac_internal_loopback,
	.mac_get_rx_stats =	cgx_get_rx_stats,
	.mac_get_tx_stats =	cgx_get_tx_stats,
	.get_fec_stats	=	cgx_get_fec_stats,
	.mac_enadis_rx_pause_fwding =	cgx_lmac_enadis_rx_pause_fwding,
	.mac_get_pause_frm_status =	cgx_lmac_get_pause_frm_status,
	.mac_enadis_pause_frm =		cgx_lmac_enadis_pause_frm,
	.mac_pause_frm_config =		cgx_lmac_pause_frm_config,
	.mac_enadis_ptp_config =	cgx_lmac_ptp_config,
	.mac_rx_tx_enable =		cgx_lmac_rx_tx_enable,
	.mac_tx_enable =		cgx_lmac_tx_enable,
	.pfc_config =			cgx_lmac_pfc_config,
	.mac_get_pfc_frm_cfg =		cgx_lmac_get_pfc_frm_cfg,
	.mac_reset =			cgx_lmac_reset,
	.mac_stats_reset =		cgx_stats_reset,
	.mac_x2p_reset =		cgx_x2p_reset,
	.mac_enadis_rx =		cgx_enadis_rx,
};
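
/* Note: devices probed as RPM get their mac_ops from rpm_get_mac_ops()
 * instead of this table; see cgx_probe() below.
 */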

static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct device *dev = &pdev->dev;
	struct cgx *cgx;
	int err, nvec;

	cgx = devm_kzalloc(dev, sizeof(*cgx), GFP_KERNEL);
	if (!cgx)
		return -ENOMEM;
	cgx->pdev = pdev;

	pci_set_drvdata(pdev, cgx);

	/* Select the silicon specific mac_ops (RPM vs CGX) */
	if (is_dev_rpm(cgx))
		cgx->mac_ops = rpm_get_mac_ops(cgx);
	else
		cgx->mac_ops = &cgx_mac_ops;

	cgx->mac_ops->rxid_map_offset = cgx_get_rxid_mapoffset(cgx);

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(dev, "Failed to enable PCI device\n");
		pci_set_drvdata(pdev, NULL);
		return err;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "PCI request regions failed 0x%x\n", err);
		goto err_disable_device;
	}

	/* Map configuration registers */
	cgx->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
	if (!cgx->reg_base) {
		dev_err(dev, "CGX: Cannot map CSR memory space, aborting\n");
		err = -ENOMEM;
		goto err_release_regions;
	}

	cgx->lmac_count = cgx->mac_ops->get_nr_lmacs(cgx);
	if (!cgx->lmac_count) {
		dev_notice(dev, "CGX %d LMAC count is zero, skipping probe\n", cgx->cgx_id);
		err = -EOPNOTSUPP;
		goto err_release_regions;
	}

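	/* The device is expected to provide all of its MSI-X vectors;
	 * request exactly that many (min == max).
	 */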
	nvec = pci_msix_vec_count(cgx->pdev);
	err = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX);
	if (err < 0 || err != nvec) {
		dev_err(dev, "Request for %d msix vectors failed, err %d\n",
			nvec, err);
		goto err_release_regions;
	}

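	/* The CGX instance id is encoded in the upper address bits of the
	 * register BAR.
	 */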
	cgx->cgx_id = (pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM) >> 24)
		& CGX_ID_MASK;

	/* Init wq for processing linkup requests */
	INIT_WORK(&cgx->cgx_cmd_work, cgx_lmac_linkup_work);
	cgx->cgx_cmd_workq = alloc_workqueue("cgx_cmd_workq", 0, 0);
	if (!cgx->cgx_cmd_workq) {
		dev_err(dev, "alloc workqueue failed for cgx cmd\n");
		err = -ENOMEM;
		goto err_free_irq_vectors;
	}

	list_add(&cgx->cgx_list, &cgx_list);

	cgx_populate_features(cgx);

	mutex_init(&cgx->lock);

	err = cgx_lmac_init(cgx);
	if (err)
		goto err_release_lmac;

	return 0;

err_release_lmac:
	cgx_lmac_exit(cgx);
	list_del(&cgx->cgx_list);
err_free_irq_vectors:
	pci_free_irq_vectors(pdev);
err_release_regions:
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}

static void cgx_remove(struct pci_dev *pdev)
{
	struct cgx *cgx = pci_get_drvdata(pdev);

	if (cgx) {
		cgx_lmac_exit(cgx);
		list_del(&cgx->cgx_list);
	}
	pci_free_irq_vectors(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

struct pci_driver cgx_driver = {
	.name = DRV_NAME,
	.id_table = cgx_id_table,
	.probe = cgx_probe,
	.remove = cgx_remove,
};