xref: /openbmc/linux/drivers/net/ethernet/marvell/octeontx2/af/mcs.c (revision d0c44de2d8ffd2e4780d360b34ee6614aa4af080)
1  // SPDX-License-Identifier: GPL-2.0
2  /* Marvell MCS driver
3   *
4   * Copyright (C) 2022 Marvell.
5   */
6  
7  #include <linux/bitfield.h>
8  #include <linux/delay.h>
9  #include <linux/device.h>
10  #include <linux/module.h>
11  #include <linux/pci.h>
12  
13  #include "mcs.h"
14  #include "mcs_reg.h"
15  
16  #define DRV_NAME	"Marvell MCS Driver"
17  
18  #define PCI_CFG_REG_BAR_NUM	0
19  
20  static const struct pci_device_id mcs_id_table[] = {
21  	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_MCS) },
22  	{ 0, }  /* end of table */
23  };
24  
25  static LIST_HEAD(mcs_list);
26  
27  void mcs_get_tx_secy_stats(struct mcs *mcs, struct mcs_secy_stats *stats, int id)
28  {
29  	u64 reg;
30  
31  	reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTCTLBCPKTSX(id);
32  	stats->ctl_pkt_bcast_cnt = mcs_reg_read(mcs, reg);
33  
34  	reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTCTLMCPKTSX(id);
35  	stats->ctl_pkt_mcast_cnt = mcs_reg_read(mcs, reg);
36  
37  	reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTCTLOCTETSX(id);
38  	stats->ctl_octet_cnt = mcs_reg_read(mcs, reg);
39  
40  	reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTCTLUCPKTSX(id);
41  	stats->ctl_pkt_ucast_cnt = mcs_reg_read(mcs, reg);
42  
43  	reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTUNCTLBCPKTSX(id);
44  	stats->unctl_pkt_bcast_cnt = mcs_reg_read(mcs, reg);
45  
46  	reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTUNCTLMCPKTSX(id);
47  	stats->unctl_pkt_mcast_cnt = mcs_reg_read(mcs, reg);
48  
49  	reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTUNCTLOCTETSX(id);
50  	stats->unctl_octet_cnt = mcs_reg_read(mcs, reg);
51  
52  	reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTUNCTLUCPKTSX(id);
53  	stats->unctl_pkt_ucast_cnt = mcs_reg_read(mcs, reg);
54  
55  	reg = MCSX_CSE_TX_MEM_SLAVE_OUTOCTETSSECYENCRYPTEDX(id);
56  	stats->octet_encrypted_cnt =  mcs_reg_read(mcs, reg);
57  
58  	reg = MCSX_CSE_TX_MEM_SLAVE_OUTOCTETSSECYPROTECTEDX(id);
59  	stats->octet_protected_cnt =  mcs_reg_read(mcs, reg);
60  
61  	reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSECYNOACTIVESAX(id);
62  	stats->pkt_noactivesa_cnt =  mcs_reg_read(mcs, reg);
63  
64  	reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSECYTOOLONGX(id);
65  	stats->pkt_toolong_cnt =  mcs_reg_read(mcs, reg);
66  
67  	reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSECYUNTAGGEDX(id);
68  	stats->pkt_untagged_cnt =  mcs_reg_read(mcs, reg);
69  }
70  
71  void mcs_get_rx_secy_stats(struct mcs *mcs, struct mcs_secy_stats *stats, int id)
72  {
73  	u64 reg;
74  
75  	reg = MCSX_CSE_RX_MEM_SLAVE_IFINCTLBCPKTSX(id);
76  	stats->ctl_pkt_bcast_cnt = mcs_reg_read(mcs, reg);
77  
78  	reg = MCSX_CSE_RX_MEM_SLAVE_IFINCTLMCPKTSX(id);
79  	stats->ctl_pkt_mcast_cnt = mcs_reg_read(mcs, reg);
80  
81  	reg = MCSX_CSE_RX_MEM_SLAVE_IFINCTLOCTETSX(id);
82  	stats->ctl_octet_cnt = mcs_reg_read(mcs, reg);
83  
84  	reg = MCSX_CSE_RX_MEM_SLAVE_IFINCTLUCPKTSX(id);
85  	stats->ctl_pkt_ucast_cnt = mcs_reg_read(mcs, reg);
86  
87  	reg = MCSX_CSE_RX_MEM_SLAVE_IFINUNCTLBCPKTSX(id);
88  	stats->unctl_pkt_bcast_cnt = mcs_reg_read(mcs, reg);
89  
90  	reg = MCSX_CSE_RX_MEM_SLAVE_IFINUNCTLMCPKTSX(id);
91  	stats->unctl_pkt_mcast_cnt = mcs_reg_read(mcs, reg);
92  
93  	reg = MCSX_CSE_RX_MEM_SLAVE_IFINUNCTLOCTETSX(id);
94  	stats->unctl_octet_cnt = mcs_reg_read(mcs, reg);
95  
96  	reg = MCSX_CSE_RX_MEM_SLAVE_IFINUNCTLUCPKTSX(id);
97  	stats->unctl_pkt_ucast_cnt = mcs_reg_read(mcs, reg);
98  
99  	reg = MCSX_CSE_RX_MEM_SLAVE_INOCTETSSECYDECRYPTEDX(id);
100  	stats->octet_decrypted_cnt =  mcs_reg_read(mcs, reg);
101  
102  	reg = MCSX_CSE_RX_MEM_SLAVE_INOCTETSSECYVALIDATEX(id);
103  	stats->octet_validated_cnt =  mcs_reg_read(mcs, reg);
104  
105  	reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSCTRLPORTDISABLEDX(id);
106  	stats->pkt_port_disabled_cnt =  mcs_reg_read(mcs, reg);
107  
108  	reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYBADTAGX(id);
109  	stats->pkt_badtag_cnt =  mcs_reg_read(mcs, reg);
110  
111  	reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYNOSAX(id);
112  	stats->pkt_nosa_cnt = mcs_reg_read(mcs, reg);
113  
114  	reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYNOSAERRORX(id);
115  	stats->pkt_nosaerror_cnt = mcs_reg_read(mcs, reg);
116  
117  	reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYTAGGEDCTLX(id);
118  	stats->pkt_tagged_ctl_cnt = mcs_reg_read(mcs, reg);
119  
120  	reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYUNTAGGEDX(id);
121  	stats->pkt_untaged_cnt = mcs_reg_read(mcs, reg);
122  
123  	reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYCTLX(id);
124  	stats->pkt_ctl_cnt = mcs_reg_read(mcs, reg);
125  
126  	if (mcs->hw->mcs_blks > 1) {
127  		reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYNOTAGX(id);
128  		stats->pkt_notag_cnt = mcs_reg_read(mcs, reg);
129  	}
130  }
131  
132  void mcs_get_flowid_stats(struct mcs *mcs, struct mcs_flowid_stats *stats,
133  			  int id, int dir)
134  {
135  	u64 reg;
136  
137  	if (dir == MCS_RX)
138  		reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSFLOWIDTCAMHITX(id);
139  	else
140  		reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSFLOWIDTCAMHITX(id);
141  
142  	stats->tcam_hit_cnt = mcs_reg_read(mcs, reg);
143  }
144  
145  void mcs_get_port_stats(struct mcs *mcs, struct mcs_port_stats *stats,
146  			int id, int dir)
147  {
148  	u64 reg;
149  
150  	if (dir == MCS_RX) {
151  		reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSFLOWIDTCAMMISSX(id);
152  		stats->tcam_miss_cnt = mcs_reg_read(mcs, reg);
153  
154  		reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSPARSEERRX(id);
155  		stats->parser_err_cnt = mcs_reg_read(mcs, reg);
156  		if (mcs->hw->mcs_blks > 1) {
157  			reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSEARLYPREEMPTERRX(id);
158  			stats->preempt_err_cnt = mcs_reg_read(mcs, reg);
159  		}
160  	} else {
161  		reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSFLOWIDTCAMMISSX(id);
162  		stats->tcam_miss_cnt = mcs_reg_read(mcs, reg);
163  
164  		reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSPARSEERRX(id);
165  		stats->parser_err_cnt = mcs_reg_read(mcs, reg);
166  
167  		reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSECTAGINSERTIONERRX(id);
168  		stats->sectag_insert_err_cnt = mcs_reg_read(mcs, reg);
169  	}
170  }
171  
172  void mcs_get_sa_stats(struct mcs *mcs, struct mcs_sa_stats *stats, int id, int dir)
173  {
174  	u64 reg;
175  
176  	if (dir == MCS_RX) {
177  		reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSAINVALIDX(id);
178  		stats->pkt_invalid_cnt = mcs_reg_read(mcs, reg);
179  
180  		reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSANOTUSINGSAERRORX(id);
181  		stats->pkt_nosaerror_cnt = mcs_reg_read(mcs, reg);
182  
183  		reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSANOTVALIDX(id);
184  		stats->pkt_notvalid_cnt = mcs_reg_read(mcs, reg);
185  
186  		reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSAOKX(id);
187  		stats->pkt_ok_cnt = mcs_reg_read(mcs, reg);
188  
189  		reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSAUNUSEDSAX(id);
190  		stats->pkt_nosa_cnt = mcs_reg_read(mcs, reg);
191  	} else {
192  		reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSAENCRYPTEDX(id);
193  		stats->pkt_encrypt_cnt = mcs_reg_read(mcs, reg);
194  
195  		reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSAPROTECTEDX(id);
196  		stats->pkt_protected_cnt = mcs_reg_read(mcs, reg);
197  	}
198  }
199  
200  void mcs_get_sc_stats(struct mcs *mcs, struct mcs_sc_stats *stats,
201  		      int id, int dir)
202  {
203  	u64 reg;
204  
205  	if (dir == MCS_RX) {
206  		reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCCAMHITX(id);
207  		stats->hit_cnt = mcs_reg_read(mcs, reg);
208  
209  		reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCINVALIDX(id);
210  		stats->pkt_invalid_cnt = mcs_reg_read(mcs, reg);
211  
212  		reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCLATEORDELAYEDX(id);
213  		stats->pkt_late_cnt = mcs_reg_read(mcs, reg);
214  
215  		reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCNOTVALIDX(id);
216  		stats->pkt_notvalid_cnt = mcs_reg_read(mcs, reg);
217  
218  		reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCUNCHECKEDX(id);
219  		stats->pkt_unchecked_cnt = mcs_reg_read(mcs, reg);
220  
221  		if (mcs->hw->mcs_blks > 1) {
222  			reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCDELAYEDX(id);
223  			stats->pkt_delay_cnt = mcs_reg_read(mcs, reg);
224  
225  			reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCOKX(id);
226  			stats->pkt_ok_cnt = mcs_reg_read(mcs, reg);
227  		}
228  		if (mcs->hw->mcs_blks == 1) {
229  			reg = MCSX_CSE_RX_MEM_SLAVE_INOCTETSSCDECRYPTEDX(id);
230  			stats->octet_decrypt_cnt = mcs_reg_read(mcs, reg);
231  
232  			reg = MCSX_CSE_RX_MEM_SLAVE_INOCTETSSCVALIDATEX(id);
233  			stats->octet_validate_cnt = mcs_reg_read(mcs, reg);
234  		}
235  	} else {
236  		reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSCENCRYPTEDX(id);
237  		stats->pkt_encrypt_cnt = mcs_reg_read(mcs, reg);
238  
239  		reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSCPROTECTEDX(id);
240  		stats->pkt_protected_cnt = mcs_reg_read(mcs, reg);
241  
242  		if (mcs->hw->mcs_blks == 1) {
243  			reg = MCSX_CSE_TX_MEM_SLAVE_OUTOCTETSSCENCRYPTEDX(id);
244  			stats->octet_encrypt_cnt = mcs_reg_read(mcs, reg);
245  
246  			reg = MCSX_CSE_TX_MEM_SLAVE_OUTOCTETSSCPROTECTEDX(id);
247  			stats->octet_protected_cnt = mcs_reg_read(mcs, reg);
248  		}
249  	}
250  }
251  
252  void mcs_clear_stats(struct mcs *mcs, u8 type, u8 id, int dir)
253  {
254  	struct mcs_flowid_stats flowid_st;
255  	struct mcs_port_stats port_st;
256  	struct mcs_secy_stats secy_st;
257  	struct mcs_sc_stats sc_st;
258  	struct mcs_sa_stats sa_st;
259  	u64 reg;
260  
261  	if (dir == MCS_RX)
262  		reg = MCSX_CSE_RX_SLAVE_CTRL;
263  	else
264  		reg = MCSX_CSE_TX_SLAVE_CTRL;
265  
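	/* Assumption: bit 0 of the CSE slave CTRL register enables clear-on-read,
	 * so reading each counter below resets it; the bit is cleared again once
	 * the reads are done.
	 */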
266  	mcs_reg_write(mcs, reg, BIT_ULL(0));
267  
268  	switch (type) {
269  	case MCS_FLOWID_STATS:
270  		mcs_get_flowid_stats(mcs, &flowid_st, id, dir);
271  		break;
272  	case MCS_SECY_STATS:
273  		if (dir == MCS_RX)
274  			mcs_get_rx_secy_stats(mcs, &secy_st, id);
275  		else
276  			mcs_get_tx_secy_stats(mcs, &secy_st, id);
277  		break;
278  	case MCS_SC_STATS:
279  		mcs_get_sc_stats(mcs, &sc_st, id, dir);
280  		break;
281  	case MCS_SA_STATS:
282  		mcs_get_sa_stats(mcs, &sa_st, id, dir);
283  		break;
284  	case MCS_PORT_STATS:
285  		mcs_get_port_stats(mcs, &port_st, id, dir);
286  		break;
287  	}
288  
289  	mcs_reg_write(mcs, reg, 0x0);
290  }
291  
292  int mcs_clear_all_stats(struct mcs *mcs, u16 pcifunc, int dir)
293  {
294  	struct mcs_rsrc_map *map;
295  	int id;
296  
297  	if (dir == MCS_RX)
298  		map = &mcs->rx;
299  	else
300  		map = &mcs->tx;
301  
302  	/* Clear FLOWID stats */
303  	for (id = 0; id < map->flow_ids.max; id++) {
304  		if (map->flowid2pf_map[id] != pcifunc)
305  			continue;
306  		mcs_clear_stats(mcs, MCS_FLOWID_STATS, id, dir);
307  	}
308  
309  	/* Clear SECY stats */
310  	for (id = 0; id < map->secy.max; id++) {
311  		if (map->secy2pf_map[id] != pcifunc)
312  			continue;
313  		mcs_clear_stats(mcs, MCS_SECY_STATS, id, dir);
314  	}
315  
316  	/* Clear SC stats */
317  	for (id = 0; id < map->secy.max; id++) {
318  		if (map->sc2pf_map[id] != pcifunc)
319  			continue;
320  		mcs_clear_stats(mcs, MCS_SC_STATS, id, dir);
321  	}
322  
323  	/* Clear SA stats */
324  	for (id = 0; id < map->sa.max; id++) {
325  		if (map->sa2pf_map[id] != pcifunc)
326  			continue;
327  		mcs_clear_stats(mcs, MCS_SA_STATS, id, dir);
328  	}
329  	return 0;
330  }
331  
332  void mcs_pn_table_write(struct mcs *mcs, u8 pn_id, u64 next_pn, u8 dir)
333  {
334  	u64 reg;
335  
336  	if (dir == MCS_RX)
337  		reg = MCSX_CPM_RX_SLAVE_SA_PN_TABLE_MEMX(pn_id);
338  	else
339  		reg = MCSX_CPM_TX_SLAVE_SA_PN_TABLE_MEMX(pn_id);
340  	mcs_reg_write(mcs, reg, next_pn);
341  }
342  
343  void cn10kb_mcs_tx_sa_mem_map_write(struct mcs *mcs, struct mcs_tx_sc_sa_map *map)
344  {
345  	u64 reg, val;
346  
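	/* SA_MAP_MEM_0 word as packed below: sa_index0 in bits [7:0], sa_index1
	 * in bits [16:9], rekey_ena bit 18, sa_index0_vld bit 19, sa_index1_vld
	 * bit 20, tx_sa_active bit 21 and the low 42 bits of sectag_sci from
	 * bit 22; the remaining SCI bits go into SA_MAP_MEM_1.
	 */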
347  	val = (map->sa_index0 & 0xFF) |
348  	      (map->sa_index1 & 0xFF) << 9 |
349  	      (map->rekey_ena & 0x1) << 18 |
350  	      (map->sa_index0_vld & 0x1) << 19 |
351  	      (map->sa_index1_vld & 0x1) << 20 |
352  	      (map->tx_sa_active & 0x1) << 21 |
353  	      map->sectag_sci << 22;
354  	reg = MCSX_CPM_TX_SLAVE_SA_MAP_MEM_0X(map->sc_id);
355  	mcs_reg_write(mcs, reg, val);
356  
357  	val = map->sectag_sci >> 42;
358  	reg = MCSX_CPM_TX_SLAVE_SA_MAP_MEM_1X(map->sc_id);
359  	mcs_reg_write(mcs, reg, val);
360  }
361  
362  void cn10kb_mcs_rx_sa_mem_map_write(struct mcs *mcs, struct mcs_rx_sc_sa_map *map)
363  {
364  	u64 val, reg;
365  
366  	val = (map->sa_index & 0xFF) | map->sa_in_use << 9;
367  
368  	reg = MCSX_CPM_RX_SLAVE_SA_MAP_MEMX((4 * map->sc_id) + map->an);
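	/* The RX SA map is indexed per (SC, AN) pair: four consecutive entries
	 * per SC, one for each association number.
	 */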
369  	mcs_reg_write(mcs, reg, val);
370  }
371  
372  void mcs_sa_plcy_write(struct mcs *mcs, u64 *plcy, int sa_id, int dir)
373  {
374  	int reg_id;
375  	u64 reg;
376  
377  	if (dir == MCS_RX) {
378  		for (reg_id = 0; reg_id < 8; reg_id++) {
379  			reg =  MCSX_CPM_RX_SLAVE_SA_PLCY_MEMX(reg_id, sa_id);
380  			mcs_reg_write(mcs, reg, plcy[reg_id]);
381  		}
382  	} else {
383  		for (reg_id = 0; reg_id < 9; reg_id++) {
384  			reg =  MCSX_CPM_TX_SLAVE_SA_PLCY_MEMX(reg_id, sa_id);
385  			mcs_reg_write(mcs, reg, plcy[reg_id]);
386  		}
387  	}
388  }
389  
390  void mcs_ena_dis_sc_cam_entry(struct mcs *mcs, int sc_id, int ena)
391  {
392  	u64 reg, val;
393  
394  	reg = MCSX_CPM_RX_SLAVE_SC_CAM_ENA(0);
395  	if (sc_id > 63)
396  		reg = MCSX_CPM_RX_SLAVE_SC_CAM_ENA(1);
397  
398  	if (ena)
399  		val = mcs_reg_read(mcs, reg) | BIT_ULL(sc_id);
400  	else
401  		val = mcs_reg_read(mcs, reg) & ~BIT_ULL(sc_id);
402  
403  	mcs_reg_write(mcs, reg, val);
404  }
405  
406  void mcs_rx_sc_cam_write(struct mcs *mcs, u64 sci, u64 secy, int sc_id)
407  {
408  	mcs_reg_write(mcs, MCSX_CPM_RX_SLAVE_SC_CAMX(0, sc_id), sci);
409  	mcs_reg_write(mcs, MCSX_CPM_RX_SLAVE_SC_CAMX(1, sc_id), secy);
410  	/* Enable SC CAM */
411  	mcs_ena_dis_sc_cam_entry(mcs, sc_id, true);
412  }
413  
414  void mcs_secy_plcy_write(struct mcs *mcs, u64 plcy, int secy_id, int dir)
415  {
416  	u64 reg;
417  
418  	if (dir == MCS_RX)
419  		reg = MCSX_CPM_RX_SLAVE_SECY_PLCY_MEM_0X(secy_id);
420  	else
421  		reg = MCSX_CPM_TX_SLAVE_SECY_PLCY_MEMX(secy_id);
422  
423  	mcs_reg_write(mcs, reg, plcy);
424  
425  	if (mcs->hw->mcs_blks == 1 && dir == MCS_RX)
426  		mcs_reg_write(mcs, MCSX_CPM_RX_SLAVE_SECY_PLCY_MEM_1X(secy_id), 0x0ull);
427  }
428  
429  void cn10kb_mcs_flowid_secy_map(struct mcs *mcs, struct secy_mem_map *map, int dir)
430  {
431  	u64 reg, val;
432  
433  	val = (map->secy & 0x7F) | (map->ctrl_pkt & 0x1) << 8;
434  	if (dir == MCS_RX) {
435  		reg = MCSX_CPM_RX_SLAVE_SECY_MAP_MEMX(map->flow_id);
436  	} else {
437  		val |= (map->sc & 0x7F) << 9;
438  		reg = MCSX_CPM_TX_SLAVE_SECY_MAP_MEM_0X(map->flow_id);
439  	}
440  
441  	mcs_reg_write(mcs, reg, val);
442  }
443  
444  void mcs_ena_dis_flowid_entry(struct mcs *mcs, int flow_id, int dir, int ena)
445  {
446  	u64 reg, val;
447  
448  	if (dir == MCS_RX) {
449  		reg = MCSX_CPM_RX_SLAVE_FLOWID_TCAM_ENA_0;
450  		if (flow_id > 63)
451  			reg = MCSX_CPM_RX_SLAVE_FLOWID_TCAM_ENA_1;
452  	} else {
453  		reg = MCSX_CPM_TX_SLAVE_FLOWID_TCAM_ENA_0;
454  		if (flow_id > 63)
455  			reg = MCSX_CPM_TX_SLAVE_FLOWID_TCAM_ENA_1;
456  	}
457  
458  	/* Enable/Disable the tcam entry */
459  	if (ena)
460  		val = mcs_reg_read(mcs, reg) | BIT_ULL(flow_id);
461  	else
462  		val = mcs_reg_read(mcs, reg) & ~BIT_ULL(flow_id);
463  
464  	mcs_reg_write(mcs, reg, val);
465  }
466  
467  void mcs_flowid_entry_write(struct mcs *mcs, u64 *data, u64 *mask, int flow_id, int dir)
468  {
469  	int reg_id;
470  	u64 reg;
471  
472  	if (dir == MCS_RX) {
473  		for (reg_id = 0; reg_id < 4; reg_id++) {
474  			reg = MCSX_CPM_RX_SLAVE_FLOWID_TCAM_DATAX(reg_id, flow_id);
475  			mcs_reg_write(mcs, reg, data[reg_id]);
476  		}
477  		for (reg_id = 0; reg_id < 4; reg_id++) {
478  			reg = MCSX_CPM_RX_SLAVE_FLOWID_TCAM_MASKX(reg_id, flow_id);
479  			mcs_reg_write(mcs, reg, mask[reg_id]);
480  		}
481  	} else {
482  		for (reg_id = 0; reg_id < 4; reg_id++) {
483  			reg = MCSX_CPM_TX_SLAVE_FLOWID_TCAM_DATAX(reg_id, flow_id);
484  			mcs_reg_write(mcs, reg, data[reg_id]);
485  		}
486  		for (reg_id = 0; reg_id < 4; reg_id++) {
487  			reg = MCSX_CPM_TX_SLAVE_FLOWID_TCAM_MASKX(reg_id, flow_id);
488  			mcs_reg_write(mcs, reg, mask[reg_id]);
489  		}
490  	}
491  }
492  
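/* Reserve the last flowid TCAM entry and SecY on both RX and TX and program
 * them as a bypass rule: an all-ones TCAM mask (assumed here to mark every
 * key bit as don't-care) so any packet matches, with permissive SecY policies
 * (control port enabled, validate-frames NULL on RX, max MTU on TX).
 */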
493  int mcs_install_flowid_bypass_entry(struct mcs *mcs)
494  {
495  	int flow_id, secy_id, reg_id;
496  	struct secy_mem_map map;
497  	u64 reg, plcy = 0;
498  
499  	/* Flow entry */
500  	flow_id = mcs->hw->tcam_entries - MCS_RSRC_RSVD_CNT;
501  	__set_bit(flow_id, mcs->rx.flow_ids.bmap);
502  	__set_bit(flow_id, mcs->tx.flow_ids.bmap);
503  
504  	for (reg_id = 0; reg_id < 4; reg_id++) {
505  		reg = MCSX_CPM_RX_SLAVE_FLOWID_TCAM_MASKX(reg_id, flow_id);
506  		mcs_reg_write(mcs, reg, GENMASK_ULL(63, 0));
507  	}
508  	for (reg_id = 0; reg_id < 4; reg_id++) {
509  		reg = MCSX_CPM_TX_SLAVE_FLOWID_TCAM_MASKX(reg_id, flow_id);
510  		mcs_reg_write(mcs, reg, GENMASK_ULL(63, 0));
511  	}
512  	/* secy */
513  	secy_id = mcs->hw->secy_entries - MCS_RSRC_RSVD_CNT;
514  	__set_bit(secy_id, mcs->rx.secy.bmap);
515  	__set_bit(secy_id, mcs->tx.secy.bmap);
516  
517  	/* Set validate frames to NULL and enable control port */
518  	plcy = 0x7ull;
519  	if (mcs->hw->mcs_blks > 1)
520  		plcy = BIT_ULL(0) | 0x3ull << 4;
521  	mcs_secy_plcy_write(mcs, plcy, secy_id, MCS_RX);
522  
523  	/* Enable control port and set mtu to max */
524  	plcy = BIT_ULL(0) | GENMASK_ULL(43, 28);
525  	if (mcs->hw->mcs_blks > 1)
526  		plcy = BIT_ULL(0) | GENMASK_ULL(63, 48);
527  	mcs_secy_plcy_write(mcs, plcy, secy_id, MCS_TX);
528  
529  	/* Map flowid to secy */
530  	map.secy = secy_id;
531  	map.ctrl_pkt = 0;
532  	map.flow_id = flow_id;
533  	mcs->mcs_ops->mcs_flowid_secy_map(mcs, &map, MCS_RX);
534  	map.sc = secy_id;
535  	mcs->mcs_ops->mcs_flowid_secy_map(mcs, &map, MCS_TX);
536  
537  	/* Enable Flowid entry */
538  	mcs_ena_dis_flowid_entry(mcs, flow_id, MCS_RX, true);
539  	mcs_ena_dis_flowid_entry(mcs, flow_id, MCS_TX, true);
540  
541  	return 0;
542  }
543  
544  void mcs_clear_secy_plcy(struct mcs *mcs, int secy_id, int dir)
545  {
546  	struct mcs_rsrc_map *map;
547  	int flow_id;
548  
549  	if (dir == MCS_RX)
550  		map = &mcs->rx;
551  	else
552  		map = &mcs->tx;
553  
554  	/* Clear secy memory to zero */
555  	mcs_secy_plcy_write(mcs, 0, secy_id, dir);
556  
557  	/* Disable the tcam entry using this secy */
558  	for (flow_id = 0; flow_id < map->flow_ids.max; flow_id++) {
559  		if (map->flowid2secy_map[flow_id] != secy_id)
560  			continue;
561  		mcs_ena_dis_flowid_entry(mcs, flow_id, dir, false);
562  	}
563  }
564  
565  int mcs_alloc_ctrlpktrule(struct rsrc_bmap *rsrc, u16 *pf_map, u16 offset, u16 pcifunc)
566  {
567  	int rsrc_id;
568  
569  	if (!rsrc->bmap)
570  		return -EINVAL;
571  
572  	rsrc_id = bitmap_find_next_zero_area(rsrc->bmap, rsrc->max, offset, 1, 0);
573  	if (rsrc_id >= rsrc->max)
574  		return -ENOSPC;
575  
576  	bitmap_set(rsrc->bmap, rsrc_id, 1);
577  	pf_map[rsrc_id] = pcifunc;
578  
579  	return rsrc_id;
580  }
581  
582  int mcs_free_ctrlpktrule(struct mcs *mcs, struct mcs_free_ctrl_pkt_rule_req *req)
583  {
584  	u16 pcifunc = req->hdr.pcifunc;
585  	struct mcs_rsrc_map *map;
586  	u64 dis, reg;
587  	int id, rc;
588  
589  	reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_RULE_ENABLE : MCSX_PEX_TX_SLAVE_RULE_ENABLE;
590  	map = (req->dir == MCS_RX) ? &mcs->rx : &mcs->tx;
591  
592  	if (req->all) {
593  		for (id = 0; id < map->ctrlpktrule.max; id++) {
594  			if (map->ctrlpktrule2pf_map[id] != pcifunc)
595  				continue;
596  			mcs_free_rsrc(&map->ctrlpktrule, map->ctrlpktrule2pf_map, id, pcifunc);
597  			dis = mcs_reg_read(mcs, reg);
598  			dis &= ~BIT_ULL(id);
599  			mcs_reg_write(mcs, reg, dis);
600  		}
601  		return 0;
602  	}
603  
604  	rc = mcs_free_rsrc(&map->ctrlpktrule, map->ctrlpktrule2pf_map, req->rule_idx, pcifunc);
605  	dis = mcs_reg_read(mcs, reg);
606  	dis &= ~BIT_ULL(req->rule_idx);
607  	mcs_reg_write(mcs, reg, dis);
608  
609  	return rc;
610  }
611  
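/* Program one control-packet classification rule (EtherType, DA, DA range,
 * DA + EtherType combo or MAC) and set its enable bit. data0/data1/data2
 * carry the match values; DA-based rules must have bit 40 set in the
 * supplied address words or the request is rejected with -EINVAL.
 */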
612  int mcs_ctrlpktrule_write(struct mcs *mcs, struct mcs_ctrl_pkt_rule_write_req *req)
613  {
614  	u64 reg, enb;
615  	u64 idx;
616  
617  	switch (req->rule_type) {
618  	case MCS_CTRL_PKT_RULE_TYPE_ETH:
619  		req->data0 &= GENMASK(15, 0);
620  		if (req->data0 != ETH_P_PAE)
621  			return -EINVAL;
622  
623  		idx = req->rule_idx - MCS_CTRLPKT_ETYPE_RULE_OFFSET;
624  		reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_RULE_ETYPE_CFGX(idx) :
625  		      MCSX_PEX_TX_SLAVE_RULE_ETYPE_CFGX(idx);
626  
627  		mcs_reg_write(mcs, reg, req->data0);
628  		break;
629  	case MCS_CTRL_PKT_RULE_TYPE_DA:
630  		if (!(req->data0 & BIT_ULL(40)))
631  			return -EINVAL;
632  
633  		idx = req->rule_idx - MCS_CTRLPKT_DA_RULE_OFFSET;
634  		reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_RULE_DAX(idx) :
635  		      MCSX_PEX_TX_SLAVE_RULE_DAX(idx);
636  
637  		mcs_reg_write(mcs, reg, req->data0 & GENMASK_ULL(47, 0));
638  		break;
639  	case MCS_CTRL_PKT_RULE_TYPE_RANGE:
640  		if (!(req->data0 & BIT_ULL(40)) || !(req->data1 & BIT_ULL(40)))
641  			return -EINVAL;
642  
643  		idx = req->rule_idx - MCS_CTRLPKT_DA_RANGE_RULE_OFFSET;
644  		if (req->dir == MCS_RX) {
645  			reg = MCSX_PEX_RX_SLAVE_RULE_DA_RANGE_MINX(idx);
646  			mcs_reg_write(mcs, reg, req->data0 & GENMASK_ULL(47, 0));
647  			reg = MCSX_PEX_RX_SLAVE_RULE_DA_RANGE_MAXX(idx);
648  			mcs_reg_write(mcs, reg, req->data1 & GENMASK_ULL(47, 0));
649  		} else {
650  			reg = MCSX_PEX_TX_SLAVE_RULE_DA_RANGE_MINX(idx);
651  			mcs_reg_write(mcs, reg, req->data0 & GENMASK_ULL(47, 0));
652  			reg = MCSX_PEX_TX_SLAVE_RULE_DA_RANGE_MAXX(idx);
653  			mcs_reg_write(mcs, reg, req->data1 & GENMASK_ULL(47, 0));
654  		}
655  		break;
656  	case MCS_CTRL_PKT_RULE_TYPE_COMBO:
657  		req->data2 &= GENMASK(15, 0);
658  		if (req->data2 != ETH_P_PAE || !(req->data0 & BIT_ULL(40)) ||
659  		    !(req->data1 & BIT_ULL(40)))
660  			return -EINVAL;
661  
662  		idx = req->rule_idx - MCS_CTRLPKT_COMBO_RULE_OFFSET;
663  		if (req->dir == MCS_RX) {
664  			reg = MCSX_PEX_RX_SLAVE_RULE_COMBO_MINX(idx);
665  			mcs_reg_write(mcs, reg, req->data0 & GENMASK_ULL(47, 0));
666  			reg = MCSX_PEX_RX_SLAVE_RULE_COMBO_MAXX(idx);
667  			mcs_reg_write(mcs, reg, req->data1 & GENMASK_ULL(47, 0));
668  			reg = MCSX_PEX_RX_SLAVE_RULE_COMBO_ETX(idx);
669  			mcs_reg_write(mcs, reg, req->data2);
670  		} else {
671  			reg = MCSX_PEX_TX_SLAVE_RULE_COMBO_MINX(idx);
672  			mcs_reg_write(mcs, reg, req->data0 & GENMASK_ULL(47, 0));
673  			reg = MCSX_PEX_TX_SLAVE_RULE_COMBO_MAXX(idx);
674  			mcs_reg_write(mcs, reg, req->data1 & GENMASK_ULL(47, 0));
675  			reg = MCSX_PEX_TX_SLAVE_RULE_COMBO_ETX(idx);
676  			mcs_reg_write(mcs, reg, req->data2);
677  		}
678  		break;
679  	case MCS_CTRL_PKT_RULE_TYPE_MAC:
680  		if (!(req->data0 & BIT_ULL(40)))
681  			return -EINVAL;
682  
683  		idx = req->rule_idx - MCS_CTRLPKT_MAC_EN_RULE_OFFSET;
684  		reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_RULE_MAC :
685  		      MCSX_PEX_TX_SLAVE_RULE_MAC;
686  
687  		mcs_reg_write(mcs, reg, req->data0 & GENMASK_ULL(47, 0));
688  		break;
689  	}
690  
691  	reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_RULE_ENABLE : MCSX_PEX_TX_SLAVE_RULE_ENABLE;
692  
693  	enb = mcs_reg_read(mcs, reg);
694  	enb |= BIT_ULL(req->rule_idx);
695  	mcs_reg_write(mcs, reg, enb);
696  
697  	return 0;
698  }
699  
700  int mcs_free_rsrc(struct rsrc_bmap *rsrc, u16 *pf_map, int rsrc_id, u16 pcifunc)
701  {
702  	/* Check if the rsrc_id is mapped to PF/VF */
703  	if (pf_map[rsrc_id] != pcifunc)
704  		return -EINVAL;
705  
706  	rvu_free_rsrc(rsrc, rsrc_id);
707  	pf_map[rsrc_id] = 0;
708  	return 0;
709  }
710  
711  /* Free all the CAM resources mapped to this PF */
712  int mcs_free_all_rsrc(struct mcs *mcs, int dir, u16 pcifunc)
713  {
714  	struct mcs_rsrc_map *map;
715  	int id;
716  
717  	if (dir == MCS_RX)
718  		map = &mcs->rx;
719  	else
720  		map = &mcs->tx;
721  
722  	/* free tcam entries */
723  	for (id = 0; id < map->flow_ids.max; id++) {
724  		if (map->flowid2pf_map[id] != pcifunc)
725  			continue;
726  		mcs_free_rsrc(&map->flow_ids, map->flowid2pf_map,
727  			      id, pcifunc);
728  		mcs_ena_dis_flowid_entry(mcs, id, dir, false);
729  	}
730  
731  	/* free secy entries */
732  	for (id = 0; id < map->secy.max; id++) {
733  		if (map->secy2pf_map[id] != pcifunc)
734  			continue;
735  		mcs_free_rsrc(&map->secy, map->secy2pf_map,
736  			      id, pcifunc);
737  		mcs_clear_secy_plcy(mcs, id, dir);
738  	}
739  
740  	/* free sc entries */
741  	for (id = 0; id < map->secy.max; id++) {
742  		if (map->sc2pf_map[id] != pcifunc)
743  			continue;
744  		mcs_free_rsrc(&map->sc, map->sc2pf_map, id, pcifunc);
745  
746  		/* Disable SC CAM only on RX side */
747  		if (dir == MCS_RX)
748  			mcs_ena_dis_sc_cam_entry(mcs, id, false);
749  	}
750  
751  	/* free sa entries */
752  	for (id = 0; id < map->sa.max; id++) {
753  		if (map->sa2pf_map[id] != pcifunc)
754  			continue;
755  		mcs_free_rsrc(&map->sa, map->sa2pf_map, id, pcifunc);
756  	}
757  	return 0;
758  }
759  
760  int mcs_alloc_rsrc(struct rsrc_bmap *rsrc, u16 *pf_map, u16 pcifunc)
761  {
762  	int rsrc_id;
763  
764  	rsrc_id = rvu_alloc_rsrc(rsrc);
765  	if (rsrc_id < 0)
766  		return -ENOMEM;
767  	pf_map[rsrc_id] = pcifunc;
768  	return rsrc_id;
769  }
770  
771  int mcs_alloc_all_rsrc(struct mcs *mcs, u8 *flow_id, u8 *secy_id,
772  		       u8 *sc_id, u8 *sa1_id, u8 *sa2_id, u16 pcifunc, int dir)
773  {
774  	struct mcs_rsrc_map *map;
775  	int id;
776  
777  	if (dir == MCS_RX)
778  		map = &mcs->rx;
779  	else
780  		map = &mcs->tx;
781  
782  	id = mcs_alloc_rsrc(&map->flow_ids, map->flowid2pf_map, pcifunc);
783  	if (id < 0)
784  		return -ENOMEM;
785  	*flow_id = id;
786  
787  	id = mcs_alloc_rsrc(&map->secy, map->secy2pf_map, pcifunc);
788  	if (id < 0)
789  		return -ENOMEM;
790  	*secy_id = id;
791  
792  	id = mcs_alloc_rsrc(&map->sc, map->sc2pf_map, pcifunc);
793  	if (id < 0)
794  		return -ENOMEM;
795  	*sc_id = id;
796  
797  	id =  mcs_alloc_rsrc(&map->sa, map->sa2pf_map, pcifunc);
798  	if (id < 0)
799  		return -ENOMEM;
800  	*sa1_id = id;
801  
802  	id =  mcs_alloc_rsrc(&map->sa, map->sa2pf_map, pcifunc);
803  	if (id < 0)
804  		return -ENOMEM;
805  	*sa2_id = id;
806  
807  	return 0;
808  }
809  
810  static void cn10kb_mcs_tx_pn_wrapped_handler(struct mcs *mcs)
811  {
812  	struct mcs_intr_event event = { 0 };
813  	struct rsrc_bmap *sc_bmap;
814  	u64 val;
815  	int sc;
816  
817  	sc_bmap = &mcs->tx.sc;
818  
819  	event.mcs_id = mcs->mcs_id;
820  	event.intr_mask = MCS_CPM_TX_PACKET_XPN_EQ0_INT;
821  
822  	for_each_set_bit(sc, sc_bmap->bmap, mcs->hw->sc_entries) {
823  		val = mcs_reg_read(mcs, MCSX_CPM_TX_SLAVE_SA_MAP_MEM_0X(sc));
824  
825  		if (mcs->tx_sa_active[sc])
826  			/* SA_index1 was used and got expired */
827  			event.sa_id = (val >> 9) & 0xFF;
828  		else
829  			/* SA_index0 was used and got expired */
830  			event.sa_id = val & 0xFF;
831  
832  		event.pcifunc = mcs->tx.sa2pf_map[event.sa_id];
833  		mcs_add_intr_wq_entry(mcs, &event);
834  	}
835  }
836  
837  static void cn10kb_mcs_tx_pn_thresh_reached_handler(struct mcs *mcs)
838  {
839  	struct mcs_intr_event event = { 0 };
840  	struct rsrc_bmap *sc_bmap;
841  	u64 val, status;
842  	int sc;
843  
844  	sc_bmap = &mcs->tx.sc;
845  
846  	event.mcs_id = mcs->mcs_id;
847  	event.intr_mask = MCS_CPM_TX_PN_THRESH_REACHED_INT;
848  
849  	/* TX SA interrupt is raised only if autorekey is enabled.
850  	 * The MCS_CPM_TX_SLAVE_SA_MAP_MEM_0X[sc].tx_sa_active bit gets toggled
851  	 * when one of the two SAs mapped to the SC expires: tx_sa_active = 0
852  	 * means the SA at SA_index1 expired, otherwise the SA at SA_index0 did.
853  	 */
854  	for_each_set_bit(sc, sc_bmap->bmap, mcs->hw->sc_entries) {
855  		val = mcs_reg_read(mcs, MCSX_CPM_TX_SLAVE_SA_MAP_MEM_0X(sc));
856  		/* Process only SCs with auto rekey enabled */
857  		if (!((val >> 18) & 0x1))
858  			continue;
859  
860  		status = (val >> 21) & 0x1;
861  
862  		/* Check if tx_sa_active status had changed */
863  		if (status == mcs->tx_sa_active[sc])
864  			continue;
865  		/* SA_index0 is expired */
866  		if (status)
867  			event.sa_id = val & 0xFF;
868  		else
869  			event.sa_id = (val >> 9) & 0xFF;
870  
871  		event.pcifunc = mcs->tx.sa2pf_map[event.sa_id];
872  		mcs_add_intr_wq_entry(mcs, &event);
873  	}
874  }
875  
876  static void mcs_rx_pn_thresh_reached_handler(struct mcs *mcs)
877  {
878  	struct mcs_intr_event event = { 0 };
879  	int sa, reg;
880  	u64 intr;
881  
882  	/* Check expired SAs */
883  	for (reg = 0; reg < (mcs->hw->sa_entries / 64); reg++) {
884  		/* A set bit in *PN_THRESH_REACHEDX means the
885  		 * corresponding SA has expired.
886  		 */
887  		intr = mcs_reg_read(mcs, MCSX_CPM_RX_SLAVE_PN_THRESH_REACHEDX(reg));
888  		for (sa = 0; sa < 64; sa++) {
889  			if (!(intr & BIT_ULL(sa)))
890  				continue;
891  
892  			event.mcs_id = mcs->mcs_id;
893  			event.intr_mask = MCS_CPM_RX_PN_THRESH_REACHED_INT;
894  			event.sa_id = sa + (reg * 64);
895  			event.pcifunc = mcs->rx.sa2pf_map[event.sa_id];
896  			mcs_add_intr_wq_entry(mcs, &event);
897  		}
898  	}
899  }
900  
901  static void mcs_rx_misc_intr_handler(struct mcs *mcs, u64 intr)
902  {
903  	struct mcs_intr_event event = { 0 };
904  
905  	event.mcs_id = mcs->mcs_id;
906  	event.pcifunc = mcs->pf_map[0];
907  
908  	if (intr & MCS_CPM_RX_INT_SECTAG_V_EQ1)
909  		event.intr_mask = MCS_CPM_RX_SECTAG_V_EQ1_INT;
910  	if (intr & MCS_CPM_RX_INT_SECTAG_E_EQ0_C_EQ1)
911  		event.intr_mask |= MCS_CPM_RX_SECTAG_E_EQ0_C_EQ1_INT;
912  	if (intr & MCS_CPM_RX_INT_SL_GTE48)
913  		event.intr_mask |= MCS_CPM_RX_SECTAG_SL_GTE48_INT;
914  	if (intr & MCS_CPM_RX_INT_ES_EQ1_SC_EQ1)
915  		event.intr_mask |= MCS_CPM_RX_SECTAG_ES_EQ1_SC_EQ1_INT;
916  	if (intr & MCS_CPM_RX_INT_SC_EQ1_SCB_EQ1)
917  		event.intr_mask |= MCS_CPM_RX_SECTAG_SC_EQ1_SCB_EQ1_INT;
918  	if (intr & MCS_CPM_RX_INT_PACKET_XPN_EQ0)
919  		event.intr_mask |= MCS_CPM_RX_PACKET_XPN_EQ0_INT;
920  
921  	mcs_add_intr_wq_entry(mcs, &event);
922  }
923  
924  static void mcs_tx_misc_intr_handler(struct mcs *mcs, u64 intr)
925  {
926  	struct mcs_intr_event event = { 0 };
927  
928  	if (!(intr & MCS_CPM_TX_INT_SA_NOT_VALID))
929  		return;
930  
931  	event.mcs_id = mcs->mcs_id;
932  	event.pcifunc = mcs->pf_map[0];
933  
934  	event.intr_mask = MCS_CPM_TX_SA_NOT_VALID_INT;
935  
936  	mcs_add_intr_wq_entry(mcs, &event);
937  }
938  
939  void cn10kb_mcs_bbe_intr_handler(struct mcs *mcs, u64 intr,
940  				 enum mcs_direction dir)
941  {
942  	u64 val, reg;
943  	int lmac;
944  
945  	if (!(intr & 0x6ULL))
946  		return;
947  
948  	if (intr & BIT_ULL(1))
949  		reg = (dir == MCS_RX) ? MCSX_BBE_RX_SLAVE_DFIFO_OVERFLOW_0 :
950  					MCSX_BBE_TX_SLAVE_DFIFO_OVERFLOW_0;
951  	else
952  		reg = (dir == MCS_RX) ? MCSX_BBE_RX_SLAVE_PLFIFO_OVERFLOW_0 :
953  					MCSX_BBE_TX_SLAVE_PLFIFO_OVERFLOW_0;
954  	val = mcs_reg_read(mcs, reg);
955  
956  	/* Policy or data overflow occurred */
957  	for (lmac = 0; lmac < mcs->hw->lmac_cnt; lmac++) {
958  		if (!(val & BIT_ULL(lmac)))
959  			continue;
960  		dev_warn(mcs->dev, "BBE: Policy or data overflow occurred on lmac:%d\n", lmac);
961  	}
962  }
963  
964  void cn10kb_mcs_pab_intr_handler(struct mcs *mcs, u64 intr,
965  				 enum mcs_direction dir)
966  {
967  	int lmac;
968  
969  	if (!(intr & 0xFFFFFULL))
970  		return;
971  
972  	for (lmac = 0; lmac < mcs->hw->lmac_cnt; lmac++) {
973  		if (intr & BIT_ULL(lmac))
974  			dev_warn(mcs->dev, "PAB: overflow occurred on lmac:%d\n", lmac);
975  	}
976  }
977  
978  static irqreturn_t mcs_ip_intr_handler(int irq, void *mcs_irq)
979  {
980  	struct mcs *mcs = (struct mcs *)mcs_irq;
981  	u64 intr, cpm_intr, bbe_intr, pab_intr;
982  
983  	/* Disable the interrupt */
984  	mcs_reg_write(mcs, MCSX_IP_INT_ENA_W1C, BIT_ULL(0));
985  
986  	/* Check which block has the interrupt */
987  	intr = mcs_reg_read(mcs, MCSX_TOP_SLAVE_INT_SUM);
988  
989  	/* CPM RX */
990  	if (intr & MCS_CPM_RX_INT_ENA) {
991  		/* Check for PN thresh interrupt bit */
992  		cpm_intr = mcs_reg_read(mcs, MCSX_CPM_RX_SLAVE_RX_INT);
993  
994  		if (cpm_intr & MCS_CPM_RX_INT_PN_THRESH_REACHED)
995  			mcs_rx_pn_thresh_reached_handler(mcs);
996  
997  		if (cpm_intr & MCS_CPM_RX_INT_ALL)
998  			mcs_rx_misc_intr_handler(mcs, cpm_intr);
999  
1000  		/* Clear the interrupt */
1001  		mcs_reg_write(mcs, MCSX_CPM_RX_SLAVE_RX_INT, cpm_intr);
1002  	}
1003  
1004  	/* CPM TX */
1005  	if (intr & MCS_CPM_TX_INT_ENA) {
1006  		cpm_intr = mcs_reg_read(mcs, MCSX_CPM_TX_SLAVE_TX_INT);
1007  
1008  		if (cpm_intr & MCS_CPM_TX_INT_PN_THRESH_REACHED) {
1009  			if (mcs->hw->mcs_blks > 1)
1010  				cnf10kb_mcs_tx_pn_thresh_reached_handler(mcs);
1011  			else
1012  				cn10kb_mcs_tx_pn_thresh_reached_handler(mcs);
1013  		}
1014  
1015  		if (cpm_intr & MCS_CPM_TX_INT_SA_NOT_VALID)
1016  			mcs_tx_misc_intr_handler(mcs, cpm_intr);
1017  
1018  		if (cpm_intr & MCS_CPM_TX_INT_PACKET_XPN_EQ0) {
1019  			if (mcs->hw->mcs_blks > 1)
1020  				cnf10kb_mcs_tx_pn_wrapped_handler(mcs);
1021  			else
1022  				cn10kb_mcs_tx_pn_wrapped_handler(mcs);
1023  		}
1024  		/* Clear the interrupt */
1025  		mcs_reg_write(mcs, MCSX_CPM_TX_SLAVE_TX_INT, cpm_intr);
1026  	}
1027  
1028  	/* BBE RX */
1029  	if (intr & MCS_BBE_RX_INT_ENA) {
1030  		bbe_intr = mcs_reg_read(mcs, MCSX_BBE_RX_SLAVE_BBE_INT);
1031  		mcs->mcs_ops->mcs_bbe_intr_handler(mcs, bbe_intr, MCS_RX);
1032  
1033  		/* Clear the interrupt */
1034  		mcs_reg_write(mcs, MCSX_BBE_RX_SLAVE_BBE_INT_INTR_RW, 0);
1035  		mcs_reg_write(mcs, MCSX_BBE_RX_SLAVE_BBE_INT, bbe_intr);
1036  	}
1037  
1038  	/* BBE TX */
1039  	if (intr & MCS_BBE_TX_INT_ENA) {
1040  		bbe_intr = mcs_reg_read(mcs, MCSX_BBE_TX_SLAVE_BBE_INT);
1041  		mcs->mcs_ops->mcs_bbe_intr_handler(mcs, bbe_intr, MCS_TX);
1042  
1043  		/* Clear the interrupt */
1044  		mcs_reg_write(mcs, MCSX_BBE_TX_SLAVE_BBE_INT_INTR_RW, 0);
1045  		mcs_reg_write(mcs, MCSX_BBE_TX_SLAVE_BBE_INT, bbe_intr);
1046  	}
1047  
1048  	/* PAB RX */
1049  	if (intr & MCS_PAB_RX_INT_ENA) {
1050  		pab_intr = mcs_reg_read(mcs, MCSX_PAB_RX_SLAVE_PAB_INT);
1051  		mcs->mcs_ops->mcs_pab_intr_handler(mcs, pab_intr, MCS_RX);
1052  
1053  		/* Clear the interrupt */
1054  		mcs_reg_write(mcs, MCSX_PAB_RX_SLAVE_PAB_INT_INTR_RW, 0);
1055  		mcs_reg_write(mcs, MCSX_PAB_RX_SLAVE_PAB_INT, pab_intr);
1056  	}
1057  
1058  	/* PAB TX */
1059  	if (intr & MCS_PAB_TX_INT_ENA) {
1060  		pab_intr = mcs_reg_read(mcs, MCSX_PAB_TX_SLAVE_PAB_INT);
1061  		mcs->mcs_ops->mcs_pab_intr_handler(mcs, pab_intr, MCS_TX);
1062  
1063  		/* Clear the interrupt */
1064  		mcs_reg_write(mcs, MCSX_PAB_TX_SLAVE_PAB_INT_INTR_RW, 0);
1065  		mcs_reg_write(mcs, MCSX_PAB_TX_SLAVE_PAB_INT, pab_intr);
1066  	}
1067  
1068  	/* Clear and enable the interrupt */
1069  	mcs_reg_write(mcs, MCSX_IP_INT, BIT_ULL(0));
1070  	mcs_reg_write(mcs, MCSX_IP_INT_ENA_W1S, BIT_ULL(0));
1071  
1072  	return IRQ_HANDLED;
1073  }
1074  
1075  static void *alloc_mem(struct mcs *mcs, int n)
1076  {
1077  	return devm_kcalloc(mcs->dev, n, sizeof(u16), GFP_KERNEL);
1078  }
1079  
1080  static int mcs_alloc_struct_mem(struct mcs *mcs, struct mcs_rsrc_map *res)
1081  {
1082  	struct hwinfo *hw = mcs->hw;
1083  	int err;
1084  
1085  	res->flowid2pf_map = alloc_mem(mcs, hw->tcam_entries);
1086  	if (!res->flowid2pf_map)
1087  		return -ENOMEM;
1088  
1089  	res->secy2pf_map = alloc_mem(mcs, hw->secy_entries);
1090  	if (!res->secy2pf_map)
1091  		return -ENOMEM;
1092  
1093  	res->sc2pf_map = alloc_mem(mcs, hw->sc_entries);
1094  	if (!res->sc2pf_map)
1095  		return -ENOMEM;
1096  
1097  	res->sa2pf_map = alloc_mem(mcs, hw->sa_entries);
1098  	if (!res->sa2pf_map)
1099  		return -ENOMEM;
1100  
1101  	res->flowid2secy_map = alloc_mem(mcs, hw->tcam_entries);
1102  	if (!res->flowid2secy_map)
1103  		return -ENOMEM;
1104  
1105  	res->ctrlpktrule2pf_map = alloc_mem(mcs, MCS_MAX_CTRLPKT_RULES);
1106  	if (!res->ctrlpktrule2pf_map)
1107  		return -ENOMEM;
1108  
1109  	res->flow_ids.max = hw->tcam_entries - MCS_RSRC_RSVD_CNT;
1110  	err = rvu_alloc_bitmap(&res->flow_ids);
1111  	if (err)
1112  		return err;
1113  
1114  	res->secy.max = hw->secy_entries - MCS_RSRC_RSVD_CNT;
1115  	err = rvu_alloc_bitmap(&res->secy);
1116  	if (err)
1117  		return err;
1118  
1119  	res->sc.max = hw->sc_entries;
1120  	err = rvu_alloc_bitmap(&res->sc);
1121  	if (err)
1122  		return err;
1123  
1124  	res->sa.max = hw->sa_entries;
1125  	err = rvu_alloc_bitmap(&res->sa);
1126  	if (err)
1127  		return err;
1128  
1129  	res->ctrlpktrule.max = MCS_MAX_CTRLPKT_RULES;
1130  	err = rvu_alloc_bitmap(&res->ctrlpktrule);
1131  	if (err)
1132  		return err;
1133  
1134  	return 0;
1135  }
1136  
1137  static int mcs_register_interrupts(struct mcs *mcs)
1138  {
1139  	int ret = 0;
1140  
1141  	mcs->num_vec = pci_msix_vec_count(mcs->pdev);
1142  
1143  	ret = pci_alloc_irq_vectors(mcs->pdev, mcs->num_vec,
1144  				    mcs->num_vec, PCI_IRQ_MSIX);
1145  	if (ret < 0) {
1146  		dev_err(mcs->dev, "MCS Request for %d msix vector failed err:%d\n",
1147  			mcs->num_vec, ret);
1148  		return ret;
1149  	}
1150  
1151  	ret = request_irq(pci_irq_vector(mcs->pdev, mcs->hw->ip_vec),
1152  			  mcs_ip_intr_handler, 0, "MCS_IP", mcs);
1153  	if (ret) {
1154  		dev_err(mcs->dev, "MCS IP irq registration failed\n");
1155  		goto exit;
1156  	}
1157  
1158  	/* Enable MCS IP interrupts */
1159  	mcs_reg_write(mcs, MCSX_IP_INT_ENA_W1S, BIT_ULL(0));
1160  
1161  	/* Enable CPM Rx/Tx interrupts */
1162  	mcs_reg_write(mcs, MCSX_TOP_SLAVE_INT_SUM_ENB,
1163  		      MCS_CPM_RX_INT_ENA | MCS_CPM_TX_INT_ENA |
1164  		      MCS_BBE_RX_INT_ENA | MCS_BBE_TX_INT_ENA |
1165  		      MCS_PAB_RX_INT_ENA | MCS_PAB_TX_INT_ENA);
1166  
1167  	mcs_reg_write(mcs, MCSX_CPM_TX_SLAVE_TX_INT_ENB, 0x7ULL);
1168  	mcs_reg_write(mcs, MCSX_CPM_RX_SLAVE_RX_INT_ENB, 0x7FULL);
1169  
1170  	mcs_reg_write(mcs, MCSX_BBE_RX_SLAVE_BBE_INT_ENB, 0xFFULL);
1171  	mcs_reg_write(mcs, MCSX_BBE_TX_SLAVE_BBE_INT_ENB, 0xFFULL);
1172  
1173  	mcs_reg_write(mcs, MCSX_PAB_RX_SLAVE_PAB_INT_ENB, 0xFFFFFULL);
1174  	mcs_reg_write(mcs, MCSX_PAB_TX_SLAVE_PAB_INT_ENB, 0xFFFFFULL);
1175  
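	/* Per-SC shadow of the hardware tx_sa_active bit, used by the TX PN
	 * threshold and XPN-wrap handlers to tell which SA of the pair expired.
	 */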
1176  	mcs->tx_sa_active = alloc_mem(mcs, mcs->hw->sc_entries);
1177  	if (!mcs->tx_sa_active) {
1178  		ret = -ENOMEM;
1179  		goto free_irq;
1180  	}
1181  
1182  	return ret;
1183  
1184  free_irq:
1185  	free_irq(pci_irq_vector(mcs->pdev, mcs->hw->ip_vec), mcs);
1186  exit:
1187  	pci_free_irq_vectors(mcs->pdev);
1188  	mcs->num_vec = 0;
1189  	return ret;
1190  }
1191  
1192  int mcs_get_blkcnt(void)
1193  {
1194  	struct mcs *mcs;
1195  	int idmax = -ENODEV;
1196  
1197  	/* Check if an MCS block is present in hardware */
1198  	if (!pci_dev_present(mcs_id_table))
1199  		return 0;
1200  
1201  	list_for_each_entry(mcs, &mcs_list, mcs_list)
1202  		if (mcs->mcs_id > idmax)
1203  			idmax = mcs->mcs_id;
1204  
1205  	if (idmax < 0)
1206  		return 0;
1207  
1208  	return idmax + 1;
1209  }
1210  
1211  struct mcs *mcs_get_pdata(int mcs_id)
1212  {
1213  	struct mcs *mcs_dev;
1214  
1215  	list_for_each_entry(mcs_dev, &mcs_list, mcs_list) {
1216  		if (mcs_dev->mcs_id == mcs_id)
1217  			return mcs_dev;
1218  	}
1219  	return NULL;
1220  }
1221  
1222  bool is_mcs_bypass(int mcs_id)
1223  {
1224  	struct mcs *mcs_dev;
1225  
1226  	list_for_each_entry(mcs_dev, &mcs_list, mcs_list) {
1227  		if (mcs_dev->mcs_id == mcs_id)
1228  			return mcs_dev->bypass;
1229  	}
1230  	return true;
1231  }
1232  
1233  void mcs_set_port_cfg(struct mcs *mcs, struct mcs_port_cfg_set_req *req)
1234  {
1235  	u64 val = 0;
1236  
1237  	mcs_reg_write(mcs, MCSX_PAB_RX_SLAVE_PORT_CFGX(req->port_id),
1238  		      req->port_mode & MCS_PORT_MODE_MASK);
1239  
1240  	req->cstm_tag_rel_mode_sel &= 0x3;
1241  
1242  	if (mcs->hw->mcs_blks > 1) {
1243  		req->fifo_skid &= MCS_PORT_FIFO_SKID_MASK;
1244  		val = (u32)req->fifo_skid << 0x10;
1245  		val |= req->fifo_skid;
1246  		mcs_reg_write(mcs, MCSX_PAB_RX_SLAVE_FIFO_SKID_CFGX(req->port_id), val);
1247  		mcs_reg_write(mcs, MCSX_PEX_TX_SLAVE_CUSTOM_TAG_REL_MODE_SEL(req->port_id),
1248  			      req->cstm_tag_rel_mode_sel);
1249  		val = mcs_reg_read(mcs, MCSX_PEX_RX_SLAVE_PEX_CONFIGURATION);
1250  
1251  		if (req->custom_hdr_enb)
1252  			val |= BIT_ULL(req->port_id);
1253  		else
1254  			val &= ~BIT_ULL(req->port_id);
1255  
1256  		mcs_reg_write(mcs, MCSX_PEX_RX_SLAVE_PEX_CONFIGURATION, val);
1257  	} else {
1258  		val = mcs_reg_read(mcs, MCSX_PEX_TX_SLAVE_PORT_CONFIG(req->port_id));
1259  		val |= (req->cstm_tag_rel_mode_sel << 2);
1260  		mcs_reg_write(mcs, MCSX_PEX_TX_SLAVE_PORT_CONFIG(req->port_id), val);
1261  	}
1262  }
1263  
1264  void mcs_get_port_cfg(struct mcs *mcs, struct mcs_port_cfg_get_req *req,
1265  		      struct mcs_port_cfg_get_rsp *rsp)
1266  {
1267  	u64 reg = 0;
1268  
1269  	rsp->port_mode = mcs_reg_read(mcs, MCSX_PAB_RX_SLAVE_PORT_CFGX(req->port_id)) &
1270  			 MCS_PORT_MODE_MASK;
1271  
1272  	if (mcs->hw->mcs_blks > 1) {
1273  		reg = MCSX_PAB_RX_SLAVE_FIFO_SKID_CFGX(req->port_id);
1274  		rsp->fifo_skid = mcs_reg_read(mcs, reg) & MCS_PORT_FIFO_SKID_MASK;
1275  		reg = MCSX_PEX_TX_SLAVE_CUSTOM_TAG_REL_MODE_SEL(req->port_id);
1276  		rsp->cstm_tag_rel_mode_sel = mcs_reg_read(mcs, reg) & 0x3;
1277  		if (mcs_reg_read(mcs, MCSX_PEX_RX_SLAVE_PEX_CONFIGURATION) & BIT_ULL(req->port_id))
1278  			rsp->custom_hdr_enb = 1;
1279  	} else {
1280  		reg = MCSX_PEX_TX_SLAVE_PORT_CONFIG(req->port_id);
1281  		rsp->cstm_tag_rel_mode_sel = mcs_reg_read(mcs, reg) >> 2;
1282  	}
1283  
1284  	rsp->port_id = req->port_id;
1285  	rsp->mcs_id = req->mcs_id;
1286  }
1287  
1288  void mcs_get_custom_tag_cfg(struct mcs *mcs, struct mcs_custom_tag_cfg_get_req *req,
1289  			    struct mcs_custom_tag_cfg_get_rsp *rsp)
1290  {
1291  	u64 reg = 0, val = 0;
1292  	u8 idx;
1293  
1294  	for (idx = 0; idx < MCS_MAX_CUSTOM_TAGS; idx++) {
1295  		if (mcs->hw->mcs_blks > 1)
1296  			reg  = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_CUSTOM_TAGX(idx) :
1297  				MCSX_PEX_TX_SLAVE_CUSTOM_TAGX(idx);
1298  		else
1299  			reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_VLAN_CFGX(idx) :
1300  				MCSX_PEX_TX_SLAVE_VLAN_CFGX(idx);
1301  
1302  		val = mcs_reg_read(mcs, reg);
1303  		if (mcs->hw->mcs_blks > 1) {
1304  			rsp->cstm_etype[idx] = val & GENMASK(15, 0);
1305  			rsp->cstm_indx[idx] = (val >> 0x16) & 0x3;
1306  			reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_ETYPE_ENABLE :
1307  				MCSX_PEX_TX_SLAVE_ETYPE_ENABLE;
1308  			rsp->cstm_etype_en = mcs_reg_read(mcs, reg) & 0xFF;
1309  		} else {
1310  			rsp->cstm_etype[idx] = (val >> 0x1) & GENMASK(15, 0);
1311  			rsp->cstm_indx[idx] = (val >> 0x11) & 0x3;
1312  			rsp->cstm_etype_en |= (val & 0x1) << idx;
1313  		}
1314  	}
1315  
1316  	rsp->mcs_id = req->mcs_id;
1317  	rsp->dir = req->dir;
1318  }
1319  
1320  void mcs_reset_port(struct mcs *mcs, u8 port_id, u8 reset)
1321  {
1322  	u64 reg = MCSX_MCS_TOP_SLAVE_PORT_RESET(port_id);
1323  
1324  	mcs_reg_write(mcs, reg, reset & 0x1);
1325  }
1326  
1327  /* Set lmac to bypass/operational mode */
1328  void mcs_set_lmac_mode(struct mcs *mcs, int lmac_id, u8 mode)
1329  {
1330  	u64 reg;
1331  	int id = lmac_id * 2;
1332  
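	/* Each LMAC maps to two channel-config entries (2 * lmac_id and
	 * 2 * lmac_id + 1); program both with the requested mode.
	 */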
1333  	reg = MCSX_MCS_TOP_SLAVE_CHANNEL_CFG(id);
1334  	mcs_reg_write(mcs, reg, (u64)mode);
1335  	reg = MCSX_MCS_TOP_SLAVE_CHANNEL_CFG((id + 1));
1336  	mcs_reg_write(mcs, reg, (u64)mode);
1337  }
1338  
1339  void mcs_pn_threshold_set(struct mcs *mcs, struct mcs_set_pn_threshold *pn)
1340  {
1341  	u64 reg;
1342  
1343  	if (pn->dir == MCS_RX)
1344  		reg = pn->xpn ? MCSX_CPM_RX_SLAVE_XPN_THRESHOLD : MCSX_CPM_RX_SLAVE_PN_THRESHOLD;
1345  	else
1346  		reg = pn->xpn ? MCSX_CPM_TX_SLAVE_XPN_THRESHOLD : MCSX_CPM_TX_SLAVE_PN_THRESHOLD;
1347  
1348  	mcs_reg_write(mcs, reg, pn->threshold);
1349  }
1350  
1351  void cn10kb_mcs_parser_cfg(struct mcs *mcs)
1352  {
1353  	u64 reg, val;
1354  
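	/* VLAN_CFG word as written below: bit 0 enables the entry and the
	 * EtherType sits in bits [16:1]; bits 17 and 18 are assumed to flag
	 * the CTag and STag tag types respectively.
	 */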
1355  	/* VLAN CTag */
1356  	val = BIT_ULL(0) | (0x8100ull & 0xFFFF) << 1 | BIT_ULL(17);
1357  	/* RX */
1358  	reg = MCSX_PEX_RX_SLAVE_VLAN_CFGX(0);
1359  	mcs_reg_write(mcs, reg, val);
1360  
1361  	/* TX */
1362  	reg = MCSX_PEX_TX_SLAVE_VLAN_CFGX(0);
1363  	mcs_reg_write(mcs, reg, val);
1364  
1365  	/* VLAN STag */
1366  	val = BIT_ULL(0) | (0x88a8ull & 0xFFFF) << 1 | BIT_ULL(18);
1367  	/* RX */
1368  	reg = MCSX_PEX_RX_SLAVE_VLAN_CFGX(1);
1369  	mcs_reg_write(mcs, reg, val);
1370  
1371  	/* TX */
1372  	reg = MCSX_PEX_TX_SLAVE_VLAN_CFGX(1);
1373  	mcs_reg_write(mcs, reg, val);
1374  }
1375  
1376  static void mcs_lmac_init(struct mcs *mcs, int lmac_id)
1377  {
1378  	u64 reg;
1379  
1380  	/* Port mode 25GB */
1381  	reg = MCSX_PAB_RX_SLAVE_PORT_CFGX(lmac_id);
1382  	mcs_reg_write(mcs, reg, 0);
1383  
1384  	if (mcs->hw->mcs_blks > 1) {
1385  		reg = MCSX_PAB_RX_SLAVE_FIFO_SKID_CFGX(lmac_id);
1386  		mcs_reg_write(mcs, reg, 0xe000e);
1387  		return;
1388  	}
1389  
1390  	reg = MCSX_PAB_TX_SLAVE_PORT_CFGX(lmac_id);
1391  	mcs_reg_write(mcs, reg, 0);
1392  }
1393  
1394  int mcs_set_lmac_channels(int mcs_id, u16 base)
1395  {
1396  	struct mcs *mcs;
1397  	int lmac;
1398  	u64 cfg;
1399  
1400  	mcs = mcs_get_pdata(mcs_id);
1401  	if (!mcs)
1402  		return -ENODEV;
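	/* Each LMAC gets a contiguous block of 16 channels starting at 'base';
	 * the range field is programmed as log2(16).
	 */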
1403  	for (lmac = 0; lmac < mcs->hw->lmac_cnt; lmac++) {
1404  		cfg = mcs_reg_read(mcs, MCSX_LINK_LMACX_CFG(lmac));
1405  		cfg &= ~(MCSX_LINK_LMAC_BASE_MASK | MCSX_LINK_LMAC_RANGE_MASK);
1406  		cfg |=	FIELD_PREP(MCSX_LINK_LMAC_RANGE_MASK, ilog2(16));
1407  		cfg |=	FIELD_PREP(MCSX_LINK_LMAC_BASE_MASK, base);
1408  		mcs_reg_write(mcs, MCSX_LINK_LMACX_CFG(lmac), cfg);
1409  		base += 16;
1410  	}
1411  	return 0;
1412  }
1413  
1414  static int mcs_x2p_calibration(struct mcs *mcs)
1415  {
1416  	unsigned long timeout = jiffies + usecs_to_jiffies(20000);
1417  	int i, err = 0;
1418  	u64 val;
1419  
1420  	/* set X2P calibration */
1421  	val = mcs_reg_read(mcs, MCSX_MIL_GLOBAL);
1422  	val |= BIT_ULL(5);
1423  	mcs_reg_write(mcs, MCSX_MIL_GLOBAL, val);
1424  
1425  	/* Wait for calibration to complete */
1426  	while (!(mcs_reg_read(mcs, MCSX_MIL_RX_GBL_STATUS) & BIT_ULL(0))) {
1427  		if (time_before(jiffies, timeout)) {
1428  			usleep_range(80, 100);
1429  			continue;
1430  		} else {
1431  			err = -EBUSY;
1432  			dev_err(mcs->dev, "MCS X2P calibration failed..ignoring\n");
1433  			return err;
1434  		}
1435  	}
1436  
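	/* Bit 0 of RX_GBL_STATUS (polled above) is the global done flag; bits
	 * 1..mcs_x2p_intf are assumed to report per-X2P-interface completion.
	 */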
1437  	val = mcs_reg_read(mcs, MCSX_MIL_RX_GBL_STATUS);
1438  	for (i = 0; i < mcs->hw->mcs_x2p_intf; i++) {
1439  		if (val & BIT_ULL(1 + i))
1440  			continue;
1441  		err = -EBUSY;
1442  		dev_err(mcs->dev, "MCS:%d didn't respond to X2P calibration\n", i);
1443  	}
1444  	/* Clear X2P calibrate */
1445  	mcs_reg_write(mcs, MCSX_MIL_GLOBAL, mcs_reg_read(mcs, MCSX_MIL_GLOBAL) & ~BIT_ULL(5));
1446  
1447  	return err;
1448  }
1449  
1450  static void mcs_set_external_bypass(struct mcs *mcs, bool bypass)
1451  {
1452  	u64 val;
1453  
1454  	/* Set MCS to external bypass */
1455  	val = mcs_reg_read(mcs, MCSX_MIL_GLOBAL);
1456  	if (bypass)
1457  		val |= BIT_ULL(6);
1458  	else
1459  		val &= ~BIT_ULL(6);
1460  	mcs_reg_write(mcs, MCSX_MIL_GLOBAL, val);
1461  	mcs->bypass = bypass;
1462  }
1463  
1464  static void mcs_global_cfg(struct mcs *mcs)
1465  {
1466  	/* Disable external bypass */
1467  	mcs_set_external_bypass(mcs, false);
1468  
1469  	/* Reset TX/RX stats memory */
1470  	mcs_reg_write(mcs, MCSX_CSE_RX_SLAVE_STATS_CLEAR, 0x1F);
1471  	mcs_reg_write(mcs, MCSX_CSE_TX_SLAVE_STATS_CLEAR, 0x1F);
1472  
1473  	/* Set MCS to perform standard IEEE 802.1AE MACsec processing */
1474  	if (mcs->hw->mcs_blks == 1) {
1475  		mcs_reg_write(mcs, MCSX_IP_MODE, BIT_ULL(3));
1476  		return;
1477  	}
1478  
1479  	mcs_reg_write(mcs, MCSX_BBE_RX_SLAVE_CAL_ENTRY, 0xe4);
1480  	mcs_reg_write(mcs, MCSX_BBE_RX_SLAVE_CAL_LEN, 4);
1481  }
1482  
1483  void cn10kb_mcs_set_hw_capabilities(struct mcs *mcs)
1484  {
1485  	struct hwinfo *hw = mcs->hw;
1486  
1487  	hw->tcam_entries = 128;		/* TCAM entries */
1488  	hw->secy_entries  = 128;	/* SecY entries */
1489  	hw->sc_entries = 128;		/* SC CAM entries */
1490  	hw->sa_entries = 256;		/* SA entries */
1491  	hw->lmac_cnt = 20;		/* lmacs/ports per mcs block */
1492  	hw->mcs_x2p_intf = 5;		/* X2P calibration interfaces */
1493  	hw->mcs_blks = 1;		/* MCS blocks */
1494  	hw->ip_vec = MCS_CN10KB_INT_VEC_IP; /* IP vector */
1495  }
1496  
1497  static struct mcs_ops cn10kb_mcs_ops = {
1498  	.mcs_set_hw_capabilities	= cn10kb_mcs_set_hw_capabilities,
1499  	.mcs_parser_cfg			= cn10kb_mcs_parser_cfg,
1500  	.mcs_tx_sa_mem_map_write	= cn10kb_mcs_tx_sa_mem_map_write,
1501  	.mcs_rx_sa_mem_map_write	= cn10kb_mcs_rx_sa_mem_map_write,
1502  	.mcs_flowid_secy_map		= cn10kb_mcs_flowid_secy_map,
1503  	.mcs_bbe_intr_handler		= cn10kb_mcs_bbe_intr_handler,
1504  	.mcs_pab_intr_handler		= cn10kb_mcs_pab_intr_handler,
1505  };
1506  
1507  static int mcs_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1508  {
1509  	struct device *dev = &pdev->dev;
1510  	int lmac, err = 0;
1511  	struct mcs *mcs;
1512  
1513  	mcs = devm_kzalloc(dev, sizeof(*mcs), GFP_KERNEL);
1514  	if (!mcs)
1515  		return -ENOMEM;
1516  
1517  	mcs->hw = devm_kzalloc(dev, sizeof(struct hwinfo), GFP_KERNEL);
1518  	if (!mcs->hw)
1519  		return -ENOMEM;
1520  
1521  	err = pci_enable_device(pdev);
1522  	if (err) {
1523  		dev_err(dev, "Failed to enable PCI device\n");
1524  		pci_set_drvdata(pdev, NULL);
1525  		return err;
1526  	}
1527  
1528  	err = pci_request_regions(pdev, DRV_NAME);
1529  	if (err) {
1530  		dev_err(dev, "PCI request regions failed 0x%x\n", err);
1531  		goto exit;
1532  	}
1533  
1534  	mcs->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
1535  	if (!mcs->reg_base) {
1536  		dev_err(dev, "mcs: Cannot map CSR memory space, aborting\n");
1537  		err = -ENOMEM;
1538  		goto exit;
1539  	}
1540  
1541  	pci_set_drvdata(pdev, mcs);
1542  	mcs->pdev = pdev;
1543  	mcs->dev = &pdev->dev;
1544  
1545  	if (pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_B)
1546  		mcs->mcs_ops = &cn10kb_mcs_ops;
1547  	else
1548  		mcs->mcs_ops = cnf10kb_get_mac_ops();
1549  
1550  	/* Set hardware capabilities */
1551  	mcs->mcs_ops->mcs_set_hw_capabilities(mcs);
1552  
1553  	mcs_global_cfg(mcs);
1554  
1555  	/* Perform X2P calibration */
1556  	err = mcs_x2p_calibration(mcs);
1557  	if (err)
1558  		goto err_x2p;
1559  
1560  	mcs->mcs_id = (pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM) >> 24)
1561  			& MCS_ID_MASK;
1562  
1563  	/* Set mcs tx side resources */
1564  	err = mcs_alloc_struct_mem(mcs, &mcs->tx);
1565  	if (err)
1566  		goto err_x2p;
1567  
1568  	/* Set mcs rx side resources */
1569  	err = mcs_alloc_struct_mem(mcs, &mcs->rx);
1570  	if (err)
1571  		goto err_x2p;
1572  
1573  	/* per port config */
1574  	for (lmac = 0; lmac < mcs->hw->lmac_cnt; lmac++)
1575  		mcs_lmac_init(mcs, lmac);
1576  
1577  	/* Parser configuration */
1578  	mcs->mcs_ops->mcs_parser_cfg(mcs);
1579  
1580  	err = mcs_register_interrupts(mcs);
1581  	if (err)
1582  		goto exit;
1583  
1584  	list_add(&mcs->mcs_list, &mcs_list);
1585  	mutex_init(&mcs->stats_lock);
1586  
1587  	return 0;
1588  
1589  err_x2p:
1590  	/* Enable external bypass */
1591  	mcs_set_external_bypass(mcs, true);
1592  exit:
1593  	pci_release_regions(pdev);
1594  	pci_disable_device(pdev);
1595  	pci_set_drvdata(pdev, NULL);
1596  	return err;
1597  }
1598  
1599  static void mcs_remove(struct pci_dev *pdev)
1600  {
1601  	struct mcs *mcs = pci_get_drvdata(pdev);
1602  
1603  	/* Set MCS to external bypass */
1604  	mcs_set_external_bypass(mcs, true);
1605  	free_irq(pci_irq_vector(pdev, mcs->hw->ip_vec), mcs);
1606  	pci_free_irq_vectors(pdev);
1607  	pci_release_regions(pdev);
1608  	pci_disable_device(pdev);
1609  	pci_set_drvdata(pdev, NULL);
1610  }
1611  
1612  struct pci_driver mcs_driver = {
1613  	.name = DRV_NAME,
1614  	.id_table = mcs_id_table,
1615  	.probe = mcs_probe,
1616  	.remove = mcs_remove,
1617  };
1618