1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell RVU Admin Function driver
3  *
4  * Copyright (C) 2022 Marvell.
5  *
6  */
7 
8 #include <linux/bitfield.h>
9 #include <linux/module.h>
10 #include <linux/pci.h>
11 #include <linux/firmware.h>
12 #include <linux/stddef.h>
13 #include <linux/debugfs.h>
14 
15 #include "rvu_struct.h"
16 #include "rvu_reg.h"
17 #include "rvu.h"
18 #include "npc.h"
19 #include "cgx.h"
20 #include "rvu_npc_fs.h"
21 #include "rvu_npc_hash.h"
22 
/* rvu_npc_wide_extract - Extract a bit field from a wide (multi-word) value.
 *
 * Pulls @width_bits bits starting at @start_bit out of @input, an array of
 * 64-bit words in little-endian word order (word 0 holds bits 0-63).  The
 * field may straddle at most two adjacent words.  @width_bits must be
 * 1..64; the 128-bit intermediate keeps the mask computation defined even
 * for width_bits == 64, where a plain 64-bit shift by 64 would be UB.
 */
static u64 rvu_npc_wide_extract(const u64 input[], size_t start_bit,
				size_t width_bits)
{
	const u64 mask = ~(u64)((~(__uint128_t)0) << width_bits);
	const size_t msb = start_bit + width_bits - 1;	/* last bit of field */
	const size_t lword = start_bit >> 6;	/* word holding the field LSB */
	const size_t uword = msb >> 6;		/* word holding the field MSB */
	size_t lbits;
	u64 hi, lo;

	/* Fast path: field is fully contained in a single word */
	if (lword == uword)
		return (input[lword] >> (start_bit & 63)) & mask;

	/* Field spans two words: stitch upper word above the lower part */
	lbits = 64 - (start_bit & 63);	/* number of bits from lower word */
	hi = input[uword];
	lo = (input[lword] >> (start_bit & 63));
	return ((hi << lbits) | lo) & mask;
}
41 
42 static void rvu_npc_lshift_key(u64 *key, size_t key_bit_len)
43 {
44 	u64 prev_orig_word = 0;
45 	u64 cur_orig_word = 0;
46 	size_t extra = key_bit_len % 64;
47 	size_t max_idx = key_bit_len / 64;
48 	size_t i;
49 
50 	if (extra)
51 		max_idx++;
52 
53 	for (i = 0; i < max_idx; i++) {
54 		cur_orig_word = key[i];
55 		key[i] = key[i] << 1;
56 		key[i] |= ((prev_orig_word >> 63) & 0x1);
57 		prev_orig_word = cur_orig_word;
58 	}
59 }
60 
61 static u32 rvu_npc_toeplitz_hash(const u64 *data, u64 *key, size_t data_bit_len,
62 				 size_t key_bit_len)
63 {
64 	u32 hash_out = 0;
65 	u64 temp_data = 0;
66 	int i;
67 
68 	for (i = data_bit_len - 1; i >= 0; i--) {
69 		temp_data = (data[i / 64]);
70 		temp_data = temp_data >> (i % 64);
71 		temp_data &= 0x1;
72 		if (temp_data)
73 			hash_out ^= (u32)(rvu_npc_wide_extract(key, key_bit_len - 32, 32));
74 
75 		rvu_npc_lshift_key(key, key_bit_len);
76 	}
77 
78 	return hash_out;
79 }
80 
/**
 *	npc_field_hash_calc - Compute the 32-bit field hash for hash extract.
 *	@ldata: Two 64-bit words of layer data to hash.
 *	@rsp: Field hash info (secret key, per-hash masks and control words).
 *	@intf: NPC interface index.
 *	@hash_idx: Hash instance index.
 *
 *	Assembles a 159-bit Toeplitz key from the three secret-key words
 *	(note the 31/33-bit splice below: only 159 of the 192 key bits are
 *	used), masks the input data, runs the Toeplitz hash, then applies
 *	the per-hash control word: upper 32 bits are an AND mask, lower
 *	32 bits an additive offset.
 *	Return: 32-bit field hash.
 */
u32 npc_field_hash_calc(u64 *ldata, struct npc_get_field_hash_info_rsp rsp,
			u8 intf, u8 hash_idx)
{
	u64 hash_key[3];
	u64 data_padded[2];
	u32 field_hash;

	/* Splice secret_key[2..0] into a 159-bit key, LSBs in hash_key[0] */
	hash_key[0] = rsp.secret_key[1] << 31;
	hash_key[0] |= rsp.secret_key[2];
	hash_key[1] = rsp.secret_key[1] >> 33;
	hash_key[1] |= rsp.secret_key[0] << 31;
	hash_key[2] = rsp.secret_key[0] >> 33;

	/* Only hash the bits selected by the per-interface hash mask */
	data_padded[0] = rsp.hash_mask[intf][hash_idx][0] & ldata[0];
	data_padded[1] = rsp.hash_mask[intf][hash_idx][1] & ldata[1];
	field_hash = rvu_npc_toeplitz_hash(data_padded, hash_key, 128, 159);

	field_hash &= FIELD_GET(GENMASK(63, 32), rsp.hash_ctrl[intf][hash_idx]);
	field_hash += FIELD_GET(GENMASK(31, 0), rsp.hash_ctrl[intf][hash_idx]);
	return field_hash;
}
102 
103 static u64 npc_update_use_hash(struct rvu *rvu, int blkaddr,
104 			       u8 intf, int lid, int lt, int ld)
105 {
106 	u8 hdr, key;
107 	u64 cfg;
108 
109 	cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_LIDX_LTX_LDX_CFG(intf, lid, lt, ld));
110 	hdr = FIELD_GET(NPC_HDR_OFFSET, cfg);
111 	key = FIELD_GET(NPC_KEY_OFFSET, cfg);
112 
113 	/* Update use_hash(bit-20) to 'true' and
114 	 * bytesm1(bit-16:19) to '0x3' in KEX_LD_CFG
115 	 */
116 	cfg = KEX_LD_CFG_USE_HASH(0x1, 0x03,
117 				  hdr, 0x1, 0x0, key);
118 
119 	return cfg;
120 }
121 
122 static void npc_program_mkex_hash_rx(struct rvu *rvu, int blkaddr,
123 				     u8 intf)
124 {
125 	struct npc_mcam_kex_hash *mkex_hash = rvu->kpu.mkex_hash;
126 	int lid, lt, ld, hash_cnt = 0;
127 
128 	if (is_npc_intf_tx(intf))
129 		return;
130 
131 	/* Program HASH_CFG */
132 	for (lid = 0; lid < NPC_MAX_LID; lid++) {
133 		for (lt = 0; lt < NPC_MAX_LT; lt++) {
134 			for (ld = 0; ld < NPC_MAX_LD; ld++) {
135 				if (mkex_hash->lid_lt_ld_hash_en[intf][lid][lt][ld]) {
136 					u64 cfg;
137 
138 					if (hash_cnt == NPC_MAX_HASH)
139 						return;
140 
141 					cfg = npc_update_use_hash(rvu, blkaddr,
142 								  intf, lid, lt, ld);
143 					/* Set updated KEX configuration */
144 					SET_KEX_LD(intf, lid, lt, ld, cfg);
145 					/* Set HASH configuration */
146 					SET_KEX_LD_HASH(intf, ld,
147 							mkex_hash->hash[intf][ld]);
148 					SET_KEX_LD_HASH_MASK(intf, ld, 0,
149 							     mkex_hash->hash_mask[intf][ld][0]);
150 					SET_KEX_LD_HASH_MASK(intf, ld, 1,
151 							     mkex_hash->hash_mask[intf][ld][1]);
152 					SET_KEX_LD_HASH_CTRL(intf, ld,
153 							     mkex_hash->hash_ctrl[intf][ld]);
154 
155 					hash_cnt++;
156 				}
157 			}
158 		}
159 	}
160 }
161 
162 static void npc_program_mkex_hash_tx(struct rvu *rvu, int blkaddr,
163 				     u8 intf)
164 {
165 	struct npc_mcam_kex_hash *mkex_hash = rvu->kpu.mkex_hash;
166 	int lid, lt, ld, hash_cnt = 0;
167 
168 	if (is_npc_intf_rx(intf))
169 		return;
170 
171 	/* Program HASH_CFG */
172 	for (lid = 0; lid < NPC_MAX_LID; lid++) {
173 		for (lt = 0; lt < NPC_MAX_LT; lt++) {
174 			for (ld = 0; ld < NPC_MAX_LD; ld++)
175 				if (mkex_hash->lid_lt_ld_hash_en[intf][lid][lt][ld]) {
176 					u64 cfg;
177 
178 					if (hash_cnt == NPC_MAX_HASH)
179 						return;
180 
181 					cfg = npc_update_use_hash(rvu, blkaddr,
182 								  intf, lid, lt, ld);
183 					/* Set updated KEX configuration */
184 					SET_KEX_LD(intf, lid, lt, ld, cfg);
185 					/* Set HASH configuration */
186 					SET_KEX_LD_HASH(intf, ld,
187 							mkex_hash->hash[intf][ld]);
188 					SET_KEX_LD_HASH_MASK(intf, ld, 0,
189 							     mkex_hash->hash_mask[intf][ld][0]);
190 					SET_KEX_LD_HASH_MASK(intf, ld, 1,
191 							     mkex_hash->hash_mask[intf][ld][1]);
192 					SET_KEX_LD_HASH_CTRL(intf, ld,
193 							     mkex_hash->hash_ctrl[intf][ld]);
194 					hash_cnt++;
195 				}
196 		}
197 	}
198 }
199 
200 void npc_config_secret_key(struct rvu *rvu, int blkaddr)
201 {
202 	struct hw_cap *hwcap = &rvu->hw->cap;
203 	struct rvu_hwinfo *hw = rvu->hw;
204 	u8 intf;
205 
206 	if (!hwcap->npc_hash_extract)
207 		return;
208 
209 	for (intf = 0; intf < hw->npc_intfs; intf++) {
210 		rvu_write64(rvu, blkaddr, NPC_AF_INTFX_SECRET_KEY0(intf),
211 			    RVU_NPC_HASH_SECRET_KEY0);
212 		rvu_write64(rvu, blkaddr, NPC_AF_INTFX_SECRET_KEY1(intf),
213 			    RVU_NPC_HASH_SECRET_KEY1);
214 		rvu_write64(rvu, blkaddr, NPC_AF_INTFX_SECRET_KEY2(intf),
215 			    RVU_NPC_HASH_SECRET_KEY2);
216 	}
217 }
218 
219 void npc_program_mkex_hash(struct rvu *rvu, int blkaddr)
220 {
221 	struct hw_cap *hwcap = &rvu->hw->cap;
222 	struct rvu_hwinfo *hw = rvu->hw;
223 	u8 intf;
224 
225 	if (!hwcap->npc_hash_extract)
226 		return;
227 
228 	for (intf = 0; intf < hw->npc_intfs; intf++) {
229 		npc_program_mkex_hash_rx(rvu, blkaddr, intf);
230 		npc_program_mkex_hash_tx(rvu, blkaddr, intf);
231 	}
232 }
233 
/**
 *	npc_update_field_hash - Substitute hashed values for wide fields.
 *	@rvu: Resource virtualization unit.
 *	@intf: NPC interface index.
 *	@entry: MCAM entry being built; hashed field values are written here.
 *	@blkaddr: NPC block address.
 *	@features: Bitmap of flow features present in the rule.
 *	@pkt: Packet field values from the rule.
 *	@mask: Packet field masks from the rule.
 *	@opkt: Output copy of packet fields (original, pre-hash values).
 *	@omask: Output copy of field masks (original, pre-hash values).
 *
 *	When hash extract is enabled, 128-bit IPv6 source/destination
 *	addresses are reduced to a 32-bit hash in the MCAM key.  This
 *	recomputes that hash in software for the rule's address so the
 *	entry matches what hardware will extract, and preserves the
 *	original address/mask in @opkt/@omask.
 */
void npc_update_field_hash(struct rvu *rvu, u8 intf,
			   struct mcam_entry *entry,
			   int blkaddr,
			   u64 features,
			   struct flow_msg *pkt,
			   struct flow_msg *mask,
			   struct flow_msg *opkt,
			   struct flow_msg *omask)
{
	struct npc_mcam_kex_hash *mkex_hash = rvu->kpu.mkex_hash;
	struct npc_get_field_hash_info_req req;
	struct npc_get_field_hash_info_rsp rsp;
	u64 ldata[2], cfg;
	u32 field_hash;
	u8 hash_idx;

	if (!rvu->hw->cap.npc_hash_extract) {
		dev_dbg(rvu->dev, "%s: Field hash extract feature is not supported\n", __func__);
		return;
	}

	/* NOTE(review): only req.intf is initialized here; the mbox handler
	 * reads just req->intf, so the remaining fields stay unused —
	 * confirm if the request struct ever grows read fields.
	 */
	req.intf = intf;
	rvu_mbox_handler_npc_get_field_hash_info(rvu, &req, &rsp);

	for (hash_idx = 0; hash_idx < NPC_MAX_HASH; hash_idx++) {
		cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_HASHX_CFG(intf, hash_idx));
		/* Bit 11 = LID enable, bit 12 = LTYPE enable (per register
		 * field decode below); skip hash units not fully enabled.
		 */
		if ((cfg & BIT_ULL(11)) && (cfg & BIT_ULL(12))) {
			u8 lid = (cfg & GENMASK_ULL(10, 8)) >> 8;
			u8 ltype = (cfg & GENMASK_ULL(7, 4)) >> 4;
			u8 ltype_mask = cfg & GENMASK_ULL(3, 0);

			if (mkex_hash->lid_lt_ld_hash_en[intf][lid][ltype][hash_idx]) {
				switch (ltype & ltype_mask) {
				/* If hash extract enabled is supported for IPv6 then
				 * 128 bit IPv6 source and destination addressed
				 * is hashed to 32 bit value.
				 */
				case NPC_LT_LC_IP6:
					/* ld[0] == hash_idx[0] == Source IPv6
					 * ld[1] == hash_idx[1] == Destination IPv6
					 */
					if ((features & BIT_ULL(NPC_SIP_IPV6)) && !hash_idx) {
						u32 src_ip[IPV6_WORDS];

						/* Pack the address into two
						 * 64-bit words for hashing
						 */
						be32_to_cpu_array(src_ip, pkt->ip6src, IPV6_WORDS);
						ldata[1] = (u64)src_ip[0] << 32 | src_ip[1];
						ldata[0] = (u64)src_ip[2] << 32 | src_ip[3];
						field_hash = npc_field_hash_calc(ldata,
										 rsp,
										 intf,
										 hash_idx);
						npc_update_entry(rvu, NPC_SIP_IPV6, entry,
								 field_hash, 0,
								 GENMASK(31, 0), 0, intf);
						/* Keep the original address
						 * and mask for the caller
						 */
						memcpy(&opkt->ip6src, &pkt->ip6src,
						       sizeof(pkt->ip6src));
						memcpy(&omask->ip6src, &mask->ip6src,
						       sizeof(mask->ip6src));
					} else if ((features & BIT_ULL(NPC_DIP_IPV6)) && hash_idx) {
						u32 dst_ip[IPV6_WORDS];

						be32_to_cpu_array(dst_ip, pkt->ip6dst, IPV6_WORDS);
						ldata[1] = (u64)dst_ip[0] << 32 | dst_ip[1];
						ldata[0] = (u64)dst_ip[2] << 32 | dst_ip[3];
						field_hash = npc_field_hash_calc(ldata,
										 rsp,
										 intf,
										 hash_idx);
						npc_update_entry(rvu, NPC_DIP_IPV6, entry,
								 field_hash, 0,
								 GENMASK(31, 0), 0, intf);
						memcpy(&opkt->ip6dst, &pkt->ip6dst,
						       sizeof(pkt->ip6dst));
						memcpy(&omask->ip6dst, &mask->ip6dst,
						       sizeof(mask->ip6dst));
					}

					break;
				}
			}
		}
	}
}
317 
318 int rvu_mbox_handler_npc_get_field_hash_info(struct rvu *rvu,
319 					     struct npc_get_field_hash_info_req *req,
320 					     struct npc_get_field_hash_info_rsp *rsp)
321 {
322 	u64 *secret_key = rsp->secret_key;
323 	u8 intf = req->intf;
324 	int i, j, blkaddr;
325 
326 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
327 	if (blkaddr < 0) {
328 		dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__);
329 		return -EINVAL;
330 	}
331 
332 	secret_key[0] = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_SECRET_KEY0(intf));
333 	secret_key[1] = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_SECRET_KEY1(intf));
334 	secret_key[2] = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_SECRET_KEY2(intf));
335 
336 	for (i = 0; i < NPC_MAX_HASH; i++) {
337 		for (j = 0; j < NPC_MAX_HASH_MASK; j++) {
338 			rsp->hash_mask[NIX_INTF_RX][i][j] =
339 				GET_KEX_LD_HASH_MASK(NIX_INTF_RX, i, j);
340 			rsp->hash_mask[NIX_INTF_TX][i][j] =
341 				GET_KEX_LD_HASH_MASK(NIX_INTF_TX, i, j);
342 		}
343 	}
344 
345 	for (i = 0; i < NPC_MAX_INTF; i++)
346 		for (j = 0; j < NPC_MAX_HASH; j++)
347 			rsp->hash_ctrl[i][j] = GET_KEX_LD_HASH_CTRL(i, j);
348 
349 	return 0;
350 }
351 
352 /**
353  *	rvu_npc_exact_mac2u64 - utility function to convert mac address to u64.
354  *	@mac_addr: MAC address.
355  *	Return: mdata for exact match table.
356  */
357 static u64 rvu_npc_exact_mac2u64(u8 *mac_addr)
358 {
359 	u64 mac = 0;
360 	int index;
361 
362 	for (index = ETH_ALEN - 1; index >= 0; index--)
363 		mac |= ((u64)*mac_addr++) << (8 * index);
364 
365 	return mac;
366 }
367 
368 /**
369  *	rvu_exact_prepare_mdata - Make mdata for mcam entry
370  *	@mac: MAC address
371  *	@chan: Channel number.
372  *	@ctype: Channel Type.
373  *	@mask: LDATA mask.
374  *	Return: Meta data
375  */
376 static u64 rvu_exact_prepare_mdata(u8 *mac, u16 chan, u16 ctype, u64 mask)
377 {
378 	u64 ldata = rvu_npc_exact_mac2u64(mac);
379 
380 	/* Please note that mask is 48bit which excludes chan and ctype.
381 	 * Increase mask bits if we need to include them as well.
382 	 */
383 	ldata |= ((u64)chan << 48);
384 	ldata |= ((u64)ctype  << 60);
385 	ldata &= mask;
386 	ldata = ldata << 2;
387 
388 	return ldata;
389 }
390 
391 /**
392  *      rvu_exact_calculate_hash - calculate hash index to mem table.
393  *	@rvu: resource virtualization unit.
394  *	@chan: Channel number
395  *	@ctype: Channel type.
396  *	@mac: MAC address
397  *	@mask: HASH mask.
398  *	@table_depth: Depth of table.
399  *	Return: Hash value
400  */
401 static u32 rvu_exact_calculate_hash(struct rvu *rvu, u16 chan, u16 ctype, u8 *mac,
402 				    u64 mask, u32 table_depth)
403 {
404 	struct npc_exact_table *table = rvu->hw->table;
405 	u64 hash_key[2];
406 	u64 key_in[2];
407 	u64 ldata;
408 	u32 hash;
409 
410 	key_in[0] = RVU_NPC_HASH_SECRET_KEY0;
411 	key_in[1] = RVU_NPC_HASH_SECRET_KEY2;
412 
413 	hash_key[0] = key_in[0] << 31;
414 	hash_key[0] |= key_in[1];
415 	hash_key[1] = key_in[0] >> 33;
416 
417 	ldata = rvu_exact_prepare_mdata(mac, chan, ctype, mask);
418 
419 	dev_dbg(rvu->dev, "%s: ldata=0x%llx hash_key0=0x%llx hash_key2=0x%llx\n", __func__,
420 		ldata, hash_key[1], hash_key[0]);
421 	hash = rvu_npc_toeplitz_hash(&ldata, (u64 *)hash_key, 64, 95);
422 
423 	hash &= table->mem_table.hash_mask;
424 	hash += table->mem_table.hash_offset;
425 	dev_dbg(rvu->dev, "%s: hash=%x\n", __func__,  hash);
426 
427 	return hash;
428 }
429 
430 /**
431  *      rvu_npc_exact_alloc_mem_table_entry - find free entry in 4 way table.
432  *      @rvu: resource virtualization unit.
433  *	@way: Indicate way to table.
434  *	@index: Hash index to 4 way table.
435  *	@hash: Hash value.
436  *
437  *	Searches 4 way table using hash index. Returns 0 on success.
438  *	Return: 0 upon success.
439  */
440 static int rvu_npc_exact_alloc_mem_table_entry(struct rvu *rvu, u8 *way,
441 					       u32 *index, unsigned int hash)
442 {
443 	struct npc_exact_table *table;
444 	int depth, i;
445 
446 	table = rvu->hw->table;
447 	depth = table->mem_table.depth;
448 
449 	/* Check all the 4 ways for a free slot. */
450 	mutex_lock(&table->lock);
451 	for (i = 0; i <  table->mem_table.ways; i++) {
452 		if (test_bit(hash + i * depth, table->mem_table.bmap))
453 			continue;
454 
455 		set_bit(hash + i * depth, table->mem_table.bmap);
456 		mutex_unlock(&table->lock);
457 
458 		dev_dbg(rvu->dev, "%s: mem table entry alloc success (way=%d index=%d)\n",
459 			__func__, i, hash);
460 
461 		*way = i;
462 		*index = hash;
463 		return 0;
464 	}
465 	mutex_unlock(&table->lock);
466 
467 	dev_dbg(rvu->dev, "%s: No space in 4 way exact way, weight=%u\n", __func__,
468 		bitmap_weight(table->mem_table.bmap, table->mem_table.depth));
469 	return -ENOSPC;
470 }
471 
472 /**
473  *	rvu_npc_exact_free_id - Free seq id from bitmat.
474  *	@rvu: Resource virtualization unit.
475  *	@seq_id: Sequence identifier to be freed.
476  */
477 static void rvu_npc_exact_free_id(struct rvu *rvu, u32 seq_id)
478 {
479 	struct npc_exact_table *table;
480 
481 	table = rvu->hw->table;
482 	mutex_lock(&table->lock);
483 	clear_bit(seq_id, table->id_bmap);
484 	mutex_unlock(&table->lock);
485 	dev_dbg(rvu->dev, "%s: freed id %d\n", __func__, seq_id);
486 }
487 
488 /**
489  *	rvu_npc_exact_alloc_id - Alloc seq id from bitmap.
490  *	@rvu: Resource virtualization unit.
491  *	@seq_id: Sequence identifier.
492  *	Return: True or false.
493  */
494 static bool rvu_npc_exact_alloc_id(struct rvu *rvu, u32 *seq_id)
495 {
496 	struct npc_exact_table *table;
497 	u32 idx;
498 
499 	table = rvu->hw->table;
500 
501 	mutex_lock(&table->lock);
502 	idx = find_first_zero_bit(table->id_bmap, table->tot_ids);
503 	if (idx == table->tot_ids) {
504 		mutex_unlock(&table->lock);
505 		dev_err(rvu->dev, "%s: No space in id bitmap (%d)\n",
506 			__func__, table->tot_ids);
507 
508 		return false;
509 	}
510 
511 	/* Mark bit map to indicate that slot is used.*/
512 	set_bit(idx, table->id_bmap);
513 	mutex_unlock(&table->lock);
514 
515 	*seq_id = idx;
516 	dev_dbg(rvu->dev, "%s: Allocated id (%d)\n", __func__, *seq_id);
517 
518 	return true;
519 }
520 
521 /**
522  *      rvu_npc_exact_alloc_cam_table_entry - find free slot in fully associative table.
523  *      @rvu: resource virtualization unit.
524  *	@index: Index to exact CAM table.
525  *	Return: 0 upon success; else error number.
526  */
527 static int rvu_npc_exact_alloc_cam_table_entry(struct rvu *rvu, int *index)
528 {
529 	struct npc_exact_table *table;
530 	u32 idx;
531 
532 	table = rvu->hw->table;
533 
534 	mutex_lock(&table->lock);
535 	idx = find_first_zero_bit(table->cam_table.bmap, table->cam_table.depth);
536 	if (idx == table->cam_table.depth) {
537 		mutex_unlock(&table->lock);
538 		dev_info(rvu->dev, "%s: No space in exact cam table, weight=%u\n", __func__,
539 			 bitmap_weight(table->cam_table.bmap, table->cam_table.depth));
540 		return -ENOSPC;
541 	}
542 
543 	/* Mark bit map to indicate that slot is used.*/
544 	set_bit(idx, table->cam_table.bmap);
545 	mutex_unlock(&table->lock);
546 
547 	*index = idx;
548 	dev_dbg(rvu->dev, "%s: cam table entry alloc success (index=%d)\n",
549 		__func__, idx);
550 	return 0;
551 }
552 
553 /**
554  *	rvu_exact_prepare_table_entry - Data for exact match table entry.
555  *	@rvu: Resource virtualization unit.
556  *	@enable: Enable/Disable entry
557  *	@ctype: Software defined channel type. Currently set as 0.
558  *	@chan: Channel number.
559  *	@mac_addr: Destination mac address.
560  *	Return: mdata for exact match table.
561  */
562 static u64 rvu_exact_prepare_table_entry(struct rvu *rvu, bool enable,
563 					 u8 ctype, u16 chan, u8 *mac_addr)
564 
565 {
566 	u64 ldata = rvu_npc_exact_mac2u64(mac_addr);
567 
568 	/* Enable or disable */
569 	u64 mdata = FIELD_PREP(GENMASK_ULL(63, 63), enable ? 1 : 0);
570 
571 	/* Set Ctype */
572 	mdata |= FIELD_PREP(GENMASK_ULL(61, 60), ctype);
573 
574 	/* Set chan */
575 	mdata |= FIELD_PREP(GENMASK_ULL(59, 48), chan);
576 
577 	/* MAC address */
578 	mdata |= FIELD_PREP(GENMASK_ULL(47, 0), ldata);
579 
580 	return mdata;
581 }
582 
583 /**
584  *	rvu_exact_config_secret_key - Configure secret key.
585  *	@rvu: Resource virtualization unit.
586  */
587 static void rvu_exact_config_secret_key(struct rvu *rvu)
588 {
589 	int blkaddr;
590 
591 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
592 	rvu_write64(rvu, blkaddr, NPC_AF_INTFX_EXACT_SECRET0(NIX_INTF_RX),
593 		    RVU_NPC_HASH_SECRET_KEY0);
594 
595 	rvu_write64(rvu, blkaddr, NPC_AF_INTFX_EXACT_SECRET1(NIX_INTF_RX),
596 		    RVU_NPC_HASH_SECRET_KEY1);
597 
598 	rvu_write64(rvu, blkaddr, NPC_AF_INTFX_EXACT_SECRET2(NIX_INTF_RX),
599 		    RVU_NPC_HASH_SECRET_KEY2);
600 }
601 
602 /**
603  *	rvu_exact_config_search_key - Configure search key
604  *	@rvu: Resource virtualization unit.
605  */
606 static void rvu_exact_config_search_key(struct rvu *rvu)
607 {
608 	int blkaddr;
609 	u64 reg_val;
610 
611 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
612 
613 	/* HDR offset */
614 	reg_val = FIELD_PREP(GENMASK_ULL(39, 32), 0);
615 
616 	/* BYTESM1, number of bytes - 1 */
617 	reg_val |= FIELD_PREP(GENMASK_ULL(18, 16), ETH_ALEN - 1);
618 
619 	/* Enable LID and set LID to  NPC_LID_LA */
620 	reg_val |= FIELD_PREP(GENMASK_ULL(11, 11), 1);
621 	reg_val |= FIELD_PREP(GENMASK_ULL(10, 8),  NPC_LID_LA);
622 
623 	/* Clear layer type based extraction */
624 
625 	/* Disable LT_EN */
626 	reg_val |= FIELD_PREP(GENMASK_ULL(12, 12), 0);
627 
628 	/* Set LTYPE_MATCH to 0 */
629 	reg_val |= FIELD_PREP(GENMASK_ULL(7, 4), 0);
630 
631 	/* Set LTYPE_MASK to 0 */
632 	reg_val |= FIELD_PREP(GENMASK_ULL(3, 0), 0);
633 
634 	rvu_write64(rvu, blkaddr, NPC_AF_INTFX_EXACT_CFG(NIX_INTF_RX), reg_val);
635 }
636 
637 /**
638  *	rvu_exact_config_result_ctrl - Set exact table hash control
639  *	@rvu: Resource virtualization unit.
640  *	@depth: Depth of Exact match table.
641  *
642  *	Sets mask and offset for hash for mem table.
643  */
644 static void rvu_exact_config_result_ctrl(struct rvu *rvu, uint32_t depth)
645 {
646 	int blkaddr;
647 	u64 reg = 0;
648 
649 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
650 
651 	/* Set mask. Note that depth is a power of 2 */
652 	rvu->hw->table->mem_table.hash_mask = (depth - 1);
653 	reg |= FIELD_PREP(GENMASK_ULL(42, 32), (depth - 1));
654 
655 	/* Set offset as 0 */
656 	rvu->hw->table->mem_table.hash_offset = 0;
657 	reg |= FIELD_PREP(GENMASK_ULL(10, 0), 0);
658 
659 	/* Set reg for RX */
660 	rvu_write64(rvu, blkaddr, NPC_AF_INTFX_EXACT_RESULT_CTL(NIX_INTF_RX), reg);
661 	/* Store hash mask and offset for s/w algorithm */
662 }
663 
664 /**
665  *	rvu_exact_config_table_mask - Set exact table mask.
666  *	@rvu: Resource virtualization unit.
667  */
668 static void rvu_exact_config_table_mask(struct rvu *rvu)
669 {
670 	int blkaddr;
671 	u64 mask = 0;
672 
673 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
674 
675 	/* Don't use Ctype */
676 	mask |= FIELD_PREP(GENMASK_ULL(61, 60), 0);
677 
678 	/* Set chan */
679 	mask |= GENMASK_ULL(59, 48);
680 
681 	/* Full ldata */
682 	mask |= GENMASK_ULL(47, 0);
683 
684 	/* Store mask for s/w hash calcualtion */
685 	rvu->hw->table->mem_table.mask = mask;
686 
687 	/* Set mask for RX.*/
688 	rvu_write64(rvu, blkaddr, NPC_AF_INTFX_EXACT_MASK(NIX_INTF_RX), mask);
689 }
690 
691 /**
692  *      rvu_npc_exact_get_max_entries - Get total number of entries in table.
693  *      @rvu: resource virtualization unit.
694  *	Return: Maximum table entries possible.
695  */
696 u32 rvu_npc_exact_get_max_entries(struct rvu *rvu)
697 {
698 	struct npc_exact_table *table;
699 
700 	table = rvu->hw->table;
701 	return table->tot_ids;
702 }
703 
704 /**
705  *      rvu_npc_exact_has_match_table - Checks support for exact match.
706  *      @rvu: resource virtualization unit.
707  *	Return: True if exact match table is supported/enabled.
708  */
709 bool rvu_npc_exact_has_match_table(struct rvu *rvu)
710 {
711 	return  rvu->hw->cap.npc_exact_match_enabled;
712 }
713 
714 /**
715  *      __rvu_npc_exact_find_entry_by_seq_id - find entry by id
716  *      @rvu: resource virtualization unit.
717  *	@seq_id: Sequence identifier.
718  *
719  *	Caller should acquire the lock.
720  *	Return: Pointer to table entry.
721  */
722 static struct npc_exact_table_entry *
723 __rvu_npc_exact_find_entry_by_seq_id(struct rvu *rvu, u32 seq_id)
724 {
725 	struct npc_exact_table *table = rvu->hw->table;
726 	struct npc_exact_table_entry *entry = NULL;
727 	struct list_head *lhead;
728 
729 	lhead = &table->lhead_gbl;
730 
731 	/* traverse to find the matching entry */
732 	list_for_each_entry(entry, lhead, glist) {
733 		if (entry->seq_id != seq_id)
734 			continue;
735 
736 		return entry;
737 	}
738 
739 	return NULL;
740 }
741 
742 /**
743  *      rvu_npc_exact_add_to_list - Add entry to list
744  *      @rvu: resource virtualization unit.
745  *	@opc_type: OPCODE to select MEM/CAM table.
746  *	@ways: MEM table ways.
747  *	@index: Index in MEM/CAM table.
748  *	@cgx_id: CGX identifier.
749  *	@lmac_id: LMAC identifier.
750  *	@mac_addr: MAC address.
751  *	@chan: Channel number.
752  *	@ctype: Channel Type.
753  *	@seq_id: Sequence identifier
754  *	@cmd: True if function is called by ethtool cmd
755  *	@mcam_idx: NPC mcam index of DMAC entry in NPC mcam.
756  *	@pcifunc: pci function
757  *	Return: 0 upon success.
758  */
759 static int rvu_npc_exact_add_to_list(struct rvu *rvu, enum npc_exact_opc_type opc_type, u8 ways,
760 				     u32 index, u8 cgx_id, u8 lmac_id, u8 *mac_addr, u16 chan,
761 				     u8 ctype, u32 *seq_id, bool cmd, u32 mcam_idx, u16 pcifunc)
762 {
763 	struct npc_exact_table_entry *entry, *tmp, *iter;
764 	struct npc_exact_table *table = rvu->hw->table;
765 	struct list_head *lhead, *pprev;
766 
767 	WARN_ON(ways >= NPC_EXACT_TBL_MAX_WAYS);
768 
769 	if (!rvu_npc_exact_alloc_id(rvu, seq_id)) {
770 		dev_err(rvu->dev, "%s: Generate seq id failed\n", __func__);
771 		return -EFAULT;
772 	}
773 
774 	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
775 	if (!entry) {
776 		rvu_npc_exact_free_id(rvu, *seq_id);
777 		dev_err(rvu->dev, "%s: Memory allocation failed\n", __func__);
778 		return -ENOMEM;
779 	}
780 
781 	mutex_lock(&table->lock);
782 	switch (opc_type) {
783 	case NPC_EXACT_OPC_CAM:
784 		lhead = &table->lhead_cam_tbl_entry;
785 		table->cam_tbl_entry_cnt++;
786 		break;
787 
788 	case NPC_EXACT_OPC_MEM:
789 		lhead = &table->lhead_mem_tbl_entry[ways];
790 		table->mem_tbl_entry_cnt++;
791 		break;
792 
793 	default:
794 		mutex_unlock(&table->lock);
795 		kfree(entry);
796 		rvu_npc_exact_free_id(rvu, *seq_id);
797 
798 		dev_err(rvu->dev, "%s: Unknown opc type%d\n", __func__, opc_type);
799 		return  -EINVAL;
800 	}
801 
802 	/* Add to global list */
803 	INIT_LIST_HEAD(&entry->glist);
804 	list_add_tail(&entry->glist, &table->lhead_gbl);
805 	INIT_LIST_HEAD(&entry->list);
806 	entry->index = index;
807 	entry->ways = ways;
808 	entry->opc_type = opc_type;
809 
810 	entry->pcifunc = pcifunc;
811 
812 	ether_addr_copy(entry->mac, mac_addr);
813 	entry->chan = chan;
814 	entry->ctype = ctype;
815 	entry->cgx_id = cgx_id;
816 	entry->lmac_id = lmac_id;
817 
818 	entry->seq_id = *seq_id;
819 
820 	entry->mcam_idx = mcam_idx;
821 	entry->cmd = cmd;
822 
823 	pprev = lhead;
824 
825 	/* Insert entry in ascending order of index */
826 	list_for_each_entry_safe(iter, tmp, lhead, list) {
827 		if (index < iter->index)
828 			break;
829 
830 		pprev = &iter->list;
831 	}
832 
833 	/* Add to each table list */
834 	list_add(&entry->list, pprev);
835 	mutex_unlock(&table->lock);
836 	return 0;
837 }
838 
839 /**
840  *	rvu_npc_exact_mem_table_write - Wrapper for register write
841  *	@rvu: resource virtualization unit.
842  *	@blkaddr: Block address
843  *	@ways: ways for MEM table.
844  *	@index: Index in MEM
845  *	@mdata: Meta data to be written to register.
846  */
847 static void rvu_npc_exact_mem_table_write(struct rvu *rvu, int blkaddr, u8 ways,
848 					  u32 index, u64 mdata)
849 {
850 	rvu_write64(rvu, blkaddr, NPC_AF_EXACT_MEM_ENTRY(ways, index), mdata);
851 }
852 
853 /**
854  *	rvu_npc_exact_cam_table_write - Wrapper for register write
855  *	@rvu: resource virtualization unit.
856  *	@blkaddr: Block address
857  *	@index: Index in MEM
858  *	@mdata: Meta data to be written to register.
859  */
860 static void rvu_npc_exact_cam_table_write(struct rvu *rvu, int blkaddr,
861 					  u32 index, u64 mdata)
862 {
863 	rvu_write64(rvu, blkaddr, NPC_AF_EXACT_CAM_ENTRY(index), mdata);
864 }
865 
866 /**
867  *      rvu_npc_exact_dealloc_table_entry - dealloc table entry
868  *      @rvu: resource virtualization unit.
869  *	@opc_type: OPCODE for selection of table(MEM or CAM)
870  *	@ways: ways if opc_type is MEM table.
871  *	@index: Index of MEM or CAM table.
872  *	Return: 0 upon success.
873  */
874 static int rvu_npc_exact_dealloc_table_entry(struct rvu *rvu, enum npc_exact_opc_type opc_type,
875 					     u8 ways, u32 index)
876 {
877 	int blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
878 	struct npc_exact_table *table;
879 	u8 null_dmac[6] = { 0 };
880 	int depth;
881 
882 	/* Prepare entry with all fields set to zero */
883 	u64 null_mdata = rvu_exact_prepare_table_entry(rvu, false, 0, 0, null_dmac);
884 
885 	table = rvu->hw->table;
886 	depth = table->mem_table.depth;
887 
888 	mutex_lock(&table->lock);
889 
890 	switch (opc_type) {
891 	case NPC_EXACT_OPC_CAM:
892 
893 		/* Check whether entry is used already */
894 		if (!test_bit(index, table->cam_table.bmap)) {
895 			mutex_unlock(&table->lock);
896 			dev_err(rvu->dev, "%s: Trying to free an unused entry ways=%d index=%d\n",
897 				__func__, ways, index);
898 			return -EINVAL;
899 		}
900 
901 		rvu_npc_exact_cam_table_write(rvu, blkaddr, index, null_mdata);
902 		clear_bit(index, table->cam_table.bmap);
903 		break;
904 
905 	case NPC_EXACT_OPC_MEM:
906 
907 		/* Check whether entry is used already */
908 		if (!test_bit(index + ways * depth, table->mem_table.bmap)) {
909 			mutex_unlock(&table->lock);
910 			dev_err(rvu->dev, "%s: Trying to free an unused entry index=%d\n",
911 				__func__, index);
912 			return -EINVAL;
913 		}
914 
915 		rvu_npc_exact_mem_table_write(rvu, blkaddr, ways, index, null_mdata);
916 		clear_bit(index + ways * depth, table->mem_table.bmap);
917 		break;
918 
919 	default:
920 		mutex_unlock(&table->lock);
921 		dev_err(rvu->dev, "%s: invalid opc type %d", __func__, opc_type);
922 		return -ENOSPC;
923 	}
924 
925 	mutex_unlock(&table->lock);
926 
927 	dev_dbg(rvu->dev, "%s: Successfully deleted entry (index=%d, ways=%d opc_type=%d\n",
928 		__func__, index,  ways, opc_type);
929 
930 	return 0;
931 }
932 
933 /**
934  *	rvu_npc_exact_alloc_table_entry - Allociate an entry
935  *      @rvu: resource virtualization unit.
936  *	@mac: MAC address.
937  *	@chan: Channel number.
938  *	@ctype: Channel Type.
939  *	@index: Index of MEM table or CAM table.
940  *	@ways: Ways. Only valid for MEM table.
941  *	@opc_type: OPCODE to select table (MEM or CAM)
942  *
943  *	Try allocating a slot from MEM table. If all 4 ways
944  *	slot are full for a hash index, check availability in
945  *	32-entry CAM table for allocation.
946  *	Return: 0 upon success.
947  */
948 static int rvu_npc_exact_alloc_table_entry(struct rvu *rvu,  char *mac, u16 chan, u8 ctype,
949 					   u32 *index, u8 *ways, enum npc_exact_opc_type *opc_type)
950 {
951 	struct npc_exact_table *table;
952 	unsigned int hash;
953 	int err;
954 
955 	table = rvu->hw->table;
956 
957 	/* Check in 4-ways mem entry for free slote */
958 	hash =  rvu_exact_calculate_hash(rvu, chan, ctype, mac, table->mem_table.mask,
959 					 table->mem_table.depth);
960 	err = rvu_npc_exact_alloc_mem_table_entry(rvu, ways, index, hash);
961 	if (!err) {
962 		*opc_type = NPC_EXACT_OPC_MEM;
963 		dev_dbg(rvu->dev, "%s: inserted in 4 ways hash table ways=%d, index=%d\n",
964 			__func__, *ways, *index);
965 		return 0;
966 	}
967 
968 	dev_dbg(rvu->dev, "%s: failed to insert in 4 ways hash table\n", __func__);
969 
970 	/* wayss is 0 for cam table */
971 	*ways = 0;
972 	err = rvu_npc_exact_alloc_cam_table_entry(rvu, index);
973 	if (!err) {
974 		*opc_type = NPC_EXACT_OPC_CAM;
975 		dev_dbg(rvu->dev, "%s: inserted in fully associative hash table index=%u\n",
976 			__func__, *index);
977 		return 0;
978 	}
979 
980 	dev_err(rvu->dev, "%s: failed to insert in fully associative hash table\n", __func__);
981 	return -ENOSPC;
982 }
983 
984 /**
985  *	rvu_npc_exact_save_drop_rule_chan_and_mask - Save drop rules info in data base.
986  *      @rvu: resource virtualization unit.
987  *	@drop_mcam_idx: Drop rule index in NPC mcam.
988  *	@chan_val: Channel value.
989  *	@chan_mask: Channel Mask.
990  *	@pcifunc: pcifunc of interface.
991  *	Return: True upon success.
992  */
993 static bool rvu_npc_exact_save_drop_rule_chan_and_mask(struct rvu *rvu, int drop_mcam_idx,
994 						       u64 chan_val, u64 chan_mask, u16 pcifunc)
995 {
996 	struct npc_exact_table *table;
997 	int i;
998 
999 	table = rvu->hw->table;
1000 
1001 	for (i = 0; i < NPC_MCAM_DROP_RULE_MAX; i++) {
1002 		if (!table->drop_rule_map[i].valid)
1003 			break;
1004 
1005 		if (table->drop_rule_map[i].chan_val != (u16)chan_val)
1006 			continue;
1007 
1008 		if (table->drop_rule_map[i].chan_mask != (u16)chan_mask)
1009 			continue;
1010 
1011 		return false;
1012 	}
1013 
1014 	if (i == NPC_MCAM_DROP_RULE_MAX)
1015 		return false;
1016 
1017 	table->drop_rule_map[i].drop_rule_idx = drop_mcam_idx;
1018 	table->drop_rule_map[i].chan_val = (u16)chan_val;
1019 	table->drop_rule_map[i].chan_mask = (u16)chan_mask;
1020 	table->drop_rule_map[i].pcifunc = pcifunc;
1021 	table->drop_rule_map[i].valid = true;
1022 	return true;
1023 }
1024 
1025 /**
1026  *	rvu_npc_exact_calc_drop_rule_chan_and_mask - Calculate Channel number and mask.
1027  *      @rvu: resource virtualization unit.
1028  *	@intf_type: Interface type (SDK, LBK or CGX)
1029  *	@cgx_id: CGX identifier.
1030  *	@lmac_id: LAMC identifier.
1031  *	@val: Channel number.
1032  *	@mask: Channel mask.
1033  *	Return: True upon success.
1034  */
1035 static bool rvu_npc_exact_calc_drop_rule_chan_and_mask(struct rvu *rvu, u8 intf_type,
1036 						       u8 cgx_id, u8 lmac_id,
1037 						       u64 *val, u64 *mask)
1038 {
1039 	u16 chan_val, chan_mask;
1040 
1041 	/* No support for SDP and LBK */
1042 	if (intf_type != NIX_INTF_TYPE_CGX)
1043 		return false;
1044 
1045 	chan_val = rvu_nix_chan_cgx(rvu, cgx_id, lmac_id, 0);
1046 	chan_mask = 0xfff;
1047 
1048 	if (val)
1049 		*val = chan_val;
1050 
1051 	if (mask)
1052 		*mask = chan_mask;
1053 
1054 	return true;
1055 }
1056 
1057 /**
1058  *	rvu_npc_exact_drop_rule_to_pcifunc - Retrieve pcifunc
1059  *      @rvu: resource virtualization unit.
1060  *	@drop_rule_idx: Drop rule index in NPC mcam.
1061  *
 *	Debugfs (exact_drop_cnt) entry displays pcifunc for interface
 *	by retrieving the pcifunc value from the database.
 *	Return: pcifunc of the interface owning the drop rule.
1065  */
1066 u16 rvu_npc_exact_drop_rule_to_pcifunc(struct rvu *rvu, u32 drop_rule_idx)
1067 {
1068 	struct npc_exact_table *table;
1069 	int i;
1070 
1071 	table = rvu->hw->table;
1072 
1073 	for (i = 0; i < NPC_MCAM_DROP_RULE_MAX; i++) {
1074 		if (!table->drop_rule_map[i].valid)
1075 			break;
1076 
1077 		if (table->drop_rule_map[i].drop_rule_idx != drop_rule_idx)
1078 			continue;
1079 
1080 		return table->drop_rule_map[i].pcifunc;
1081 	}
1082 
1083 	dev_err(rvu->dev, "%s: drop mcam rule index (%d) >= NPC_MCAM_DROP_RULE_MAX\n",
1084 		__func__, drop_rule_idx);
1085 	return -1;
1086 }
1087 
1088 /**
1089  *	rvu_npc_exact_get_drop_rule_info - Get drop rule information.
1090  *      @rvu: resource virtualization unit.
1091  *	@intf_type: Interface type (CGX, SDP or LBK)
1092  *	@cgx_id: CGX identifier.
1093  *	@lmac_id: LMAC identifier.
1094  *	@drop_mcam_idx: NPC mcam drop rule index.
1095  *	@val: Channel value.
1096  *	@mask: Channel mask.
1097  *	@pcifunc: pcifunc of interface corresponding to the drop rule.
1098  *	Return: True upon success.
1099  */
1100 static bool rvu_npc_exact_get_drop_rule_info(struct rvu *rvu, u8 intf_type, u8 cgx_id,
1101 					     u8 lmac_id, u32 *drop_mcam_idx, u64 *val,
1102 					     u64 *mask, u16 *pcifunc)
1103 {
1104 	struct npc_exact_table *table;
1105 	u64 chan_val, chan_mask;
1106 	bool rc;
1107 	int i;
1108 
1109 	table = rvu->hw->table;
1110 
1111 	if (intf_type != NIX_INTF_TYPE_CGX) {
1112 		dev_err(rvu->dev, "%s: No drop rule for LBK/SDP mode\n", __func__);
1113 		return false;
1114 	}
1115 
1116 	rc = rvu_npc_exact_calc_drop_rule_chan_and_mask(rvu, intf_type, cgx_id,
1117 							lmac_id, &chan_val, &chan_mask);
1118 	if (!rc)
1119 		return false;
1120 
1121 	for (i = 0; i < NPC_MCAM_DROP_RULE_MAX; i++) {
1122 		if (!table->drop_rule_map[i].valid)
1123 			break;
1124 
1125 		if (table->drop_rule_map[i].chan_val != (u16)chan_val)
1126 			continue;
1127 
1128 		if (val)
1129 			*val = table->drop_rule_map[i].chan_val;
1130 		if (mask)
1131 			*mask = table->drop_rule_map[i].chan_mask;
1132 		if (pcifunc)
1133 			*pcifunc = table->drop_rule_map[i].pcifunc;
1134 
1135 		*drop_mcam_idx = i;
1136 		return true;
1137 	}
1138 
1139 	if (i == NPC_MCAM_DROP_RULE_MAX) {
1140 		dev_err(rvu->dev, "%s: drop mcam rule index (%d) >= NPC_MCAM_DROP_RULE_MAX\n",
1141 			__func__, *drop_mcam_idx);
1142 		return false;
1143 	}
1144 
1145 	dev_err(rvu->dev, "%s: Could not retrieve for cgx=%d, lmac=%d\n",
1146 		__func__, cgx_id, lmac_id);
1147 	return false;
1148 }
1149 
1150 /**
 *	__rvu_npc_exact_cmd_rules_cnt_update - Update the number of dmac rules against a drop rule.
1152  *      @rvu: resource virtualization unit.
1153  *	@drop_mcam_idx: NPC mcam drop rule index.
1154  *	@val: +1 or -1.
1155  *	@enable_or_disable_cam: If no exact match rules against a drop rule, disable it.
1156  *
 *	When the first exact match entry against a drop rule is added,
 *	enable_or_disable_cam is set to true so the caller can enable the drop
 *	rule; when the last such entry is deleted, it is again set to true so
 *	the caller can disable the drop rule.
1160  *	Return: Number of rules
1161  */
1162 static u16 __rvu_npc_exact_cmd_rules_cnt_update(struct rvu *rvu, int drop_mcam_idx,
1163 						int val, bool *enable_or_disable_cam)
1164 {
1165 	struct npc_exact_table *table;
1166 	u16 *cnt, old_cnt;
1167 	bool promisc;
1168 
1169 	table = rvu->hw->table;
1170 	promisc = table->promisc_mode[drop_mcam_idx];
1171 
1172 	cnt = &table->cnt_cmd_rules[drop_mcam_idx];
1173 	old_cnt = *cnt;
1174 
1175 	*cnt += val;
1176 
1177 	if (!enable_or_disable_cam)
1178 		goto done;
1179 
1180 	*enable_or_disable_cam = false;
1181 
1182 	if (promisc)
1183 		goto done;
1184 
1185 	/* If all rules are deleted and not already in promisc mode; disable cam */
1186 	if (!*cnt && val < 0) {
1187 		*enable_or_disable_cam = true;
1188 		goto done;
1189 	}
1190 
1191 	/* If rule got added and not already in promisc mode; enable cam */
1192 	if (!old_cnt && val > 0) {
1193 		*enable_or_disable_cam = true;
1194 		goto done;
1195 	}
1196 
1197 done:
1198 	return *cnt;
1199 }
1200 
1201 /**
1202  *      rvu_npc_exact_del_table_entry_by_id - Delete and free table entry.
1203  *      @rvu: resource virtualization unit.
1204  *	@seq_id: Sequence identifier of the entry.
1205  *
1206  *	Deletes entry from linked lists and free up slot in HW MEM or CAM
1207  *	table.
1208  *	Return: 0 upon success.
1209  */
static int rvu_npc_exact_del_table_entry_by_id(struct rvu *rvu, u32 seq_id)
{
	struct npc_exact_table_entry *entry = NULL;
	struct npc_exact_table *table;
	bool disable_cam = false;
	u32 drop_mcam_idx = -1;
	int *cnt;
	bool rc;

	table = rvu->hw->table;

	mutex_lock(&table->lock);

	/* Lookup for entry which needs to be updated */
	entry = __rvu_npc_exact_find_entry_by_seq_id(rvu, seq_id);
	if (!entry) {
		dev_dbg(rvu->dev, "%s: failed to find entry for id=%d\n", __func__, seq_id);
		mutex_unlock(&table->lock);
		return -ENODATA;
	}

	/* Pick the occupancy counter matching the entry's backing store */
	cnt = (entry->opc_type == NPC_EXACT_OPC_CAM) ? &table->cam_tbl_entry_cnt :
				&table->mem_tbl_entry_cnt;

	/* delete from lists */
	list_del_init(&entry->list);
	list_del_init(&entry->glist);

	(*cnt)--;

	rc = rvu_npc_exact_get_drop_rule_info(rvu, NIX_INTF_TYPE_CGX, entry->cgx_id,
					      entry->lmac_id, &drop_mcam_idx, NULL, NULL, NULL);
	if (!rc) {
		/* NOTE(review): the entry is already unlinked and the counter
		 * decremented here, but the entry itself is not freed — confirm
		 * this early return cannot leak the allocation.
		 */
		dev_dbg(rvu->dev, "%s: failed to retrieve drop info for id=0x%x\n",
			__func__, seq_id);
		mutex_unlock(&table->lock);
		return -ENODATA;
	}

	/* Only ethtool-created (cmd) rules count towards the drop rule gate */
	if (entry->cmd)
		__rvu_npc_exact_cmd_rules_cnt_update(rvu, drop_mcam_idx, -1, &disable_cam);

	/* No dmac filter rules; disable drop on hit rule */
	if (disable_cam) {
		rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX, false);
		dev_dbg(rvu->dev, "%s: Disabling mcam idx %d\n",
			__func__, drop_mcam_idx);
	}

	mutex_unlock(&table->lock);

	/* Release the HW slot (CAM index or MEM way/index) used by this entry */
	rvu_npc_exact_dealloc_table_entry(rvu, entry->opc_type, entry->ways, entry->index);

	/* Return the sequence id to the id bitmap */
	rvu_npc_exact_free_id(rvu, seq_id);

	dev_dbg(rvu->dev, "%s: delete entry success for id=0x%x, mca=%pM\n",
		__func__, seq_id, entry->mac);
	kfree(entry);

	return 0;
}
1271 
1272 /**
1273  *      rvu_npc_exact_add_table_entry - Adds a table entry
1274  *      @rvu: resource virtualization unit.
1275  *	@cgx_id: cgx identifier.
1276  *	@lmac_id: lmac identifier.
1277  *	@mac: MAC address.
1278  *	@chan: Channel number.
1279  *	@ctype: Channel Type.
1280  *	@seq_id: Sequence number.
1281  *	@cmd: Whether it is invoked by ethtool cmd.
1282  *	@mcam_idx: NPC mcam index corresponding to MAC
1283  *	@pcifunc: PCI func.
1284  *
1285  *	Creates a new exact match table entry in either CAM or
1286  *	MEM table.
1287  *	Return: 0 upon success.
1288  */
static int rvu_npc_exact_add_table_entry(struct rvu *rvu, u8 cgx_id, u8 lmac_id, u8 *mac,
					 u16 chan, u8 ctype, u32 *seq_id, bool cmd,
					 u32 mcam_idx, u16 pcifunc)
{
	int blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	enum npc_exact_opc_type opc_type;
	bool enable_cam = false;
	u32 drop_mcam_idx;
	u32 index;
	u64 mdata;
	bool rc;
	int err;
	u8 ways;

	/* NOTE(review): ctype is forced to zero regardless of the argument —
	 * presumably only channel type 0 is supported; confirm with callers.
	 */
	ctype = 0;

	/* Reserve a free slot: a MEM-table way/index, falling back to CAM */
	err = rvu_npc_exact_alloc_table_entry(rvu, mac, chan, ctype, &index, &ways, &opc_type);
	if (err) {
		dev_err(rvu->dev, "%s: Could not alloc in exact match table\n", __func__);
		return err;
	}

	/* Write mdata to table */
	mdata = rvu_exact_prepare_table_entry(rvu, true, ctype, chan, mac);

	if (opc_type == NPC_EXACT_OPC_CAM)
		rvu_npc_exact_cam_table_write(rvu, blkaddr, index, mdata);
	else
		rvu_npc_exact_mem_table_write(rvu, blkaddr, ways, index,  mdata);

	/* Insert entry to linked list */
	err = rvu_npc_exact_add_to_list(rvu, opc_type, ways, index, cgx_id, lmac_id,
					mac, chan, ctype, seq_id, cmd, mcam_idx, pcifunc);
	if (err) {
		rvu_npc_exact_dealloc_table_entry(rvu, opc_type, ways, index);
		dev_err(rvu->dev, "%s: could not add to exact match table\n", __func__);
		return err;
	}

	rc = rvu_npc_exact_get_drop_rule_info(rvu, NIX_INTF_TYPE_CGX, cgx_id, lmac_id,
					      &drop_mcam_idx, NULL, NULL, NULL);
	if (!rc) {
		/* NOTE(review): only the HW slot is released here; the list
		 * node added above is left in place — confirm this is intended.
		 */
		rvu_npc_exact_dealloc_table_entry(rvu, opc_type, ways, index);
		dev_dbg(rvu->dev, "%s: failed to get drop rule info cgx=%d lmac=%d\n",
			__func__, cgx_id, lmac_id);
		return -EINVAL;
	}

	/* Only ethtool-initiated (cmd) rules participate in the per-drop-rule
	 * count that gates the drop rule on/off.
	 */
	if (cmd)
		__rvu_npc_exact_cmd_rules_cnt_update(rvu, drop_mcam_idx, 1, &enable_cam);

	/* First command rule; enable drop on hit rule */
	if (enable_cam) {
		rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX, true);
		dev_dbg(rvu->dev, "%s: Enabling mcam idx %d\n",
			__func__, drop_mcam_idx);
	}

	dev_dbg(rvu->dev,
		"%s: Successfully added entry (index=%d, dmac=%pM, ways=%d opc_type=%d\n",
		__func__, index, mac, ways, opc_type);

	return 0;
}
1353 
1354 /**
1355  *      rvu_npc_exact_update_table_entry - Update exact match table.
1356  *      @rvu: resource virtualization unit.
1357  *	@cgx_id: CGX identifier.
1358  *	@lmac_id: LMAC identifier.
1359  *	@old_mac: Existing MAC address entry.
1360  *	@new_mac: New MAC address entry.
1361  *	@seq_id: Sequence identifier of the entry.
1362  *
1363  *	Updates MAC address of an entry. If entry is in MEM table, new
1364  *	hash value may not match with old one.
1365  *	Return: 0 upon success.
1366  */
static int rvu_npc_exact_update_table_entry(struct rvu *rvu, u8 cgx_id, u8 lmac_id,
					    u8 *old_mac, u8 *new_mac, u32 *seq_id)
{
	int blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	struct npc_exact_table_entry *entry;
	struct npc_exact_table *table;
	u32 hash_index;
	u64 mdata;

	table = rvu->hw->table;

	mutex_lock(&table->lock);

	/* Lookup for entry which needs to be updated */
	entry = __rvu_npc_exact_find_entry_by_seq_id(rvu, *seq_id);
	if (!entry) {
		mutex_unlock(&table->lock);
		dev_dbg(rvu->dev,
			"%s: failed to find entry for cgx_id=%d lmac_id=%d old_mac=%pM\n",
			__func__, cgx_id, lmac_id, old_mac);
		return -ENODATA;
	}

	/* If entry is in mem table and new hash index is different than old
	 * hash index, we cannot update the entry. Fail in these scenarios.
	 */
	if (entry->opc_type == NPC_EXACT_OPC_MEM) {
		hash_index =  rvu_exact_calculate_hash(rvu, entry->chan, entry->ctype,
						       new_mac, table->mem_table.mask,
						       table->mem_table.depth);
		if (hash_index != entry->index) {
			dev_dbg(rvu->dev,
				"%s: Update failed due to index mismatch(new=0x%x, old=%x)\n",
				__func__, hash_index, entry->index);
			mutex_unlock(&table->lock);
			return -EINVAL;
		}
	}

	/* Rewrite the HW entry in place with the new MAC */
	mdata = rvu_exact_prepare_table_entry(rvu, true, entry->ctype, entry->chan, new_mac);

	if (entry->opc_type == NPC_EXACT_OPC_MEM)
		rvu_npc_exact_mem_table_write(rvu, blkaddr, entry->ways, entry->index, mdata);
	else
		rvu_npc_exact_cam_table_write(rvu, blkaddr, entry->index, mdata);

	/* Update entry fields */
	ether_addr_copy(entry->mac, new_mac);
	*seq_id = entry->seq_id;

	dev_dbg(rvu->dev,
		"%s: Successfully updated entry (index=%d, dmac=%pM, ways=%d opc_type=%d\n",
		__func__, entry->index, entry->mac, entry->ways, entry->opc_type);

	dev_dbg(rvu->dev, "%s: Successfully updated entry (old mac=%pM new_mac=%pM\n",
		__func__, old_mac, new_mac);

	mutex_unlock(&table->lock);
	return 0;
}
1427 
1428 /**
1429  *	rvu_npc_exact_promisc_disable - Disable promiscuous mode.
1430  *      @rvu: resource virtualization unit.
1431  *	@pcifunc: pcifunc
1432  *
 *	Drop rules are installed per PF. We don't support DMAC filters
 *	for VFs.
1435  *	Return: 0 upon success
1436  */
1437 
int rvu_npc_exact_promisc_disable(struct rvu *rvu, u16 pcifunc)
{
	struct npc_exact_table *table;
	int pf = rvu_get_pf(pcifunc);
	u8 cgx_id, lmac_id;
	u32 drop_mcam_idx;
	bool *promisc;
	bool rc;
	u32 cnt;

	table = rvu->hw->table;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	rc = rvu_npc_exact_get_drop_rule_info(rvu, NIX_INTF_TYPE_CGX, cgx_id, lmac_id,
					      &drop_mcam_idx, NULL, NULL, NULL);
	if (!rc) {
		dev_dbg(rvu->dev, "%s: failed to get drop rule info cgx=%d lmac=%d\n",
			__func__, cgx_id, lmac_id);
		return -EINVAL;
	}

	mutex_lock(&table->lock);
	promisc = &table->promisc_mode[drop_mcam_idx];

	if (!*promisc) {
		mutex_unlock(&table->lock);
		dev_dbg(rvu->dev, "%s: Err Already promisc mode disabled (cgx=%d lmac=%d)\n",
			__func__, cgx_id, lmac_id);
		return LMAC_AF_ERR_INVALID_PARAM;
	}
	*promisc = false;
	/* val=0: only read the current command-rule count, no change */
	cnt = __rvu_npc_exact_cmd_rules_cnt_update(rvu, drop_mcam_idx, 0, NULL);
	mutex_unlock(&table->lock);

	/* If no dmac filter entries configured, disable drop rule */
	if (!cnt)
		rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX, false);
	else
		/* *promisc was just cleared above, so !*promisc re-enables
		 * the drop rule now that promisc mode is off.
		 */
		rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX, !*promisc);

	dev_dbg(rvu->dev, "%s: disabled  promisc mode (cgx=%d lmac=%d, cnt=%d)\n",
		__func__, cgx_id, lmac_id, cnt);
	return 0;
}
1482 
1483 /**
1484  *	rvu_npc_exact_promisc_enable - Enable promiscuous mode.
1485  *      @rvu: resource virtualization unit.
1486  *	@pcifunc: pcifunc.
1487  *	Return: 0 upon success
1488  */
int rvu_npc_exact_promisc_enable(struct rvu *rvu, u16 pcifunc)
{
	struct npc_exact_table *table;
	int pf = rvu_get_pf(pcifunc);
	u8 cgx_id, lmac_id;
	u32 drop_mcam_idx;
	bool *promisc;
	bool rc;
	u32 cnt;

	table = rvu->hw->table;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	rc = rvu_npc_exact_get_drop_rule_info(rvu, NIX_INTF_TYPE_CGX, cgx_id, lmac_id,
					      &drop_mcam_idx, NULL, NULL, NULL);
	if (!rc) {
		dev_dbg(rvu->dev, "%s: failed to get drop rule info cgx=%d lmac=%d\n",
			__func__, cgx_id, lmac_id);
		return -EINVAL;
	}

	mutex_lock(&table->lock);
	promisc = &table->promisc_mode[drop_mcam_idx];

	if (*promisc) {
		mutex_unlock(&table->lock);
		dev_dbg(rvu->dev, "%s: Already in promisc mode (cgx=%d lmac=%d)\n",
			__func__, cgx_id, lmac_id);
		return LMAC_AF_ERR_INVALID_PARAM;
	}
	*promisc = true;
	/* val=0: only read the current command-rule count, no change */
	cnt = __rvu_npc_exact_cmd_rules_cnt_update(rvu, drop_mcam_idx, 0, NULL);
	mutex_unlock(&table->lock);

	/* If no dmac filter entries configured, disable drop rule */
	if (!cnt)
		rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX, false);
	else
		/* *promisc was just set above, so !*promisc is false: the
		 * drop rule is disabled while promisc mode is on.
		 */
		rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX, !*promisc);

	dev_dbg(rvu->dev, "%s: Enabled promisc mode (cgx=%d lmac=%d cnt=%d)\n",
		__func__, cgx_id, lmac_id, cnt);
	return 0;
}
1533 
1534 /**
1535  *	rvu_npc_exact_mac_addr_reset - Delete PF mac address.
1536  *      @rvu: resource virtualization unit.
1537  *	@req: Reset request
1538  *	@rsp: Reset response.
1539  *	Return: 0 upon success
1540  */
1541 int rvu_npc_exact_mac_addr_reset(struct rvu *rvu, struct cgx_mac_addr_reset_req *req,
1542 				 struct msg_rsp *rsp)
1543 {
1544 	int pf = rvu_get_pf(req->hdr.pcifunc);
1545 	u32 seq_id = req->index;
1546 	struct rvu_pfvf *pfvf;
1547 	u8 cgx_id, lmac_id;
1548 	int rc;
1549 
1550 	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
1551 
1552 	pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
1553 
1554 	rc = rvu_npc_exact_del_table_entry_by_id(rvu, seq_id);
1555 	if (rc) {
1556 		/* TODO: how to handle this error case ? */
1557 		dev_err(rvu->dev, "%s MAC (%pM) del PF=%d failed\n", __func__, pfvf->mac_addr, pf);
1558 		return 0;
1559 	}
1560 
1561 	dev_dbg(rvu->dev, "%s MAC (%pM) del PF=%d success (seq_id=%u)\n",
1562 		__func__, pfvf->mac_addr, pf, seq_id);
1563 	return 0;
1564 }
1565 
1566 /**
1567  *	rvu_npc_exact_mac_addr_update - Update mac address field with new value.
1568  *      @rvu: resource virtualization unit.
1569  *	@req: Update request.
1570  *	@rsp: Update response.
1571  *	Return: 0 upon success
1572  */
int rvu_npc_exact_mac_addr_update(struct rvu *rvu,
				  struct cgx_mac_addr_update_req *req,
				  struct cgx_mac_addr_update_rsp *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	struct npc_exact_table_entry *entry;
	struct npc_exact_table *table;
	struct rvu_pfvf *pfvf;
	u32 seq_id, mcam_idx;
	u8 old_mac[ETH_ALEN];
	u8 cgx_id, lmac_id;
	int rc;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return LMAC_AF_ERR_PERM_DENIED;

	dev_dbg(rvu->dev, "%s: Update request for seq_id=%d, mac=%pM\n",
		__func__, req->index, req->mac_addr);

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);

	table = rvu->hw->table;

	mutex_lock(&table->lock);

	/* Lookup for entry which needs to be updated */
	entry = __rvu_npc_exact_find_entry_by_seq_id(rvu, req->index);
	if (!entry) {
		dev_err(rvu->dev, "%s: failed to find entry for id=0x%x\n", __func__, req->index);
		mutex_unlock(&table->lock);
		return LMAC_AF_ERR_EXACT_MATCH_TBL_LOOK_UP_FAILED;
	}
	/* Snapshot the fields we need under the lock; the helpers called
	 * below take the lock themselves.
	 */
	ether_addr_copy(old_mac, entry->mac);
	seq_id = entry->seq_id;
	mcam_idx = entry->mcam_idx;
	mutex_unlock(&table->lock);

	/* Fast path: rewrite the entry in place. Fails if the new MAC would
	 * hash to a different MEM-table index.
	 */
	rc = rvu_npc_exact_update_table_entry(rvu, cgx_id, lmac_id,  old_mac,
					      req->mac_addr, &seq_id);
	if (!rc) {
		rsp->index = seq_id;
		dev_dbg(rvu->dev, "%s  mac:%pM (pfvf:%pM default:%pM) update to PF=%d success\n",
			__func__, req->mac_addr, pfvf->mac_addr, pfvf->default_mac, pf);
		ether_addr_copy(pfvf->mac_addr, req->mac_addr);
		return 0;
	}

	/* Try deleting and adding it again */
	rc = rvu_npc_exact_del_table_entry_by_id(rvu, req->index);
	if (rc) {
		/* This could be a new entry */
		dev_dbg(rvu->dev, "%s MAC (%pM) del PF=%d failed\n", __func__,
			pfvf->mac_addr, pf);
	}

	rc = rvu_npc_exact_add_table_entry(rvu, cgx_id, lmac_id, req->mac_addr,
					   pfvf->rx_chan_base, 0, &seq_id, true,
					   mcam_idx, req->hdr.pcifunc);
	if (rc) {
		dev_err(rvu->dev, "%s MAC (%pM) add PF=%d failed\n", __func__,
			req->mac_addr, pf);
		return LMAC_AF_ERR_EXACT_MATCH_TBL_ADD_FAILED;
	}

	rsp->index = seq_id;
	dev_dbg(rvu->dev,
		"%s MAC (new:%pM, old=%pM default:%pM) del and add to PF=%d success (seq_id=%u)\n",
		__func__, req->mac_addr, pfvf->mac_addr, pfvf->default_mac, pf, seq_id);

	ether_addr_copy(pfvf->mac_addr, req->mac_addr);
	return 0;
}
1647 
1648 /**
1649  *	rvu_npc_exact_mac_addr_add - Adds MAC address to exact match table.
1650  *      @rvu: resource virtualization unit.
1651  *	@req: Add request.
1652  *	@rsp: Add response.
1653  *	Return: 0 upon success
1654  */
1655 int rvu_npc_exact_mac_addr_add(struct rvu *rvu,
1656 			       struct cgx_mac_addr_add_req *req,
1657 			       struct cgx_mac_addr_add_rsp *rsp)
1658 {
1659 	int pf = rvu_get_pf(req->hdr.pcifunc);
1660 	struct rvu_pfvf *pfvf;
1661 	u8 cgx_id, lmac_id;
1662 	int rc = 0;
1663 	u32 seq_id;
1664 
1665 	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
1666 	pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
1667 
1668 	rc = rvu_npc_exact_add_table_entry(rvu, cgx_id, lmac_id, req->mac_addr,
1669 					   pfvf->rx_chan_base, 0, &seq_id,
1670 					   true, -1, req->hdr.pcifunc);
1671 
1672 	if (!rc) {
1673 		rsp->index = seq_id;
1674 		dev_dbg(rvu->dev, "%s MAC (%pM) add to PF=%d success (seq_id=%u)\n",
1675 			__func__, req->mac_addr, pf, seq_id);
1676 		return 0;
1677 	}
1678 
1679 	dev_err(rvu->dev, "%s MAC (%pM) add to PF=%d failed\n", __func__,
1680 		req->mac_addr, pf);
1681 	return LMAC_AF_ERR_EXACT_MATCH_TBL_ADD_FAILED;
1682 }
1683 
1684 /**
1685  *	rvu_npc_exact_mac_addr_del - Delete DMAC filter
1686  *      @rvu: resource virtualization unit.
1687  *	@req: Delete request.
1688  *	@rsp: Delete response.
1689  *	Return: 0 upon success
1690  */
1691 int rvu_npc_exact_mac_addr_del(struct rvu *rvu,
1692 			       struct cgx_mac_addr_del_req *req,
1693 			       struct msg_rsp *rsp)
1694 {
1695 	int pf = rvu_get_pf(req->hdr.pcifunc);
1696 	int rc;
1697 
1698 	rc = rvu_npc_exact_del_table_entry_by_id(rvu, req->index);
1699 	if (!rc) {
1700 		dev_dbg(rvu->dev, "%s del to PF=%d success (seq_id=%u)\n",
1701 			__func__, pf, req->index);
1702 		return 0;
1703 	}
1704 
1705 	dev_err(rvu->dev, "%s del to PF=%d failed (seq_id=%u)\n",
1706 		__func__,  pf, req->index);
1707 	return LMAC_AF_ERR_EXACT_MATCH_TBL_DEL_FAILED;
1708 }
1709 
1710 /**
1711  *	rvu_npc_exact_mac_addr_set - Add PF mac address to dmac filter.
1712  *      @rvu: resource virtualization unit.
1713  *	@req: Set request.
1714  *	@rsp: Set response.
1715  *	Return: 0 upon success
1716  */
int rvu_npc_exact_mac_addr_set(struct rvu *rvu, struct cgx_mac_addr_set_or_get *req,
			       struct cgx_mac_addr_set_or_get *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u32 seq_id = req->index;
	struct rvu_pfvf *pfvf;
	u8 cgx_id, lmac_id;
	u32 mcam_idx = -1;
	int rc, nixlf;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	/* NOTE(review): indexes rvu->pf[] directly instead of rvu_get_pfvf()
	 * used elsewhere — presumably because this request is always for the
	 * PF itself; confirm.
	 */
	pfvf = &rvu->pf[pf];

	/* If table does not have an entry; both update entry and del table entry API
	 * below fails. Those are not failure conditions.
	 */
	rc = rvu_npc_exact_update_table_entry(rvu, cgx_id, lmac_id, pfvf->mac_addr,
					      req->mac_addr, &seq_id);
	if (!rc) {
		rsp->index = seq_id;
		ether_addr_copy(pfvf->mac_addr, req->mac_addr);
		ether_addr_copy(rsp->mac_addr, req->mac_addr);
		dev_dbg(rvu->dev, "%s MAC (%pM) update to PF=%d success\n",
			__func__, req->mac_addr, pf);
		return 0;
	}

	/* Try deleting and adding it again */
	rc = rvu_npc_exact_del_table_entry_by_id(rvu, req->index);
	if (rc) {
		dev_dbg(rvu->dev, "%s MAC (%pM) del PF=%d failed\n",
			__func__, pfvf->mac_addr, pf);
	}

	/* find mcam entry if exist */
	rc = nix_get_nixlf(rvu, req->hdr.pcifunc, &nixlf, NULL);
	if (!rc) {
		mcam_idx = npc_get_nixlf_mcam_index(&rvu->hw->mcam, req->hdr.pcifunc,
						    nixlf, NIXLF_UCAST_ENTRY);
	}

	rc = rvu_npc_exact_add_table_entry(rvu, cgx_id, lmac_id, req->mac_addr,
					   pfvf->rx_chan_base, 0, &seq_id,
					   true, mcam_idx, req->hdr.pcifunc);
	if (rc) {
		dev_err(rvu->dev, "%s MAC (%pM) add PF=%d failed\n",
			__func__, req->mac_addr, pf);
		return LMAC_AF_ERR_EXACT_MATCH_TBL_ADD_FAILED;
	}

	rsp->index = seq_id;
	ether_addr_copy(rsp->mac_addr, req->mac_addr);
	ether_addr_copy(pfvf->mac_addr, req->mac_addr);
	dev_dbg(rvu->dev,
		"%s MAC (%pM) del and add to PF=%d success (seq_id=%u)\n",
		__func__, req->mac_addr, pf, seq_id);
	return 0;
}
1776 
1777 /**
1778  *	rvu_npc_exact_can_disable_feature - Check if feature can be disabled.
1779  *      @rvu: resource virtualization unit.
1780  *	Return: True if exact match feature is supported.
1781  */
1782 bool rvu_npc_exact_can_disable_feature(struct rvu *rvu)
1783 {
1784 	struct npc_exact_table *table = rvu->hw->table;
1785 	bool empty;
1786 
1787 	if (!rvu->hw->cap.npc_exact_match_enabled)
1788 		return false;
1789 
1790 	mutex_lock(&table->lock);
1791 	empty = list_empty(&table->lhead_gbl);
1792 	mutex_unlock(&table->lock);
1793 
1794 	return empty;
1795 }
1796 
1797 /**
1798  *	rvu_npc_exact_disable_feature - Disable feature.
1799  *      @rvu: resource virtualization unit.
1800  */
void rvu_npc_exact_disable_feature(struct rvu *rvu)
{
	/* Clear the capability flag; exact-match paths check it and bail out */
	rvu->hw->cap.npc_exact_match_enabled = false;
}
1805 
1806 /**
1807  *	rvu_npc_exact_reset - Delete and free all entry which match pcifunc.
1808  *      @rvu: resource virtualization unit.
1809  *	@pcifunc: PCI func to match.
1810  */
void rvu_npc_exact_reset(struct rvu *rvu, u16 pcifunc)
{
	struct npc_exact_table *table = rvu->hw->table;
	struct npc_exact_table_entry *tmp, *iter;
	u32 seq_id;

	mutex_lock(&table->lock);
	list_for_each_entry_safe(iter, tmp, &table->lhead_gbl, glist) {
		if (pcifunc != iter->pcifunc)
			continue;

		seq_id = iter->seq_id;
		dev_dbg(rvu->dev, "%s: resetting pcifun=%d seq_id=%u\n", __func__,
			pcifunc, seq_id);

		/* The delete helper takes the table lock itself, so drop it.
		 * NOTE(review): releasing the lock mid-iteration means 'tmp'
		 * could in principle be freed by a concurrent deletion before
		 * the loop advances — confirm callers serialize access.
		 */
		mutex_unlock(&table->lock);
		rvu_npc_exact_del_table_entry_by_id(rvu, seq_id);
		mutex_lock(&table->lock);
	}
	mutex_unlock(&table->lock);
}
1832 
1833 /**
1834  *      rvu_npc_exact_init - initialize exact match table
1835  *      @rvu: resource virtualization unit.
1836  *
1837  *	Initialize HW and SW resources to manage 4way-2K table and fully
1838  *	associative 32-entry mcam table.
1839  *	Return: 0 upon success.
1840  */
1841 int rvu_npc_exact_init(struct rvu *rvu)
1842 {
1843 	u64 bcast_mcast_val, bcast_mcast_mask;
1844 	struct npc_exact_table *table;
1845 	u64 exact_val, exact_mask;
1846 	u64 chan_val, chan_mask;
1847 	u8 cgx_id, lmac_id;
1848 	u32 *drop_mcam_idx;
1849 	u16 max_lmac_cnt;
1850 	u64 npc_const3;
1851 	int table_size;
1852 	int blkaddr;
1853 	u16 pcifunc;
1854 	int err, i;
1855 	u64 cfg;
1856 	bool rc;
1857 
1858 	/* Read NPC_AF_CONST3 and check for have exact
1859 	 * match functionality is present
1860 	 */
1861 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
1862 	if (blkaddr < 0) {
1863 		dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__);
1864 		return -EINVAL;
1865 	}
1866 
1867 	/* Check exact match feature is supported */
1868 	npc_const3 = rvu_read64(rvu, blkaddr, NPC_AF_CONST3);
1869 	if (!(npc_const3 & BIT_ULL(62)))
1870 		return 0;
1871 
1872 	/* Check if kex profile has enabled EXACT match nibble */
1873 	cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_RX));
1874 	if (!(cfg & NPC_EXACT_NIBBLE_HIT))
1875 		return 0;
1876 
1877 	/* Set capability to true */
1878 	rvu->hw->cap.npc_exact_match_enabled = true;
1879 
1880 	table = kzalloc(sizeof(*table), GFP_KERNEL);
1881 	if (!table)
1882 		return -ENOMEM;
1883 
1884 	dev_dbg(rvu->dev, "%s: Memory allocation for table success\n", __func__);
1885 	rvu->hw->table = table;
1886 
1887 	/* Read table size, ways and depth */
1888 	table->mem_table.ways = FIELD_GET(GENMASK_ULL(19, 16), npc_const3);
1889 	table->mem_table.depth = FIELD_GET(GENMASK_ULL(15, 0), npc_const3);
1890 	table->cam_table.depth = FIELD_GET(GENMASK_ULL(31, 24), npc_const3);
1891 
1892 	dev_dbg(rvu->dev, "%s: NPC exact match 4way_2k table(ways=%d, depth=%d)\n",
1893 		__func__,  table->mem_table.ways, table->cam_table.depth);
1894 
1895 	/* Check if depth of table is not a sequre of 2
1896 	 * TODO: why _builtin_popcount() is not working ?
1897 	 */
1898 	if ((table->mem_table.depth & (table->mem_table.depth - 1)) != 0) {
1899 		dev_err(rvu->dev,
1900 			"%s: NPC exact match 4way_2k table depth(%d) is not square of 2\n",
1901 			__func__,  table->mem_table.depth);
1902 		return -EINVAL;
1903 	}
1904 
1905 	table_size = table->mem_table.depth * table->mem_table.ways;
1906 
1907 	/* Allocate bitmap for 4way 2K table */
1908 	table->mem_table.bmap = devm_bitmap_zalloc(rvu->dev, table_size,
1909 						   GFP_KERNEL);
1910 	if (!table->mem_table.bmap)
1911 		return -ENOMEM;
1912 
1913 	dev_dbg(rvu->dev, "%s: Allocated bitmap for 4way 2K entry table\n", __func__);
1914 
1915 	/* Allocate bitmap for 32 entry mcam */
1916 	table->cam_table.bmap = devm_bitmap_zalloc(rvu->dev, 32, GFP_KERNEL);
1917 
1918 	if (!table->cam_table.bmap)
1919 		return -ENOMEM;
1920 
1921 	dev_dbg(rvu->dev, "%s: Allocated bitmap for 32 entry cam\n", __func__);
1922 
1923 	table->tot_ids = table_size + table->cam_table.depth;
1924 	table->id_bmap = devm_bitmap_zalloc(rvu->dev, table->tot_ids,
1925 					    GFP_KERNEL);
1926 
1927 	if (!table->id_bmap)
1928 		return -ENOMEM;
1929 
1930 	dev_dbg(rvu->dev, "%s: Allocated bitmap for id map (total=%d)\n",
1931 		__func__, table->tot_ids);
1932 
1933 	/* Initialize list heads for npc_exact_table entries.
1934 	 * This entry is used by debugfs to show entries in
1935 	 * exact match table.
1936 	 */
1937 	for (i = 0; i < NPC_EXACT_TBL_MAX_WAYS; i++)
1938 		INIT_LIST_HEAD(&table->lhead_mem_tbl_entry[i]);
1939 
1940 	INIT_LIST_HEAD(&table->lhead_cam_tbl_entry);
1941 	INIT_LIST_HEAD(&table->lhead_gbl);
1942 
1943 	mutex_init(&table->lock);
1944 
1945 	rvu_exact_config_secret_key(rvu);
1946 	rvu_exact_config_search_key(rvu);
1947 
1948 	rvu_exact_config_table_mask(rvu);
1949 	rvu_exact_config_result_ctrl(rvu, table->mem_table.depth);
1950 
1951 	/* - No drop rule for LBK
1952 	 * - Drop rules for SDP and each LMAC.
1953 	 */
1954 	exact_val = !NPC_EXACT_RESULT_HIT;
1955 	exact_mask = NPC_EXACT_RESULT_HIT;
1956 
1957 	/* nibble - 3	2  1   0
1958 	 *	   L3B L3M L2B L2M
1959 	 */
1960 	bcast_mcast_val = 0b0000;
1961 	bcast_mcast_mask = 0b0011;
1962 
1963 	/* Install SDP drop rule */
1964 	drop_mcam_idx = &table->num_drop_rules;
1965 
1966 	max_lmac_cnt = rvu->cgx_cnt_max * rvu->hw->lmac_per_cgx +
1967 		       PF_CGXMAP_BASE;
1968 
1969 	for (i = PF_CGXMAP_BASE; i < max_lmac_cnt; i++) {
1970 		if (rvu->pf2cgxlmac_map[i] == 0xFF)
1971 			continue;
1972 
1973 		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[i], &cgx_id, &lmac_id);
1974 
1975 		rc = rvu_npc_exact_calc_drop_rule_chan_and_mask(rvu, NIX_INTF_TYPE_CGX, cgx_id,
1976 								lmac_id, &chan_val, &chan_mask);
1977 		if (!rc) {
1978 			dev_err(rvu->dev,
1979 				"%s: failed, info chan_val=0x%llx chan_mask=0x%llx rule_id=%d\n",
1980 				__func__, chan_val, chan_mask, *drop_mcam_idx);
1981 			return -EINVAL;
1982 		}
1983 
1984 		/* Filter rules are only for PF */
1985 		pcifunc = RVU_PFFUNC(i, 0);
1986 
1987 		dev_dbg(rvu->dev,
1988 			"%s:Drop rule cgx=%d lmac=%d chan(val=0x%llx, mask=0x%llx\n",
1989 			__func__, cgx_id, lmac_id, chan_val, chan_mask);
1990 
1991 		rc = rvu_npc_exact_save_drop_rule_chan_and_mask(rvu, table->num_drop_rules,
1992 								chan_val, chan_mask, pcifunc);
1993 		if (!rc) {
1994 			dev_err(rvu->dev,
1995 				"%s: failed to set drop info for cgx=%d, lmac=%d, chan=%llx\n",
1996 				__func__, cgx_id, lmac_id, chan_val);
1997 			return -EINVAL;
1998 		}
1999 
2000 		err = npc_install_mcam_drop_rule(rvu, *drop_mcam_idx,
2001 						 &table->counter_idx[*drop_mcam_idx],
2002 						 chan_val, chan_mask,
2003 						 exact_val, exact_mask,
2004 						 bcast_mcast_val, bcast_mcast_mask);
2005 		if (err) {
2006 			dev_err(rvu->dev,
2007 				"failed to configure drop rule (cgx=%d lmac=%d)\n",
2008 				cgx_id, lmac_id);
2009 			return err;
2010 		}
2011 
2012 		(*drop_mcam_idx)++;
2013 	}
2014 
2015 	dev_info(rvu->dev, "initialized exact match table successfully\n");
2016 	return 0;
2017 }
2018