// SPDX-License-Identifier: GPL-2.0
/* Marvell CN10K MCS driver
 *
 * Copyright (C) 2022 Marvell.
 */

#include <linux/types.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "mcs.h"
#include "rvu.h"
#include "lmac_common.h"

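/* The M() macro below expands MBOX_UP_MCS_MESSAGES into one allocation
 * helper per AF-to-PF "mbox up" MCS message. As a rough sketch of what a
 * single expansion looks like (illustrative only, reconstructed from the
 * macro body), the mcs_intr_notify entry becomes roughly:
 *
 *	static struct mcs_intr_info *
 *	otx2_mbox_alloc_msg_mcs_intr_notify(struct rvu *rvu, int devid)
 *	{
 *		// allocate req/rsp space on rvu->afpf_wq_info.mbox_up,
 *		// set hdr.sig to OTX2_MBOX_REQ_SIG and hdr.id to the
 *		// message id passed as _id, then return the request
 *	}
 */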
#define M(_name, _id, _fn_name, _req_type, _rsp_type)			\
static struct _req_type __maybe_unused					\
*otx2_mbox_alloc_msg_ ## _fn_name(struct rvu *rvu, int devid)		\
{									\
	struct _req_type *req;						\
									\
	req = (struct _req_type *)otx2_mbox_alloc_msg_rsp(		\
		&rvu->afpf_wq_info.mbox_up, devid, sizeof(struct _req_type), \
		sizeof(struct _rsp_type));				\
	if (!req)							\
		return NULL;						\
	req->hdr.sig = OTX2_MBOX_REQ_SIG;				\
	req->hdr.id = _id;						\
	return req;							\
}

MBOX_UP_MCS_MESSAGES
#undef M

int rvu_mbox_handler_mcs_set_lmac_mode(struct rvu *rvu,
				       struct mcs_set_lmac_mode *req,
				       struct msg_rsp *rsp)
{
	struct mcs *mcs;

	if (req->mcs_id >= rvu->mcs_blk_cnt)
		return MCS_AF_ERR_INVALID_MCSID;

	mcs = mcs_get_pdata(req->mcs_id);

	if (BIT_ULL(req->lmac_id) & mcs->hw->lmac_bmap)
		mcs_set_lmac_mode(mcs, req->lmac_id, req->mode);

	return 0;
}

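/* MCS interrupt events are not forwarded to a PF/VF directly from the
 * interrupt path. mcs_add_intr_wq_entry() (allocating with GFP_ATOMIC, so
 * it is safe in atomic context) filters the event against the mask the
 * PF/VF registered via the MCS_INTR_CFG mailbox message, queues it on
 * rvu->mcs_intrq_head and schedules mcs_intr_wq; the work handler then
 * delivers the event over the "mbox up" channel.
 */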
int mcs_add_intr_wq_entry(struct mcs *mcs, struct mcs_intr_event *event)
{
	struct mcs_intrq_entry *qentry;
	u16 pcifunc = event->pcifunc;
	struct rvu *rvu = mcs->rvu;
	struct mcs_pfvf *pfvf;

	/* Check if it is PF or VF */
	if (pcifunc & RVU_PFVF_FUNC_MASK)
		pfvf = &mcs->vf[rvu_get_hwvf(rvu, pcifunc)];
	else
		pfvf = &mcs->pf[rvu_get_pf(pcifunc)];

	event->intr_mask &= pfvf->intr_mask;

	/* Check if PF/VF interrupt notification is enabled */
	if (!(pfvf->intr_mask && event->intr_mask))
		return 0;

	qentry = kmalloc(sizeof(*qentry), GFP_ATOMIC);
	if (!qentry)
		return -ENOMEM;

	qentry->intr_event = *event;
	spin_lock(&rvu->mcs_intrq_lock);
	list_add_tail(&qentry->node, &rvu->mcs_intrq_head);
	spin_unlock(&rvu->mcs_intrq_lock);
	queue_work(rvu->mcs_intr_wq, &rvu->mcs_intr_work);

	return 0;
}

static int mcs_notify_pfvf(struct mcs_intr_event *event, struct rvu *rvu)
{
	struct mcs_intr_info *req;
	int err, pf;

	pf = rvu_get_pf(event->pcifunc);

	req = otx2_mbox_alloc_msg_mcs_intr_notify(rvu, pf);
	if (!req)
		return -ENOMEM;

	req->mcs_id = event->mcs_id;
	req->intr_mask = event->intr_mask;
	req->sa_id = event->sa_id;
	req->hdr.pcifunc = event->pcifunc;
	req->lmac_id = event->lmac_id;

	otx2_mbox_msg_send(&rvu->afpf_wq_info.mbox_up, pf);
	err = otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, pf);
	if (err)
		dev_warn(rvu->dev, "MCS notification to pf %d failed\n", pf);

	return 0;
}

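/* Workqueue callback: drain the interrupt event queue and notify the
 * owning PF for each entry. Entries are unlinked under mcs_intrq_lock,
 * but the mailbox notification itself is sent outside the spinlock since
 * waiting for the mbox response may block.
 */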
static void mcs_intr_handler_task(struct work_struct *work)
{
	struct rvu *rvu = container_of(work, struct rvu, mcs_intr_work);
	struct mcs_intrq_entry *qentry;
	struct mcs_intr_event *event;
	unsigned long flags;

	do {
		spin_lock_irqsave(&rvu->mcs_intrq_lock, flags);
		qentry = list_first_entry_or_null(&rvu->mcs_intrq_head,
						  struct mcs_intrq_entry,
						  node);
		if (qentry)
			list_del(&qentry->node);

		spin_unlock_irqrestore(&rvu->mcs_intrq_lock, flags);
		if (!qentry)
			break; /* nothing more to process */

		event = &qentry->intr_event;

		mcs_notify_pfvf(event, rvu);
		kfree(qentry);
	} while (1);
}

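/* MCS_INTR_CFG handler: a PF/VF registers the set of MCS interrupt
 * events it wants to be notified about. Only events present in this
 * intr_mask are forwarded by mcs_add_intr_wq_entry() above.
 */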
int rvu_mbox_handler_mcs_intr_cfg(struct rvu *rvu,
				  struct mcs_intr_cfg *req,
				  struct msg_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	struct mcs_pfvf *pfvf;
	struct mcs *mcs;

	if (req->mcs_id >= rvu->mcs_blk_cnt)
		return MCS_AF_ERR_INVALID_MCSID;

	mcs = mcs_get_pdata(req->mcs_id);

	/* Check if it is PF or VF */
	if (pcifunc & RVU_PFVF_FUNC_MASK)
		pfvf = &mcs->vf[rvu_get_hwvf(rvu, pcifunc)];
	else
		pfvf = &mcs->pf[rvu_get_pf(pcifunc)];

	mcs->pf_map[0] = pcifunc;
	pfvf->intr_mask = req->intr_mask;

	return 0;
}

int rvu_mbox_handler_mcs_get_hw_info(struct rvu *rvu,
				     struct msg_req *req,
				     struct mcs_hw_info *rsp)
{
	struct mcs *mcs;

	if (!rvu->mcs_blk_cnt)
		return MCS_AF_ERR_NOT_MAPPED;

	/* MCS resources are the same across all blocks */
	mcs = mcs_get_pdata(0);
	rsp->num_mcs_blks = rvu->mcs_blk_cnt;
	rsp->tcam_entries = mcs->hw->tcam_entries;
	rsp->secy_entries = mcs->hw->secy_entries;
	rsp->sc_entries = mcs->hw->sc_entries;
	rsp->sa_entries = mcs->hw->sa_entries;
	return 0;
}

int rvu_mbox_handler_mcs_port_reset(struct rvu *rvu, struct mcs_port_reset_req *req,
				    struct msg_rsp *rsp)
{
	struct mcs *mcs;

	if (req->mcs_id >= rvu->mcs_blk_cnt)
		return MCS_AF_ERR_INVALID_MCSID;

	mcs = mcs_get_pdata(req->mcs_id);

	mcs_reset_port(mcs, req->port_id, req->reset);

	return 0;
}

int rvu_mbox_handler_mcs_clear_stats(struct rvu *rvu,
				     struct mcs_clear_stats *req,
				     struct msg_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	struct mcs *mcs;

	if (req->mcs_id >= rvu->mcs_blk_cnt)
		return MCS_AF_ERR_INVALID_MCSID;

	mcs = mcs_get_pdata(req->mcs_id);

	mutex_lock(&mcs->stats_lock);
	if (req->all)
		mcs_clear_all_stats(mcs, pcifunc, req->dir);
	else
		mcs_clear_stats(mcs, req->type, req->id, req->dir);

	mutex_unlock(&mcs->stats_lock);
	return 0;
}

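/* The statistics handlers below share one pattern: validate mcs_id, on
 * multi-block (CNF10K-B) silicon force the MCS clock on so the counters
 * read back accurately, take stats_lock around the reads, then drop the
 * clock force again.
 */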
int rvu_mbox_handler_mcs_get_flowid_stats(struct rvu *rvu,
					  struct mcs_stats_req *req,
					  struct mcs_flowid_stats *rsp)
{
	struct mcs *mcs;

	if (req->mcs_id >= rvu->mcs_blk_cnt)
		return MCS_AF_ERR_INVALID_MCSID;

	mcs = mcs_get_pdata(req->mcs_id);

	/* In CNF10K-B, before reading the statistics,
	 * MCSX_MIL_GLOBAL.FORCE_CLK_EN_IP needs to be set
	 * to get accurate statistics
	 */
	if (mcs->hw->mcs_blks > 1)
		mcs_set_force_clk_en(mcs, true);

	mutex_lock(&mcs->stats_lock);
	mcs_get_flowid_stats(mcs, rsp, req->id, req->dir);
	mutex_unlock(&mcs->stats_lock);

	/* Clear MCSX_MIL_GLOBAL.FORCE_CLK_EN_IP after reading
	 * the statistics
	 */
	if (mcs->hw->mcs_blks > 1)
		mcs_set_force_clk_en(mcs, false);

	return 0;
}

int rvu_mbox_handler_mcs_get_secy_stats(struct rvu *rvu,
					struct mcs_stats_req *req,
					struct mcs_secy_stats *rsp)
{
	struct mcs *mcs;

	if (req->mcs_id >= rvu->mcs_blk_cnt)
		return MCS_AF_ERR_INVALID_MCSID;

	mcs = mcs_get_pdata(req->mcs_id);

	if (mcs->hw->mcs_blks > 1)
		mcs_set_force_clk_en(mcs, true);

	mutex_lock(&mcs->stats_lock);

	if (req->dir == MCS_RX)
		mcs_get_rx_secy_stats(mcs, rsp, req->id);
	else
		mcs_get_tx_secy_stats(mcs, rsp, req->id);

	mutex_unlock(&mcs->stats_lock);

	if (mcs->hw->mcs_blks > 1)
		mcs_set_force_clk_en(mcs, false);

	return 0;
}

int rvu_mbox_handler_mcs_get_sc_stats(struct rvu *rvu,
				      struct mcs_stats_req *req,
				      struct mcs_sc_stats *rsp)
{
	struct mcs *mcs;

	if (req->mcs_id >= rvu->mcs_blk_cnt)
		return MCS_AF_ERR_INVALID_MCSID;

	mcs = mcs_get_pdata(req->mcs_id);

	if (mcs->hw->mcs_blks > 1)
		mcs_set_force_clk_en(mcs, true);

	mutex_lock(&mcs->stats_lock);
	mcs_get_sc_stats(mcs, rsp, req->id, req->dir);
	mutex_unlock(&mcs->stats_lock);

	if (mcs->hw->mcs_blks > 1)
		mcs_set_force_clk_en(mcs, false);

	return 0;
}

int rvu_mbox_handler_mcs_get_sa_stats(struct rvu *rvu,
				      struct mcs_stats_req *req,
				      struct mcs_sa_stats *rsp)
{
	struct mcs *mcs;

	if (req->mcs_id >= rvu->mcs_blk_cnt)
		return MCS_AF_ERR_INVALID_MCSID;

	mcs = mcs_get_pdata(req->mcs_id);

	if (mcs->hw->mcs_blks > 1)
		mcs_set_force_clk_en(mcs, true);

	mutex_lock(&mcs->stats_lock);
	mcs_get_sa_stats(mcs, rsp, req->id, req->dir);
	mutex_unlock(&mcs->stats_lock);

	if (mcs->hw->mcs_blks > 1)
		mcs_set_force_clk_en(mcs, false);

	return 0;
}

int rvu_mbox_handler_mcs_get_port_stats(struct rvu *rvu,
					struct mcs_stats_req *req,
					struct mcs_port_stats *rsp)
{
	struct mcs *mcs;

	if (req->mcs_id >= rvu->mcs_blk_cnt)
		return MCS_AF_ERR_INVALID_MCSID;

	mcs = mcs_get_pdata(req->mcs_id);

	if (mcs->hw->mcs_blks > 1)
		mcs_set_force_clk_en(mcs, true);

	mutex_lock(&mcs->stats_lock);
	mcs_get_port_stats(mcs, rsp, req->id, req->dir);
	mutex_unlock(&mcs->stats_lock);

	if (mcs->hw->mcs_blks > 1)
		mcs_set_force_clk_en(mcs, false);

	return 0;
}

int rvu_mbox_handler_mcs_set_active_lmac(struct rvu *rvu,
					 struct mcs_set_active_lmac *req,
					 struct msg_rsp *rsp)
{
	struct mcs *mcs;

	if (req->mcs_id >= rvu->mcs_blk_cnt)
		return MCS_AF_ERR_INVALID_MCSID;

	mcs = mcs_get_pdata(req->mcs_id);
	if (!mcs)
		return MCS_AF_ERR_NOT_MAPPED;

	mcs->hw->lmac_bmap = req->lmac_bmap;
	mcs_set_lmac_channels(req->mcs_id, req->chan_base);
	return 0;
}

int rvu_mbox_handler_mcs_port_cfg_set(struct rvu *rvu, struct mcs_port_cfg_set_req *req,
				      struct msg_rsp *rsp)
{
	struct mcs *mcs;

	if (req->mcs_id >= rvu->mcs_blk_cnt)
		return MCS_AF_ERR_INVALID_MCSID;

	mcs = mcs_get_pdata(req->mcs_id);

	if (mcs->hw->lmac_cnt <= req->port_id || !(mcs->hw->lmac_bmap & BIT_ULL(req->port_id)))
		return -EINVAL;

	mcs_set_port_cfg(mcs, req);

	return 0;
}

int rvu_mbox_handler_mcs_port_cfg_get(struct rvu *rvu, struct mcs_port_cfg_get_req *req,
				      struct mcs_port_cfg_get_rsp *rsp)
{
	struct mcs *mcs;

	if (req->mcs_id >= rvu->mcs_blk_cnt)
		return MCS_AF_ERR_INVALID_MCSID;

	mcs = mcs_get_pdata(req->mcs_id);

	if (mcs->hw->lmac_cnt <= req->port_id || !(mcs->hw->lmac_bmap & BIT_ULL(req->port_id)))
		return -EINVAL;

	mcs_get_port_cfg(mcs, req, rsp);

	return 0;
}

int rvu_mbox_handler_mcs_custom_tag_cfg_get(struct rvu *rvu, struct mcs_custom_tag_cfg_get_req *req,
					    struct mcs_custom_tag_cfg_get_rsp *rsp)
{
	struct mcs *mcs;

	if (req->mcs_id >= rvu->mcs_blk_cnt)
		return MCS_AF_ERR_INVALID_MCSID;

	mcs = mcs_get_pdata(req->mcs_id);

	mcs_get_custom_tag_cfg(mcs, req, rsp);

	return 0;
}

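/* FLR (function level reset) hook: release every RX and TX resource the
 * PF/VF owns. CNF10K-B has multiple MCS blocks (each tied to an RPM), so
 * all of them are walked; CN10K-B has a single block.
 */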
int rvu_mcs_flr_handler(struct rvu *rvu, u16 pcifunc)
{
	struct mcs *mcs;
	int mcs_id;

	/* CNF10K-B mcs0-6 are mapped to RPM2-8 */
	if (rvu->mcs_blk_cnt > 1) {
		for (mcs_id = 0; mcs_id < rvu->mcs_blk_cnt; mcs_id++) {
			mcs = mcs_get_pdata(mcs_id);
			mcs_free_all_rsrc(mcs, MCS_RX, pcifunc);
			mcs_free_all_rsrc(mcs, MCS_TX, pcifunc);
		}
	} else {
		/* CN10K-B has only one mcs block */
		mcs = mcs_get_pdata(0);
		mcs_free_all_rsrc(mcs, MCS_RX, pcifunc);
		mcs_free_all_rsrc(mcs, MCS_TX, pcifunc);
	}
	return 0;
}

int rvu_mbox_handler_mcs_flowid_ena_entry(struct rvu *rvu,
					  struct mcs_flowid_ena_dis_entry *req,
					  struct msg_rsp *rsp)
{
	struct mcs *mcs;

	if (req->mcs_id >= rvu->mcs_blk_cnt)
		return MCS_AF_ERR_INVALID_MCSID;

	mcs = mcs_get_pdata(req->mcs_id);
	mcs_ena_dis_flowid_entry(mcs, req->flow_id, req->dir, req->ena);
	return 0;
}

int rvu_mbox_handler_mcs_pn_table_write(struct rvu *rvu,
					struct mcs_pn_table_write_req *req,
					struct msg_rsp *rsp)
{
	struct mcs *mcs;

	if (req->mcs_id >= rvu->mcs_blk_cnt)
		return MCS_AF_ERR_INVALID_MCSID;

	mcs = mcs_get_pdata(req->mcs_id);
	mcs_pn_table_write(mcs, req->pn_id, req->next_pn, req->dir);
	return 0;
}

int rvu_mbox_handler_mcs_set_pn_threshold(struct rvu *rvu,
					  struct mcs_set_pn_threshold *req,
					  struct msg_rsp *rsp)
{
	struct mcs *mcs;

	if (req->mcs_id >= rvu->mcs_blk_cnt)
		return MCS_AF_ERR_INVALID_MCSID;

	mcs = mcs_get_pdata(req->mcs_id);

	mcs_pn_threshold_set(mcs, req);

	return 0;
}

int rvu_mbox_handler_mcs_rx_sc_sa_map_write(struct rvu *rvu,
					    struct mcs_rx_sc_sa_map *req,
					    struct msg_rsp *rsp)
{
	struct mcs *mcs;

	if (req->mcs_id >= rvu->mcs_blk_cnt)
		return MCS_AF_ERR_INVALID_MCSID;

	mcs = mcs_get_pdata(req->mcs_id);
	mcs->mcs_ops->mcs_rx_sa_mem_map_write(mcs, req);
	return 0;
}

int rvu_mbox_handler_mcs_tx_sc_sa_map_write(struct rvu *rvu,
					    struct mcs_tx_sc_sa_map *req,
					    struct msg_rsp *rsp)
{
	struct mcs *mcs;

	if (req->mcs_id >= rvu->mcs_blk_cnt)
		return MCS_AF_ERR_INVALID_MCSID;

	mcs = mcs_get_pdata(req->mcs_id);
	mcs->mcs_ops->mcs_tx_sa_mem_map_write(mcs, req);
	mcs->tx_sa_active[req->sc_id] = req->tx_sa_active;

	return 0;
}

int rvu_mbox_handler_mcs_sa_plcy_write(struct rvu *rvu,
				       struct mcs_sa_plcy_write_req *req,
				       struct msg_rsp *rsp)
{
	struct mcs *mcs;
	int i;

	if (req->mcs_id >= rvu->mcs_blk_cnt)
		return MCS_AF_ERR_INVALID_MCSID;

	mcs = mcs_get_pdata(req->mcs_id);

	for (i = 0; i < req->sa_cnt; i++)
		mcs_sa_plcy_write(mcs, &req->plcy[i][0],
				  req->sa_index[i], req->dir);
	return 0;
}

int rvu_mbox_handler_mcs_rx_sc_cam_write(struct rvu *rvu,
					 struct mcs_rx_sc_cam_write_req *req,
					 struct msg_rsp *rsp)
{
	struct mcs *mcs;

	if (req->mcs_id >= rvu->mcs_blk_cnt)
		return MCS_AF_ERR_INVALID_MCSID;

	mcs = mcs_get_pdata(req->mcs_id);
	mcs_rx_sc_cam_write(mcs, req->sci, req->secy_id, req->sc_id);
	return 0;
}

int rvu_mbox_handler_mcs_secy_plcy_write(struct rvu *rvu,
					 struct mcs_secy_plcy_write_req *req,
					 struct msg_rsp *rsp)
{
	struct mcs *mcs;

	if (req->mcs_id >= rvu->mcs_blk_cnt)
		return MCS_AF_ERR_INVALID_MCSID;

	mcs = mcs_get_pdata(req->mcs_id);

	mcs_secy_plcy_write(mcs, req->plcy,
			    req->secy_id, req->dir);
	return 0;
}

int rvu_mbox_handler_mcs_flowid_entry_write(struct rvu *rvu,
					    struct mcs_flowid_entry_write_req *req,
					    struct msg_rsp *rsp)
{
	struct secy_mem_map map;
	struct mcs *mcs;

	if (req->mcs_id >= rvu->mcs_blk_cnt)
		return MCS_AF_ERR_INVALID_MCSID;

	mcs = mcs_get_pdata(req->mcs_id);

	/* TODO validate the flowid */
	mcs_flowid_entry_write(mcs, req->data, req->mask,
			       req->flow_id, req->dir);
	map.secy = req->secy_id;
	map.sc = req->sc_id;
	map.ctrl_pkt = req->ctrl_pkt;
	map.flow_id = req->flow_id;
	map.sci = req->sci;
	mcs->mcs_ops->mcs_flowid_secy_map(mcs, &map, req->dir);
	if (req->ena)
		mcs_ena_dis_flowid_entry(mcs, req->flow_id,
					 req->dir, true);
	return 0;
}

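/* Resource management: each direction (RX/TX) has its own mcs_rsrc_map
 * with flow-id, SECY, SC and SA allocators plus owner (pcifunc) lookup
 * tables. The free and alloc handlers below serialize on rvu->rsrc_lock.
 */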
int rvu_mbox_handler_mcs_free_resources(struct rvu *rvu,
					struct mcs_free_rsrc_req *req,
					struct msg_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	struct mcs_rsrc_map *map;
	struct mcs *mcs;
	int rc = 0;

	if (req->mcs_id >= rvu->mcs_blk_cnt)
		return MCS_AF_ERR_INVALID_MCSID;

	mcs = mcs_get_pdata(req->mcs_id);

	if (req->dir == MCS_RX)
		map = &mcs->rx;
	else
		map = &mcs->tx;

	mutex_lock(&rvu->rsrc_lock);
	/* Free all the cam resources mapped to PF/VF */
	if (req->all) {
		rc = mcs_free_all_rsrc(mcs, req->dir, pcifunc);
		goto exit;
	}

	switch (req->rsrc_type) {
	case MCS_RSRC_TYPE_FLOWID:
		rc = mcs_free_rsrc(&map->flow_ids, map->flowid2pf_map, req->rsrc_id, pcifunc);
		mcs_ena_dis_flowid_entry(mcs, req->rsrc_id, req->dir, false);
		break;
	case MCS_RSRC_TYPE_SECY:
		rc = mcs_free_rsrc(&map->secy, map->secy2pf_map, req->rsrc_id, pcifunc);
		mcs_clear_secy_plcy(mcs, req->rsrc_id, req->dir);
		break;
	case MCS_RSRC_TYPE_SC:
		rc = mcs_free_rsrc(&map->sc, map->sc2pf_map, req->rsrc_id, pcifunc);
		/* Disable SC CAM only on RX side */
		if (req->dir == MCS_RX)
			mcs_ena_dis_sc_cam_entry(mcs, req->rsrc_id, false);
		break;
	case MCS_RSRC_TYPE_SA:
		rc = mcs_free_rsrc(&map->sa, map->sa2pf_map, req->rsrc_id, pcifunc);
		break;
	}
exit:
	mutex_unlock(&rvu->rsrc_lock);
	return rc;
}

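/* MCS_ALLOC_RESOURCES handler: with req->all set, a flow-id, SECY, SC
 * and an SA pair are reserved in one go; otherwise req->rsrc_cnt entries
 * of the requested rsrc_type are allocated and their ids returned in the
 * response. A caller-side sketch of the request fields (hypothetical
 * values) for two RX flow ids on block 0:
 *
 *	req->mcs_id    = 0;
 *	req->dir       = MCS_RX;
 *	req->rsrc_type = MCS_RSRC_TYPE_FLOWID;
 *	req->rsrc_cnt  = 2;
 *	req->all       = 0;
 */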
int rvu_mbox_handler_mcs_alloc_resources(struct rvu *rvu,
					 struct mcs_alloc_rsrc_req *req,
					 struct mcs_alloc_rsrc_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	struct mcs_rsrc_map *map;
	struct mcs *mcs;
	int rsrc_id = 0, i;

	if (req->mcs_id >= rvu->mcs_blk_cnt)
		return MCS_AF_ERR_INVALID_MCSID;

	mcs = mcs_get_pdata(req->mcs_id);

	if (req->dir == MCS_RX)
		map = &mcs->rx;
	else
		map = &mcs->tx;

	mutex_lock(&rvu->rsrc_lock);

	if (req->all) {
		rsrc_id = mcs_alloc_all_rsrc(mcs, &rsp->flow_ids[0],
					     &rsp->secy_ids[0],
					     &rsp->sc_ids[0],
					     &rsp->sa_ids[0],
					     &rsp->sa_ids[1],
					     pcifunc, req->dir);
		goto exit;
	}

	switch (req->rsrc_type) {
	case MCS_RSRC_TYPE_FLOWID:
		for (i = 0; i < req->rsrc_cnt; i++) {
			rsrc_id = mcs_alloc_rsrc(&map->flow_ids, map->flowid2pf_map, pcifunc);
			if (rsrc_id < 0)
				goto exit;
			rsp->flow_ids[i] = rsrc_id;
			rsp->rsrc_cnt++;
		}
		break;
	case MCS_RSRC_TYPE_SECY:
		for (i = 0; i < req->rsrc_cnt; i++) {
			rsrc_id = mcs_alloc_rsrc(&map->secy, map->secy2pf_map, pcifunc);
			if (rsrc_id < 0)
				goto exit;
			rsp->secy_ids[i] = rsrc_id;
			rsp->rsrc_cnt++;
		}
		break;
	case MCS_RSRC_TYPE_SC:
		for (i = 0; i < req->rsrc_cnt; i++) {
			rsrc_id = mcs_alloc_rsrc(&map->sc, map->sc2pf_map, pcifunc);
			if (rsrc_id < 0)
				goto exit;
			rsp->sc_ids[i] = rsrc_id;
			rsp->rsrc_cnt++;
		}
		break;
	case MCS_RSRC_TYPE_SA:
		for (i = 0; i < req->rsrc_cnt; i++) {
			rsrc_id = mcs_alloc_rsrc(&map->sa, map->sa2pf_map, pcifunc);
			if (rsrc_id < 0)
				goto exit;
			rsp->sa_ids[i] = rsrc_id;
			rsp->rsrc_cnt++;
		}
		break;
	}

	rsp->rsrc_type = req->rsrc_type;
	rsp->dir = req->dir;
	rsp->mcs_id = req->mcs_id;
	rsp->all = req->all;

exit:
	if (rsrc_id < 0)
		dev_err(rvu->dev, "Failed to allocate the mcs resources for PCIFUNC:%d\n", pcifunc);
	mutex_unlock(&rvu->rsrc_lock);
	return 0;
}

int rvu_mbox_handler_mcs_alloc_ctrl_pkt_rule(struct rvu *rvu,
					     struct mcs_alloc_ctrl_pkt_rule_req *req,
					     struct mcs_alloc_ctrl_pkt_rule_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	struct mcs_rsrc_map *map;
	struct mcs *mcs;
	int rsrc_id;
	u16 offset;

	if (req->mcs_id >= rvu->mcs_blk_cnt)
		return MCS_AF_ERR_INVALID_MCSID;

	mcs = mcs_get_pdata(req->mcs_id);

	map = (req->dir == MCS_RX) ? &mcs->rx : &mcs->tx;

	mutex_lock(&rvu->rsrc_lock);

	switch (req->rule_type) {
	case MCS_CTRL_PKT_RULE_TYPE_ETH:
		offset = MCS_CTRLPKT_ETYPE_RULE_OFFSET;
		break;
	case MCS_CTRL_PKT_RULE_TYPE_DA:
		offset = MCS_CTRLPKT_DA_RULE_OFFSET;
		break;
	case MCS_CTRL_PKT_RULE_TYPE_RANGE:
		offset = MCS_CTRLPKT_DA_RANGE_RULE_OFFSET;
		break;
	case MCS_CTRL_PKT_RULE_TYPE_COMBO:
		offset = MCS_CTRLPKT_COMBO_RULE_OFFSET;
		break;
	case MCS_CTRL_PKT_RULE_TYPE_MAC:
		offset = MCS_CTRLPKT_MAC_EN_RULE_OFFSET;
		break;
	}

	rsrc_id = mcs_alloc_ctrlpktrule(&map->ctrlpktrule, map->ctrlpktrule2pf_map, offset,
					pcifunc);
	if (rsrc_id < 0)
		goto exit;

	rsp->rule_idx = rsrc_id;
	rsp->rule_type = req->rule_type;
	rsp->dir = req->dir;
	rsp->mcs_id = req->mcs_id;

	mutex_unlock(&rvu->rsrc_lock);
	return 0;
exit:
	if (rsrc_id < 0)
		dev_err(rvu->dev, "Failed to allocate the mcs ctrl pkt rule for PCIFUNC:%d\n",
			pcifunc);
	mutex_unlock(&rvu->rsrc_lock);
	return rsrc_id;
}

int rvu_mbox_handler_mcs_free_ctrl_pkt_rule(struct rvu *rvu,
					    struct mcs_free_ctrl_pkt_rule_req *req,
					    struct msg_rsp *rsp)
{
	struct mcs *mcs;
	int rc;

	if (req->mcs_id >= rvu->mcs_blk_cnt)
		return MCS_AF_ERR_INVALID_MCSID;

	mcs = mcs_get_pdata(req->mcs_id);

	mutex_lock(&rvu->rsrc_lock);

	rc = mcs_free_ctrlpktrule(mcs, req);

	mutex_unlock(&rvu->rsrc_lock);

	return rc;
}

int rvu_mbox_handler_mcs_ctrl_pkt_rule_write(struct rvu *rvu,
					     struct mcs_ctrl_pkt_rule_write_req *req,
					     struct msg_rsp *rsp)
{
	struct mcs *mcs;
	int rc;

	if (req->mcs_id >= rvu->mcs_blk_cnt)
		return MCS_AF_ERR_INVALID_MCSID;

	mcs = mcs_get_pdata(req->mcs_id);

	rc = mcs_ctrlpktrule_write(mcs, req);

	return rc;
}

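/* Build the active LMAC bitmap for CN10K-B: MCS ports map one-to-one to
 * CGX LMACs, so walk every port, translate it to a (cgx, lmac) pair and
 * set the bit only if that LMAC is actually present.
 */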
static void rvu_mcs_set_lmac_bmap(struct rvu *rvu)
{
	struct mcs *mcs = mcs_get_pdata(0);
	unsigned long lmac_bmap = 0;
	int cgx, lmac, port;

	for (port = 0; port < mcs->hw->lmac_cnt; port++) {
		cgx = port / rvu->hw->lmac_per_cgx;
		lmac = port % rvu->hw->lmac_per_cgx;
		if (!is_lmac_valid(rvu_cgx_pdata(cgx, rvu), lmac))
			continue;
		set_bit(port, &lmac_bmap);
	}
	mcs->hw->lmac_bmap = lmac_bmap;
}

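/* Called during AF driver initialization: discover the MCS block count,
 * set up LMAC channels and the active LMAC bitmap on CN10K-B, install
 * the default TCAM bypass entry per block, allocate per-PF/VF state and
 * create the interrupt-notification workqueue.
 */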
int rvu_mcs_init(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;
	int lmac, err = 0, mcs_id;
	struct mcs *mcs;

	rvu->mcs_blk_cnt = mcs_get_blkcnt();

	if (!rvu->mcs_blk_cnt)
		return 0;

	/* Needed only for CN10K-B */
	if (rvu->mcs_blk_cnt == 1) {
		err = mcs_set_lmac_channels(0, hw->cgx_chan_base);
		if (err)
			return err;
		/* Set active lmacs */
		rvu_mcs_set_lmac_bmap(rvu);
	}

	/* Install default tcam bypass entry and set port to operational mode */
	for (mcs_id = 0; mcs_id < rvu->mcs_blk_cnt; mcs_id++) {
		mcs = mcs_get_pdata(mcs_id);
		mcs_install_flowid_bypass_entry(mcs);
		for (lmac = 0; lmac < mcs->hw->lmac_cnt; lmac++)
			mcs_set_lmac_mode(mcs, lmac, 0);

		mcs->rvu = rvu;

		/* Allocate memory for PFVF data */
		mcs->pf = devm_kcalloc(mcs->dev, hw->total_pfs,
				       sizeof(struct mcs_pfvf), GFP_KERNEL);
		if (!mcs->pf)
			return -ENOMEM;

		mcs->vf = devm_kcalloc(mcs->dev, hw->total_vfs,
				       sizeof(struct mcs_pfvf), GFP_KERNEL);
		if (!mcs->vf)
			return -ENOMEM;
	}

	/* Initialize the wq for handling mcs interrupts */
	INIT_LIST_HEAD(&rvu->mcs_intrq_head);
	INIT_WORK(&rvu->mcs_intr_work, mcs_intr_handler_task);
	rvu->mcs_intr_wq = alloc_workqueue("mcs_intr_wq", 0, 0);
	if (!rvu->mcs_intr_wq) {
		dev_err(rvu->dev, "mcs alloc workqueue failed\n");
		return -ENOMEM;
	}

	return err;
}

void rvu_mcs_exit(struct rvu *rvu)
{
	if (!rvu->mcs_intr_wq)
		return;

	flush_workqueue(rvu->mcs_intr_wq);
	destroy_workqueue(rvu->mcs_intr_wq);
	rvu->mcs_intr_wq = NULL;
}