// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Admin Function driver
 *
 * Copyright (C) 2018 Marvell.
 *
 */

#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "rvu.h"
#include "cgx.h"
#include "lmac_common.h"
#include "rvu_reg.h"
#include "rvu_trace.h"
#include "rvu_npc_hash.h"

struct cgx_evq_entry {
	struct list_head evq_node;
	struct cgx_link_event link_event;
};

#define M(_name, _id, _fn_name, _req_type, _rsp_type)			\
static struct _req_type __maybe_unused					\
*otx2_mbox_alloc_msg_ ## _fn_name(struct rvu *rvu, int devid)		\
{									\
	struct _req_type *req;						\
									\
	req = (struct _req_type *)otx2_mbox_alloc_msg_rsp(		\
		&rvu->afpf_wq_info.mbox_up, devid, sizeof(struct _req_type), \
		sizeof(struct _rsp_type));				\
	if (!req)							\
		return NULL;						\
	req->hdr.sig = OTX2_MBOX_REQ_SIG;				\
	req->hdr.id = _id;						\
	trace_otx2_msg_alloc(rvu->pdev, _id, sizeof(*req));		\
	return req;							\
}

MBOX_UP_CGX_MESSAGES
#undef M

bool is_mac_feature_supported(struct rvu *rvu, int pf, int feature)
{
	u8 cgx_id, lmac_id;
	void *cgxd;

	if (!is_pf_cgxmapped(rvu, pf))
		return 0;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	cgxd = rvu_cgx_pdata(cgx_id, rvu);

	return (cgx_features_get(cgxd) & feature);
}

/* Returns bitmap of mapped PFs */
static u16 cgxlmac_to_pfmap(struct rvu *rvu, u8 cgx_id, u8 lmac_id)
{
	return rvu->cgxlmac2pf_map[CGX_OFFSET(cgx_id) + lmac_id];
}

int cgxlmac_to_pf(struct rvu *rvu, int cgx_id, int lmac_id)
{
	unsigned long pfmap;

	pfmap = cgxlmac_to_pfmap(rvu, cgx_id, lmac_id);

	/* Assumes only one pf mapped to a cgx lmac port */
	if (!pfmap)
		return -ENODEV;
	else
		return find_first_bit(&pfmap, 16);
}

static u8 cgxlmac_id_to_bmap(u8 cgx_id, u8 lmac_id)
{
	return ((cgx_id & 0xF) << 4) | (lmac_id & 0xF);
}

void *rvu_cgx_pdata(u8 cgx_id, struct rvu *rvu)
{
	if (cgx_id >= rvu->cgx_cnt_max)
		return NULL;

	return rvu->cgx_idmap[cgx_id];
}

/* Return the first enabled CGX instance; if none are enabled, return NULL */
void *rvu_first_cgx_pdata(struct rvu *rvu)
{
	int first_enabled_cgx = 0;
	void *cgxd = NULL;

	for (; first_enabled_cgx < rvu->cgx_cnt_max; first_enabled_cgx++) {
		cgxd = rvu_cgx_pdata(first_enabled_cgx, rvu);
		if (cgxd)
			break;
	}

	return cgxd;
}

/* Based on P2X connectivity find mapped NIX block for a PF */
static void rvu_map_cgx_nix_block(struct rvu *rvu, int pf,
				  int cgx_id, int lmac_id)
{
	struct rvu_pfvf *pfvf = &rvu->pf[pf];
	u8 p2x;

	p2x = cgx_lmac_get_p2x(cgx_id, lmac_id);
	/* Firmware sets P2X_SELECT as either NIX0 or NIX1 */
	pfvf->nix_blkaddr = BLKADDR_NIX0;
	if (p2x == CMR_P2X_SEL_NIX1)
		pfvf->nix_blkaddr = BLKADDR_NIX1;
}

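/* Build the PF <-> CGX LMAC maps. Each pf2cgxlmac_map[] entry packs the
 * CGX id in the upper nibble and the LMAC id in the lower nibble (see
 * cgxlmac_id_to_bmap()), while cgxlmac2pf_map[] holds the reverse mapping
 * as a bitmap of PFs attached to each LMAC.
 */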
static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
{
	struct npc_pkind *pkind = &rvu->hw->pkind;
	int cgx_cnt_max = rvu->cgx_cnt_max;
	int pf = PF_CGXMAP_BASE;
	unsigned long lmac_bmap;
	int size, free_pkind;
	int cgx, lmac, iter;
	int numvfs, hwvfs;

	if (!cgx_cnt_max)
		return 0;

	if (cgx_cnt_max > 0xF || MAX_LMAC_PER_CGX > 0xF)
		return -EINVAL;

	/* Alloc map table
	 * An additional entry is required since PF id starts from 1 and
	 * hence entry at offset 0 is invalid.
	 */
	size = (cgx_cnt_max * MAX_LMAC_PER_CGX + 1) * sizeof(u8);
	rvu->pf2cgxlmac_map = devm_kmalloc(rvu->dev, size, GFP_KERNEL);
	if (!rvu->pf2cgxlmac_map)
		return -ENOMEM;

	/* Initialize all entries with an invalid cgx and lmac id */
	memset(rvu->pf2cgxlmac_map, 0xFF, size);

	/* Reverse map table */
	rvu->cgxlmac2pf_map = devm_kzalloc(rvu->dev,
				cgx_cnt_max * MAX_LMAC_PER_CGX * sizeof(u16),
				GFP_KERNEL);
	if (!rvu->cgxlmac2pf_map)
		return -ENOMEM;

	rvu->cgx_mapped_pfs = 0;
	for (cgx = 0; cgx < cgx_cnt_max; cgx++) {
		if (!rvu_cgx_pdata(cgx, rvu))
			continue;
		lmac_bmap = cgx_get_lmac_bmap(rvu_cgx_pdata(cgx, rvu));
		for_each_set_bit(iter, &lmac_bmap, MAX_LMAC_PER_CGX) {
			lmac = cgx_get_lmacid(rvu_cgx_pdata(cgx, rvu),
					      iter);
			rvu->pf2cgxlmac_map[pf] = cgxlmac_id_to_bmap(cgx, lmac);
			rvu->cgxlmac2pf_map[CGX_OFFSET(cgx) + lmac] = 1 << pf;
			free_pkind = rvu_alloc_rsrc(&pkind->rsrc);
			pkind->pfchan_map[free_pkind] = ((pf) & 0x3F) << 16;
			rvu_map_cgx_nix_block(rvu, pf, cgx, lmac);
			rvu->cgx_mapped_pfs++;
			rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvfs);
			rvu->cgx_mapped_vfs += numvfs;
			pf++;
		}
	}
	return 0;
}

static int rvu_cgx_send_link_info(int cgx_id, int lmac_id, struct rvu *rvu)
{
	struct cgx_evq_entry *qentry;
	unsigned long flags;
	int err;

	qentry = kmalloc(sizeof(*qentry), GFP_KERNEL);
	if (!qentry)
		return -ENOMEM;

	/* Lock the event queue before we read the local link status */
	spin_lock_irqsave(&rvu->cgx_evq_lock, flags);
	err = cgx_get_link_info(rvu_cgx_pdata(cgx_id, rvu), lmac_id,
				&qentry->link_event.link_uinfo);
	qentry->link_event.cgx_id = cgx_id;
	qentry->link_event.lmac_id = lmac_id;
	if (err) {
		kfree(qentry);
		goto skip_add;
	}
	list_add_tail(&qentry->evq_node, &rvu->cgx_evq_head);
skip_add:
	spin_unlock_irqrestore(&rvu->cgx_evq_lock, flags);

	/* start worker to process the events */
	queue_work(rvu->cgx_evh_wq, &rvu->cgx_evh_work);

	return 0;
}

/* This is called from interrupt context and is expected to be atomic */
static int cgx_lmac_postevent(struct cgx_link_event *event, void *data)
{
	struct cgx_evq_entry *qentry;
	struct rvu *rvu = data;

	/* post event to the event queue */
	qentry = kmalloc(sizeof(*qentry), GFP_ATOMIC);
	if (!qentry)
		return -ENOMEM;
	qentry->link_event = *event;
	spin_lock(&rvu->cgx_evq_lock);
	list_add_tail(&qentry->evq_node, &rvu->cgx_evq_head);
	spin_unlock(&rvu->cgx_evq_lock);

	/* start worker to process the events */
	queue_work(rvu->cgx_evh_wq, &rvu->cgx_evh_work);

	return 0;
}

static void cgx_notify_pfs(struct cgx_link_event *event, struct rvu *rvu)
{
	struct cgx_link_user_info *linfo;
	struct cgx_link_info_msg *msg;
	unsigned long pfmap;
	int err, pfid;

	linfo = &event->link_uinfo;
	pfmap = cgxlmac_to_pfmap(rvu, event->cgx_id, event->lmac_id);

	do {
		pfid = find_first_bit(&pfmap, 16);
		clear_bit(pfid, &pfmap);

		/* check if notification is enabled */
		if (!test_bit(pfid, &rvu->pf_notify_bmap)) {
			dev_info(rvu->dev, "cgx %d: lmac %d Link status %s\n",
				 event->cgx_id, event->lmac_id,
				 linfo->link_up ? "UP" : "DOWN");
			continue;
		}

		/* Send mbox message to PF */
		msg = otx2_mbox_alloc_msg_cgx_link_event(rvu, pfid);
		if (!msg)
			continue;
		msg->link_info = *linfo;
		otx2_mbox_msg_send(&rvu->afpf_wq_info.mbox_up, pfid);
		err = otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, pfid);
		if (err)
			dev_warn(rvu->dev, "notification to pf %d failed\n",
				 pfid);
	} while (pfmap);
}

/* Worker that drains the CGX event queue. Entries are queued either from
 * the atomic link-change callback (cgx_lmac_postevent) or when a PF asks
 * for the current link state (rvu_cgx_send_link_info), and each event is
 * forwarded to the mapped PFs via cgx_notify_pfs().
 */
static void cgx_evhandler_task(struct work_struct *work)
{
	struct rvu *rvu = container_of(work, struct rvu, cgx_evh_work);
	struct cgx_evq_entry *qentry;
	struct cgx_link_event *event;
	unsigned long flags;

	do {
		/* Dequeue an event */
		spin_lock_irqsave(&rvu->cgx_evq_lock, flags);
		qentry = list_first_entry_or_null(&rvu->cgx_evq_head,
						  struct cgx_evq_entry,
						  evq_node);
		if (qentry)
			list_del(&qentry->evq_node);
		spin_unlock_irqrestore(&rvu->cgx_evq_lock, flags);
		if (!qentry)
			break; /* nothing more to process */

		event = &qentry->link_event;

		/* process event */
		cgx_notify_pfs(event, rvu);
		kfree(qentry);
	} while (1);
}

static int cgx_lmac_event_handler_init(struct rvu *rvu)
{
	unsigned long lmac_bmap;
	struct cgx_event_cb cb;
	int cgx, lmac, err;
	void *cgxd;

	spin_lock_init(&rvu->cgx_evq_lock);
	INIT_LIST_HEAD(&rvu->cgx_evq_head);
	INIT_WORK(&rvu->cgx_evh_work, cgx_evhandler_task);
	rvu->cgx_evh_wq = alloc_workqueue("rvu_evh_wq", 0, 0);
	if (!rvu->cgx_evh_wq) {
		dev_err(rvu->dev, "alloc workqueue failed");
		return -ENOMEM;
	}

	cb.notify_link_chg = cgx_lmac_postevent; /* link change call back */
	cb.data = rvu;

	for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) {
		cgxd = rvu_cgx_pdata(cgx, rvu);
		if (!cgxd)
			continue;
		lmac_bmap = cgx_get_lmac_bmap(cgxd);
		for_each_set_bit(lmac, &lmac_bmap, MAX_LMAC_PER_CGX) {
			err = cgx_lmac_evh_register(&cb, cgxd, lmac);
			if (err)
				dev_err(rvu->dev,
					"%d:%d handler register failed\n",
					cgx, lmac);
		}
	}

	return 0;
}

static void rvu_cgx_wq_destroy(struct rvu *rvu)
{
	if (rvu->cgx_evh_wq) {
		destroy_workqueue(rvu->cgx_evh_wq);
		rvu->cgx_evh_wq = NULL;
	}
}

int rvu_cgx_init(struct rvu *rvu)
{
	int cgx, err;
	void *cgxd;

	/* CGX port ids start from 0 and are not necessarily contiguous.
	 * Hence we allocate resources based on the maximum port id value.
	 */
	rvu->cgx_cnt_max = cgx_get_cgxcnt_max();
	if (!rvu->cgx_cnt_max) {
		dev_info(rvu->dev, "No CGX devices found!\n");
		return -ENODEV;
	}

	rvu->cgx_idmap = devm_kzalloc(rvu->dev, rvu->cgx_cnt_max *
				      sizeof(void *), GFP_KERNEL);
	if (!rvu->cgx_idmap)
		return -ENOMEM;

	/* Initialize the cgxdata table */
	for (cgx = 0; cgx < rvu->cgx_cnt_max; cgx++)
		rvu->cgx_idmap[cgx] = cgx_get_pdata(cgx);

	/* Map CGX LMAC interfaces to RVU PFs */
	err = rvu_map_cgx_lmac_pf(rvu);
	if (err)
		return err;

	/* Register for CGX events */
	err = cgx_lmac_event_handler_init(rvu);
	if (err)
		return err;

	mutex_init(&rvu->cgx_cfg_lock);

	/* Ensure event handler registration is completed, before
	 * we turn on the links
	 */
	mb();

	/* Do link up for all CGX ports */
	for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) {
		cgxd = rvu_cgx_pdata(cgx, rvu);
		if (!cgxd)
			continue;
		err = cgx_lmac_linkup_start(cgxd);
		if (err)
			dev_err(rvu->dev,
				"Link up process failed to start on cgx %d\n",
				cgx);
	}

	return 0;
}

int rvu_cgx_exit(struct rvu *rvu)
{
	unsigned long lmac_bmap;
	int cgx, lmac;
	void *cgxd;

	for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) {
		cgxd = rvu_cgx_pdata(cgx, rvu);
		if (!cgxd)
			continue;
		lmac_bmap = cgx_get_lmac_bmap(cgxd);
		for_each_set_bit(lmac, &lmac_bmap, MAX_LMAC_PER_CGX)
			cgx_lmac_evh_unregister(cgxd, lmac);
	}

	/* Ensure event handler unregister is completed */
	mb();

	rvu_cgx_wq_destroy(rvu);
	return 0;
}

/* Most of the CGX configuration is restricted to the mapped PF only,
 * VFs of the mapped PF and other PFs are not allowed. This fn() checks
 * whether a PFFUNC is permitted to do the config or not.
 */
inline bool is_cgx_config_permitted(struct rvu *rvu, u16 pcifunc)
{
	if ((pcifunc & RVU_PFVF_FUNC_MASK) ||
	    !is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc)))
		return false;
	return true;
}

void rvu_cgx_enadis_rx_bp(struct rvu *rvu, int pf, bool enable)
{
	struct mac_ops *mac_ops;
	u8 cgx_id, lmac_id;
	void *cgxd;

	if (!is_pf_cgxmapped(rvu, pf))
		return;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	cgxd = rvu_cgx_pdata(cgx_id, rvu);

	mac_ops = get_mac_ops(cgxd);
	/* Set / clear CTL_BCK to control pause frame forwarding to NIX */
	if (enable)
		mac_ops->mac_enadis_rx_pause_fwding(cgxd, lmac_id, true);
	else
		mac_ops->mac_enadis_rx_pause_fwding(cgxd, lmac_id, false);
}

int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start)
{
	int pf = rvu_get_pf(pcifunc);
	struct mac_ops *mac_ops;
	u8 cgx_id, lmac_id;
	void *cgxd;

	if (!is_cgx_config_permitted(rvu, pcifunc))
		return LMAC_AF_ERR_PERM_DENIED;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	cgxd = rvu_cgx_pdata(cgx_id, rvu);
	mac_ops = get_mac_ops(cgxd);

	return mac_ops->mac_rx_tx_enable(cgxd, lmac_id, start);
}

int rvu_cgx_config_tx(void *cgxd, int lmac_id, bool enable)
{
	struct mac_ops *mac_ops;

	mac_ops = get_mac_ops(cgxd);
	return mac_ops->mac_tx_enable(cgxd, lmac_id, enable);
}

void rvu_cgx_disable_dmac_entries(struct rvu *rvu, u16 pcifunc)
{
	int pf = rvu_get_pf(pcifunc);
	int i = 0, lmac_count = 0;
	u8 max_dmac_filters;
	u8 cgx_id, lmac_id;
	void *cgx_dev;

	if (!is_cgx_config_permitted(rvu, pcifunc))
		return;

	if (rvu_npc_exact_has_match_table(rvu)) {
		rvu_npc_exact_reset(rvu, pcifunc);
		return;
	}

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	cgx_dev = cgx_get_pdata(cgx_id);
	lmac_count = cgx_get_lmac_cnt(cgx_dev);
	max_dmac_filters = MAX_DMAC_ENTRIES_PER_CGX / lmac_count;

	for (i = 0; i < max_dmac_filters; i++)
		cgx_lmac_addr_del(cgx_id, lmac_id, i);

	/* cgx_lmac_addr_del() does not clear the entry at index 0,
	 * so reset it explicitly.
	 */
	cgx_lmac_addr_reset(cgx_id, lmac_id);
}

int rvu_mbox_handler_cgx_start_rxtx(struct rvu *rvu, struct msg_req *req,
				    struct msg_rsp *rsp)
{
	rvu_cgx_config_rxtx(rvu, req->hdr.pcifunc, true);
	return 0;
}

int rvu_mbox_handler_cgx_stop_rxtx(struct rvu *rvu, struct msg_req *req,
				   struct msg_rsp *rsp)
{
	rvu_cgx_config_rxtx(rvu, req->hdr.pcifunc, false);
	return 0;
}

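/* Common stats handler for CGX and RPM MACs; the response buffer is cast
 * to struct rpm_stats_rsp or struct cgx_stats_rsp based on the stats count
 * reported by the underlying mac_ops.
 */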
static int rvu_lmac_get_stats(struct rvu *rvu, struct msg_req *req,
			      void *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	struct mac_ops *mac_ops;
	int stat = 0, err = 0;
	u64 tx_stat, rx_stat;
	u8 cgx_idx, lmac;
	void *cgxd;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return LMAC_AF_ERR_PERM_DENIED;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
	cgxd = rvu_cgx_pdata(cgx_idx, rvu);
	mac_ops = get_mac_ops(cgxd);

	/* Rx stats */
	while (stat < mac_ops->rx_stats_cnt) {
		err = mac_ops->mac_get_rx_stats(cgxd, lmac, stat, &rx_stat);
		if (err)
			return err;
		if (mac_ops->rx_stats_cnt == RPM_RX_STATS_COUNT)
			((struct rpm_stats_rsp *)rsp)->rx_stats[stat] = rx_stat;
		else
			((struct cgx_stats_rsp *)rsp)->rx_stats[stat] = rx_stat;
		stat++;
	}

	/* Tx stats */
	stat = 0;
	while (stat < mac_ops->tx_stats_cnt) {
		err = mac_ops->mac_get_tx_stats(cgxd, lmac, stat, &tx_stat);
		if (err)
			return err;
		if (mac_ops->tx_stats_cnt == RPM_TX_STATS_COUNT)
			((struct rpm_stats_rsp *)rsp)->tx_stats[stat] = tx_stat;
		else
			((struct cgx_stats_rsp *)rsp)->tx_stats[stat] = tx_stat;
		stat++;
	}
	return 0;
}

int rvu_mbox_handler_cgx_stats(struct rvu *rvu, struct msg_req *req,
			       struct cgx_stats_rsp *rsp)
{
	return rvu_lmac_get_stats(rvu, req, (void *)rsp);
}

int rvu_mbox_handler_rpm_stats(struct rvu *rvu, struct msg_req *req,
			       struct rpm_stats_rsp *rsp)
{
	return rvu_lmac_get_stats(rvu, req, (void *)rsp);
}

int rvu_mbox_handler_cgx_fec_stats(struct rvu *rvu,
				   struct msg_req *req,
				   struct cgx_fec_stats_rsp *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_idx, lmac;
	void *cgxd;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return LMAC_AF_ERR_PERM_DENIED;
	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);

	cgxd = rvu_cgx_pdata(cgx_idx, rvu);
	return cgx_get_fec_stats(cgxd, lmac, rsp);
}

int rvu_mbox_handler_cgx_mac_addr_set(struct rvu *rvu,
				      struct cgx_mac_addr_set_or_get *req,
				      struct cgx_mac_addr_set_or_get *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return -EPERM;

	if (rvu_npc_exact_has_match_table(rvu))
		return rvu_npc_exact_mac_addr_set(rvu, req, rsp);

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	cgx_lmac_addr_set(cgx_id, lmac_id, req->mac_addr);

	return 0;
}

int rvu_mbox_handler_cgx_mac_addr_add(struct rvu *rvu,
				      struct cgx_mac_addr_add_req *req,
				      struct cgx_mac_addr_add_rsp *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;
	int rc = 0;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return -EPERM;

	if (rvu_npc_exact_has_match_table(rvu))
		return rvu_npc_exact_mac_addr_add(rvu, req, rsp);

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	rc = cgx_lmac_addr_add(cgx_id, lmac_id, req->mac_addr);
	if (rc >= 0) {
		rsp->index = rc;
		return 0;
	}

	return rc;
}

int rvu_mbox_handler_cgx_mac_addr_del(struct rvu *rvu,
				      struct cgx_mac_addr_del_req *req,
				      struct msg_rsp *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return -EPERM;

	if (rvu_npc_exact_has_match_table(rvu))
		return rvu_npc_exact_mac_addr_del(rvu, req, rsp);

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	return cgx_lmac_addr_del(cgx_id, lmac_id, req->index);
}

int rvu_mbox_handler_cgx_mac_max_entries_get(struct rvu *rvu,
					     struct msg_req *req,
					     struct cgx_max_dmac_entries_get_rsp
					     *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;

	/* If msg is received from PFs (which are not mapped to CGX LMACs)
	 * or a VF, then no entries are allocated for DMAC filters at CGX
	 * level. So return zero.
	 */
	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) {
		rsp->max_dmac_filters = 0;
		return 0;
	}

	if (rvu_npc_exact_has_match_table(rvu)) {
		rsp->max_dmac_filters = rvu_npc_exact_get_max_entries(rvu);
		return 0;
	}

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	rsp->max_dmac_filters = cgx_lmac_addr_max_entries_get(cgx_id, lmac_id);
	return 0;
}

int rvu_mbox_handler_cgx_mac_addr_get(struct rvu *rvu,
				      struct cgx_mac_addr_set_or_get *req,
				      struct cgx_mac_addr_set_or_get *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;
	int rc = 0, i;
	u64 cfg;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return -EPERM;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	rsp->hdr.rc = rc;
	cfg = cgx_lmac_addr_get(cgx_id, lmac_id);
	/* copy 48 bit mac address to rsp->mac_addr */
	for (i = 0; i < ETH_ALEN; i++)
		rsp->mac_addr[i] = cfg >> (ETH_ALEN - 1 - i) * 8;
	return 0;
}

int rvu_mbox_handler_cgx_promisc_enable(struct rvu *rvu, struct msg_req *req,
					struct msg_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	int pf = rvu_get_pf(pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return -EPERM;

	/* Disable drop on non hit rule */
	if (rvu_npc_exact_has_match_table(rvu))
		return rvu_npc_exact_promisc_enable(rvu, req->hdr.pcifunc);

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	cgx_lmac_promisc_config(cgx_id, lmac_id, true);
	return 0;
}

int rvu_mbox_handler_cgx_promisc_disable(struct rvu *rvu, struct msg_req *req,
					 struct msg_rsp *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return -EPERM;

	/* Disable drop on non hit rule */
	if (rvu_npc_exact_has_match_table(rvu))
		return rvu_npc_exact_promisc_disable(rvu, req->hdr.pcifunc);

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	cgx_lmac_promisc_config(cgx_id, lmac_id, false);
	return 0;
}

static int rvu_cgx_ptp_rx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	int pf = rvu_get_pf(pcifunc);
	struct mac_ops *mac_ops;
	u8 cgx_id, lmac_id;
	void *cgxd;

	if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_PTP))
		return 0;

	/* This msg is expected only from PFs that are mapped to CGX LMACs,
	 * if received from other PF/VF simply ACK, nothing to do.
	 */
	if ((pcifunc & RVU_PFVF_FUNC_MASK) ||
	    !is_pf_cgxmapped(rvu, pf))
		return -ENODEV;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	cgxd = rvu_cgx_pdata(cgx_id, rvu);

	mac_ops = get_mac_ops(cgxd);
	mac_ops->mac_enadis_ptp_config(cgxd, lmac_id, enable);
	/* If PTP is enabled then inform NPC that packets to be
	 * parsed by this PF will have their data shifted by 8 bytes
	 * and if PTP is disabled then no shift is required
	 */
	if (npc_config_ts_kpuaction(rvu, pf, pcifunc, enable))
		return -EINVAL;
	/* This flag is required to clean up CGX conf if app gets killed */
	pfvf->hw_rx_tstamp_en = enable;

	return 0;
}

int rvu_mbox_handler_cgx_ptp_rx_enable(struct rvu *rvu, struct msg_req *req,
				       struct msg_rsp *rsp)
{
	if (!is_pf_cgxmapped(rvu, rvu_get_pf(req->hdr.pcifunc)))
		return -EPERM;

	return rvu_cgx_ptp_rx_cfg(rvu, req->hdr.pcifunc, true);
}

int rvu_mbox_handler_cgx_ptp_rx_disable(struct rvu *rvu, struct msg_req *req,
					struct msg_rsp *rsp)
{
	return rvu_cgx_ptp_rx_cfg(rvu, req->hdr.pcifunc, false);
}

static int rvu_cgx_config_linkevents(struct rvu *rvu, u16 pcifunc, bool en)
{
	int pf = rvu_get_pf(pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_cgx_config_permitted(rvu, pcifunc))
		return -EPERM;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	if (en) {
		set_bit(pf, &rvu->pf_notify_bmap);
		/* Send the current link status to PF */
		rvu_cgx_send_link_info(cgx_id, lmac_id, rvu);
	} else {
		clear_bit(pf, &rvu->pf_notify_bmap);
	}

	return 0;
}

int rvu_mbox_handler_cgx_start_linkevents(struct rvu *rvu, struct msg_req *req,
					  struct msg_rsp *rsp)
{
	rvu_cgx_config_linkevents(rvu, req->hdr.pcifunc, true);
	return 0;
}

int rvu_mbox_handler_cgx_stop_linkevents(struct rvu *rvu, struct msg_req *req,
					 struct msg_rsp *rsp)
{
	rvu_cgx_config_linkevents(rvu, req->hdr.pcifunc, false);
	return 0;
}

int rvu_mbox_handler_cgx_get_linkinfo(struct rvu *rvu, struct msg_req *req,
				      struct cgx_link_info_msg *rsp)
{
	u8 cgx_id, lmac_id;
	int pf, err;

	pf = rvu_get_pf(req->hdr.pcifunc);

	if (!is_pf_cgxmapped(rvu, pf))
		return -ENODEV;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	err = cgx_get_link_info(rvu_cgx_pdata(cgx_id, rvu), lmac_id,
				&rsp->link_info);
	return err;
}

int rvu_mbox_handler_cgx_features_get(struct rvu *rvu,
				      struct msg_req *req,
				      struct cgx_features_info_msg *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_idx, lmac;
	void *cgxd;

	if (!is_pf_cgxmapped(rvu, pf))
		return 0;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
	cgxd = rvu_cgx_pdata(cgx_idx, rvu);
	rsp->lmac_features = cgx_features_get(cgxd);

	return 0;
}

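/* Total MAC FIFO length of the first enabled CGX/RPM block; use
 * rvu_cgx_get_lmac_fifolen() for a per-LMAC FIFO length where supported.
 */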
u32 rvu_cgx_get_fifolen(struct rvu *rvu)
{
	struct mac_ops *mac_ops;
	u32 fifo_len;

	mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu));
	fifo_len = mac_ops ? mac_ops->fifo_len : 0;

	return fifo_len;
}

u32 rvu_cgx_get_lmac_fifolen(struct rvu *rvu, int cgx, int lmac)
{
	struct mac_ops *mac_ops;
	void *cgxd;

	cgxd = rvu_cgx_pdata(cgx, rvu);
	if (!cgxd)
		return 0;

	mac_ops = get_mac_ops(cgxd);
	if (!mac_ops->lmac_fifo_len)
		return 0;

	return mac_ops->lmac_fifo_len(cgxd, lmac);
}

static int rvu_cgx_config_intlbk(struct rvu *rvu, u16 pcifunc, bool en)
{
	int pf = rvu_get_pf(pcifunc);
	struct mac_ops *mac_ops;
	u8 cgx_id, lmac_id;

	if (!is_cgx_config_permitted(rvu, pcifunc))
		return -EPERM;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	mac_ops = get_mac_ops(rvu_cgx_pdata(cgx_id, rvu));

	return mac_ops->mac_lmac_intl_lbk(rvu_cgx_pdata(cgx_id, rvu),
					  lmac_id, en);
}

int rvu_mbox_handler_cgx_intlbk_enable(struct rvu *rvu, struct msg_req *req,
				       struct msg_rsp *rsp)
{
	rvu_cgx_config_intlbk(rvu, req->hdr.pcifunc, true);
	return 0;
}

int rvu_mbox_handler_cgx_intlbk_disable(struct rvu *rvu, struct msg_req *req,
					struct msg_rsp *rsp)
{
	rvu_cgx_config_intlbk(rvu, req->hdr.pcifunc, false);
	return 0;
}

int rvu_cgx_cfg_pause_frm(struct rvu *rvu, u16 pcifunc, u8 tx_pause, u8 rx_pause)
{
	int pf = rvu_get_pf(pcifunc);
	u8 rx_pfc = 0, tx_pfc = 0;
	struct mac_ops *mac_ops;
	u8 cgx_id, lmac_id;
	void *cgxd;

	if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_FC))
		return 0;

	/* This msg is expected only from PF/VFs that are mapped to CGX LMACs,
	 * if received from other PF/VF simply ACK, nothing to do.
	 */
	if (!is_pf_cgxmapped(rvu, pf))
		return LMAC_AF_ERR_PF_NOT_MAPPED;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	cgxd = rvu_cgx_pdata(cgx_id, rvu);
	mac_ops = get_mac_ops(cgxd);

	mac_ops->mac_get_pfc_frm_cfg(cgxd, lmac_id, &tx_pfc, &rx_pfc);
	if (tx_pfc || rx_pfc) {
		dev_warn(rvu->dev,
			 "Can not configure 802.3X flow control as PFC frames are enabled");
		return LMAC_AF_ERR_8023PAUSE_ENADIS_PERM_DENIED;
	}

	mutex_lock(&rvu->rsrc_lock);
	if (verify_lmac_fc_cfg(cgxd, lmac_id, tx_pause, rx_pause,
			       pcifunc & RVU_PFVF_FUNC_MASK)) {
		mutex_unlock(&rvu->rsrc_lock);
		return LMAC_AF_ERR_PERM_DENIED;
	}
	mutex_unlock(&rvu->rsrc_lock);

	return mac_ops->mac_enadis_pause_frm(cgxd, lmac_id, tx_pause, rx_pause);
}

int rvu_mbox_handler_cgx_cfg_pause_frm(struct rvu *rvu,
				       struct cgx_pause_frm_cfg *req,
				       struct cgx_pause_frm_cfg *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	struct mac_ops *mac_ops;
	u8 cgx_id, lmac_id;
	int err = 0;
	void *cgxd;

	/* This msg is expected only from PF/VFs that are mapped to CGX LMACs,
	 * if received from other PF/VF simply ACK, nothing to do.
	 */
	if (!is_pf_cgxmapped(rvu, pf))
		return -ENODEV;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	cgxd = rvu_cgx_pdata(cgx_id, rvu);
	mac_ops = get_mac_ops(cgxd);

	if (req->set)
		err = rvu_cgx_cfg_pause_frm(rvu, req->hdr.pcifunc, req->tx_pause, req->rx_pause);
	else
		mac_ops->mac_get_pause_frm_status(cgxd, lmac_id, &rsp->tx_pause, &rsp->rx_pause);

	return err;
}

int rvu_mbox_handler_cgx_get_phy_fec_stats(struct rvu *rvu, struct msg_req *req,
					   struct msg_rsp *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_pf_cgxmapped(rvu, pf))
		return LMAC_AF_ERR_PF_NOT_MAPPED;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	return cgx_get_phy_fec_stats(rvu_cgx_pdata(cgx_id, rvu), lmac_id);
}

/* Finds cumulative status of NIX rx/tx counters from LF of a PF and those
 * from its VFs as well, i.e. NIX rx/tx counters at the CGX port level.
 */
int rvu_cgx_nix_cuml_stats(struct rvu *rvu, void *cgxd, int lmac_id,
			   int index, int rxtxflag, u64 *stat)
{
	struct rvu_block *block;
	int blkaddr;
	u16 pcifunc;
	int pf, lf;

	*stat = 0;

	if (!cgxd || !rvu)
		return -EINVAL;

	pf = cgxlmac_to_pf(rvu, cgx_get_cgxid(cgxd), lmac_id);
	if (pf < 0)
		return pf;

	/* Assumes the LF of a PF and all of its VFs belong to the same
	 * NIX block
	 */
	pcifunc = pf << RVU_PFVF_PF_SHIFT;
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (blkaddr < 0)
		return 0;
	block = &rvu->hw->block[blkaddr];

	for (lf = 0; lf < block->lf.max; lf++) {
		/* Check if a lf is attached to this PF or one of its VFs */
		if (!((block->fn_map[lf] & ~RVU_PFVF_FUNC_MASK) == (pcifunc &
			 ~RVU_PFVF_FUNC_MASK)))
			continue;
		if (rxtxflag == NIX_STATS_RX)
			*stat += rvu_read64(rvu, blkaddr,
					    NIX_AF_LFX_RX_STATX(lf, index));
		else
			*stat += rvu_read64(rvu, blkaddr,
					    NIX_AF_LFX_TX_STATX(lf, index));
	}

	return 0;
}

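/* Reference-counted CGX Rx/Tx control: every NIXLF (PF or its VFs) that
 * starts I/O bumps the parent PF's cgx_users count. The LMAC is enabled
 * when the first user starts and disabled when the last user stops.
 */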
int rvu_cgx_start_stop_io(struct rvu *rvu, u16 pcifunc, bool start)
{
	struct rvu_pfvf *parent_pf, *pfvf;
	int cgx_users, err = 0;

	if (!is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc)))
		return 0;

	parent_pf = &rvu->pf[rvu_get_pf(pcifunc)];
	pfvf = rvu_get_pfvf(rvu, pcifunc);

	mutex_lock(&rvu->cgx_cfg_lock);

	if (start && pfvf->cgx_in_use)
		goto exit; /* CGX is already started hence nothing to do */
	if (!start && !pfvf->cgx_in_use)
		goto exit; /* CGX is already stopped hence nothing to do */

	if (start) {
		cgx_users = parent_pf->cgx_users;
		parent_pf->cgx_users++;
	} else {
		parent_pf->cgx_users--;
		cgx_users = parent_pf->cgx_users;
	}

	/* Start CGX when first of all NIXLFs is started.
	 * Stop CGX when last of all NIXLFs is stopped.
	 */
	if (!cgx_users) {
		err = rvu_cgx_config_rxtx(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK,
					  start);
		if (err) {
			dev_err(rvu->dev, "Unable to %s CGX\n",
				start ? "start" : "stop");
			/* Revert the usage count in case of error */
			parent_pf->cgx_users = start ? parent_pf->cgx_users - 1
					       : parent_pf->cgx_users + 1;
			goto exit;
		}
	}
	pfvf->cgx_in_use = start;
exit:
	mutex_unlock(&rvu->cgx_cfg_lock);
	return err;
}

int rvu_mbox_handler_cgx_set_fec_param(struct rvu *rvu,
				       struct fec_mode *req,
				       struct fec_mode *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_pf_cgxmapped(rvu, pf))
		return -EPERM;

	if (req->fec == OTX2_FEC_OFF)
		req->fec = OTX2_FEC_NONE;
	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	rsp->fec = cgx_set_fec(req->fec, cgx_id, lmac_id);
	return 0;
}

int rvu_mbox_handler_cgx_get_aux_link_info(struct rvu *rvu, struct msg_req *req,
					   struct cgx_fw_data *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;

	if (!rvu->fwdata)
		return LMAC_AF_ERR_FIRMWARE_DATA_NOT_MAPPED;

	if (!is_pf_cgxmapped(rvu, pf))
		return -EPERM;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	memcpy(&rsp->fwdata, &rvu->fwdata->cgx_fw_data[cgx_id][lmac_id],
	       sizeof(struct cgx_lmac_fwdata_s));
	return 0;
}

int rvu_mbox_handler_cgx_set_link_mode(struct rvu *rvu,
				       struct cgx_set_link_mode_req *req,
				       struct cgx_set_link_mode_rsp *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_idx, lmac;
	void *cgxd;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return -EPERM;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
	cgxd = rvu_cgx_pdata(cgx_idx, rvu);
	rsp->status = cgx_set_link_mode(cgxd, req->args, cgx_idx, lmac);
	return 0;
}

int rvu_mbox_handler_cgx_mac_addr_reset(struct rvu *rvu, struct cgx_mac_addr_reset_req *req,
					struct msg_rsp *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return LMAC_AF_ERR_PERM_DENIED;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	if (rvu_npc_exact_has_match_table(rvu))
		return rvu_npc_exact_mac_addr_reset(rvu, req, rsp);

	return cgx_lmac_addr_reset(cgx_id, lmac_id);
}

int rvu_mbox_handler_cgx_mac_addr_update(struct rvu *rvu,
					 struct cgx_mac_addr_update_req *req,
					 struct cgx_mac_addr_update_rsp *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return LMAC_AF_ERR_PERM_DENIED;

	if (rvu_npc_exact_has_match_table(rvu))
		return rvu_npc_exact_mac_addr_update(rvu, req, rsp);

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	return cgx_lmac_addr_update(cgx_id, lmac_id, req->mac_addr, req->index);
}

int rvu_cgx_prio_flow_ctrl_cfg(struct rvu *rvu, u16 pcifunc, u8 tx_pause,
			       u8 rx_pause, u16 pfc_en)
{
	int pf = rvu_get_pf(pcifunc);
	u8 rx_8023 = 0, tx_8023 = 0;
	struct mac_ops *mac_ops;
	u8 cgx_id, lmac_id;
	void *cgxd;

	/* This msg is expected only from PF/VFs that are mapped to CGX LMACs,
	 * if received from other PF/VF simply ACK, nothing to do.
	 */
	if (!is_pf_cgxmapped(rvu, pf))
		return -ENODEV;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	cgxd = rvu_cgx_pdata(cgx_id, rvu);
	mac_ops = get_mac_ops(cgxd);

	mac_ops->mac_get_pause_frm_status(cgxd, lmac_id, &tx_8023, &rx_8023);
	if (tx_8023 || rx_8023) {
		dev_warn(rvu->dev,
			 "Can not configure PFC as 802.3X pause frames are enabled");
		return LMAC_AF_ERR_PFC_ENADIS_PERM_DENIED;
	}

	mutex_lock(&rvu->rsrc_lock);
	if (verify_lmac_fc_cfg(cgxd, lmac_id, tx_pause, rx_pause,
			       pcifunc & RVU_PFVF_FUNC_MASK)) {
		mutex_unlock(&rvu->rsrc_lock);
		return LMAC_AF_ERR_PERM_DENIED;
	}
	mutex_unlock(&rvu->rsrc_lock);

	return mac_ops->pfc_config(cgxd, lmac_id, tx_pause, rx_pause, pfc_en);
}

int rvu_mbox_handler_cgx_prio_flow_ctrl_cfg(struct rvu *rvu,
					    struct cgx_pfc_cfg *req,
					    struct cgx_pfc_rsp *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	struct mac_ops *mac_ops;
	u8 cgx_id, lmac_id;
	void *cgxd;
	int err;

	/* This msg is expected only from PF/VFs that are mapped to CGX LMACs,
	 * if received from other PF/VF simply ACK, nothing to do.
	 */
	if (!is_pf_cgxmapped(rvu, pf))
		return -ENODEV;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	cgxd = rvu_cgx_pdata(cgx_id, rvu);
	mac_ops = get_mac_ops(cgxd);

	err = rvu_cgx_prio_flow_ctrl_cfg(rvu, req->hdr.pcifunc, req->tx_pause,
					 req->rx_pause, req->pfc_en);

	mac_ops->mac_get_pfc_frm_cfg(cgxd, lmac_id, &rsp->tx_pause, &rsp->rx_pause);
	return err;
}