// SPDX-License-Identifier: GPL-2.0-only
/*
 * Huawei HiNIC PCI Express Linux driver
 * Copyright(c) 2017 Huawei Technologies Co., Ltd
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/semaphore.h>
#include <linux/completion.h>
#include <linux/slab.h>
#include <net/devlink.h>
#include <asm/barrier.h>

#include "hinic_devlink.h"
#include "hinic_hw_if.h"
#include "hinic_hw_eqs.h"
#include "hinic_hw_api_cmd.h"
#include "hinic_hw_mgmt.h"
#include "hinic_hw_dev.h"

#define SYNC_MSG_ID_MASK		0x1FF

#define SYNC_MSG_ID(pf_to_mgmt)		((pf_to_mgmt)->sync_msg_id)

#define SYNC_MSG_ID_INC(pf_to_mgmt)	(SYNC_MSG_ID(pf_to_mgmt) = \
					((SYNC_MSG_ID(pf_to_mgmt) + 1) & \
					 SYNC_MSG_ID_MASK))

#define MSG_SZ_IS_VALID(in_size)	((in_size) <= MAX_MSG_LEN)

#define MGMT_MSG_LEN_MIN		20
#define MGMT_MSG_LEN_STEP		16
#define MGMT_MSG_RSVD_FOR_DEV		8

#define SEGMENT_LEN			48

#define MAX_PF_MGMT_BUF_SIZE		2048

/* Data should be SEG LEN size aligned */
#define MAX_MSG_LEN			2016

#define MSG_NOT_RESP			0xFFFF

#define MGMT_MSG_TIMEOUT		5000

#define SET_FUNC_PORT_MBOX_TIMEOUT	30000

#define SET_FUNC_PORT_MGMT_TIMEOUT	25000

#define UPDATE_FW_MGMT_TIMEOUT		20000

#define mgmt_to_pfhwdev(pf_mgmt)	\
		container_of(pf_mgmt, struct hinic_pfhwdev, pf_to_mgmt)

enum msg_segment_type {
	NOT_LAST_SEGMENT = 0,
	LAST_SEGMENT = 1,
};

enum mgmt_direction_type {
	MGMT_DIRECT_SEND = 0,
	MGMT_RESP = 1,
};

enum msg_ack_type {
	MSG_ACK = 0,
	MSG_NO_ACK = 1,
};

/**
 * hinic_register_mgmt_msg_cb - register msg handler for a msg from a module
 * @pf_to_mgmt: PF to MGMT channel
 * @mod: module in the chip that this handler will handle its messages
 * @handle: private data for the callback
 * @callback: the handler that will handle messages
 **/
void hinic_register_mgmt_msg_cb(struct hinic_pf_to_mgmt *pf_to_mgmt,
				enum hinic_mod_type mod,
				void *handle,
				void (*callback)(void *handle,
						 u8 cmd, void *buf_in,
						 u16 in_size, void *buf_out,
						 u16 *out_size))
{
	struct hinic_mgmt_cb *mgmt_cb = &pf_to_mgmt->mgmt_cb[mod];

	mgmt_cb->cb = callback;
	mgmt_cb->handle = handle;
	mgmt_cb->state = HINIC_MGMT_CB_ENABLED;
}

/**
 * hinic_unregister_mgmt_msg_cb - unregister msg handler for a msg from a module
 * @pf_to_mgmt: PF to MGMT channel
 * @mod: module in the chip that this handler handles its messages
 **/
void hinic_unregister_mgmt_msg_cb(struct hinic_pf_to_mgmt *pf_to_mgmt,
				  enum hinic_mod_type mod)
{
	struct hinic_mgmt_cb *mgmt_cb = &pf_to_mgmt->mgmt_cb[mod];

	mgmt_cb->state &= ~HINIC_MGMT_CB_ENABLED;

	while (mgmt_cb->state & HINIC_MGMT_CB_RUNNING)
		schedule();

	mgmt_cb->cb = NULL;
}

/**
 * prepare_header - prepare the header of the message
 * @pf_to_mgmt: PF to MGMT channel
 * @msg_len: the length of the message
 * @mod: module in the chip that will get the message
 * @ack_type: ask for response
 * @direction: the direction of the message
 * @cmd: command of the message
 * @msg_id: message id
 *
 * Return the prepared header value
 **/
static u64 prepare_header(struct hinic_pf_to_mgmt *pf_to_mgmt,
			  u16 msg_len, enum hinic_mod_type mod,
			  enum msg_ack_type ack_type,
			  enum mgmt_direction_type direction,
			  u16 cmd, u16 msg_id)
{
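	/* Messages built here are sent in a single API CMD write, so the
	 * header always carries SEQID 0 and marks itself as the last segment.
	 */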
	struct hinic_hwif *hwif = pf_to_mgmt->hwif;

	return HINIC_MSG_HEADER_SET(msg_len, MSG_LEN) |
	       HINIC_MSG_HEADER_SET(mod, MODULE) |
	       HINIC_MSG_HEADER_SET(SEGMENT_LEN, SEG_LEN) |
	       HINIC_MSG_HEADER_SET(ack_type, NO_ACK) |
	       HINIC_MSG_HEADER_SET(0, ASYNC_MGMT_TO_PF) |
	       HINIC_MSG_HEADER_SET(0, SEQID) |
	       HINIC_MSG_HEADER_SET(LAST_SEGMENT, LAST) |
	       HINIC_MSG_HEADER_SET(direction, DIRECTION) |
	       HINIC_MSG_HEADER_SET(cmd, CMD) |
	       HINIC_MSG_HEADER_SET(HINIC_HWIF_PCI_INTF(hwif), PCI_INTF) |
	       HINIC_MSG_HEADER_SET(HINIC_HWIF_PF_IDX(hwif), PF_IDX) |
	       HINIC_MSG_HEADER_SET(msg_id, MSG_ID);
}

/**
 * prepare_mgmt_cmd - prepare the mgmt command
 * @mgmt_cmd: pointer to the command to prepare
 * @header: pointer of the header for the message
 * @msg: the data of the message
 * @msg_len: the length of the message
 **/
static void prepare_mgmt_cmd(u8 *mgmt_cmd, u64 *header, u8 *msg, u16 msg_len)
{
	memset(mgmt_cmd, 0, MGMT_MSG_RSVD_FOR_DEV);

	mgmt_cmd += MGMT_MSG_RSVD_FOR_DEV;
	memcpy(mgmt_cmd, header, sizeof(*header));

	mgmt_cmd += sizeof(*header);
	memcpy(mgmt_cmd, msg, msg_len);
}

/**
 * mgmt_msg_len - calculate the total message length
 * @msg_data_len: the length of the message data
 *
 * Return the total message length
 **/
static u16 mgmt_msg_len(u16 msg_data_len)
{
	/* RSVD + HEADER_SIZE + DATA_LEN */
	u16 msg_len = MGMT_MSG_RSVD_FOR_DEV + sizeof(u64) + msg_data_len;

	if (msg_len > MGMT_MSG_LEN_MIN)
		msg_len = MGMT_MSG_LEN_MIN +
			  ALIGN((msg_len - MGMT_MSG_LEN_MIN),
				MGMT_MSG_LEN_STEP);
	else
		msg_len = MGMT_MSG_LEN_MIN;

	return msg_len;
}

/**
 * send_msg_to_mgmt - send message to mgmt by API CMD
 * @pf_to_mgmt: PF to MGMT channel
 * @mod: module in the chip that will get the message
 * @cmd: command of the message
 * @data: the msg data
 * @data_len: the msg data length
 * @ack_type: ask for response
 * @direction: the direction of the original message
 * @resp_msg_id: msg id to response for
 *
 * Return 0 - Success, negative - Failure
 **/
static int send_msg_to_mgmt(struct hinic_pf_to_mgmt *pf_to_mgmt,
			    enum hinic_mod_type mod, u8 cmd,
			    u8 *data, u16 data_len,
			    enum msg_ack_type ack_type,
			    enum mgmt_direction_type direction,
			    u16 resp_msg_id)
{
	struct hinic_api_cmd_chain *chain;
	u64 header;
	u16 msg_id;

	msg_id = SYNC_MSG_ID(pf_to_mgmt);

	if (direction == MGMT_RESP) {
		header = prepare_header(pf_to_mgmt, data_len, mod, ack_type,
					direction, cmd, resp_msg_id);
	} else {
		SYNC_MSG_ID_INC(pf_to_mgmt);
		header = prepare_header(pf_to_mgmt, data_len, mod, ack_type,
					direction, cmd, msg_id);
	}

	prepare_mgmt_cmd(pf_to_mgmt->sync_msg_buf, &header, data, data_len);

	chain = pf_to_mgmt->cmd_chain[HINIC_API_CMD_WRITE_TO_MGMT_CPU];
	return hinic_api_cmd_write(chain, HINIC_NODE_ID_MGMT,
				   pf_to_mgmt->sync_msg_buf,
				   mgmt_msg_len(data_len));
}

/**
 * msg_to_mgmt_sync - send sync message to mgmt
 * @pf_to_mgmt: PF to MGMT channel
 * @mod: module in the chip that will get the message
 * @cmd: command of the message
 * @buf_in: the msg data
 * @in_size: the msg data length
 * @buf_out: response
 * @out_size: response length
 * @direction: the direction of the original message
 * @resp_msg_id: msg id to response for
 * @timeout: time-out period of waiting for response
 *
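 * Holds the sync_msg_lock for the whole exchange and sleeps until the
 * response arrives or the wait times out.
 *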
 * Return 0 - Success, negative - Failure
 **/
static int msg_to_mgmt_sync(struct hinic_pf_to_mgmt *pf_to_mgmt,
			    enum hinic_mod_type mod, u8 cmd,
			    u8 *buf_in, u16 in_size,
			    u8 *buf_out, u16 *out_size,
			    enum mgmt_direction_type direction,
			    u16 resp_msg_id, u32 timeout)
{
	struct hinic_hwif *hwif = pf_to_mgmt->hwif;
	struct pci_dev *pdev = hwif->pdev;
	struct hinic_recv_msg *recv_msg;
	struct completion *recv_done;
	unsigned long timeo;
	u16 msg_id;
	int err;

	/* Lock the sync_msg_buf */
	down(&pf_to_mgmt->sync_msg_lock);

	recv_msg = &pf_to_mgmt->recv_resp_msg_from_mgmt;
	recv_done = &recv_msg->recv_done;

	if (resp_msg_id == MSG_NOT_RESP)
		msg_id = SYNC_MSG_ID(pf_to_mgmt);
	else
		msg_id = resp_msg_id;

	init_completion(recv_done);

	err = send_msg_to_mgmt(pf_to_mgmt, mod, cmd, buf_in, in_size,
			       MSG_ACK, direction, resp_msg_id);
	if (err) {
		dev_err(&pdev->dev, "Failed to send sync msg to mgmt\n");
		goto unlock_sync_msg;
	}

	timeo = msecs_to_jiffies(timeout ? timeout : MGMT_MSG_TIMEOUT);

	if (!wait_for_completion_timeout(recv_done, timeo)) {
		dev_err(&pdev->dev, "MGMT timeout, MSG id = %d\n", msg_id);
		hinic_dump_aeq_info(pf_to_mgmt->hwdev);
		err = -ETIMEDOUT;
		goto unlock_sync_msg;
	}

	smp_rmb();	/* verify reading after completion */

	if (recv_msg->msg_id != msg_id) {
		dev_err(&pdev->dev, "incorrect MSG for id = %d\n", msg_id);
		err = -EFAULT;
		goto unlock_sync_msg;
	}

	if (buf_out && recv_msg->msg_len <= MAX_PF_MGMT_BUF_SIZE) {
		memcpy(buf_out, recv_msg->msg, recv_msg->msg_len);
		*out_size = recv_msg->msg_len;
	}

unlock_sync_msg:
	up(&pf_to_mgmt->sync_msg_lock);
	return err;
}

/**
 * msg_to_mgmt_async - send message to mgmt without response
 * @pf_to_mgmt: PF to MGMT channel
 * @mod: module in the chip that will get the message
 * @cmd: command of the message
 * @buf_in: the msg data
 * @in_size: the msg data length
 * @direction: the direction of the original message
 * @resp_msg_id: msg id to response for
 *
 * Return 0 - Success, negative - Failure
 **/
static int msg_to_mgmt_async(struct hinic_pf_to_mgmt *pf_to_mgmt,
			     enum hinic_mod_type mod, u8 cmd,
			     u8 *buf_in, u16 in_size,
			     enum mgmt_direction_type direction,
			     u16 resp_msg_id)
{
	int err;

	/* Lock the sync_msg_buf */
	down(&pf_to_mgmt->sync_msg_lock);

	err = send_msg_to_mgmt(pf_to_mgmt, mod, cmd, buf_in, in_size,
			       MSG_NO_ACK, direction, resp_msg_id);

	up(&pf_to_mgmt->sync_msg_lock);
	return err;
}

/**
 * hinic_msg_to_mgmt - send message to mgmt
 * @pf_to_mgmt: PF to MGMT channel
 * @mod: module in the chip that will get the message
 * @cmd: command of the message
 * @buf_in: the msg data
 * @in_size: the msg data length
 * @buf_out: response
 * @out_size: returned response length
 * @sync: sync msg or async msg
 *
 * Return 0 - Success, negative - Failure
 **/
int hinic_msg_to_mgmt(struct hinic_pf_to_mgmt *pf_to_mgmt,
		      enum hinic_mod_type mod, u8 cmd,
		      void *buf_in, u16 in_size, void *buf_out, u16 *out_size,
		      enum hinic_mgmt_msg_type sync)
{
	struct hinic_hwif *hwif = pf_to_mgmt->hwif;
	struct pci_dev *pdev = hwif->pdev;
	u32 timeout = 0;

	if (sync != HINIC_MGMT_MSG_SYNC) {
		dev_err(&pdev->dev, "Invalid MGMT msg type\n");
		return -EINVAL;
	}

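	/* The request, together with the reserved bytes and header added by
	 * prepare_mgmt_cmd(), must fit in the sync_msg_buf.
	 */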
	if (!MSG_SZ_IS_VALID(in_size)) {
		dev_err(&pdev->dev, "Invalid MGMT msg buffer size\n");
		return -EINVAL;
	}

	if (HINIC_IS_VF(hwif)) {
		if (cmd == HINIC_PORT_CMD_SET_FUNC_STATE)
			timeout = SET_FUNC_PORT_MBOX_TIMEOUT;

		return hinic_mbox_to_pf(pf_to_mgmt->hwdev, mod, cmd, buf_in,
					in_size, buf_out, out_size, timeout);
	} else {
		if (cmd == HINIC_PORT_CMD_SET_FUNC_STATE)
			timeout = SET_FUNC_PORT_MGMT_TIMEOUT;
		else if (cmd == HINIC_PORT_CMD_UPDATE_FW)
			timeout = UPDATE_FW_MGMT_TIMEOUT;

		return msg_to_mgmt_sync(pf_to_mgmt, mod, cmd, buf_in, in_size,
					buf_out, out_size, MGMT_DIRECT_SEND,
					MSG_NOT_RESP, timeout);
	}
}

static void recv_mgmt_msg_work_handler(struct work_struct *work)
{
	struct hinic_mgmt_msg_handle_work *mgmt_work =
		container_of(work, struct hinic_mgmt_msg_handle_work, work);
	struct hinic_pf_to_mgmt *pf_to_mgmt = mgmt_work->pf_to_mgmt;
	struct pci_dev *pdev = pf_to_mgmt->hwif->pdev;
	u8 *buf_out = pf_to_mgmt->mgmt_ack_buf;
	struct hinic_mgmt_cb *mgmt_cb;
	unsigned long cb_state;
	u16 out_size = 0;

	memset(buf_out, 0, MAX_PF_MGMT_BUF_SIZE);

	if (mgmt_work->mod >= HINIC_MOD_MAX) {
		dev_err(&pdev->dev, "Unknown MGMT MSG module = %d\n",
			mgmt_work->mod);
		kfree(mgmt_work->msg);
		kfree(mgmt_work);
		return;
	}

	mgmt_cb = &pf_to_mgmt->mgmt_cb[mgmt_work->mod];

	cb_state = cmpxchg(&mgmt_cb->state,
			   HINIC_MGMT_CB_ENABLED,
			   HINIC_MGMT_CB_ENABLED | HINIC_MGMT_CB_RUNNING);

	if (cb_state == HINIC_MGMT_CB_ENABLED && mgmt_cb->cb)
		mgmt_cb->cb(mgmt_cb->handle, mgmt_work->cmd,
			    mgmt_work->msg, mgmt_work->msg_len,
			    buf_out, &out_size);
	else
		dev_err(&pdev->dev, "No MGMT msg handler, mod: %d, cmd: %d\n",
			mgmt_work->mod, mgmt_work->cmd);

	mgmt_cb->state &= ~HINIC_MGMT_CB_RUNNING;

	if (!mgmt_work->async_mgmt_to_pf)
		/* MGMT sent sync msg, send the response */
		msg_to_mgmt_async(pf_to_mgmt, mgmt_work->mod, mgmt_work->cmd,
				  buf_out, out_size, MGMT_RESP,
				  mgmt_work->msg_id);

	kfree(mgmt_work->msg);
	kfree(mgmt_work);
}

/**
 * mgmt_recv_msg_handler - handler for message from mgmt cpu
 * @pf_to_mgmt: PF to MGMT channel
 * @recv_msg: received message details
 **/
static void mgmt_recv_msg_handler(struct hinic_pf_to_mgmt *pf_to_mgmt,
				  struct hinic_recv_msg *recv_msg)
{
	struct hinic_mgmt_msg_handle_work *mgmt_work = NULL;

	mgmt_work = kzalloc(sizeof(*mgmt_work), GFP_KERNEL);
	if (!mgmt_work)
		return;

	if (recv_msg->msg_len) {
		mgmt_work->msg = kzalloc(recv_msg->msg_len, GFP_KERNEL);
		if (!mgmt_work->msg) {
			kfree(mgmt_work);
			return;
		}
	}

	mgmt_work->pf_to_mgmt = pf_to_mgmt;
	mgmt_work->msg_len = recv_msg->msg_len;
	memcpy(mgmt_work->msg, recv_msg->msg, recv_msg->msg_len);
	mgmt_work->msg_id = recv_msg->msg_id;
	mgmt_work->mod = recv_msg->mod;
	mgmt_work->cmd = recv_msg->cmd;
	mgmt_work->async_mgmt_to_pf = recv_msg->async_mgmt_to_pf;

	INIT_WORK(&mgmt_work->work, recv_mgmt_msg_work_handler);
	queue_work(pf_to_mgmt->workq, &mgmt_work->work);
}

/**
 * mgmt_resp_msg_handler - handler for a response message from mgmt cpu
 * @pf_to_mgmt: PF to MGMT channel
 * @recv_msg: received message details
 **/
static void mgmt_resp_msg_handler(struct hinic_pf_to_mgmt *pf_to_mgmt,
				  struct hinic_recv_msg *recv_msg)
{
	wmb();	/* verify writing all, before reading */
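
	/* The barrier above pairs with the smp_rmb() in msg_to_mgmt_sync(),
	 * so the waiter sees the fully written recv_msg once the completion
	 * fires.
	 */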
	complete(&recv_msg->recv_done);
}

/**
 * recv_mgmt_msg_handler - handler for a message from mgmt cpu
 * @pf_to_mgmt: PF to MGMT channel
 * @header: the header of the message
 * @recv_msg: received message details
 **/
static void recv_mgmt_msg_handler(struct hinic_pf_to_mgmt *pf_to_mgmt,
				  u64 *header, struct hinic_recv_msg *recv_msg)
{
	struct hinic_hwif *hwif = pf_to_mgmt->hwif;
	struct pci_dev *pdev = hwif->pdev;
	int seq_id, seg_len;
	u8 *msg_body;

	seq_id = HINIC_MSG_HEADER_GET(*header, SEQID);
	seg_len = HINIC_MSG_HEADER_GET(*header, SEG_LEN);

	if (seq_id >= (MAX_MSG_LEN / SEGMENT_LEN)) {
		dev_err(&pdev->dev, "recv big mgmt msg\n");
		return;
	}

	msg_body = (u8 *)header + sizeof(*header);
	memcpy(recv_msg->msg + seq_id * SEGMENT_LEN, msg_body, seg_len);

	if (!HINIC_MSG_HEADER_GET(*header, LAST))
		return;

	recv_msg->cmd = HINIC_MSG_HEADER_GET(*header, CMD);
	recv_msg->mod = HINIC_MSG_HEADER_GET(*header, MODULE);
	recv_msg->async_mgmt_to_pf = HINIC_MSG_HEADER_GET(*header,
							  ASYNC_MGMT_TO_PF);
	recv_msg->msg_len = HINIC_MSG_HEADER_GET(*header, MSG_LEN);
	recv_msg->msg_id = HINIC_MSG_HEADER_GET(*header, MSG_ID);

	if (HINIC_MSG_HEADER_GET(*header, DIRECTION) == MGMT_RESP)
		mgmt_resp_msg_handler(pf_to_mgmt, recv_msg);
	else
		mgmt_recv_msg_handler(pf_to_mgmt, recv_msg);
}

/**
 * mgmt_msg_aeqe_handler - handler for a mgmt message event
 * @handle: PF to MGMT channel
 * @data: the header of the message
 * @size: unused
 **/
static void mgmt_msg_aeqe_handler(void *handle, void *data, u8 size)
{
	struct hinic_pf_to_mgmt *pf_to_mgmt = handle;
	struct hinic_recv_msg *recv_msg;
	u64 *header = (u64 *)data;

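	/* Segments of a message that the MGMT CPU initiated are reassembled
	 * in recv_msg_from_mgmt; responses to requests this driver sent go
	 * to recv_resp_msg_from_mgmt.
	 */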
	recv_msg = HINIC_MSG_HEADER_GET(*header, DIRECTION) ==
		   MGMT_DIRECT_SEND ?
		   &pf_to_mgmt->recv_msg_from_mgmt :
		   &pf_to_mgmt->recv_resp_msg_from_mgmt;

	recv_mgmt_msg_handler(pf_to_mgmt, header, recv_msg);
}

/**
 * alloc_recv_msg - allocate receive message memory
 * @pf_to_mgmt: PF to MGMT channel
 * @recv_msg: pointer that will hold the allocated data
 *
 * Return 0 - Success, negative - Failure
 **/
static int alloc_recv_msg(struct hinic_pf_to_mgmt *pf_to_mgmt,
			  struct hinic_recv_msg *recv_msg)
{
	struct hinic_hwif *hwif = pf_to_mgmt->hwif;
	struct pci_dev *pdev = hwif->pdev;

	recv_msg->msg = devm_kzalloc(&pdev->dev, MAX_PF_MGMT_BUF_SIZE,
				     GFP_KERNEL);
	if (!recv_msg->msg)
		return -ENOMEM;

	recv_msg->buf_out = devm_kzalloc(&pdev->dev, MAX_PF_MGMT_BUF_SIZE,
					 GFP_KERNEL);
	if (!recv_msg->buf_out)
		return -ENOMEM;

	return 0;
}

/**
 * alloc_msg_buf - allocate all the message buffers of PF to MGMT channel
 * @pf_to_mgmt: PF to MGMT channel
 *
 * Return 0 - Success, negative - Failure
 **/
static int alloc_msg_buf(struct hinic_pf_to_mgmt *pf_to_mgmt)
{
	struct hinic_hwif *hwif = pf_to_mgmt->hwif;
	struct pci_dev *pdev = hwif->pdev;
	int err;

	err = alloc_recv_msg(pf_to_mgmt,
			     &pf_to_mgmt->recv_msg_from_mgmt);
	if (err) {
		dev_err(&pdev->dev, "Failed to allocate recv msg\n");
		return err;
	}

	err = alloc_recv_msg(pf_to_mgmt,
			     &pf_to_mgmt->recv_resp_msg_from_mgmt);
	if (err) {
		dev_err(&pdev->dev, "Failed to allocate resp recv msg\n");
		return err;
	}

	pf_to_mgmt->sync_msg_buf = devm_kzalloc(&pdev->dev,
						MAX_PF_MGMT_BUF_SIZE,
						GFP_KERNEL);
	if (!pf_to_mgmt->sync_msg_buf)
		return -ENOMEM;

	pf_to_mgmt->mgmt_ack_buf = devm_kzalloc(&pdev->dev,
						MAX_PF_MGMT_BUF_SIZE,
						GFP_KERNEL);
	if (!pf_to_mgmt->mgmt_ack_buf)
		return -ENOMEM;

	return 0;
}

/**
 * hinic_pf_to_mgmt_init - initialize PF to MGMT channel
 * @pf_to_mgmt: PF to MGMT channel
 * @hwif: HW interface the PF to MGMT will use for accessing HW
 *
 * Return 0 - Success, negative - Failure
 **/
int hinic_pf_to_mgmt_init(struct hinic_pf_to_mgmt *pf_to_mgmt,
			  struct hinic_hwif *hwif)
{
	struct hinic_pfhwdev *pfhwdev = mgmt_to_pfhwdev(pf_to_mgmt);
	struct hinic_hwdev *hwdev = &pfhwdev->hwdev;
	struct pci_dev *pdev = hwif->pdev;
	int err;

	pf_to_mgmt->hwif = hwif;
	pf_to_mgmt->hwdev = hwdev;

	if (HINIC_IS_VF(hwif))
		return 0;

	err = hinic_health_reporters_create(hwdev->devlink_dev);
	if (err)
		return err;

	sema_init(&pf_to_mgmt->sync_msg_lock, 1);
	pf_to_mgmt->workq = create_singlethread_workqueue("hinic_mgmt");
	if (!pf_to_mgmt->workq) {
		dev_err(&pdev->dev, "Failed to initialize MGMT workqueue\n");
		hinic_health_reporters_destroy(hwdev->devlink_dev);
		return -ENOMEM;
	}
	pf_to_mgmt->sync_msg_id = 0;

	err = alloc_msg_buf(pf_to_mgmt);
	if (err) {
		dev_err(&pdev->dev, "Failed to allocate msg buffers\n");
		hinic_health_reporters_destroy(hwdev->devlink_dev);
		return err;
	}

	err = hinic_api_cmd_init(pf_to_mgmt->cmd_chain, hwif);
	if (err) {
		dev_err(&pdev->dev, "Failed to initialize cmd chains\n");
		hinic_health_reporters_destroy(hwdev->devlink_dev);
		return err;
	}

	hinic_aeq_register_hw_cb(&hwdev->aeqs, HINIC_MSG_FROM_MGMT_CPU,
				 pf_to_mgmt,
				 mgmt_msg_aeqe_handler);
	return 0;
}

/**
 * hinic_pf_to_mgmt_free - free PF to MGMT channel
 * @pf_to_mgmt: PF to MGMT channel
 **/
void hinic_pf_to_mgmt_free(struct hinic_pf_to_mgmt *pf_to_mgmt)
{
	struct hinic_pfhwdev *pfhwdev = mgmt_to_pfhwdev(pf_to_mgmt);
	struct hinic_hwdev *hwdev = &pfhwdev->hwdev;

	if (HINIC_IS_VF(hwdev->hwif))
		return;

	hinic_aeq_unregister_hw_cb(&hwdev->aeqs, HINIC_MSG_FROM_MGMT_CPU);
	hinic_api_cmd_free(pf_to_mgmt->cmd_chain);
	destroy_workqueue(pf_to_mgmt->workq);
	hinic_health_reporters_destroy(hwdev->devlink_dev);
}