// SPDX-License-Identifier: GPL-2.0-only
/* Huawei HiNIC PCI Express Linux driver
 * Copyright(c) 2017 Huawei Technologies Co., Ltd
 */
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/completion.h>
#include <linux/semaphore.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

#include "hinic_hw_if.h"
#include "hinic_hw_mgmt.h"
#include "hinic_hw_csr.h"
#include "hinic_hw_dev.h"
#include "hinic_hw_mbox.h"

#define HINIC_MBOX_INT_DST_FUNC_SHIFT		0
#define HINIC_MBOX_INT_DST_AEQN_SHIFT		10
#define HINIC_MBOX_INT_SRC_RESP_AEQN_SHIFT	12
#define HINIC_MBOX_INT_STAT_DMA_SHIFT		14
/* The size of data to be sent (unit of 4 bytes) */
#define HINIC_MBOX_INT_TX_SIZE_SHIFT		20
/* SO_RO(strong order, relax order) */
#define HINIC_MBOX_INT_STAT_DMA_SO_RO_SHIFT	25
#define HINIC_MBOX_INT_WB_EN_SHIFT		28

#define HINIC_MBOX_INT_DST_FUNC_MASK		0x3FF
#define HINIC_MBOX_INT_DST_AEQN_MASK		0x3
#define HINIC_MBOX_INT_SRC_RESP_AEQN_MASK	0x3
#define HINIC_MBOX_INT_STAT_DMA_MASK		0x3F
#define HINIC_MBOX_INT_TX_SIZE_MASK		0x1F
#define HINIC_MBOX_INT_STAT_DMA_SO_RO_MASK	0x3
#define HINIC_MBOX_INT_WB_EN_MASK		0x1

#define HINIC_MBOX_INT_SET(val, field)	\
			(((val) & HINIC_MBOX_INT_##field##_MASK) << \
			HINIC_MBOX_INT_##field##_SHIFT)

enum hinic_mbox_tx_status {
	TX_NOT_DONE = 1,
};

#define HINIC_MBOX_CTRL_TRIGGER_AEQE_SHIFT	0

/* specifies the issue request for the message data.
 * 0 - Tx request is done;
 * 1 - Tx request is in process.
 */
#define HINIC_MBOX_CTRL_TX_STATUS_SHIFT		1

#define HINIC_MBOX_CTRL_TRIGGER_AEQE_MASK	0x1
#define HINIC_MBOX_CTRL_TX_STATUS_MASK		0x1

#define HINIC_MBOX_CTRL_SET(val, field)	\
			(((val) & HINIC_MBOX_CTRL_##field##_MASK) << \
			HINIC_MBOX_CTRL_##field##_SHIFT)

#define HINIC_MBOX_HEADER_MSG_LEN_SHIFT			0
#define HINIC_MBOX_HEADER_MODULE_SHIFT			11
#define HINIC_MBOX_HEADER_SEG_LEN_SHIFT			16
#define HINIC_MBOX_HEADER_NO_ACK_SHIFT			22
#define HINIC_MBOX_HEADER_SEQID_SHIFT			24
#define HINIC_MBOX_HEADER_LAST_SHIFT			30

/* specifies the mailbox message direction
 * 0 - send
 * 1 - receive
 */
#define HINIC_MBOX_HEADER_DIRECTION_SHIFT		31
#define HINIC_MBOX_HEADER_CMD_SHIFT			32
#define HINIC_MBOX_HEADER_MSG_ID_SHIFT			40
#define HINIC_MBOX_HEADER_STATUS_SHIFT			48
#define HINIC_MBOX_HEADER_SRC_GLB_FUNC_IDX_SHIFT	54

#define HINIC_MBOX_HEADER_MSG_LEN_MASK			0x7FF
#define HINIC_MBOX_HEADER_MODULE_MASK			0x1F
#define HINIC_MBOX_HEADER_SEG_LEN_MASK			0x3F
#define HINIC_MBOX_HEADER_NO_ACK_MASK			0x1
#define HINIC_MBOX_HEADER_SEQID_MASK			0x3F
#define HINIC_MBOX_HEADER_LAST_MASK			0x1
#define HINIC_MBOX_HEADER_DIRECTION_MASK		0x1
#define HINIC_MBOX_HEADER_CMD_MASK			0xFF
#define HINIC_MBOX_HEADER_MSG_ID_MASK			0xFF
#define HINIC_MBOX_HEADER_STATUS_MASK			0x3F
#define HINIC_MBOX_HEADER_SRC_GLB_FUNC_IDX_MASK		0x3FF

#define HINIC_MBOX_HEADER_GET(val, field)	\
			(((val) >> HINIC_MBOX_HEADER_##field##_SHIFT) & \
			HINIC_MBOX_HEADER_##field##_MASK)
#define HINIC_MBOX_HEADER_SET(val, field)	\
			((u64)((val) & HINIC_MBOX_HEADER_##field##_MASK) << \
			HINIC_MBOX_HEADER_##field##_SHIFT)

#define MBOX_SEGLEN_MASK	\
		HINIC_MBOX_HEADER_SET(HINIC_MBOX_HEADER_SEG_LEN_MASK, SEG_LEN)

#define HINIC_MBOX_SEG_LEN		48
#define HINIC_MBOX_COMP_TIME		8000U
#define MBOX_MSG_POLLING_TIMEOUT	8000

#define HINIC_MBOX_DATA_SIZE		2040

#define MBOX_MAX_BUF_SZ			2048UL
#define MBOX_HEADER_SZ			8

#define MBOX_INFO_SZ			4

/* MBOX size is 64B, 8B for mbox_header, 4B reserved */
#define MBOX_SEG_LEN			48
#define MBOX_SEG_LEN_ALIGN		4
#define MBOX_WB_STATUS_LEN		16UL

/* mbox write back status is 16B, only first 4B is used */
#define MBOX_WB_STATUS_ERRCODE_MASK		0xFFFF
#define MBOX_WB_STATUS_MASK			0xFF
#define MBOX_WB_ERROR_CODE_MASK			0xFF00
#define MBOX_WB_STATUS_FINISHED_SUCCESS		0xFF
#define MBOX_WB_STATUS_FINISHED_WITH_ERR	0xFE
#define MBOX_WB_STATUS_NOT_FINISHED		0x00

#define MBOX_STATUS_FINISHED(wb)	\
	(((wb) & MBOX_WB_STATUS_MASK) != MBOX_WB_STATUS_NOT_FINISHED)
#define MBOX_STATUS_SUCCESS(wb)		\
	(((wb) & MBOX_WB_STATUS_MASK) == MBOX_WB_STATUS_FINISHED_SUCCESS)
#define MBOX_STATUS_ERRCODE(wb)		\
	((wb) & MBOX_WB_ERROR_CODE_MASK)

#define SEQ_ID_START_VAL		0
#define SEQ_ID_MAX_VAL			42

#define DST_AEQ_IDX_DEFAULT_VAL		0
#define SRC_AEQ_IDX_DEFAULT_VAL		0
#define NO_DMA_ATTRIBUTE_VAL		0

#define HINIC_MGMT_RSP_AEQN		0
#define HINIC_MBOX_RSP_AEQN		2
#define HINIC_MBOX_RECV_AEQN		0

#define MBOX_MSG_NO_DATA_LEN		1

#define MBOX_BODY_FROM_HDR(header)	((u8 *)(header) + MBOX_HEADER_SZ)
#define MBOX_AREA(hwif)		\
	((hwif)->cfg_regs_bar + HINIC_FUNC_CSR_MAILBOX_DATA_OFF)

#define IS_PF_OR_PPF_SRC(src_func_idx)	((src_func_idx) < HINIC_MAX_PF_FUNCS)

#define MBOX_RESPONSE_ERROR		0x1
#define MBOX_MSG_ID_MASK		0xFF
#define MBOX_MSG_ID(func_to_func)	((func_to_func)->send_msg_id)
#define MBOX_MSG_ID_INC(func_to_func_mbox) (MBOX_MSG_ID(func_to_func_mbox) = \
			(MBOX_MSG_ID(func_to_func_mbox) + 1) & MBOX_MSG_ID_MASK)

#define FUNC_ID_OFF_SET_8B		8
#define FUNC_ID_OFF_SET_10B		10

/* max message counter wait to process for one function */
#define HINIC_MAX_MSG_CNT_TO_PROCESS	10

#define HINIC_QUEUE_MIN_DEPTH		6
#define HINIC_QUEUE_MAX_DEPTH		12
#define HINIC_MAX_RX_BUFFER_SIZE	15

enum hinic_hwif_direction_type {
	HINIC_HWIF_DIRECT_SEND = 0,
	HINIC_HWIF_RESPONSE = 1,
};

enum mbox_send_mod {
	MBOX_SEND_MSG_INT,
};

enum mbox_seg_type {
	NOT_LAST_SEG,
	LAST_SEG,
};

enum mbox_ordering_type {
	STRONG_ORDER,
};

enum mbox_write_back_type {
	WRITE_BACK = 1,
};

enum mbox_aeq_trig_type {
	NOT_TRIGGER,
	TRIGGER,
};

/**
 * hinic_register_pf_mbox_cb - register mbox callback for pf
 * @hwdev: the pointer to hw device
 * @mod: specific mod that the callback will handle
 * @callback: callback function
 * Return: 0 - success, negative - failure
 */
int hinic_register_pf_mbox_cb(struct hinic_hwdev *hwdev,
			      enum hinic_mod_type mod,
			      hinic_pf_mbox_cb callback)
{
	struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func;

	if (mod >= HINIC_MOD_MAX)
		return -EFAULT;

	func_to_func->pf_mbox_cb[mod] = callback;

	/* mark the slot registered so the recv path will dispatch to it */
	set_bit(HINIC_PF_MBOX_CB_REG, &func_to_func->pf_mbox_cb_state[mod]);

	return 0;
}

/**
 * hinic_register_vf_mbox_cb - register mbox callback for vf
 * @hwdev: the pointer to hw device
 * @mod: specific mod that the callback will handle
 * @callback: callback function
 * Return: 0 - success, negative - failure
 */
int hinic_register_vf_mbox_cb(struct hinic_hwdev *hwdev,
			      enum hinic_mod_type mod,
			      hinic_vf_mbox_cb callback)
{
	struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func;

	if (mod >= HINIC_MOD_MAX)
		return -EFAULT;

	func_to_func->vf_mbox_cb[mod] = callback;

	set_bit(HINIC_VF_MBOX_CB_REG, &func_to_func->vf_mbox_cb_state[mod]);

	return 0;
}

/**
 * hinic_unregister_pf_mbox_cb - unregister the mbox callback for pf
 * @hwdev: the pointer to hw device
 * @mod: specific mod that the callback will handle
 */
void hinic_unregister_pf_mbox_cb(struct hinic_hwdev *hwdev,
				 enum hinic_mod_type mod)
{
	struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func;

	clear_bit(HINIC_PF_MBOX_CB_REG, &func_to_func->pf_mbox_cb_state[mod]);

	/* wait for any in-flight invocation of the callback to drain
	 * before dropping the function pointer
	 */
	while (test_bit(HINIC_PF_MBOX_CB_RUNNING,
			&func_to_func->pf_mbox_cb_state[mod]))
		usleep_range(900, 1000);

	func_to_func->pf_mbox_cb[mod] = NULL;
}

/**
 * hinic_unregister_vf_mbox_cb - unregister the mbox callback for vf
 * @hwdev: the pointer to hw device
 * @mod: specific mod that the callback will handle
 */
void hinic_unregister_vf_mbox_cb(struct hinic_hwdev *hwdev,
				 enum hinic_mod_type mod)
{
	struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func;

	clear_bit(HINIC_VF_MBOX_CB_REG, &func_to_func->vf_mbox_cb_state[mod]);

	/* wait until no callback invocation is in progress */
	while (test_bit(HINIC_VF_MBOX_CB_RUNNING,
			&func_to_func->vf_mbox_cb_state[mod]))
		usleep_range(900, 1000);

	func_to_func->vf_mbox_cb[mod] = NULL;
}

/* dispatch a mailbox message received on a VF to the registered VF callback;
 * the RUNNING bit brackets the call so unregister can drain safely
 */
static int recv_vf_mbox_handler(struct hinic_mbox_func_to_func *func_to_func,
				struct hinic_recv_mbox *recv_mbox,
				void *buf_out, u16 *out_size)
{
	hinic_vf_mbox_cb cb;
	int ret = 0;

	if (recv_mbox->mod >= HINIC_MOD_MAX) {
		dev_err(&func_to_func->hwif->pdev->dev, "Receive illegal mbox message, mod = %d\n",
			recv_mbox->mod);
		return -EINVAL;
	}

	set_bit(HINIC_VF_MBOX_CB_RUNNING,
		&func_to_func->vf_mbox_cb_state[recv_mbox->mod]);

	cb = func_to_func->vf_mbox_cb[recv_mbox->mod];
	if (cb && test_bit(HINIC_VF_MBOX_CB_REG,
			   &func_to_func->vf_mbox_cb_state[recv_mbox->mod])) {
		cb(func_to_func->hwdev, recv_mbox->cmd, recv_mbox->mbox,
		   recv_mbox->mbox_len, buf_out, out_size);
	} else {
		dev_err(&func_to_func->hwif->pdev->dev, "VF mbox cb is not registered\n");
		ret = -EINVAL;
	}

	clear_bit(HINIC_VF_MBOX_CB_RUNNING,
		  &func_to_func->vf_mbox_cb_state[recv_mbox->mod]);

	return ret;
}

/* dispatch a mailbox message a PF received from one of its VFs to the
 * registered PF callback; vf_id is derived from the global function index
 */
static int
recv_pf_from_vf_mbox_handler(struct hinic_mbox_func_to_func *func_to_func,
			     struct hinic_recv_mbox *recv_mbox,
			     u16 src_func_idx, void *buf_out,
			     u16 *out_size)
{
	hinic_pf_mbox_cb cb;
	u16 vf_id = 0;
	int ret;

	if (recv_mbox->mod >= HINIC_MOD_MAX) {
		dev_err(&func_to_func->hwif->pdev->dev, "Receive illegal mbox message, mod = %d\n",
			recv_mbox->mod);
		return -EINVAL;
	}

	set_bit(HINIC_PF_MBOX_CB_RUNNING,
		&func_to_func->pf_mbox_cb_state[recv_mbox->mod]);

	cb = func_to_func->pf_mbox_cb[recv_mbox->mod];
	if (cb && test_bit(HINIC_PF_MBOX_CB_REG,
			   &func_to_func->pf_mbox_cb_state[recv_mbox->mod])) {
		vf_id = src_func_idx -
			hinic_glb_pf_vf_offset(func_to_func->hwif);
		ret = cb(func_to_func->hwdev, vf_id, recv_mbox->cmd,
			 recv_mbox->mbox, recv_mbox->mbox_len,
			 buf_out, out_size);
	} else {
		dev_err(&func_to_func->hwif->pdev->dev, "PF mbox mod(0x%x) cb is not registered\n",
			recv_mbox->mod);
		ret = -EINVAL;
	}

	clear_bit(HINIC_PF_MBOX_CB_RUNNING,
		  &func_to_func->pf_mbox_cb_state[recv_mbox->mod]);

	return ret;
}

/* validate the sequence id and segment length of an incoming segment and
 * record the accepted seq_id; segments must arrive strictly in order.
 *
 * NOTE(review): seq_id may reach SEQ_ID_MAX_VAL (42) with seg_len up to
 * MBOX_SEG_LEN (48), so recv_mbox_handler can write up to 43 * 48 = 2064
 * bytes into an MBOX_MAX_BUF_SZ (2048) buffer — the length of the final
 * segment is not bounded here; confirm against the upstream hardening of
 * this check.
 */
static bool check_mbox_seq_id_and_seg_len(struct hinic_recv_mbox *recv_mbox,
					  u8 seq_id, u8 seg_len)
{
	if (seq_id > SEQ_ID_MAX_VAL || seg_len > MBOX_SEG_LEN)
		return false;

	if (seq_id == 0) {
		recv_mbox->seq_id = seq_id;
	} else {
		if (seq_id != recv_mbox->seq_id + 1)
			return false;

		recv_mbox->seq_id = seq_id;
	}

	return true;
}

/* complete the waiter in hinic_mbox_to_func if this response matches the
 * message id currently being sent; otherwise log a stale/timeout response
 */
static void resp_mbox_handler(struct hinic_mbox_func_to_func *func_to_func,
			      struct hinic_recv_mbox *recv_mbox)
{
	spin_lock(&func_to_func->mbox_lock);
	if (recv_mbox->msg_info.msg_id == func_to_func->send_msg_id &&
	    func_to_func->event_flag == EVENT_START)
		complete(&recv_mbox->recv_done);
	else
		dev_err(&func_to_func->hwif->pdev->dev,
			"Mbox response timeout, current send msg id(0x%x), recv msg id(0x%x), status(0x%x)\n",
			func_to_func->send_msg_id, recv_mbox->msg_info.msg_id,
			recv_mbox->msg_info.status);
	spin_unlock(&func_to_func->mbox_lock);
}

static void recv_func_mbox_handler(struct hinic_mbox_func_to_func *func_to_func,
				   struct hinic_recv_mbox *recv_mbox,
				   u16 src_func_idx);

/* workqueue wrapper: handle one queued mbox message, then drop the
 * per-source pending-message counter and free the work item
 */
static void recv_func_mbox_work_handler(struct work_struct *work)
{
	struct hinic_mbox_work *mbox_work =
			container_of(work, struct hinic_mbox_work, work);
	struct hinic_recv_mbox *recv_mbox;

	recv_func_mbox_handler(mbox_work->func_to_func, mbox_work->recv_mbox,
			       mbox_work->src_func_idx);

	recv_mbox =
		&mbox_work->func_to_func->mbox_send[mbox_work->src_func_idx];

	atomic_dec(&recv_mbox->msg_cnt);

	kfree(mbox_work);
}

/* reassemble an incoming mailbox segment into recv_mbox; on the last
 * segment either complete the response waiter (response direction) or
 * duplicate the message and queue it to the workqueue for handling
 */
static void recv_mbox_handler(struct hinic_mbox_func_to_func *func_to_func,
			      void *header, struct hinic_recv_mbox *recv_mbox)
{
	void *mbox_body = MBOX_BODY_FROM_HDR(header);
	struct hinic_recv_mbox *rcv_mbox_temp = NULL;
	u64 mbox_header = *((u64 *)header);
	struct hinic_mbox_work *mbox_work;
	u8 seq_id, seg_len;
	u16 src_func_idx;
	int pos;

	seq_id = HINIC_MBOX_HEADER_GET(mbox_header, SEQID);
	seg_len = HINIC_MBOX_HEADER_GET(mbox_header, SEG_LEN);
	src_func_idx = HINIC_MBOX_HEADER_GET(mbox_header, SRC_GLB_FUNC_IDX);

	if (!check_mbox_seq_id_and_seg_len(recv_mbox, seq_id, seg_len)) {
		dev_err(&func_to_func->hwif->pdev->dev,
			"Mailbox sequence and segment check fail, src func id: 0x%x, front id: 0x%x, current id: 0x%x, seg len: 0x%x\n",
			src_func_idx, recv_mbox->seq_id, seq_id, seg_len);
		/* reset reassembly state so the next message starts clean */
		recv_mbox->seq_id = SEQ_ID_MAX_VAL;
		return;
	}

	pos = seq_id * MBOX_SEG_LEN;
	memcpy((u8 *)recv_mbox->mbox + pos, mbox_body,
	       HINIC_MBOX_HEADER_GET(mbox_header, SEG_LEN));

	if (!HINIC_MBOX_HEADER_GET(mbox_header, LAST))
		return;

	recv_mbox->cmd = HINIC_MBOX_HEADER_GET(mbox_header, CMD);
	recv_mbox->mod = HINIC_MBOX_HEADER_GET(mbox_header, MODULE);
	recv_mbox->mbox_len = HINIC_MBOX_HEADER_GET(mbox_header, MSG_LEN);
	recv_mbox->ack_type = HINIC_MBOX_HEADER_GET(mbox_header, NO_ACK);
	recv_mbox->msg_info.msg_id = HINIC_MBOX_HEADER_GET(mbox_header, MSG_ID);
	recv_mbox->msg_info.status = HINIC_MBOX_HEADER_GET(mbox_header, STATUS);
	recv_mbox->seq_id = SEQ_ID_MAX_VAL;

	if (HINIC_MBOX_HEADER_GET(mbox_header, DIRECTION) ==
	    HINIC_HWIF_RESPONSE) {
		resp_mbox_handler(func_to_func, recv_mbox);
		return;
	}

	/* backpressure: drop if this source already has too many messages
	 * waiting in the workqueue
	 */
	if (atomic_read(&recv_mbox->msg_cnt) > HINIC_MAX_MSG_CNT_TO_PROCESS) {
		dev_warn(&func_to_func->hwif->pdev->dev,
			 "This function(%u) have %d message wait to process,can't add to work queue\n",
			 src_func_idx, atomic_read(&recv_mbox->msg_cnt));
		return;
	}

	/* deep-copy the message so the AEQ slot can be reused while the
	 * workqueue processes this one
	 */
	rcv_mbox_temp = kmemdup(recv_mbox, sizeof(*rcv_mbox_temp), GFP_KERNEL);
	if (!rcv_mbox_temp)
		return;

	rcv_mbox_temp->mbox = kmemdup(recv_mbox->mbox, MBOX_MAX_BUF_SZ,
				      GFP_KERNEL);
	if (!rcv_mbox_temp->mbox)
		goto err_alloc_rcv_mbox_msg;

	rcv_mbox_temp->buf_out = kzalloc(MBOX_MAX_BUF_SZ, GFP_KERNEL);
	if (!rcv_mbox_temp->buf_out)
		goto err_alloc_rcv_mbox_buf;

	mbox_work = kzalloc(sizeof(*mbox_work), GFP_KERNEL);
	if (!mbox_work)
		goto err_alloc_mbox_work;

	mbox_work->func_to_func = func_to_func;
	mbox_work->recv_mbox = rcv_mbox_temp;
	mbox_work->src_func_idx = src_func_idx;

	atomic_inc(&recv_mbox->msg_cnt);
	INIT_WORK(&mbox_work->work, recv_func_mbox_work_handler);
	queue_work(func_to_func->workq, &mbox_work->work);

	return;

err_alloc_mbox_work:
	kfree(rcv_mbox_temp->buf_out);

err_alloc_rcv_mbox_buf:
	kfree(rcv_mbox_temp->mbox);

err_alloc_rcv_mbox_msg:
	kfree(rcv_mbox_temp);
}

/* AEQ handler for mailbox messages arriving from another function:
 * select the send/response reassembly slot by direction and source id
 */
void hinic_mbox_func_aeqe_handler(void *handle, void *header, u8 size)
{
	struct hinic_mbox_func_to_func *func_to_func;
	u64 mbox_header = *((u64 *)header);
	struct hinic_recv_mbox *recv_mbox;
	u64 src, dir;

	func_to_func = ((struct hinic_hwdev *)handle)->func_to_func;

	dir = HINIC_MBOX_HEADER_GET(mbox_header, DIRECTION);
	src = HINIC_MBOX_HEADER_GET(mbox_header, SRC_GLB_FUNC_IDX);

	if (src >= HINIC_MAX_FUNCTIONS) {
		dev_err(&func_to_func->hwif->pdev->dev,
			"Mailbox source function id:%u is invalid\n", (u32)src);
		return;
	}

	recv_mbox = (dir == HINIC_HWIF_DIRECT_SEND) ?
		    &func_to_func->mbox_send[src] :
		    &func_to_func->mbox_resp[src];

	recv_mbox_handler(func_to_func, (u64 *)header, recv_mbox);
}

/* AEQ handler for the "send result" event: our own segment transmission
 * finished, wake the sender waiting in wait_for_mbox_seg_completion
 */
void hinic_mbox_self_aeqe_handler(void *handle, void *header, u8 size)
{
	struct hinic_mbox_func_to_func *func_to_func;
	struct hinic_send_mbox *send_mbox;

	func_to_func = ((struct hinic_hwdev *)handle)->func_to_func;
	send_mbox = &func_to_func->send_mbox;

	complete(&send_mbox->send_done);
}

/* reset the hardware write-back status word before sending a segment */
static void clear_mbox_status(struct hinic_send_mbox *mbox)
{
	*mbox->wb_status = 0;

	/* clear mailbox write back status */
	wmb();
}

/* copy the 8-byte message header into the mailbox register area, 4B at a
 * time
 */
static void mbox_copy_header(struct hinic_hwdev *hwdev,
			     struct hinic_send_mbox *mbox, u64 *header)
{
	u32 i, idx_max = MBOX_HEADER_SZ / sizeof(u32);
	u32 *data = (u32 *)header;

	for (i = 0; i < idx_max; i++)
		__raw_writel(*(data + i), mbox->data + i * sizeof(u32));
}

/* copy one segment of payload into the mailbox register area after the
 * header; bounce through a stack buffer if seg_len is not 4B-aligned
 */
static void mbox_copy_send_data(struct hinic_hwdev *hwdev,
				struct hinic_send_mbox *mbox, void *seg,
				u16 seg_len)
{
	u8 mbox_max_buf[MBOX_SEG_LEN] = {0};
	u32 data_len, chk_sz = sizeof(u32);
	u32 *data = seg;
	u32 i, idx_max;

	/* The mbox message should be aligned in 4 bytes. */
	if (seg_len % chk_sz) {
		memcpy(mbox_max_buf, seg, seg_len);
		data = (u32 *)mbox_max_buf;
	}

	data_len = seg_len;
	idx_max = ALIGN(data_len, chk_sz) / chk_sz;

	for (i = 0; i < idx_max; i++)
		__raw_writel(*(data + i),
			     mbox->data + MBOX_HEADER_SZ + i * sizeof(u32));
}

/* program the mailbox interrupt attributes and kick the control register
 * to start transmission; poll decides AEQE trigger vs. polling mode
 */
static void write_mbox_msg_attr(struct hinic_mbox_func_to_func *func_to_func,
				u16 dst_func, u16 dst_aeqn, u16 seg_len,
				int poll)
{
	u16 rsp_aeq = (dst_aeqn == 0) ? 0 : HINIC_MBOX_RSP_AEQN;
	u32 mbox_int, mbox_ctrl;

	mbox_int = HINIC_MBOX_INT_SET(dst_func, DST_FUNC) |
		   HINIC_MBOX_INT_SET(dst_aeqn, DST_AEQN) |
		   HINIC_MBOX_INT_SET(rsp_aeq, SRC_RESP_AEQN) |
		   HINIC_MBOX_INT_SET(NO_DMA_ATTRIBUTE_VAL, STAT_DMA) |
		   HINIC_MBOX_INT_SET(ALIGN(MBOX_SEG_LEN + MBOX_HEADER_SZ +
					    MBOX_INFO_SZ, MBOX_SEG_LEN_ALIGN) >> 2,
				      TX_SIZE) |
		   HINIC_MBOX_INT_SET(STRONG_ORDER, STAT_DMA_SO_RO) |
		   HINIC_MBOX_INT_SET(WRITE_BACK, WB_EN);

	hinic_hwif_write_reg(func_to_func->hwif,
			     HINIC_FUNC_CSR_MAILBOX_INT_OFFSET_OFF, mbox_int);

	wmb(); /* writing the mbox int attributes */
	mbox_ctrl = HINIC_MBOX_CTRL_SET(TX_NOT_DONE, TX_STATUS);

	if (poll)
		mbox_ctrl |= HINIC_MBOX_CTRL_SET(NOT_TRIGGER, TRIGGER_AEQE);
	else
		mbox_ctrl |= HINIC_MBOX_CTRL_SET(TRIGGER, TRIGGER_AEQE);

	hinic_hwif_write_reg(func_to_func->hwif,
			     HINIC_FUNC_CSR_MAILBOX_CONTROL_OFF, mbox_ctrl);
}

/* dump the mailbox control/interrupt registers on send failure */
static void dump_mox_reg(struct hinic_hwdev *hwdev)
{
	u32 val;

	val = hinic_hwif_read_reg(hwdev->hwif,
				  HINIC_FUNC_CSR_MAILBOX_CONTROL_OFF);
	dev_err(&hwdev->hwif->pdev->dev, "Mailbox control reg: 0x%x\n", val);

	val = hinic_hwif_read_reg(hwdev->hwif,
				  HINIC_FUNC_CSR_MAILBOX_INT_OFFSET_OFF);
	dev_err(&hwdev->hwif->pdev->dev, "Mailbox interrupt offset: 0x%x\n",
		val);
}

/* read the DMA write-back status word (low 16 bits: status + errcode) */
static u16 get_mbox_status(struct hinic_send_mbox *mbox)
{
	/* write back is 16B, but only use first 4B */
	u64 wb_val = be64_to_cpu(*mbox->wb_status);

	rmb(); /* verify reading before check */

	return (u16)(wb_val & MBOX_WB_STATUS_ERRCODE_MASK);
}

/* wait until the hardware reports the segment transfer finished, either
 * by polling the write-back word or by waiting for the send AEQE
 */
static int
wait_for_mbox_seg_completion(struct hinic_mbox_func_to_func *func_to_func,
			     int poll, u16 *wb_status)
{
	struct hinic_send_mbox *send_mbox = &func_to_func->send_mbox;
	struct hinic_hwdev *hwdev = func_to_func->hwdev;
	struct completion *done = &send_mbox->send_done;
	u32 cnt = 0;
	unsigned long jif;

	if (poll) {
		while (cnt < MBOX_MSG_POLLING_TIMEOUT) {
			*wb_status = get_mbox_status(send_mbox);
			if (MBOX_STATUS_FINISHED(*wb_status))
				break;

			usleep_range(900, 1000);
			cnt++;
		}

		if (cnt == MBOX_MSG_POLLING_TIMEOUT) {
			dev_err(&hwdev->hwif->pdev->dev, "Send mailbox segment timeout, wb status: 0x%x\n",
				*wb_status);
			dump_mox_reg(hwdev);
			return -ETIMEDOUT;
		}
	} else {
		jif = msecs_to_jiffies(HINIC_MBOX_COMP_TIME);
		if (!wait_for_completion_timeout(done, jif)) {
			dev_err(&hwdev->hwif->pdev->dev, "Send mailbox segment timeout\n");
			dump_mox_reg(hwdev);
			return -ETIMEDOUT;
		}

		*wb_status = get_mbox_status(send_mbox);
	}

	return 0;
}

/* send one segment (header + up to MBOX_SEG_LEN bytes) to dst_func and
 * wait for the hardware write-back to report success
 */
static int send_mbox_seg(struct hinic_mbox_func_to_func *func_to_func,
			 u64 header, u16 dst_func, void *seg, u16 seg_len,
			 int poll, void *msg_info)
{
	struct hinic_send_mbox *send_mbox = &func_to_func->send_mbox;
	u16 seq_dir = HINIC_MBOX_HEADER_GET(header, DIRECTION);
	struct hinic_hwdev *hwdev = func_to_func->hwdev;
	struct completion *done = &send_mbox->send_done;
	u8 num_aeqs = hwdev->hwif->attr.num_aeqs;
	u16 dst_aeqn, wb_status = 0, errcode;

	if (num_aeqs >= 4)
		dst_aeqn = (seq_dir == HINIC_HWIF_DIRECT_SEND) ?
			   HINIC_MBOX_RECV_AEQN : HINIC_MBOX_RSP_AEQN;
	else
		dst_aeqn = 0;

	if (!poll)
		init_completion(done);

	clear_mbox_status(send_mbox);

	mbox_copy_header(hwdev, send_mbox, &header);

	mbox_copy_send_data(hwdev, send_mbox, seg, seg_len);

	write_mbox_msg_attr(func_to_func, dst_func, dst_aeqn, seg_len, poll);

	wmb(); /* writing the mbox msg attributes */

	if (wait_for_mbox_seg_completion(func_to_func, poll, &wb_status))
		return -ETIMEDOUT;

	if (!MBOX_STATUS_SUCCESS(wb_status)) {
		dev_err(&hwdev->hwif->pdev->dev, "Send mailbox segment to function %d error, wb status: 0x%x\n",
			dst_func, wb_status);
		errcode = MBOX_STATUS_ERRCODE(wb_status);
		return errcode ? errcode : -EFAULT;
	}

	return 0;
}

/* split a message into MBOX_SEG_LEN segments and send them in sequence;
 * serialized by msg_send_sem
 */
static int send_mbox_to_func(struct hinic_mbox_func_to_func *func_to_func,
			     enum hinic_mod_type mod, u16 cmd, void *msg,
			     u16 msg_len, u16 dst_func,
			     enum hinic_hwif_direction_type direction,
			     enum hinic_mbox_ack_type ack_type,
			     struct mbox_msg_info *msg_info)
{
	struct hinic_hwdev *hwdev = func_to_func->hwdev;
	u16 seg_len = MBOX_SEG_LEN;
	u8 *msg_seg = (u8 *)msg;
	u16 left = msg_len;
	u32 seq_id = 0;
	u64 header = 0;
	int err = 0;

	down(&func_to_func->msg_send_sem);

	header = HINIC_MBOX_HEADER_SET(msg_len, MSG_LEN) |
		 HINIC_MBOX_HEADER_SET(mod, MODULE) |
		 HINIC_MBOX_HEADER_SET(seg_len, SEG_LEN) |
		 HINIC_MBOX_HEADER_SET(ack_type, NO_ACK) |
		 HINIC_MBOX_HEADER_SET(SEQ_ID_START_VAL, SEQID) |
		 HINIC_MBOX_HEADER_SET(NOT_LAST_SEG, LAST) |
		 HINIC_MBOX_HEADER_SET(direction, DIRECTION) |
		 HINIC_MBOX_HEADER_SET(cmd, CMD) |
		 /* The vf's offset to it's associated pf */
		 HINIC_MBOX_HEADER_SET(msg_info->msg_id, MSG_ID) |
		 HINIC_MBOX_HEADER_SET(msg_info->status, STATUS) |
		 HINIC_MBOX_HEADER_SET(hinic_global_func_id_hw(hwdev->hwif),
				       SRC_GLB_FUNC_IDX);

	while (!(HINIC_MBOX_HEADER_GET(header, LAST))) {
		if (left <= HINIC_MBOX_SEG_LEN) {
			header &= ~MBOX_SEGLEN_MASK;
			header |= HINIC_MBOX_HEADER_SET(left, SEG_LEN);
			header |= HINIC_MBOX_HEADER_SET(LAST_SEG, LAST);

			seg_len = left;
		}

		/* NOTE(review): MBOX_SEND_MSG_INT (enum value 0) is passed
		 * as the 'poll' argument, i.e. interrupt/AEQE mode — confirm
		 * this is the intended encoding
		 */
		err = send_mbox_seg(func_to_func, header, dst_func, msg_seg,
				    seg_len, MBOX_SEND_MSG_INT, msg_info);
		if (err) {
			dev_err(&hwdev->hwif->pdev->dev, "Failed to send mbox seg, seq_id=0x%llx\n",
				HINIC_MBOX_HEADER_GET(header, SEQID));
			goto err_send_mbox_seg;
		}

		left -= HINIC_MBOX_SEG_LEN;
		msg_seg += HINIC_MBOX_SEG_LEN;

		seq_id++;
		header &= ~(HINIC_MBOX_HEADER_SET(HINIC_MBOX_HEADER_SEQID_MASK,
						  SEQID));
		header |= HINIC_MBOX_HEADER_SET(seq_id, SEQID);
	}

err_send_mbox_seg:
	up(&func_to_func->msg_send_sem);

	return err;
}

/* send the response for a handled request message back to its source,
 * mapping the handler's error code into the header status field
 */
static void
response_for_recv_func_mbox(struct hinic_mbox_func_to_func *func_to_func,
			    struct hinic_recv_mbox *recv_mbox, int err,
			    u16 out_size, u16 src_func_idx)
{
	struct mbox_msg_info msg_info = {0};

	if (recv_mbox->ack_type == MBOX_ACK) {
		msg_info.msg_id = recv_mbox->msg_info.msg_id;
		if (err == HINIC_MBOX_PF_BUSY_ACTIVE_FW)
			msg_info.status = HINIC_MBOX_PF_BUSY_ACTIVE_FW;
		else if (err == HINIC_MBOX_VF_CMD_ERROR)
			msg_info.status = HINIC_MBOX_VF_CMD_ERROR;
		else if (err)
			msg_info.status = HINIC_MBOX_PF_SEND_ERR;

		/* if no data needs to response, set out_size to 1 */
		if (!out_size || err)
			out_size = MBOX_MSG_NO_DATA_LEN;

		send_mbox_to_func(func_to_func, recv_mbox->mod, recv_mbox->cmd,
				  recv_mbox->buf_out, out_size, src_func_idx,
				  HINIC_HWIF_RESPONSE, MBOX_ACK,
				  &msg_info);
	}
}

/* handle one queued request message: dispatch to the VF or PF callback,
 * send the response, then free the deep-copied message
 */
static void recv_func_mbox_handler(struct hinic_mbox_func_to_func *func_to_func,
				   struct hinic_recv_mbox *recv_mbox,
				   u16 src_func_idx)
{
	void *buf_out = recv_mbox->buf_out;
	u16 out_size = MBOX_MAX_BUF_SZ;
	int err = 0;

	if (HINIC_IS_VF(func_to_func->hwif)) {
		err = recv_vf_mbox_handler(func_to_func, recv_mbox, buf_out,
					   &out_size);
	} else {
		if (IS_PF_OR_PPF_SRC(src_func_idx))
			dev_warn(&func_to_func->hwif->pdev->dev,
				 "Unsupported pf2pf mbox msg\n");
		else
			err = recv_pf_from_vf_mbox_handler(func_to_func,
							   recv_mbox,
							   src_func_idx,
							   buf_out, &out_size);
	}

	response_for_recv_func_mbox(func_to_func, recv_mbox, err, out_size,
				    src_func_idx);
	kfree(recv_mbox->buf_out);
	kfree(recv_mbox->mbox);
	kfree(recv_mbox);
}

/* update the send-state machine under mbox_lock (paired with
 * resp_mbox_handler's check)
 */
static void set_mbox_to_func_event(struct hinic_mbox_func_to_func *func_to_func,
				   enum mbox_event_state event_flag)
{
	spin_lock(&func_to_func->mbox_lock);
	func_to_func->event_flag = event_flag;
	spin_unlock(&func_to_func->mbox_lock);
}

/* extract the status and payload from a received response message into
 * the caller's output buffer
 */
static int mbox_resp_info_handler(struct hinic_mbox_func_to_func *func_to_func,
				  struct hinic_recv_mbox *mbox_for_resp,
				  enum hinic_mod_type mod, u16 cmd,
				  void *buf_out, u16 *out_size)
{
	int err;

	if (mbox_for_resp->msg_info.status) {
		err = mbox_for_resp->msg_info.status;
		if (err != HINIC_MBOX_PF_BUSY_ACTIVE_FW)
			dev_err(&func_to_func->hwif->pdev->dev, "Mbox response error(0x%x)\n",
				mbox_for_resp->msg_info.status);
		return err;
	}

	if (buf_out && out_size) {
		if (*out_size < mbox_for_resp->mbox_len) {
			dev_err(&func_to_func->hwif->pdev->dev,
				"Invalid response mbox message length: %d for mod %d cmd %d, should less than: %d\n",
				mbox_for_resp->mbox_len, mod, cmd, *out_size);
			return -EFAULT;
		}

		if (mbox_for_resp->mbox_len)
			memcpy(buf_out, mbox_for_resp->mbox,
			       mbox_for_resp->mbox_len);

		*out_size = mbox_for_resp->mbox_len;
	}

	return 0;
}

/* send a request to dst_func and synchronously wait for its response;
 * serialized by mbox_send_sem, timeout in ms (0 = default)
 */
int hinic_mbox_to_func(struct hinic_mbox_func_to_func *func_to_func,
		       enum hinic_mod_type mod, u16 cmd, u16 dst_func,
		       void *buf_in, u16 in_size, void *buf_out,
		       u16 *out_size, u32 timeout)
{
	struct hinic_recv_mbox *mbox_for_resp;
	struct mbox_msg_info msg_info = {0};
	unsigned long timeo;
	int err;

	mbox_for_resp = &func_to_func->mbox_resp[dst_func];

	down(&func_to_func->mbox_send_sem);

	init_completion(&mbox_for_resp->recv_done);

	msg_info.msg_id = MBOX_MSG_ID_INC(func_to_func);

	set_mbox_to_func_event(func_to_func, EVENT_START);

	err = send_mbox_to_func(func_to_func, mod, cmd, buf_in, in_size,
				dst_func, HINIC_HWIF_DIRECT_SEND, MBOX_ACK,
				&msg_info);
	if (err) {
		dev_err(&func_to_func->hwif->pdev->dev, "Send mailbox failed, msg_id: %d\n",
			msg_info.msg_id);
		set_mbox_to_func_event(func_to_func, EVENT_FAIL);
		goto err_send_mbox;
	}

	timeo = msecs_to_jiffies(timeout ? timeout : HINIC_MBOX_COMP_TIME);
	if (!wait_for_completion_timeout(&mbox_for_resp->recv_done, timeo)) {
		set_mbox_to_func_event(func_to_func, EVENT_TIMEOUT);
		dev_err(&func_to_func->hwif->pdev->dev,
			"Send mbox msg timeout, msg_id: %d\n", msg_info.msg_id);
		err = -ETIMEDOUT;
		goto err_send_mbox;
	}

	set_mbox_to_func_event(func_to_func, EVENT_END);

	err = mbox_resp_info_handler(func_to_func, mbox_for_resp, mod, cmd,
				     buf_out, out_size);

err_send_mbox:
	up(&func_to_func->mbox_send_sem);

	return err;
}

/* reject messages longer than the maximum mailbox payload */
static int mbox_func_params_valid(struct hinic_mbox_func_to_func *func_to_func,
				  void *buf_in, u16 in_size)
{
	if (in_size > HINIC_MBOX_DATA_SIZE) {
		dev_err(&func_to_func->hwif->pdev->dev,
			"Mbox msg len(%d) exceed limit(%d)\n",
			in_size, HINIC_MBOX_DATA_SIZE);
		return -EINVAL;
	}

	return 0;
}

/* VF-only entry point: send a message to this VF's associated PF */
int hinic_mbox_to_pf(struct hinic_hwdev *hwdev,
		     enum hinic_mod_type mod, u8 cmd, void *buf_in,
		     u16 in_size, void *buf_out, u16 *out_size, u32 timeout)
{
	struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func;
	int err = mbox_func_params_valid(func_to_func, buf_in, in_size);

	if (err)
		return err;

	if (!HINIC_IS_VF(hwdev->hwif)) {
		dev_err(&hwdev->hwif->pdev->dev, "Params error, func_type: %d\n",
			HINIC_FUNC_TYPE(hwdev->hwif));
		return -EINVAL;
	}

	return hinic_mbox_to_func(func_to_func, mod, cmd,
				  hinic_pf_id_of_vf_hw(hwdev->hwif), buf_in,
				  in_size, buf_out, out_size, timeout);
}

/* PF-only entry point: send a message to one of this PF's VFs; vf_id is
 * 1-based (0 is rejected) and is translated to a global function index
 */
int hinic_mbox_to_vf(struct hinic_hwdev *hwdev,
		     enum hinic_mod_type mod, u16 vf_id, u8 cmd, void *buf_in,
		     u16 in_size, void *buf_out, u16 *out_size, u32 timeout)
{
	struct hinic_mbox_func_to_func *func_to_func;
	u16 dst_func_idx;
	int err;

	if (!hwdev)
		return -EINVAL;

	func_to_func = hwdev->func_to_func;
	err = mbox_func_params_valid(func_to_func, buf_in, in_size);
	if (err)
		return err;

	if (HINIC_IS_VF(hwdev->hwif)) {
		dev_err(&hwdev->hwif->pdev->dev, "Params error, func_type: %d\n",
			HINIC_FUNC_TYPE(hwdev->hwif));
		return -EINVAL;
	}

	if (!vf_id) {
		dev_err(&hwdev->hwif->pdev->dev,
			"VF id(%d) error!\n", vf_id);
		return -EINVAL;
	}

	/* vf_offset_to_pf + vf_id is the vf's global function id of vf in
	 * this pf
	 */
	dst_func_idx = hinic_glb_pf_vf_offset(hwdev->hwif) + vf_id;

	return hinic_mbox_to_func(func_to_func, mod, cmd, dst_func_idx, buf_in,
				  in_size, buf_out, out_size, timeout);
}

/* allocate the reassembly and response buffers for one per-function slot */
static int init_mbox_info(struct hinic_recv_mbox *mbox_info)
{
	int err;

	mbox_info->seq_id = SEQ_ID_MAX_VAL;

	mbox_info->mbox = kzalloc(MBOX_MAX_BUF_SZ, GFP_KERNEL);
	if (!mbox_info->mbox)
		return -ENOMEM;

	mbox_info->buf_out = kzalloc(MBOX_MAX_BUF_SZ, GFP_KERNEL);
	if (!mbox_info->buf_out) {
		err = -ENOMEM;
		goto err_alloc_buf_out;
	}

	atomic_set(&mbox_info->msg_cnt, 0);

	return 0;

err_alloc_buf_out:
	kfree(mbox_info->mbox);

	return err;
}

/* free the buffers of one per-function slot */
static void clean_mbox_info(struct hinic_recv_mbox *mbox_info)
{
	kfree(mbox_info->buf_out);
	kfree(mbox_info->mbox);
}

/* allocate slots for every possible source function, unwinding on failure */
static int alloc_mbox_info(struct hinic_hwdev *hwdev,
			   struct hinic_recv_mbox *mbox_info)
{
	u16 func_idx, i;
	int err;

	for (func_idx = 0; func_idx < HINIC_MAX_FUNCTIONS; func_idx++) {
		err = init_mbox_info(&mbox_info[func_idx]);
		if (err) {
			dev_err(&hwdev->hwif->pdev->dev, "Failed to init function %d mbox info\n",
				func_idx);
			goto err_init_mbox_info;
		}
	}

	return 0;

err_init_mbox_info:
	for (i = 0; i < func_idx; i++)
		clean_mbox_info(&mbox_info[i]);

	return err;
}

/* free all per-function slots */
static void free_mbox_info(struct hinic_recv_mbox *mbox_info)
{
	u16 func_idx;

	for (func_idx = 0; func_idx < HINIC_MAX_FUNCTIONS; func_idx++)
		clean_mbox_info(&mbox_info[func_idx]);
}

/* point the send mailbox at its register area in the config BAR */
static void prepare_send_mbox(struct hinic_mbox_func_to_func *func_to_func)
{
	struct hinic_send_mbox *send_mbox = &func_to_func->send_mbox;

	send_mbox->data = MBOX_AREA(func_to_func->hwif);
}

/* allocate the DMA write-back status buffer and program its address into
 * the mailbox result registers
 */
static int alloc_mbox_wb_status(struct hinic_mbox_func_to_func *func_to_func)
{
	struct hinic_send_mbox *send_mbox = &func_to_func->send_mbox;
	struct hinic_hwdev *hwdev = func_to_func->hwdev;
	u32 addr_h, addr_l;

	send_mbox->wb_vaddr = dma_alloc_coherent(&hwdev->hwif->pdev->dev,
						 MBOX_WB_STATUS_LEN,
						 &send_mbox->wb_paddr,
						 GFP_KERNEL);
	if (!send_mbox->wb_vaddr)
		return -ENOMEM;

	send_mbox->wb_status = send_mbox->wb_vaddr;

	addr_h = upper_32_bits(send_mbox->wb_paddr);
	addr_l = lower_32_bits(send_mbox->wb_paddr);

	hinic_hwif_write_reg(hwdev->hwif, HINIC_FUNC_CSR_MAILBOX_RESULT_H_OFF,
			     addr_h);
	hinic_hwif_write_reg(hwdev->hwif, HINIC_FUNC_CSR_MAILBOX_RESULT_L_OFF,
			     addr_l);

	return 0;
}

/* clear the write-back address registers and free the DMA buffer */
static void free_mbox_wb_status(struct hinic_mbox_func_to_func *func_to_func)
{
	struct hinic_send_mbox *send_mbox = &func_to_func->send_mbox;
	struct hinic_hwdev *hwdev = func_to_func->hwdev;

	hinic_hwif_write_reg(hwdev->hwif, HINIC_FUNC_CSR_MAILBOX_RESULT_H_OFF,
			     0);
	hinic_hwif_write_reg(hwdev->hwif, HINIC_FUNC_CSR_MAILBOX_RESULT_L_OFF,
			     0);

	dma_free_coherent(&hwdev->hwif->pdev->dev, MBOX_WB_STATUS_LEN,
			  send_mbox->wb_vaddr,
			  send_mbox->wb_paddr);
}

/* default PF callback for HINIC_MOD_COMM: forward VF requests to the
 * management cpu, except START_FLR which is acknowledged with no data
 */
static int comm_pf_mbox_handler(void *handle, u16 vf_id, u8 cmd, void *buf_in,
				u16 in_size, void *buf_out, u16 *out_size)
{
	struct hinic_hwdev *hwdev = handle;
	struct hinic_pfhwdev *pfhwdev;
	int err = 0;

	pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);

	if (cmd == HINIC_COMM_CMD_START_FLR) {
		*out_size = 0;
	} else {
		err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM,
					cmd, buf_in, in_size, buf_out, out_size,
					HINIC_MGMT_MSG_SYNC);
		if (err && err != HINIC_MBOX_PF_BUSY_ACTIVE_FW)
			dev_err(&hwdev->hwif->pdev->dev,
				"PF mbox common callback handler err: %d\n",
				err);
	}

	return err;
}

/* allocate and wire up all function-to-function mailbox resources:
 * per-function recv slots, workqueue, write-back buffer, AEQ callbacks
 */
int hinic_func_to_func_init(struct hinic_hwdev *hwdev)
{
	struct hinic_mbox_func_to_func *func_to_func;
	struct hinic_pfhwdev *pfhwdev;
	int err;

	pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
	func_to_func = kzalloc(sizeof(*func_to_func), GFP_KERNEL);
	if (!func_to_func)
		return -ENOMEM;

	hwdev->func_to_func = func_to_func;
	func_to_func->hwdev = hwdev;
	func_to_func->hwif = hwdev->hwif;
	sema_init(&func_to_func->mbox_send_sem, 1);
	sema_init(&func_to_func->msg_send_sem, 1);
	spin_lock_init(&func_to_func->mbox_lock);
	func_to_func->workq = create_singlethread_workqueue(HINIC_MBOX_WQ_NAME);
	if (!func_to_func->workq) {
		dev_err(&hwdev->hwif->pdev->dev, "Failed to initialize MBOX workqueue\n");
		err = -ENOMEM;
		goto err_create_mbox_workq;
	}

	err = alloc_mbox_info(hwdev, func_to_func->mbox_send);
	if (err) {
		dev_err(&hwdev->hwif->pdev->dev, "Failed to alloc mem for mbox_active\n");
		goto err_alloc_mbox_for_send;
	}

	err = alloc_mbox_info(hwdev, func_to_func->mbox_resp);
	if (err) {
		dev_err(&hwdev->hwif->pdev->dev, "Failed to alloc mem for mbox_passive\n");
		goto err_alloc_mbox_for_resp;
	}

	err = alloc_mbox_wb_status(func_to_func);
	if (err) {
		dev_err(&hwdev->hwif->pdev->dev, "Failed to alloc mbox write back status\n");
		goto err_alloc_wb_status;
	}

	prepare_send_mbox(func_to_func);

	hinic_aeq_register_hw_cb(&hwdev->aeqs, HINIC_MBX_FROM_FUNC,
				 &pfhwdev->hwdev, hinic_mbox_func_aeqe_handler);
	hinic_aeq_register_hw_cb(&hwdev->aeqs, HINIC_MBX_SEND_RSLT,
				 &pfhwdev->hwdev, hinic_mbox_self_aeqe_handler);

	if (!HINIC_IS_VF(hwdev->hwif))
		hinic_register_pf_mbox_cb(hwdev, HINIC_MOD_COMM,
					  comm_pf_mbox_handler);

	return 0;

err_alloc_wb_status:
	free_mbox_info(func_to_func->mbox_resp);

err_alloc_mbox_for_resp:
	free_mbox_info(func_to_func->mbox_send);

err_alloc_mbox_for_send:
	destroy_workqueue(func_to_func->workq);

err_create_mbox_workq:
	kfree(func_to_func);

	return err;
}

void hinic_func_to_func_free(struct hinic_hwdev *hwdev)
{
	struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func;

	hinic_aeq_unregister_hw_cb(&hwdev->aeqs, HINIC_MBX_FROM_FUNC);
	hinic_aeq_unregister_hw_cb(&hwdev->aeqs, HINIC_MBX_SEND_RSLT);

	hinic_unregister_pf_mbox_cb(hwdev, HINIC_MOD_COMM);
	/* destroy workqueue before free related mbox resources in case of
	 * illegal resource access
	 */
	destroy_workqueue(func_to_func->workq);

	free_mbox_wb_status(func_to_func);
	free_mbox_info(func_to_func->mbox_resp);
free_mbox_info(func_to_func->mbox_send); 1208 1209 kfree(func_to_func); 1210 } 1211