// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include "hclge_main.h"
#include "hclge_mbx.h"
#include "hnae3.h"

/* hclge_gen_resp_to_vf: used to generate a synchronous response to VF when PF
 * receives a mailbox message from VF.
 * @vport: pointer to struct hclge_vport
 * @vf_to_pf_req: pointer to hclge_mbx_vf_to_pf_cmd of the original mailbox
 *		  message
 * @resp_status: indicate to VF whether its request succeeded (0) or failed.
 */
static int hclge_gen_resp_to_vf(struct hclge_vport *vport,
				struct hclge_mbx_vf_to_pf_cmd *vf_to_pf_req,
				int resp_status,
				u8 *resp_data, u16 resp_data_len)
{
	struct hclge_mbx_pf_to_vf_cmd *resp_pf_to_vf;
	struct hclge_dev *hdev = vport->back;
	enum hclge_cmd_status status;
	struct hclge_desc desc;

	resp_pf_to_vf = (struct hclge_mbx_pf_to_vf_cmd *)desc.data;

	if (resp_data_len > HCLGE_MBX_MAX_RESP_DATA_SIZE) {
		dev_err(&hdev->pdev->dev,
			"PF fail to gen resp to VF len %d exceeds max len %d\n",
			resp_data_len,
			HCLGE_MBX_MAX_RESP_DATA_SIZE);
		/* refuse oversized payloads instead of overflowing msg[] */
		return -EINVAL;
	}

	hclge_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_PF_TO_VF, false);

	resp_pf_to_vf->dest_vfid = vf_to_pf_req->mbx_src_vfid;
	resp_pf_to_vf->msg_len = vf_to_pf_req->msg_len;

	resp_pf_to_vf->msg[0] = HCLGE_MBX_PF_VF_RESP;
	resp_pf_to_vf->msg[1] = vf_to_pf_req->msg[0];
	resp_pf_to_vf->msg[2] = vf_to_pf_req->msg[1];
	resp_pf_to_vf->msg[3] = (resp_status == 0) ? 0 : 1;

	if (resp_data && resp_data_len > 0)
		memcpy(&resp_pf_to_vf->msg[4], resp_data, resp_data_len);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"PF failed(=%d) to send response to VF\n", status);

	return status;
}

static int hclge_send_mbx_msg(struct hclge_vport *vport, u8 *msg, u16 msg_len,
			      u16 mbx_opcode, u8 dest_vfid)
{
	struct hclge_mbx_pf_to_vf_cmd *resp_pf_to_vf;
	struct hclge_dev *hdev = vport->back;
	enum hclge_cmd_status status;
	struct hclge_desc desc;

	resp_pf_to_vf = (struct hclge_mbx_pf_to_vf_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_PF_TO_VF, false);

	resp_pf_to_vf->dest_vfid = dest_vfid;
	resp_pf_to_vf->msg_len = msg_len;
	resp_pf_to_vf->msg[0] = mbx_opcode;

	memcpy(&resp_pf_to_vf->msg[1], msg, msg_len);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"PF failed(=%d) to send mailbox message to VF\n",
			status);

	return status;
}

static int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport)
{
	u8 msg_data[2] = {0};	/* zero-init so no stack data leaks to VF */
	u8 dest_vfid;

	dest_vfid = (u8)vport->vport_id;

	/* send this requested info to VF */
	return hclge_send_mbx_msg(vport, msg_data, sizeof(u8),
				  HCLGE_MBX_ASSERTING_RESET, dest_vfid);
}

static void hclge_free_vector_ring_chain(struct hnae3_ring_chain_node *head)
{
	struct hnae3_ring_chain_node *chain_tmp, *chain;

	chain = head->next;

	while (chain) {
		chain_tmp = chain->next;
		kzfree(chain);
		chain = chain_tmp;
	}
}

/* hclge_get_ring_chain_from_mbx: get ring type & tqp id & int_gl idx
 * from mailbox message
 * msg[0]: opcode
 * msg[1]: <not relevant to this function>
 * msg[2]: ring_num
 * msg[3]: first ring type (TX|RX)
 * msg[4]: first tqp id
 * msg[5]: first int_gl idx
 * msg[6] ~ msg[14]: other ring type, tqp id and int_gl idx
 */
static int hclge_get_ring_chain_from_mbx(
			struct hclge_mbx_vf_to_pf_cmd *req,
			struct hnae3_ring_chain_node *ring_chain,
			struct hclge_vport *vport)
{
	struct hnae3_ring_chain_node *cur_chain, *new_chain;
	int ring_num;
	int i;

	ring_num = req->msg[2];

	if (ring_num > ((HCLGE_MBX_VF_MSG_DATA_NUM -
		HCLGE_MBX_RING_MAP_BASIC_MSG_NUM) /
		HCLGE_MBX_RING_NODE_VARIABLE_NUM))
		return -ENOMEM;

	hnae3_set_bit(ring_chain->flag, HNAE3_RING_TYPE_B, req->msg[3]);
	ring_chain->tqp_index =
			hclge_get_queue_id(vport->nic.kinfo.tqp[req->msg[4]]);
	hnae3_set_field(ring_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
			HNAE3_RING_GL_IDX_S,
			req->msg[5]);

	cur_chain = ring_chain;

	for (i = 1; i < ring_num; i++) {
		new_chain = kzalloc(sizeof(*new_chain), GFP_KERNEL);
		if (!new_chain)
			goto err;

		hnae3_set_bit(new_chain->flag, HNAE3_RING_TYPE_B,
			      req->msg[HCLGE_MBX_RING_NODE_VARIABLE_NUM * i +
			      HCLGE_MBX_RING_MAP_BASIC_MSG_NUM]);

		new_chain->tqp_index =
		hclge_get_queue_id(vport->nic.kinfo.tqp
			[req->msg[HCLGE_MBX_RING_NODE_VARIABLE_NUM * i +
			HCLGE_MBX_RING_MAP_BASIC_MSG_NUM + 1]]);

		hnae3_set_field(new_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
				HNAE3_RING_GL_IDX_S,
				req->msg[HCLGE_MBX_RING_NODE_VARIABLE_NUM * i +
				HCLGE_MBX_RING_MAP_BASIC_MSG_NUM + 2]);

		cur_chain->next = new_chain;
		cur_chain = new_chain;
	}

	return 0;
err:
	hclge_free_vector_ring_chain(ring_chain);
	return -ENOMEM;
}

static int hclge_map_unmap_ring_to_vf_vector(struct hclge_vport *vport, bool en,
					     struct hclge_mbx_vf_to_pf_cmd *req)
{
	struct hnae3_ring_chain_node ring_chain;
	int vector_id = req->msg[1];
	int ret;

	memset(&ring_chain, 0, sizeof(ring_chain));
	ret = hclge_get_ring_chain_from_mbx(req, &ring_chain, vport);
	if (ret)
		return ret;

	ret = hclge_bind_ring_with_vector(vport, vector_id, en, &ring_chain);

	/* free the chain nodes even if binding failed, so the error
	 * path does not leak the allocated nodes
	 */
	hclge_free_vector_ring_chain(&ring_chain);

	return ret;
}
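
/* hclge_set_vf_promisc_mode: set promiscuous mode for a VF from mailbox message
 * msg[1]: enable unicast promiscuous mode
 * msg[2]: enable multicast promiscuous mode
 * broadcast promiscuous mode is always enabled for a VF
 */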
static int hclge_set_vf_promisc_mode(struct hclge_vport *vport,
				     struct hclge_mbx_vf_to_pf_cmd *req)
{
	bool en_uc = req->msg[1] ? true : false;
	bool en_mc = req->msg[2] ? true : false;
	struct hclge_promisc_param param;

	/* always enable broadcast promisc bit */
	hclge_promisc_param_init(&param, en_uc, en_mc, true, vport->vport_id);
	return hclge_cmd_set_promisc_mode(vport->back, &param);
}

static int hclge_set_vf_uc_mac_addr(struct hclge_vport *vport,
				    struct hclge_mbx_vf_to_pf_cmd *mbx_req,
				    bool gen_resp)
{
	const u8 *mac_addr = (const u8 *)(&mbx_req->msg[2]);
	struct hclge_dev *hdev = vport->back;
	int status;

	if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_UC_MODIFY) {
		const u8 *old_addr = (const u8 *)(&mbx_req->msg[8]);

		hclge_rm_uc_addr_common(vport, old_addr);
		status = hclge_add_uc_addr_common(vport, mac_addr);
		if (status)
			hclge_add_uc_addr_common(vport, old_addr);
	} else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_UC_ADD) {
		status = hclge_add_uc_addr_common(vport, mac_addr);
	} else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_UC_REMOVE) {
		status = hclge_rm_uc_addr_common(vport, mac_addr);
	} else {
		dev_err(&hdev->pdev->dev,
			"failed to set unicast mac addr, unknown subcode %d\n",
			mbx_req->msg[1]);
		return -EIO;
	}

	if (gen_resp)
		hclge_gen_resp_to_vf(vport, mbx_req, status, NULL, 0);

	return 0;
}

static int hclge_set_vf_mc_mac_addr(struct hclge_vport *vport,
				    struct hclge_mbx_vf_to_pf_cmd *mbx_req,
				    bool gen_resp)
{
	const u8 *mac_addr = (const u8 *)(&mbx_req->msg[2]);
	struct hclge_dev *hdev = vport->back;
	u8 resp_len = 0;
	u8 resp_data;
	int status;

	if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MC_ADD) {
		status = hclge_add_mc_addr_common(vport, mac_addr);
	} else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MC_REMOVE) {
		status = hclge_rm_mc_addr_common(vport, mac_addr);
	} else {
		dev_err(&hdev->pdev->dev,
			"failed to set mcast mac addr, unknown subcode %d\n",
			mbx_req->msg[1]);
		return -EIO;
	}

	if (gen_resp)
		hclge_gen_resp_to_vf(vport, mbx_req, status,
				     &resp_data, resp_len);

	return 0;
}
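
/* hclge_set_vf_vlan_cfg: configure VLAN for a VF from mailbox message
 * msg[1]: subcode, HCLGE_MBX_VLAN_FILTER or HCLGE_MBX_VLAN_RX_OFF_CFG
 * for HCLGE_MBX_VLAN_FILTER:
 * msg[2]: is_kill (add or remove the filter)
 * msg[3] ~ msg[4]: VLAN id
 * msg[5] ~ msg[6]: VLAN protocol
 * for HCLGE_MBX_VLAN_RX_OFF_CFG:
 * msg[2]: enable RX VLAN tag stripping
 */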
static int hclge_set_vf_vlan_cfg(struct hclge_vport *vport,
				 struct hclge_mbx_vf_to_pf_cmd *mbx_req,
				 bool gen_resp)
{
	int status = 0;

	if (mbx_req->msg[1] == HCLGE_MBX_VLAN_FILTER) {
		struct hnae3_handle *handle = &vport->nic;
		u16 vlan, proto;
		bool is_kill;

		is_kill = !!mbx_req->msg[2];
		memcpy(&vlan, &mbx_req->msg[3], sizeof(vlan));
		memcpy(&proto, &mbx_req->msg[5], sizeof(proto));
		status = hclge_set_vlan_filter(handle, cpu_to_be16(proto),
					       vlan, is_kill);
	} else if (mbx_req->msg[1] == HCLGE_MBX_VLAN_RX_OFF_CFG) {
		struct hnae3_handle *handle = &vport->nic;
		bool en = mbx_req->msg[2] ? true : false;

		status = hclge_en_hw_strip_rxvtag(handle, en);
	}

	if (gen_resp)
		status = hclge_gen_resp_to_vf(vport, mbx_req, status, NULL, 0);

	return status;
}

static int hclge_get_vf_tcinfo(struct hclge_vport *vport,
			       struct hclge_mbx_vf_to_pf_cmd *mbx_req,
			       bool gen_resp)
{
	struct hclge_dev *hdev = vport->back;
	int ret;

	ret = hclge_gen_resp_to_vf(vport, mbx_req, 0, &hdev->hw_tc_map,
				   sizeof(u8));

	return ret;
}

static int hclge_get_vf_queue_info(struct hclge_vport *vport,
				   struct hclge_mbx_vf_to_pf_cmd *mbx_req,
				   bool gen_resp)
{
#define HCLGE_TQPS_RSS_INFO_LEN		8
	u8 resp_data[HCLGE_TQPS_RSS_INFO_LEN];
	struct hclge_dev *hdev = vport->back;

	/* get the queue related info */
	memcpy(&resp_data[0], &vport->alloc_tqps, sizeof(u16));
	memcpy(&resp_data[2], &vport->nic.kinfo.rss_size, sizeof(u16));
	memcpy(&resp_data[4], &hdev->num_desc, sizeof(u16));
	memcpy(&resp_data[6], &hdev->rx_buf_len, sizeof(u16));

	return hclge_gen_resp_to_vf(vport, mbx_req, 0, resp_data,
				    HCLGE_TQPS_RSS_INFO_LEN);
}
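
/* hclge_get_link_info: push the current link status to a VF
 * msg_data[0] ~ msg_data[1]: link status
 * msg_data[2] ~ msg_data[5]: link speed
 * msg_data[6] ~ msg_data[7]: duplex
 */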
static int hclge_get_link_info(struct hclge_vport *vport,
			       struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
	struct hclge_dev *hdev = vport->back;
	u16 link_status;
	u8 msg_data[8];
	u8 dest_vfid;
	u16 duplex;

	/* mac.link can only be 0 or 1 */
	link_status = (u16)hdev->hw.mac.link;
	duplex = hdev->hw.mac.duplex;
	memcpy(&msg_data[0], &link_status, sizeof(u16));
	memcpy(&msg_data[2], &hdev->hw.mac.speed, sizeof(u32));
	memcpy(&msg_data[6], &duplex, sizeof(u16));
	dest_vfid = mbx_req->mbx_src_vfid;

	/* send this requested info to VF */
	return hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data),
				  HCLGE_MBX_LINK_STAT_CHANGE, dest_vfid);
}

static void hclge_mbx_reset_vf_queue(struct hclge_vport *vport,
				     struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
	u16 queue_id;

	memcpy(&queue_id, &mbx_req->msg[2], sizeof(queue_id));

	hclge_reset_vf_queue(vport, queue_id);

	/* send response msg to VF after queue reset complete */
	hclge_gen_resp_to_vf(vport, mbx_req, 0, NULL, 0);
}

static void hclge_reset_vf(struct hclge_vport *vport,
			   struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
	struct hclge_dev *hdev = vport->back;
	int ret;

	dev_warn(&hdev->pdev->dev, "PF received VF reset request from VF %d!\n",
		 mbx_req->mbx_src_vfid);

	/* Acknowledge VF that PF is now about to assert the reset for the VF.
	 * On receiving this message VF will get into pending state and will
	 * start polling for the hardware reset completion status.
	 */
	ret = hclge_inform_reset_assert_to_vf(vport);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"PF fail(%d) to inform VF(%d) of reset, reset failed!\n",
			ret, vport->vport_id);
		return;
	}

	dev_warn(&hdev->pdev->dev, "PF is now resetting VF %d.\n",
		 mbx_req->mbx_src_vfid);
	/* reset this virtual function */
	hclge_func_reset_cmd(hdev, mbx_req->mbx_src_vfid);
}

static bool hclge_cmd_crq_empty(struct hclge_hw *hw)
{
	u32 tail = hclge_read_dev(hw, HCLGE_NIC_CRQ_TAIL_REG);

	return tail == hw->cmq.crq.next_to_use;
}

void hclge_mbx_handler(struct hclge_dev *hdev)
{
	struct hclge_cmq_ring *crq = &hdev->hw.cmq.crq;
	struct hclge_mbx_vf_to_pf_cmd *req;
	struct hclge_vport *vport;
	struct hclge_desc *desc;
	int ret, flag;

	/* handle all the mailbox requests in the queue */
	while (!hclge_cmd_crq_empty(&hdev->hw)) {
		if (test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state)) {
			dev_warn(&hdev->pdev->dev,
				 "command queue needs re-initializing\n");
			return;
		}

		desc = &crq->desc[crq->next_to_use];
		req = (struct hclge_mbx_vf_to_pf_cmd *)desc->data;

		flag = le16_to_cpu(crq->desc[crq->next_to_use].flag);
		if (unlikely(!hnae3_get_bit(flag, HCLGE_CMDQ_RX_OUTVLD_B))) {
			dev_warn(&hdev->pdev->dev,
				 "dropped invalid mailbox message, code = %d\n",
				 req->msg[0]);

			/* dropping/not processing this invalid message */
			crq->desc[crq->next_to_use].flag = 0;
			hclge_mbx_ring_ptr_move_crq(crq);
			continue;
		}

		vport = &hdev->vport[req->mbx_src_vfid];

		switch (req->msg[0]) {
		case HCLGE_MBX_MAP_RING_TO_VECTOR:
			ret = hclge_map_unmap_ring_to_vf_vector(vport, true,
								req);
			break;
		case HCLGE_MBX_UNMAP_RING_TO_VECTOR:
			ret = hclge_map_unmap_ring_to_vf_vector(vport, false,
								req);
			break;
		case HCLGE_MBX_SET_PROMISC_MODE:
			ret = hclge_set_vf_promisc_mode(vport, req);
			if (ret)
				dev_err(&hdev->pdev->dev,
					"PF fail(%d) to set VF promisc mode\n",
					ret);
			break;
		case HCLGE_MBX_SET_UNICAST:
			ret = hclge_set_vf_uc_mac_addr(vport, req, true);
			if (ret)
				dev_err(&hdev->pdev->dev,
					"PF fail(%d) to set VF UC MAC Addr\n",
					ret);
			break;
		case HCLGE_MBX_SET_MULTICAST:
			ret = hclge_set_vf_mc_mac_addr(vport, req, false);
			if (ret)
				dev_err(&hdev->pdev->dev,
					"PF fail(%d) to set VF MC MAC Addr\n",
					ret);
			break;
		case HCLGE_MBX_SET_VLAN:
			ret = hclge_set_vf_vlan_cfg(vport, req, false);
			if (ret)
				dev_err(&hdev->pdev->dev,
					"PF failed(%d) to config VF's VLAN\n",
					ret);
			break;
		case HCLGE_MBX_GET_QINFO:
			ret = hclge_get_vf_queue_info(vport, req, true);
			if (ret)
				dev_err(&hdev->pdev->dev,
					"PF failed(%d) to get Q info for VF\n",
					ret);
			break;
		case HCLGE_MBX_GET_TCINFO:
			ret = hclge_get_vf_tcinfo(vport, req, true);
			if (ret)
				dev_err(&hdev->pdev->dev,
					"PF failed(%d) to get TC info for VF\n",
					ret);
			break;
		case HCLGE_MBX_GET_LINK_STATUS:
			ret = hclge_get_link_info(vport, req);
			if (ret)
				dev_err(&hdev->pdev->dev,
					"PF fail(%d) to get link stat for VF\n",
					ret);
			break;
		case HCLGE_MBX_QUEUE_RESET:
			hclge_mbx_reset_vf_queue(vport, req);
			break;
		case HCLGE_MBX_RESET:
			hclge_reset_vf(vport, req);
			break;
		default:
			dev_err(&hdev->pdev->dev,
				"un-supported mailbox message, code = %d\n",
				req->msg[0]);
			break;
		}
		crq->desc[crq->next_to_use].flag = 0;
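		/* this request has been handled, move on to the next
		 * descriptor in the receive command queue
		 */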
		hclge_mbx_ring_ptr_move_crq(crq);
	}

	/* Write back CMDQ_RQ header pointer, M7 needs this pointer */
	hclge_write_dev(&hdev->hw, HCLGE_NIC_CRQ_HEAD_REG, crq->next_to_use);
}