// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include "hclge_main.h"
#include "hclge_mbx.h"
#include "hnae3.h"

static u16 hclge_errno_to_resp(int errno)
{
	return abs(errno);
}

/* hclge_gen_resp_to_vf: used to generate a synchronous response to VF when PF
 * receives a mailbox message from VF.
 * @vport: pointer to struct hclge_vport
 * @vf_to_pf_req: pointer to hclge_mbx_vf_to_pf_cmd of the original mailbox
 *		  message
 * @resp_msg: indicates to VF whether its request succeeded (0) or failed.
 */
static int hclge_gen_resp_to_vf(struct hclge_vport *vport,
				struct hclge_mbx_vf_to_pf_cmd *vf_to_pf_req,
				struct hclge_respond_to_vf_msg *resp_msg)
{
	struct hclge_mbx_pf_to_vf_cmd *resp_pf_to_vf;
	struct hclge_dev *hdev = vport->back;
	enum hclge_cmd_status status;
	struct hclge_desc desc;
	u16 resp;

	resp_pf_to_vf = (struct hclge_mbx_pf_to_vf_cmd *)desc.data;

	if (resp_msg->len > HCLGE_MBX_MAX_RESP_DATA_SIZE) {
		dev_err(&hdev->pdev->dev,
			"PF fail to gen resp to VF len %u exceeds max len %u\n",
			resp_msg->len,
			HCLGE_MBX_MAX_RESP_DATA_SIZE);
		/* If resp_msg->len is too long, set the value to max length
		 * and return the msg to VF
		 */
		resp_msg->len = HCLGE_MBX_MAX_RESP_DATA_SIZE;
	}

	hclge_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_PF_TO_VF, false);

	resp_pf_to_vf->dest_vfid = vf_to_pf_req->mbx_src_vfid;
	resp_pf_to_vf->msg_len = vf_to_pf_req->msg_len;

	resp_pf_to_vf->msg.code = HCLGE_MBX_PF_VF_RESP;
	resp_pf_to_vf->msg.vf_mbx_msg_code = vf_to_pf_req->msg.code;
	resp_pf_to_vf->msg.vf_mbx_msg_subcode = vf_to_pf_req->msg.subcode;
	resp = hclge_errno_to_resp(resp_msg->status);
	if (resp < SHRT_MAX) {
		resp_pf_to_vf->msg.resp_status = resp;
	} else {
		dev_warn(&hdev->pdev->dev,
			 "failed to send response to VF, response status %d is out-of-bound\n",
			 resp);
		resp_pf_to_vf->msg.resp_status = EIO;
	}

	if (resp_msg->len > 0)
		memcpy(resp_pf_to_vf->msg.resp_data, resp_msg->data,
		       resp_msg->len);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"failed to send response to VF, status: %d, vfid: %u, code: %u, subcode: %u.\n",
			status, vf_to_pf_req->mbx_src_vfid,
			vf_to_pf_req->msg.code, vf_to_pf_req->msg.subcode);

	return status;
}

static int hclge_send_mbx_msg(struct hclge_vport *vport, u8 *msg, u16 msg_len,
			      u16 mbx_opcode, u8 dest_vfid)
{
	struct hclge_mbx_pf_to_vf_cmd *resp_pf_to_vf;
	struct hclge_dev *hdev = vport->back;
	enum hclge_cmd_status status;
	struct hclge_desc desc;

	resp_pf_to_vf = (struct hclge_mbx_pf_to_vf_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_PF_TO_VF, false);

	resp_pf_to_vf->dest_vfid = dest_vfid;
	resp_pf_to_vf->msg_len = msg_len;
	resp_pf_to_vf->msg.code = mbx_opcode;

	memcpy(&resp_pf_to_vf->msg.vf_mbx_msg_code, msg, msg_len);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"failed to send mailbox to VF, status: %d, vfid: %u, opcode: %u\n",
			status, dest_vfid, mbx_opcode);

	return status;
}

int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport)
{
	struct hclge_dev *hdev = vport->back;
	u16 reset_type;
	u8 msg_data[2];
	u8 dest_vfid;

	BUILD_BUG_ON(HNAE3_MAX_RESET > U16_MAX);

	dest_vfid = (u8)vport->vport_id;

	if (hdev->reset_type == HNAE3_FUNC_RESET)
		reset_type = HNAE3_VF_PF_FUNC_RESET;
	else if (hdev->reset_type == HNAE3_FLR_RESET)
		reset_type = HNAE3_VF_FULL_RESET;
	else
		reset_type = HNAE3_VF_FUNC_RESET;

	memcpy(&msg_data[0], &reset_type, sizeof(u16));

	/* send this requested info to VF */
	return hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data),
				  HCLGE_MBX_ASSERTING_RESET, dest_vfid);
}

static void hclge_free_vector_ring_chain(struct hnae3_ring_chain_node *head)
{
	struct hnae3_ring_chain_node *chain_tmp, *chain;

	chain = head->next;

	while (chain) {
		chain_tmp = chain->next;
		kzfree(chain);
		chain = chain_tmp;
	}
}

/* hclge_get_ring_chain_from_mbx: get ring type & tqp id & int_gl idx
 * from mailbox message
 * msg[0]: opcode
 * msg[1]: <not relevant to this function>
 * msg[2]: ring_num
 * msg[3]: first ring type (TX|RX)
 * msg[4]: first tqp id
 * msg[5]: first int_gl idx
 * msg[6] ~ msg[14]: other ring type, tqp id and int_gl idx
 */
static int hclge_get_ring_chain_from_mbx(
			struct hclge_mbx_vf_to_pf_cmd *req,
			struct hnae3_ring_chain_node *ring_chain,
			struct hclge_vport *vport)
{
	struct hnae3_ring_chain_node *cur_chain, *new_chain;
	int ring_num;
	int i = 0;

	ring_num = req->msg.ring_num;

	if (ring_num > HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM)
		return -ENOMEM;

	hnae3_set_bit(ring_chain->flag, HNAE3_RING_TYPE_B,
		      req->msg.param[i].ring_type);
	ring_chain->tqp_index =
		hclge_get_queue_id(vport->nic.kinfo.tqp
				   [req->msg.param[i].tqp_index]);
	hnae3_set_field(ring_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
			HNAE3_RING_GL_IDX_S, req->msg.param[i].int_gl_index);

	cur_chain = ring_chain;

	for (i = 1; i < ring_num; i++) {
		new_chain = kzalloc(sizeof(*new_chain), GFP_KERNEL);
		if (!new_chain)
			goto err;

		hnae3_set_bit(new_chain->flag, HNAE3_RING_TYPE_B,
			      req->msg.param[i].ring_type);

		new_chain->tqp_index =
			hclge_get_queue_id(vport->nic.kinfo.tqp
					   [req->msg.param[i].tqp_index]);

		hnae3_set_field(new_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
				HNAE3_RING_GL_IDX_S,
				req->msg.param[i].int_gl_index);

		cur_chain->next = new_chain;
		cur_chain = new_chain;
	}

	return 0;
err:
	hclge_free_vector_ring_chain(ring_chain);
	return -ENOMEM;
}

static int hclge_map_unmap_ring_to_vf_vector(struct hclge_vport *vport, bool en,
					     struct hclge_mbx_vf_to_pf_cmd *req)
{
	struct hnae3_ring_chain_node ring_chain;
	int vector_id = req->msg.vector_id;
	int ret;

	memset(&ring_chain, 0, sizeof(ring_chain));
	ret = hclge_get_ring_chain_from_mbx(req, &ring_chain, vport);
	if (ret)
		return ret;

	ret = hclge_bind_ring_with_vector(vport, vector_id, en, &ring_chain);

	hclge_free_vector_ring_chain(&ring_chain);

	return ret;
}

static int hclge_set_vf_promisc_mode(struct hclge_vport *vport,
				     struct hclge_mbx_vf_to_pf_cmd *req)
{
	bool en_bc = req->msg.en_bc ? true : false;
	bool en_uc = req->msg.en_uc ? true : false;
	bool en_mc = req->msg.en_mc ? true : false;
	int ret;

	if (!vport->vf_info.trusted) {
		en_uc = false;
		en_mc = false;
	}

	ret = hclge_set_vport_promisc_mode(vport, en_uc, en_mc, en_bc);

	vport->vf_info.promisc_enable = (en_uc || en_mc) ? 1 : 0;

	return ret;
}

void hclge_inform_vf_promisc_info(struct hclge_vport *vport)
{
	u8 dest_vfid = (u8)vport->vport_id;
	u8 msg_data[2];

	memcpy(&msg_data[0], &vport->vf_info.promisc_enable, sizeof(u16));

	hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data),
			   HCLGE_MBX_PUSH_PROMISC_INFO, dest_vfid);
}

static int hclge_set_vf_uc_mac_addr(struct hclge_vport *vport,
				    struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
#define HCLGE_MBX_VF_OLD_MAC_ADDR_OFFSET	6

	const u8 *mac_addr = (const u8 *)(mbx_req->msg.data);
	struct hclge_dev *hdev = vport->back;
	int status;

	if (mbx_req->msg.subcode == HCLGE_MBX_MAC_VLAN_UC_MODIFY) {
		const u8 *old_addr = (const u8 *)
			(&mbx_req->msg.data[HCLGE_MBX_VF_OLD_MAC_ADDR_OFFSET]);

		/* If VF MAC has been configured by the host then it
		 * cannot be overridden by the MAC specified by the VM.
		 */
		if (!is_zero_ether_addr(vport->vf_info.mac) &&
		    !ether_addr_equal(mac_addr, vport->vf_info.mac))
			return -EPERM;

		if (!is_valid_ether_addr(mac_addr))
			return -EINVAL;

		hclge_rm_uc_addr_common(vport, old_addr);
		status = hclge_add_uc_addr_common(vport, mac_addr);
		if (status) {
			hclge_add_uc_addr_common(vport, old_addr);
		} else {
			hclge_rm_vport_mac_table(vport, mac_addr,
						 false, HCLGE_MAC_ADDR_UC);
			hclge_add_vport_mac_table(vport, mac_addr,
						  HCLGE_MAC_ADDR_UC);
		}
	} else if (mbx_req->msg.subcode == HCLGE_MBX_MAC_VLAN_UC_ADD) {
		status = hclge_add_uc_addr_common(vport, mac_addr);
		if (!status)
			hclge_add_vport_mac_table(vport, mac_addr,
						  HCLGE_MAC_ADDR_UC);
	} else if (mbx_req->msg.subcode == HCLGE_MBX_MAC_VLAN_UC_REMOVE) {
		status = hclge_rm_uc_addr_common(vport, mac_addr);
		if (!status)
			hclge_rm_vport_mac_table(vport, mac_addr,
						 false, HCLGE_MAC_ADDR_UC);
	} else {
		dev_err(&hdev->pdev->dev,
			"failed to set unicast mac addr, unknown subcode %u\n",
			mbx_req->msg.subcode);
		return -EIO;
	}

	return status;
}

static int hclge_set_vf_mc_mac_addr(struct hclge_vport *vport,
				    struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
	const u8 *mac_addr = (const u8 *)(mbx_req->msg.data);
	struct hclge_dev *hdev = vport->back;
	int status;

	if (mbx_req->msg.subcode == HCLGE_MBX_MAC_VLAN_MC_ADD) {
		status = hclge_add_mc_addr_common(vport, mac_addr);
		if (!status)
			hclge_add_vport_mac_table(vport, mac_addr,
						  HCLGE_MAC_ADDR_MC);
	} else if (mbx_req->msg.subcode == HCLGE_MBX_MAC_VLAN_MC_REMOVE) {
		status = hclge_rm_mc_addr_common(vport, mac_addr);
		if (!status)
			hclge_rm_vport_mac_table(vport, mac_addr,
						 false, HCLGE_MAC_ADDR_MC);
	} else {
		dev_err(&hdev->pdev->dev,
			"failed to set mcast mac addr, unknown subcode %u\n",
			mbx_req->msg.subcode);
		return -EIO;
	}

	return status;
}

int hclge_push_vf_port_base_vlan_info(struct hclge_vport *vport, u8 vfid,
				      u16 state, u16 vlan_tag, u16 qos,
				      u16 vlan_proto)
{
#define MSG_DATA_SIZE	8

	u8 msg_data[MSG_DATA_SIZE];

	memcpy(&msg_data[0], &state, sizeof(u16));
	memcpy(&msg_data[2], &vlan_proto, sizeof(u16));
	memcpy(&msg_data[4], &qos, sizeof(u16));
	memcpy(&msg_data[6], &vlan_tag, sizeof(u16));

	return hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data),
				  HCLGE_MBX_PUSH_VLAN_INFO, vfid);
}

static int hclge_set_vf_vlan_cfg(struct hclge_vport *vport,
				 struct hclge_mbx_vf_to_pf_cmd *mbx_req,
				 struct hclge_respond_to_vf_msg *resp_msg)
{
#define HCLGE_MBX_VLAN_STATE_OFFSET	0
#define HCLGE_MBX_VLAN_INFO_OFFSET	2

	struct hclge_vf_vlan_cfg *msg_cmd;
	int status = 0;

	msg_cmd = (struct hclge_vf_vlan_cfg *)&mbx_req->msg;
	if (msg_cmd->subcode == HCLGE_MBX_VLAN_FILTER) {
		struct hnae3_handle *handle = &vport->nic;
		u16 vlan, proto;
		bool is_kill;

		is_kill = !!msg_cmd->is_kill;
		vlan = msg_cmd->vlan;
		proto = msg_cmd->proto;
		status = hclge_set_vlan_filter(handle, cpu_to_be16(proto),
					       vlan, is_kill);
	} else if (msg_cmd->subcode == HCLGE_MBX_VLAN_RX_OFF_CFG) {
		struct hnae3_handle *handle = &vport->nic;
		bool en = msg_cmd->is_kill ? true : false;

		status = hclge_en_hw_strip_rxvtag(handle, en);
	} else if (msg_cmd->subcode == HCLGE_MBX_PORT_BASE_VLAN_CFG) {
		struct hclge_vlan_info *vlan_info;
		u16 *state;

		state = (u16 *)&mbx_req->msg.data[HCLGE_MBX_VLAN_STATE_OFFSET];
		vlan_info = (struct hclge_vlan_info *)
			&mbx_req->msg.data[HCLGE_MBX_VLAN_INFO_OFFSET];
		status = hclge_update_port_base_vlan_cfg(vport, *state,
							 vlan_info);
	} else if (msg_cmd->subcode == HCLGE_MBX_GET_PORT_BASE_VLAN_STATE) {
		resp_msg->data[0] = vport->port_base_vlan_cfg.state;
		resp_msg->len = sizeof(u8);
	}

	return status;
}

static int hclge_set_vf_alive(struct hclge_vport *vport,
			      struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
	bool alive = !!mbx_req->msg.data[0];
	int ret = 0;

	if (alive)
		ret = hclge_vport_start(vport);
	else
		hclge_vport_stop(vport);

	return ret;
}

static void hclge_get_vf_tcinfo(struct hclge_vport *vport,
				struct hclge_respond_to_vf_msg *resp_msg)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	unsigned int i;

	for (i = 0; i < kinfo->num_tc; i++)
		resp_msg->data[0] |= BIT(i);

	resp_msg->len = sizeof(u8);
}

static void hclge_get_vf_queue_info(struct hclge_vport *vport,
				    struct hclge_respond_to_vf_msg *resp_msg)
{
#define HCLGE_TQPS_RSS_INFO_LEN		6
#define HCLGE_TQPS_ALLOC_OFFSET		0
#define HCLGE_TQPS_RSS_SIZE_OFFSET	2
#define HCLGE_TQPS_RX_BUFFER_LEN_OFFSET	4

	struct hclge_dev *hdev = vport->back;

	/* get the queue related info */
	memcpy(&resp_msg->data[HCLGE_TQPS_ALLOC_OFFSET],
	       &vport->alloc_tqps, sizeof(u16));
	memcpy(&resp_msg->data[HCLGE_TQPS_RSS_SIZE_OFFSET],
	       &vport->nic.kinfo.rss_size, sizeof(u16));
	memcpy(&resp_msg->data[HCLGE_TQPS_RX_BUFFER_LEN_OFFSET],
	       &hdev->rx_buf_len, sizeof(u16));
	resp_msg->len = HCLGE_TQPS_RSS_INFO_LEN;
}

static void hclge_get_vf_mac_addr(struct hclge_vport *vport,
				  struct hclge_respond_to_vf_msg *resp_msg)
{
	ether_addr_copy(resp_msg->data, vport->vf_info.mac);
	resp_msg->len = ETH_ALEN;
}

static void hclge_get_vf_queue_depth(struct hclge_vport *vport,
				     struct hclge_respond_to_vf_msg *resp_msg)
{
#define HCLGE_TQPS_DEPTH_INFO_LEN	4
#define HCLGE_TQPS_NUM_TX_DESC_OFFSET	0
#define HCLGE_TQPS_NUM_RX_DESC_OFFSET	2

	struct hclge_dev *hdev = vport->back;

	/* get the queue depth info */
	memcpy(&resp_msg->data[HCLGE_TQPS_NUM_TX_DESC_OFFSET],
	       &hdev->num_tx_desc, sizeof(u16));
	memcpy(&resp_msg->data[HCLGE_TQPS_NUM_RX_DESC_OFFSET],
	       &hdev->num_rx_desc, sizeof(u16));
	resp_msg->len = HCLGE_TQPS_DEPTH_INFO_LEN;
}

static void hclge_get_vf_media_type(struct hclge_vport *vport,
				    struct hclge_respond_to_vf_msg *resp_msg)
{
#define HCLGE_VF_MEDIA_TYPE_OFFSET	0
#define HCLGE_VF_MODULE_TYPE_OFFSET	1
#define HCLGE_VF_MEDIA_TYPE_LENGTH	2

	struct hclge_dev *hdev = vport->back;

	resp_msg->data[HCLGE_VF_MEDIA_TYPE_OFFSET] =
		hdev->hw.mac.media_type;
	resp_msg->data[HCLGE_VF_MODULE_TYPE_OFFSET] =
		hdev->hw.mac.module_type;
	resp_msg->len = HCLGE_VF_MEDIA_TYPE_LENGTH;
}

static int hclge_get_link_info(struct hclge_vport *vport,
			       struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
#define HCLGE_VF_LINK_STATE_UP		1U
#define HCLGE_VF_LINK_STATE_DOWN	0U

	struct hclge_dev *hdev = vport->back;
	u16 link_status;
	u8 msg_data[8];
	u8 dest_vfid;
	u16 duplex;

	/* mac.link can only be 0 or 1 */
	switch (vport->vf_info.link_state) {
	case IFLA_VF_LINK_STATE_ENABLE:
		link_status = HCLGE_VF_LINK_STATE_UP;
		break;
	case IFLA_VF_LINK_STATE_DISABLE:
		link_status = HCLGE_VF_LINK_STATE_DOWN;
		break;
	case IFLA_VF_LINK_STATE_AUTO:
	default:
		link_status = (u16)hdev->hw.mac.link;
		break;
	}

	duplex = hdev->hw.mac.duplex;
	memcpy(&msg_data[0], &link_status, sizeof(u16));
	memcpy(&msg_data[2], &hdev->hw.mac.speed, sizeof(u32));
	memcpy(&msg_data[6], &duplex, sizeof(u16));
	dest_vfid = mbx_req->mbx_src_vfid;

	/* send this requested info to VF */
	return hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data),
				  HCLGE_MBX_LINK_STAT_CHANGE, dest_vfid);
}

static void hclge_get_link_mode(struct hclge_vport *vport,
				struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
#define HCLGE_SUPPORTED   1
	struct hclge_dev *hdev = vport->back;
	unsigned long advertising;
	unsigned long supported;
	unsigned long send_data;
	u8 msg_data[10];
	u8 dest_vfid;

	advertising = hdev->hw.mac.advertising[0];
	supported = hdev->hw.mac.supported[0];
	dest_vfid = mbx_req->mbx_src_vfid;
	msg_data[0] = mbx_req->msg.data[0];

	send_data = msg_data[0] == HCLGE_SUPPORTED ? supported : advertising;

	memcpy(&msg_data[2], &send_data, sizeof(unsigned long));
	hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data),
			   HCLGE_MBX_LINK_STAT_MODE, dest_vfid);
}

static void hclge_mbx_reset_vf_queue(struct hclge_vport *vport,
				     struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
	u16 queue_id;

	memcpy(&queue_id, mbx_req->msg.data, sizeof(queue_id));

	hclge_reset_vf_queue(vport, queue_id);
}

static int hclge_reset_vf(struct hclge_vport *vport)
{
	struct hclge_dev *hdev = vport->back;

	dev_warn(&hdev->pdev->dev, "PF received VF reset request from VF %u!",
		 vport->vport_id);

	return hclge_func_reset_cmd(hdev, vport->vport_id);
}

static void hclge_vf_keep_alive(struct hclge_vport *vport)
{
	vport->last_active_jiffies = jiffies;
}

static int hclge_set_vf_mtu(struct hclge_vport *vport,
			    struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
	u32 mtu;

	memcpy(&mtu, mbx_req->msg.data, sizeof(mtu));

	return hclge_set_vport_mtu(vport, mtu);
}

static void hclge_get_queue_id_in_pf(struct hclge_vport *vport,
				     struct hclge_mbx_vf_to_pf_cmd *mbx_req,
				     struct hclge_respond_to_vf_msg *resp_msg)
{
	u16 queue_id, qid_in_pf;

	memcpy(&queue_id, mbx_req->msg.data, sizeof(queue_id));
	qid_in_pf = hclge_covert_handle_qid_global(&vport->nic, queue_id);
	memcpy(resp_msg->data, &qid_in_pf, sizeof(qid_in_pf));
	resp_msg->len = sizeof(qid_in_pf);
}

static void hclge_get_rss_key(struct hclge_vport *vport,
			      struct hclge_mbx_vf_to_pf_cmd *mbx_req,
			      struct hclge_respond_to_vf_msg *resp_msg)
{
#define HCLGE_RSS_MBX_RESP_LEN	8
	struct hclge_dev *hdev = vport->back;
	u8 index;

	index = mbx_req->msg.data[0];

	memcpy(resp_msg->data,
	       &hdev->vport[0].rss_hash_key[index * HCLGE_RSS_MBX_RESP_LEN],
	       HCLGE_RSS_MBX_RESP_LEN);
	resp_msg->len = HCLGE_RSS_MBX_RESP_LEN;
}

static void hclge_link_fail_parse(struct hclge_dev *hdev, u8 link_fail_code)
{
	switch (link_fail_code) {
	case HCLGE_LF_REF_CLOCK_LOST:
		dev_warn(&hdev->pdev->dev, "Reference clock lost!\n");
		break;
	case HCLGE_LF_XSFP_TX_DISABLE:
		dev_warn(&hdev->pdev->dev, "SFP tx is disabled!\n");
		break;
	case HCLGE_LF_XSFP_ABSENT:
		dev_warn(&hdev->pdev->dev, "SFP is absent!\n");
		break;
	default:
		break;
	}
}

static void hclge_handle_link_change_event(struct hclge_dev *hdev,
					   struct hclge_mbx_vf_to_pf_cmd *req)
{
	hclge_task_schedule(hdev, 0);

	if (!req->msg.subcode)
		hclge_link_fail_parse(hdev, req->msg.data[0]);
}

static bool hclge_cmd_crq_empty(struct hclge_hw *hw)
{
	u32 tail = hclge_read_dev(hw, HCLGE_NIC_CRQ_TAIL_REG);

	return tail == hw->cmq.crq.next_to_use;
}

static void hclge_handle_ncsi_error(struct hclge_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = hdev->ae_dev;

	ae_dev->ops->set_default_reset_request(ae_dev, HNAE3_GLOBAL_RESET);
	dev_warn(&hdev->pdev->dev, "requesting reset due to NCSI error\n");
	ae_dev->ops->reset_event(hdev->pdev, NULL);
}

void hclge_mbx_handler(struct hclge_dev *hdev)
{
	struct hclge_cmq_ring *crq = &hdev->hw.cmq.crq;
	struct hclge_respond_to_vf_msg resp_msg;
	struct hclge_mbx_vf_to_pf_cmd *req;
	struct hclge_vport *vport;
	struct hclge_desc *desc;
	unsigned int flag;
	int ret = 0;

	memset(&resp_msg, 0, sizeof(resp_msg));
	/* handle all the mailbox requests in the queue */
	while (!hclge_cmd_crq_empty(&hdev->hw)) {
		if (test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state)) {
			dev_warn(&hdev->pdev->dev,
				 "command queue needs re-initializing\n");
			return;
		}

		desc = &crq->desc[crq->next_to_use];
		req = (struct hclge_mbx_vf_to_pf_cmd *)desc->data;

		flag = le16_to_cpu(crq->desc[crq->next_to_use].flag);
		if (unlikely(!hnae3_get_bit(flag, HCLGE_CMDQ_RX_OUTVLD_B))) {
			dev_warn(&hdev->pdev->dev,
				 "dropped invalid mailbox message, code = %u\n",
				 req->msg.code);

			/* dropping/not processing this invalid message */
			crq->desc[crq->next_to_use].flag = 0;
			hclge_mbx_ring_ptr_move_crq(crq);
			continue;
		}

		vport = &hdev->vport[req->mbx_src_vfid];

		switch (req->msg.code) {
		case HCLGE_MBX_MAP_RING_TO_VECTOR:
			ret = hclge_map_unmap_ring_to_vf_vector(vport, true,
								req);
			break;
		case HCLGE_MBX_UNMAP_RING_TO_VECTOR:
			ret = hclge_map_unmap_ring_to_vf_vector(vport, false,
								req);
			break;
		case HCLGE_MBX_SET_PROMISC_MODE:
			ret = hclge_set_vf_promisc_mode(vport, req);
			if (ret)
				dev_err(&hdev->pdev->dev,
					"PF fail(%d) to set VF promisc mode\n",
					ret);
			break;
		case HCLGE_MBX_SET_UNICAST:
			ret = hclge_set_vf_uc_mac_addr(vport, req);
			if (ret)
				dev_err(&hdev->pdev->dev,
					"PF fail(%d) to set VF UC MAC Addr\n",
					ret);
			break;
		case HCLGE_MBX_SET_MULTICAST:
			ret = hclge_set_vf_mc_mac_addr(vport, req);
			if (ret)
				dev_err(&hdev->pdev->dev,
					"PF fail(%d) to set VF MC MAC Addr\n",
					ret);
			break;
		case HCLGE_MBX_SET_VLAN:
			ret = hclge_set_vf_vlan_cfg(vport, req, &resp_msg);
			if (ret)
				dev_err(&hdev->pdev->dev,
					"PF failed(%d) to config VF's VLAN\n",
					ret);
			break;
		case HCLGE_MBX_SET_ALIVE:
			ret = hclge_set_vf_alive(vport, req);
			if (ret)
				dev_err(&hdev->pdev->dev,
					"PF failed(%d) to set VF's ALIVE\n",
					ret);
			break;
		case HCLGE_MBX_GET_QINFO:
			hclge_get_vf_queue_info(vport, &resp_msg);
			break;
		case HCLGE_MBX_GET_QDEPTH:
			hclge_get_vf_queue_depth(vport, &resp_msg);
			break;
		case HCLGE_MBX_GET_TCINFO:
			hclge_get_vf_tcinfo(vport, &resp_msg);
			break;
		case HCLGE_MBX_GET_LINK_STATUS:
			ret = hclge_get_link_info(vport, req);
			if (ret)
				dev_err(&hdev->pdev->dev,
					"PF fail(%d) to get link stat for VF\n",
					ret);
			break;
		case HCLGE_MBX_QUEUE_RESET:
			hclge_mbx_reset_vf_queue(vport, req);
			break;
		case HCLGE_MBX_RESET:
			ret = hclge_reset_vf(vport);
			break;
		case HCLGE_MBX_KEEP_ALIVE:
			hclge_vf_keep_alive(vport);
			break;
		case HCLGE_MBX_SET_MTU:
			ret = hclge_set_vf_mtu(vport, req);
			if (ret)
				dev_err(&hdev->pdev->dev,
					"VF fail(%d) to set mtu\n", ret);
			break;
		case HCLGE_MBX_GET_QID_IN_PF:
			hclge_get_queue_id_in_pf(vport, req, &resp_msg);
			break;
		case HCLGE_MBX_GET_RSS_KEY:
			hclge_get_rss_key(vport, req, &resp_msg);
			break;
		case HCLGE_MBX_GET_LINK_MODE:
			hclge_get_link_mode(vport, req);
			break;
		case HCLGE_MBX_GET_VF_FLR_STATUS:
		case HCLGE_MBX_VF_UNINIT:
			hclge_rm_vport_all_mac_table(vport, true,
						     HCLGE_MAC_ADDR_UC);
			hclge_rm_vport_all_mac_table(vport, true,
						     HCLGE_MAC_ADDR_MC);
			hclge_rm_vport_all_vlan_table(vport, true);
			break;
		case HCLGE_MBX_GET_MEDIA_TYPE:
			hclge_get_vf_media_type(vport, &resp_msg);
			break;
		case HCLGE_MBX_PUSH_LINK_STATUS:
			hclge_handle_link_change_event(hdev, req);
			break;
		case HCLGE_MBX_GET_MAC_ADDR:
			hclge_get_vf_mac_addr(vport, &resp_msg);
			break;
		case HCLGE_MBX_NCSI_ERROR:
			hclge_handle_ncsi_error(hdev);
			break;
		default:
			dev_err(&hdev->pdev->dev,
				"un-supported mailbox message, code = %u\n",
				req->msg.code);
			break;
		}

		/* PF driver should not reply IMP */
		if (hnae3_get_bit(req->mbx_need_resp, HCLGE_MBX_NEED_RESP_B) &&
		    req->msg.code < HCLGE_MBX_GET_VF_FLR_STATUS) {
			resp_msg.status = ret;
			hclge_gen_resp_to_vf(vport, req, &resp_msg);
		}

		crq->desc[crq->next_to_use].flag = 0;
		hclge_mbx_ring_ptr_move_crq(crq);

		/* reinitialize ret after completing the mbx message processing */
		ret = 0;
	}

	/* Write back CMDQ_RQ header pointer, M7 needs this pointer */
	hclge_write_dev(&hdev->hw, HCLGE_NIC_CRQ_HEAD_REG, crq->next_to_use);
}