// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include "hclge_main.h"
#include "hclge_mbx.h"
#include "hnae3.h"

#define CREATE_TRACE_POINTS
#include "hclge_trace.h"

static u16 hclge_errno_to_resp(int errno)
{
        return abs(errno);
}

/* hclge_gen_resp_to_vf: used to generate a synchronous response to VF when PF
 * receives a mailbox message from VF.
 * @vport: pointer to struct hclge_vport
 * @vf_to_pf_req: pointer to hclge_mbx_vf_to_pf_cmd of the original mailbox
 *                message
 * @resp_msg: pointer to hclge_respond_to_vf_msg carrying the response status
 *            (0 on success, a positive errno on failure) and any payload
 */
static int hclge_gen_resp_to_vf(struct hclge_vport *vport,
                                struct hclge_mbx_vf_to_pf_cmd *vf_to_pf_req,
                                struct hclge_respond_to_vf_msg *resp_msg)
{
        struct hclge_mbx_pf_to_vf_cmd *resp_pf_to_vf;
        struct hclge_dev *hdev = vport->back;
        enum hclge_cmd_status status;
        struct hclge_desc desc;
        u16 resp;

        resp_pf_to_vf = (struct hclge_mbx_pf_to_vf_cmd *)desc.data;

        if (resp_msg->len > HCLGE_MBX_MAX_RESP_DATA_SIZE) {
                dev_err(&hdev->pdev->dev,
                        "PF failed to generate response to VF: len %u exceeds max len %u\n",
                        resp_msg->len,
                        HCLGE_MBX_MAX_RESP_DATA_SIZE);
                /* If resp_msg->len is too long, clamp it to the max length
                 * and still return the msg to VF
                 */
                resp_msg->len = HCLGE_MBX_MAX_RESP_DATA_SIZE;
        }

        hclge_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_PF_TO_VF, false);

        resp_pf_to_vf->dest_vfid = vf_to_pf_req->mbx_src_vfid;
        resp_pf_to_vf->msg_len = vf_to_pf_req->msg_len;

        resp_pf_to_vf->msg.code = HCLGE_MBX_PF_VF_RESP;
        resp_pf_to_vf->msg.vf_mbx_msg_code = vf_to_pf_req->msg.code;
        resp_pf_to_vf->msg.vf_mbx_msg_subcode = vf_to_pf_req->msg.subcode;
        resp = hclge_errno_to_resp(resp_msg->status);
        if (resp < SHRT_MAX) {
                resp_pf_to_vf->msg.resp_status = resp;
        } else {
                dev_warn(&hdev->pdev->dev,
                         "failed to send response to VF, response status %u is out of bounds\n",
                         resp);
                resp_pf_to_vf->msg.resp_status = EIO;
        }

        if (resp_msg->len > 0)
                memcpy(resp_pf_to_vf->msg.resp_data, resp_msg->data,
                       resp_msg->len);

        status = hclge_cmd_send(&hdev->hw, &desc, 1);
        if (status)
                dev_err(&hdev->pdev->dev,
                        "failed to send response to VF, status: %d, vfid: %u, code: %u, subcode: %u.\n",
                        status, vf_to_pf_req->mbx_src_vfid,
                        vf_to_pf_req->msg.code, vf_to_pf_req->msg.subcode);

        return status;
}

static int hclge_send_mbx_msg(struct hclge_vport *vport, u8 *msg, u16 msg_len,
                              u16 mbx_opcode, u8 dest_vfid)
{
        struct hclge_mbx_pf_to_vf_cmd *resp_pf_to_vf;
        struct hclge_dev *hdev = vport->back;
        enum hclge_cmd_status status;
        struct hclge_desc desc;

        resp_pf_to_vf = (struct hclge_mbx_pf_to_vf_cmd *)desc.data;

        hclge_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_PF_TO_VF, false);

        resp_pf_to_vf->dest_vfid = dest_vfid;
        resp_pf_to_vf->msg_len = msg_len;
        resp_pf_to_vf->msg.code = mbx_opcode;

        memcpy(&resp_pf_to_vf->msg.vf_mbx_msg_code, msg, msg_len);

        trace_hclge_pf_mbx_send(hdev, resp_pf_to_vf);

        status = hclge_cmd_send(&hdev->hw, &desc, 1);
        if (status)
                dev_err(&hdev->pdev->dev,
                        "failed to send mailbox to VF, status: %d, vfid: %u, opcode: %u\n",
                        status, dest_vfid, mbx_opcode);

        return status;
}

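/* Illustrative sketch (not part of the driver): callers of
 * hclge_send_mbx_msg() above pack their payload into a flat byte array and
 * pick an HCLGE_MBX_* opcode. "val" and "vfid" below are hypothetical
 * placeholders:
 *
 *      u8 data[2];
 *      u16 val = 0;
 *
 *      memcpy(data, &val, sizeof(val));
 *      hclge_send_mbx_msg(vport, data, sizeof(data),
 *                         HCLGE_MBX_ASSERTING_RESET, vfid);
 *
 * hclge_inform_reset_assert_to_vf() below is a real instance of this
 * pattern.
 */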
int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport)
{
        struct hclge_dev *hdev = vport->back;
        u16 reset_type;
        u8 msg_data[2];
        u8 dest_vfid;

        BUILD_BUG_ON(HNAE3_MAX_RESET > U16_MAX);

        dest_vfid = (u8)vport->vport_id;

        if (hdev->reset_type == HNAE3_FUNC_RESET)
                reset_type = HNAE3_VF_PF_FUNC_RESET;
        else if (hdev->reset_type == HNAE3_FLR_RESET)
                reset_type = HNAE3_VF_FULL_RESET;
        else
                reset_type = HNAE3_VF_FUNC_RESET;

        memcpy(&msg_data[0], &reset_type, sizeof(u16));

        /* send this requested info to VF */
        return hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data),
                                  HCLGE_MBX_ASSERTING_RESET, dest_vfid);
}

static void hclge_free_vector_ring_chain(struct hnae3_ring_chain_node *head)
{
        struct hnae3_ring_chain_node *chain_tmp, *chain;

        chain = head->next;

        while (chain) {
                chain_tmp = chain->next;
                kfree_sensitive(chain);
                chain = chain_tmp;
        }
}

/* hclge_get_ring_chain_from_mbx: get ring type & tqp id & int_gl idx
 * from mailbox message
 * msg[0]: opcode
 * msg[1]: <not relevant to this function>
 * msg[2]: ring_num
 * msg[3]: first ring type (TX|RX)
 * msg[4]: first tqp id
 * msg[5]: first int_gl idx
 * msg[6] ~ msg[14]: other ring type, tqp id and int_gl idx
 */
static int hclge_get_ring_chain_from_mbx(
                        struct hclge_mbx_vf_to_pf_cmd *req,
                        struct hnae3_ring_chain_node *ring_chain,
                        struct hclge_vport *vport)
{
        struct hnae3_ring_chain_node *cur_chain, *new_chain;
        struct hclge_dev *hdev = vport->back;
        int ring_num;
        int i;

        ring_num = req->msg.ring_num;

        if (ring_num > HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM)
                return -ENOMEM;

        for (i = 0; i < ring_num; i++) {
                if (req->msg.param[i].tqp_index >= vport->nic.kinfo.rss_size) {
                        dev_err(&hdev->pdev->dev, "tqp index(%u) is out of range(0-%u)\n",
                                req->msg.param[i].tqp_index,
                                vport->nic.kinfo.rss_size - 1);
                        return -EINVAL;
                }
        }

        hnae3_set_bit(ring_chain->flag, HNAE3_RING_TYPE_B,
                      req->msg.param[0].ring_type);
        ring_chain->tqp_index =
                hclge_get_queue_id(vport->nic.kinfo.tqp
                                   [req->msg.param[0].tqp_index]);
        hnae3_set_field(ring_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
                        HNAE3_RING_GL_IDX_S, req->msg.param[0].int_gl_index);

        cur_chain = ring_chain;

        for (i = 1; i < ring_num; i++) {
                new_chain = kzalloc(sizeof(*new_chain), GFP_KERNEL);
                if (!new_chain)
                        goto err;

                hnae3_set_bit(new_chain->flag, HNAE3_RING_TYPE_B,
                              req->msg.param[i].ring_type);

                new_chain->tqp_index =
                        hclge_get_queue_id(vport->nic.kinfo.tqp
                                           [req->msg.param[i].tqp_index]);

                hnae3_set_field(new_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
                                HNAE3_RING_GL_IDX_S,
                                req->msg.param[i].int_gl_index);

                cur_chain->next = new_chain;
                cur_chain = new_chain;
        }

        return 0;
err:
        hclge_free_vector_ring_chain(ring_chain);
        return -ENOMEM;
}

static int hclge_map_unmap_ring_to_vf_vector(struct hclge_vport *vport, bool en,
                                             struct hclge_mbx_vf_to_pf_cmd *req)
{
        struct hnae3_ring_chain_node ring_chain;
        int vector_id = req->msg.vector_id;
        int ret;

        memset(&ring_chain, 0, sizeof(ring_chain));
        ret = hclge_get_ring_chain_from_mbx(req, &ring_chain, vport);
        if (ret)
                return ret;

        ret = hclge_bind_ring_with_vector(vport, vector_id, en, &ring_chain);

        hclge_free_vector_ring_chain(&ring_chain);

        return ret;
}

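/* Note on the ring-chain lifetime above: the head node lives on the caller's
 * stack (see hclge_map_unmap_ring_to_vf_vector()), and only the nodes for
 * entries 1..ring_num-1 are kzalloc'ed. hclge_free_vector_ring_chain()
 * therefore starts freeing at head->next and never frees the head itself,
 * which is why it is safe on both the error and success paths.
 */
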
static void hclge_set_vf_promisc_mode(struct hclge_vport *vport,
                                      struct hclge_mbx_vf_to_pf_cmd *req)
{
        struct hnae3_handle *handle = &vport->nic;
        struct hclge_dev *hdev = vport->back;

        vport->vf_info.request_uc_en = req->msg.en_uc;
        vport->vf_info.request_mc_en = req->msg.en_mc;
        vport->vf_info.request_bc_en = req->msg.en_bc;

        if (req->msg.en_limit_promisc)
                set_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->priv_flags);
        else
                clear_bit(HNAE3_PFLAG_LIMIT_PROMISC,
                          &handle->priv_flags);

        set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
        hclge_task_schedule(hdev, 0);
}

static int hclge_set_vf_uc_mac_addr(struct hclge_vport *vport,
                                    struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
#define HCLGE_MBX_VF_OLD_MAC_ADDR_OFFSET        6

        const u8 *mac_addr = (const u8 *)(mbx_req->msg.data);
        struct hclge_dev *hdev = vport->back;
        int status;

        if (mbx_req->msg.subcode == HCLGE_MBX_MAC_VLAN_UC_MODIFY) {
                const u8 *old_addr = (const u8 *)
                (&mbx_req->msg.data[HCLGE_MBX_VF_OLD_MAC_ADDR_OFFSET]);

                /* If the VF's MAC has been configured by the host, it cannot
                 * be overridden by the MAC specified by the VM.
                 */
                if (!is_zero_ether_addr(vport->vf_info.mac) &&
                    !ether_addr_equal(mac_addr, vport->vf_info.mac))
                        return -EPERM;

                if (!is_valid_ether_addr(mac_addr))
                        return -EINVAL;

                spin_lock_bh(&vport->mac_list_lock);
                status = hclge_update_mac_node_for_dev_addr(vport, old_addr,
                                                            mac_addr);
                spin_unlock_bh(&vport->mac_list_lock);
                hclge_task_schedule(hdev, 0);
        } else if (mbx_req->msg.subcode == HCLGE_MBX_MAC_VLAN_UC_ADD) {
                status = hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD,
                                               HCLGE_MAC_ADDR_UC, mac_addr);
        } else if (mbx_req->msg.subcode == HCLGE_MBX_MAC_VLAN_UC_REMOVE) {
                status = hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL,
                                               HCLGE_MAC_ADDR_UC, mac_addr);
        } else {
                dev_err(&hdev->pdev->dev,
                        "failed to set unicast mac addr, unknown subcode %u\n",
                        mbx_req->msg.subcode);
                return -EIO;
        }

        return status;
}

static int hclge_set_vf_mc_mac_addr(struct hclge_vport *vport,
                                    struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
        const u8 *mac_addr = (const u8 *)(mbx_req->msg.data);
        struct hclge_dev *hdev = vport->back;

        if (mbx_req->msg.subcode == HCLGE_MBX_MAC_VLAN_MC_ADD) {
                hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD,
                                      HCLGE_MAC_ADDR_MC, mac_addr);
        } else if (mbx_req->msg.subcode == HCLGE_MBX_MAC_VLAN_MC_REMOVE) {
                hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL,
                                      HCLGE_MAC_ADDR_MC, mac_addr);
        } else {
                dev_err(&hdev->pdev->dev,
                        "failed to set mcast mac addr, unknown subcode %u\n",
                        mbx_req->msg.subcode);
                return -EIO;
        }

        return 0;
}

int hclge_push_vf_port_base_vlan_info(struct hclge_vport *vport, u8 vfid,
                                      u16 state, u16 vlan_tag, u16 qos,
                                      u16 vlan_proto)
{
#define MSG_DATA_SIZE   8

        u8 msg_data[MSG_DATA_SIZE];

        memcpy(&msg_data[0], &state, sizeof(u16));
        memcpy(&msg_data[2], &vlan_proto, sizeof(u16));
        memcpy(&msg_data[4], &qos, sizeof(u16));
        memcpy(&msg_data[6], &vlan_tag, sizeof(u16));

        return hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data),
                                  HCLGE_MBX_PUSH_VLAN_INFO, vfid);
}

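/* For reference, hclge_push_vf_port_base_vlan_info() above lays out its
 * 8-byte payload as the VF side is expected to decode it:
 *
 *      bytes 0-1: state
 *      bytes 2-3: vlan_proto
 *      bytes 4-5: qos
 *      bytes 6-7: vlan_tag
 *
 * (u16 fields copied with memcpy(), i.e. in host byte order.)
 */
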
static int hclge_set_vf_vlan_cfg(struct hclge_vport *vport,
                                 struct hclge_mbx_vf_to_pf_cmd *mbx_req,
                                 struct hclge_respond_to_vf_msg *resp_msg)
{
#define HCLGE_MBX_VLAN_STATE_OFFSET     0
#define HCLGE_MBX_VLAN_INFO_OFFSET      2

        struct hclge_vf_vlan_cfg *msg_cmd;
        int status = 0;

        msg_cmd = (struct hclge_vf_vlan_cfg *)&mbx_req->msg;
        if (msg_cmd->subcode == HCLGE_MBX_VLAN_FILTER) {
                struct hnae3_handle *handle = &vport->nic;
                u16 vlan, proto;
                bool is_kill;

                is_kill = !!msg_cmd->is_kill;
                vlan = msg_cmd->vlan;
                proto = msg_cmd->proto;
                status = hclge_set_vlan_filter(handle, cpu_to_be16(proto),
                                               vlan, is_kill);
        } else if (msg_cmd->subcode == HCLGE_MBX_VLAN_RX_OFF_CFG) {
                struct hnae3_handle *handle = &vport->nic;
                bool en = !!msg_cmd->is_kill;

                status = hclge_en_hw_strip_rxvtag(handle, en);
        } else if (msg_cmd->subcode == HCLGE_MBX_PORT_BASE_VLAN_CFG) {
                struct hclge_vlan_info *vlan_info;
                u16 *state;

                state = (u16 *)&mbx_req->msg.data[HCLGE_MBX_VLAN_STATE_OFFSET];
                vlan_info = (struct hclge_vlan_info *)
                        &mbx_req->msg.data[HCLGE_MBX_VLAN_INFO_OFFSET];
                status = hclge_update_port_base_vlan_cfg(vport, *state,
                                                         vlan_info);
        } else if (msg_cmd->subcode == HCLGE_MBX_GET_PORT_BASE_VLAN_STATE) {
                struct hnae3_ae_dev *ae_dev = pci_get_drvdata(vport->nic.pdev);
                /* The VF does not need to know about the port-based VLAN
                 * state on devices of version HNAE3_DEVICE_VERSION_V3 or
                 * later, so always report "disable" for such queries there.
                 */
                resp_msg->data[0] =
                        ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3 ?
                        HNAE3_PORT_BASE_VLAN_DISABLE :
                        vport->port_base_vlan_cfg.state;
                resp_msg->len = sizeof(u8);
        }

        return status;
}

static int hclge_set_vf_alive(struct hclge_vport *vport,
                              struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
        bool alive = !!mbx_req->msg.data[0];
        int ret = 0;

        if (alive)
                ret = hclge_vport_start(vport);
        else
                hclge_vport_stop(vport);

        return ret;
}

static void hclge_get_vf_tcinfo(struct hclge_vport *vport,
                                struct hclge_respond_to_vf_msg *resp_msg)
{
        struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
        unsigned int i;

        for (i = 0; i < kinfo->tc_info.num_tc; i++)
                resp_msg->data[0] |= BIT(i);

        resp_msg->len = sizeof(u8);
}

static void hclge_get_vf_queue_info(struct hclge_vport *vport,
                                    struct hclge_respond_to_vf_msg *resp_msg)
{
#define HCLGE_TQPS_RSS_INFO_LEN         6
#define HCLGE_TQPS_ALLOC_OFFSET         0
#define HCLGE_TQPS_RSS_SIZE_OFFSET      2
#define HCLGE_TQPS_RX_BUFFER_LEN_OFFSET 4

        struct hclge_dev *hdev = vport->back;

        /* get the queue related info */
        memcpy(&resp_msg->data[HCLGE_TQPS_ALLOC_OFFSET],
               &vport->alloc_tqps, sizeof(u16));
        memcpy(&resp_msg->data[HCLGE_TQPS_RSS_SIZE_OFFSET],
               &vport->nic.kinfo.rss_size, sizeof(u16));
        memcpy(&resp_msg->data[HCLGE_TQPS_RX_BUFFER_LEN_OFFSET],
               &hdev->rx_buf_len, sizeof(u16));
        resp_msg->len = HCLGE_TQPS_RSS_INFO_LEN;
}

static void hclge_get_vf_mac_addr(struct hclge_vport *vport,
                                  struct hclge_respond_to_vf_msg *resp_msg)
{
        ether_addr_copy(resp_msg->data, vport->vf_info.mac);
        resp_msg->len = ETH_ALEN;
}

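/* The HCLGE_MBX_GET_* handlers above and below share one pattern: fill
 * resp_msg->data at fixed offsets and set resp_msg->len, then let
 * hclge_mbx_handler() send the buffer back via hclge_gen_resp_to_vf().
 * For example, the HCLGE_MBX_GET_QINFO response is laid out as:
 *
 *      bytes 0-1: alloc_tqps
 *      bytes 2-3: rss_size
 *      bytes 4-5: rx_buf_len
 */
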
static void hclge_get_vf_queue_depth(struct hclge_vport *vport,
                                     struct hclge_respond_to_vf_msg *resp_msg)
{
#define HCLGE_TQPS_DEPTH_INFO_LEN       4
#define HCLGE_TQPS_NUM_TX_DESC_OFFSET   0
#define HCLGE_TQPS_NUM_RX_DESC_OFFSET   2

        struct hclge_dev *hdev = vport->back;

        /* get the queue depth info */
        memcpy(&resp_msg->data[HCLGE_TQPS_NUM_TX_DESC_OFFSET],
               &hdev->num_tx_desc, sizeof(u16));
        memcpy(&resp_msg->data[HCLGE_TQPS_NUM_RX_DESC_OFFSET],
               &hdev->num_rx_desc, sizeof(u16));
        resp_msg->len = HCLGE_TQPS_DEPTH_INFO_LEN;
}

static void hclge_get_vf_media_type(struct hclge_vport *vport,
                                    struct hclge_respond_to_vf_msg *resp_msg)
{
#define HCLGE_VF_MEDIA_TYPE_OFFSET      0
#define HCLGE_VF_MODULE_TYPE_OFFSET     1
#define HCLGE_VF_MEDIA_TYPE_LENGTH      2

        struct hclge_dev *hdev = vport->back;

        resp_msg->data[HCLGE_VF_MEDIA_TYPE_OFFSET] =
                hdev->hw.mac.media_type;
        resp_msg->data[HCLGE_VF_MODULE_TYPE_OFFSET] =
                hdev->hw.mac.module_type;
        resp_msg->len = HCLGE_VF_MEDIA_TYPE_LENGTH;
}

int hclge_push_vf_link_status(struct hclge_vport *vport)
{
#define HCLGE_VF_LINK_STATE_UP          1U
#define HCLGE_VF_LINK_STATE_DOWN        0U

        struct hclge_dev *hdev = vport->back;
        u16 link_status;
        u8 msg_data[9];
        u16 duplex;

        /* mac.link can only be 0 or 1 */
        switch (vport->vf_info.link_state) {
        case IFLA_VF_LINK_STATE_ENABLE:
                link_status = HCLGE_VF_LINK_STATE_UP;
                break;
        case IFLA_VF_LINK_STATE_DISABLE:
                link_status = HCLGE_VF_LINK_STATE_DOWN;
                break;
        case IFLA_VF_LINK_STATE_AUTO:
        default:
                link_status = (u16)hdev->hw.mac.link;
                break;
        }

        duplex = hdev->hw.mac.duplex;
        memcpy(&msg_data[0], &link_status, sizeof(u16));
        memcpy(&msg_data[2], &hdev->hw.mac.speed, sizeof(u32));
        memcpy(&msg_data[6], &duplex, sizeof(u16));
        msg_data[8] = HCLGE_MBX_PUSH_LINK_STATUS_EN;

        /* send this requested info to VF */
        return hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data),
                                  HCLGE_MBX_LINK_STAT_CHANGE, vport->vport_id);
}

static void hclge_get_link_mode(struct hclge_vport *vport,
                                struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
#define HCLGE_SUPPORTED 1
        struct hclge_dev *hdev = vport->back;
        unsigned long advertising;
        unsigned long supported;
        unsigned long send_data;
        u8 msg_data[10] = {};
        u8 dest_vfid;

        advertising = hdev->hw.mac.advertising[0];
        supported = hdev->hw.mac.supported[0];
        dest_vfid = mbx_req->mbx_src_vfid;
        msg_data[0] = mbx_req->msg.data[0];

        send_data = msg_data[0] == HCLGE_SUPPORTED ? supported : advertising;

        memcpy(&msg_data[2], &send_data, sizeof(unsigned long));
        hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data),
                           HCLGE_MBX_LINK_STAT_MODE, dest_vfid);
}

static int hclge_mbx_reset_vf_queue(struct hclge_vport *vport,
                                    struct hclge_mbx_vf_to_pf_cmd *mbx_req,
                                    struct hclge_respond_to_vf_msg *resp_msg)
{
#define HCLGE_RESET_ALL_QUEUE_DONE      1U
        struct hnae3_handle *handle = &vport->nic;
        struct hclge_dev *hdev = vport->back;
        u16 queue_id;
        int ret;

        memcpy(&queue_id, mbx_req->msg.data, sizeof(queue_id));
        resp_msg->data[0] = HCLGE_RESET_ALL_QUEUE_DONE;
        resp_msg->len = sizeof(u8);

        /* The PF resets all of a VF's queues at once, so there is no need
         * to reset again when queue_id > 0; just return success.
         */
        if (queue_id > 0)
                return 0;

        ret = hclge_reset_tqp(handle);
        if (ret)
                dev_err(&hdev->pdev->dev, "failed to reset vf %u queue, ret = %d\n",
                        vport->vport_id - HCLGE_VF_VPORT_START_NUM, ret);

        return ret;
}

static int hclge_reset_vf(struct hclge_vport *vport)
{
        struct hclge_dev *hdev = vport->back;

        dev_warn(&hdev->pdev->dev, "PF received VF reset request from VF %u!",
                 vport->vport_id);

        return hclge_func_reset_cmd(hdev, vport->vport_id);
}

static void hclge_vf_keep_alive(struct hclge_vport *vport)
{
        vport->last_active_jiffies = jiffies;
}

static int hclge_set_vf_mtu(struct hclge_vport *vport,
                            struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
        u32 mtu;

        memcpy(&mtu, mbx_req->msg.data, sizeof(mtu));

        return hclge_set_vport_mtu(vport, mtu);
}

static void hclge_get_queue_id_in_pf(struct hclge_vport *vport,
                                     struct hclge_mbx_vf_to_pf_cmd *mbx_req,
                                     struct hclge_respond_to_vf_msg *resp_msg)
{
        u16 queue_id, qid_in_pf;

        memcpy(&queue_id, mbx_req->msg.data, sizeof(queue_id));
        qid_in_pf = hclge_covert_handle_qid_global(&vport->nic, queue_id);
        memcpy(resp_msg->data, &qid_in_pf, sizeof(qid_in_pf));
        resp_msg->len = sizeof(qid_in_pf);
}

static void hclge_get_rss_key(struct hclge_vport *vport,
                              struct hclge_mbx_vf_to_pf_cmd *mbx_req,
                              struct hclge_respond_to_vf_msg *resp_msg)
{
#define HCLGE_RSS_MBX_RESP_LEN  8
        struct hclge_dev *hdev = vport->back;
        u8 index;

        index = mbx_req->msg.data[0];

        /* Check the query index of rss_hash_key from VF; make sure it does
         * not exceed the size of rss_hash_key.
         */
        if (((index + 1) * HCLGE_RSS_MBX_RESP_LEN) >
              sizeof(vport[0].rss_hash_key)) {
                dev_warn(&hdev->pdev->dev,
                         "failed to get the rss hash key, the index(%u) is invalid!\n",
                         index);
                return;
        }

        memcpy(resp_msg->data,
               &hdev->vport[0].rss_hash_key[index * HCLGE_RSS_MBX_RESP_LEN],
               HCLGE_RSS_MBX_RESP_LEN);
        resp_msg->len = HCLGE_RSS_MBX_RESP_LEN;
}

static void hclge_link_fail_parse(struct hclge_dev *hdev, u8 link_fail_code)
{
        switch (link_fail_code) {
        case HCLGE_LF_REF_CLOCK_LOST:
                dev_warn(&hdev->pdev->dev, "Reference clock lost!\n");
                break;
        case HCLGE_LF_XSFP_TX_DISABLE:
                dev_warn(&hdev->pdev->dev, "SFP tx is disabled!\n");
                break;
        case HCLGE_LF_XSFP_ABSENT:
                dev_warn(&hdev->pdev->dev, "SFP is absent!\n");
                break;
        default:
                break;
        }
}

static void hclge_handle_link_change_event(struct hclge_dev *hdev,
                                           struct hclge_mbx_vf_to_pf_cmd *req)
{
        hclge_task_schedule(hdev, 0);

        if (!req->msg.subcode)
                hclge_link_fail_parse(hdev, req->msg.data[0]);
}

static bool hclge_cmd_crq_empty(struct hclge_hw *hw)
{
        u32 tail = hclge_read_dev(hw, HCLGE_NIC_CRQ_TAIL_REG);

        return tail == hw->cmq.crq.next_to_use;
}

static void hclge_handle_ncsi_error(struct hclge_dev *hdev)
{
        struct hnae3_ae_dev *ae_dev = hdev->ae_dev;

        ae_dev->ops->set_default_reset_request(ae_dev, HNAE3_GLOBAL_RESET);
        dev_warn(&hdev->pdev->dev, "requesting reset due to NCSI error\n");
        ae_dev->ops->reset_event(hdev->pdev, NULL);
}

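/* hclge_cmd_crq_empty() above underpins the receive loop in
 * hclge_mbx_handler() below: hardware advances the CRQ tail register as VF
 * requests arrive, while the driver tracks its consume position in
 * next_to_use, so the ring is drained until the two meet.
 */
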
static void hclge_handle_vf_tbl(struct hclge_vport *vport,
                                struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
        struct hclge_dev *hdev = vport->back;
        struct hclge_vf_vlan_cfg *msg_cmd;

        msg_cmd = (struct hclge_vf_vlan_cfg *)&mbx_req->msg;
        if (msg_cmd->subcode == HCLGE_MBX_VPORT_LIST_CLEAR) {
                hclge_rm_vport_all_mac_table(vport, true, HCLGE_MAC_ADDR_UC);
                hclge_rm_vport_all_mac_table(vport, true, HCLGE_MAC_ADDR_MC);
                hclge_rm_vport_all_vlan_table(vport, true);
        } else {
                dev_warn(&hdev->pdev->dev, "Invalid cmd(%u)\n",
                         msg_cmd->subcode);
        }
}

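/* hclge_mbx_handler - dispatch all pending VF-to-PF mailbox requests.
 *
 * Each CRQ descriptor carries one request, dispatched by msg.code. For
 * request codes below HCLGE_MBX_GET_VF_FLR_STATUS that set the
 * HCLGE_MBX_NEED_RESP_B bit, a synchronous response (the status plus any
 * data collected in resp_msg) is returned via hclge_gen_resp_to_vf().
 */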
void hclge_mbx_handler(struct hclge_dev *hdev)
{
        struct hclge_cmq_ring *crq = &hdev->hw.cmq.crq;
        struct hclge_respond_to_vf_msg resp_msg;
        struct hclge_mbx_vf_to_pf_cmd *req;
        struct hclge_vport *vport;
        struct hclge_desc *desc;
        bool is_del = false;
        unsigned int flag;
        int ret = 0;

        /* handle all the mailbox requests in the queue */
        while (!hclge_cmd_crq_empty(&hdev->hw)) {
                if (test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state)) {
                        dev_warn(&hdev->pdev->dev,
                                 "command queue needs re-initializing\n");
                        return;
                }

                desc = &crq->desc[crq->next_to_use];
                req = (struct hclge_mbx_vf_to_pf_cmd *)desc->data;

                flag = le16_to_cpu(crq->desc[crq->next_to_use].flag);
                if (unlikely(!hnae3_get_bit(flag, HCLGE_CMDQ_RX_OUTVLD_B))) {
                        dev_warn(&hdev->pdev->dev,
                                 "dropped invalid mailbox message, code = %u\n",
                                 req->msg.code);

                        /* dropping/not processing this invalid message */
                        crq->desc[crq->next_to_use].flag = 0;
                        hclge_mbx_ring_ptr_move_crq(crq);
                        continue;
                }

                vport = &hdev->vport[req->mbx_src_vfid];

                trace_hclge_pf_mbx_get(hdev, req);

                /* clear the resp_msg before processing every mailbox message */
                memset(&resp_msg, 0, sizeof(resp_msg));

                switch (req->msg.code) {
                case HCLGE_MBX_MAP_RING_TO_VECTOR:
                        ret = hclge_map_unmap_ring_to_vf_vector(vport, true,
                                                                req);
                        break;
                case HCLGE_MBX_UNMAP_RING_TO_VECTOR:
                        ret = hclge_map_unmap_ring_to_vf_vector(vport, false,
                                                                req);
                        break;
                case HCLGE_MBX_SET_PROMISC_MODE:
                        hclge_set_vf_promisc_mode(vport, req);
                        break;
                case HCLGE_MBX_SET_UNICAST:
                        ret = hclge_set_vf_uc_mac_addr(vport, req);
                        if (ret)
                                dev_err(&hdev->pdev->dev,
                                        "PF fail(%d) to set VF UC MAC Addr\n",
                                        ret);
                        break;
                case HCLGE_MBX_SET_MULTICAST:
                        ret = hclge_set_vf_mc_mac_addr(vport, req);
                        if (ret)
                                dev_err(&hdev->pdev->dev,
                                        "PF fail(%d) to set VF MC MAC Addr\n",
                                        ret);
                        break;
                case HCLGE_MBX_SET_VLAN:
                        ret = hclge_set_vf_vlan_cfg(vport, req, &resp_msg);
                        if (ret)
                                dev_err(&hdev->pdev->dev,
                                        "PF failed(%d) to config VF's VLAN\n",
                                        ret);
                        break;
                case HCLGE_MBX_SET_ALIVE:
                        ret = hclge_set_vf_alive(vport, req);
                        if (ret)
                                dev_err(&hdev->pdev->dev,
                                        "PF failed(%d) to set VF's ALIVE\n",
                                        ret);
                        break;
                case HCLGE_MBX_GET_QINFO:
                        hclge_get_vf_queue_info(vport, &resp_msg);
                        break;
                case HCLGE_MBX_GET_QDEPTH:
                        hclge_get_vf_queue_depth(vport, &resp_msg);
                        break;
                case HCLGE_MBX_GET_TCINFO:
                        hclge_get_vf_tcinfo(vport, &resp_msg);
                        break;
                case HCLGE_MBX_GET_LINK_STATUS:
                        ret = hclge_push_vf_link_status(vport);
                        if (ret)
                                dev_err(&hdev->pdev->dev,
                                        "failed to inform link stat to VF, ret = %d\n",
                                        ret);
                        break;
                case HCLGE_MBX_QUEUE_RESET:
                        ret = hclge_mbx_reset_vf_queue(vport, req, &resp_msg);
                        break;
                case HCLGE_MBX_RESET:
                        ret = hclge_reset_vf(vport);
                        break;
                case HCLGE_MBX_KEEP_ALIVE:
                        hclge_vf_keep_alive(vport);
                        break;
                case HCLGE_MBX_SET_MTU:
                        ret = hclge_set_vf_mtu(vport, req);
                        if (ret)
                                dev_err(&hdev->pdev->dev,
                                        "VF fail(%d) to set mtu\n", ret);
                        break;
                case HCLGE_MBX_GET_QID_IN_PF:
                        hclge_get_queue_id_in_pf(vport, req, &resp_msg);
                        break;
                case HCLGE_MBX_GET_RSS_KEY:
                        hclge_get_rss_key(vport, req, &resp_msg);
                        break;
                case HCLGE_MBX_GET_LINK_MODE:
                        hclge_get_link_mode(vport, req);
                        break;
                case HCLGE_MBX_GET_VF_FLR_STATUS:
                case HCLGE_MBX_VF_UNINIT:
                        is_del = req->msg.code == HCLGE_MBX_VF_UNINIT;
                        hclge_rm_vport_all_mac_table(vport, is_del,
                                                     HCLGE_MAC_ADDR_UC);
                        hclge_rm_vport_all_mac_table(vport, is_del,
                                                     HCLGE_MAC_ADDR_MC);
                        hclge_rm_vport_all_vlan_table(vport, is_del);
                        break;
                case HCLGE_MBX_GET_MEDIA_TYPE:
                        hclge_get_vf_media_type(vport, &resp_msg);
                        break;
                case HCLGE_MBX_PUSH_LINK_STATUS:
                        hclge_handle_link_change_event(hdev, req);
                        break;
                case HCLGE_MBX_GET_MAC_ADDR:
                        hclge_get_vf_mac_addr(vport, &resp_msg);
                        break;
                case HCLGE_MBX_NCSI_ERROR:
                        hclge_handle_ncsi_error(hdev);
                        break;
                case HCLGE_MBX_HANDLE_VF_TBL:
                        hclge_handle_vf_tbl(vport, req);
                        break;
                default:
                        dev_err(&hdev->pdev->dev,
                                "unsupported mailbox message, code = %u\n",
                                req->msg.code);
                        break;
                }

                /* PF driver should not reply to IMP */
                if (hnae3_get_bit(req->mbx_need_resp, HCLGE_MBX_NEED_RESP_B) &&
                    req->msg.code < HCLGE_MBX_GET_VF_FLR_STATUS) {
                        resp_msg.status = ret;
                        hclge_gen_resp_to_vf(vport, req, &resp_msg);
                }

                crq->desc[crq->next_to_use].flag = 0;
                hclge_mbx_ring_ptr_move_crq(crq);

                /* reinitialize ret after completing the mbx message processing */
                ret = 0;
        }

        /* Write back the CMDQ_RQ head pointer; the M7 needs it */
        hclge_write_dev(&hdev->hw, HCLGE_NIC_CRQ_HEAD_REG, crq->next_to_use);
}
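/* Usage note (illustrative, not part of this file): hclge_mbx_handler() is
 * expected to run in task context after the CRQ event has been raised. A
 * sketch of a plausible call site, assuming the HCLGE_STATE_MBX_SERVICE_SCHED
 * flag from hclge_main.h:
 *
 *      if (test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
 *              hclge_mbx_handler(hdev);
 *
 * Treat this as a sketch of the dispatch pattern, not the exact code in
 * hclge_main.c.
 */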