// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include "hclge_main.h"
#include "hclge_mbx.h"
#include "hnae3.h"

#define CREATE_TRACE_POINTS
#include "hclge_trace.h"

static u16 hclge_errno_to_resp(int errno)
{
	return abs(errno);
}

/* hclge_gen_resp_to_vf: used to generate a synchronous response to VF when PF
 * receives a mailbox message from VF.
 * @vport: pointer to struct hclge_vport
 * @vf_to_pf_req: pointer to hclge_mbx_vf_to_pf_cmd of the original mailbox
 *		  message
 * @resp_msg: indicate to VF whether its request succeeded (0) or failed.
 */
static int hclge_gen_resp_to_vf(struct hclge_vport *vport,
				struct hclge_mbx_vf_to_pf_cmd *vf_to_pf_req,
				struct hclge_respond_to_vf_msg *resp_msg)
{
	struct hclge_mbx_pf_to_vf_cmd *resp_pf_to_vf;
	struct hclge_dev *hdev = vport->back;
	enum hclge_cmd_status status;
	struct hclge_desc desc;
	u16 resp;

	resp_pf_to_vf = (struct hclge_mbx_pf_to_vf_cmd *)desc.data;

	if (resp_msg->len > HCLGE_MBX_MAX_RESP_DATA_SIZE) {
		dev_err(&hdev->pdev->dev,
			"PF fail to gen resp to VF len %u exceeds max len %u\n",
			resp_msg->len,
			HCLGE_MBX_MAX_RESP_DATA_SIZE);
		/* If resp_msg->len is too long, set the value to max length
		 * and return the msg to VF
		 */
		resp_msg->len = HCLGE_MBX_MAX_RESP_DATA_SIZE;
	}

	hclge_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_PF_TO_VF, false);

	resp_pf_to_vf->dest_vfid = vf_to_pf_req->mbx_src_vfid;
	resp_pf_to_vf->msg_len = vf_to_pf_req->msg_len;
	resp_pf_to_vf->match_id = vf_to_pf_req->match_id;

	resp_pf_to_vf->msg.code = HCLGE_MBX_PF_VF_RESP;
	resp_pf_to_vf->msg.vf_mbx_msg_code = vf_to_pf_req->msg.code;
	resp_pf_to_vf->msg.vf_mbx_msg_subcode = vf_to_pf_req->msg.subcode;
	resp = hclge_errno_to_resp(resp_msg->status);
	if (resp < SHRT_MAX) {
		resp_pf_to_vf->msg.resp_status = resp;
	} else {
		dev_warn(&hdev->pdev->dev,
			 "failed to send response to VF, response status %u is out-of-bound\n",
			 resp);
		resp_pf_to_vf->msg.resp_status = EIO;
	}

	if (resp_msg->len > 0)
		memcpy(resp_pf_to_vf->msg.resp_data, resp_msg->data,
		       resp_msg->len);

	trace_hclge_pf_mbx_send(hdev, resp_pf_to_vf);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"failed to send response to VF, status: %d, vfid: %u, code: %u, subcode: %u.\n",
			status, vf_to_pf_req->mbx_src_vfid,
			vf_to_pf_req->msg.code, vf_to_pf_req->msg.subcode);

	return status;
}

static int hclge_send_mbx_msg(struct hclge_vport *vport, u8 *msg, u16 msg_len,
			      u16 mbx_opcode, u8 dest_vfid)
{
	struct hclge_mbx_pf_to_vf_cmd *resp_pf_to_vf;
	struct hclge_dev *hdev = vport->back;
	enum hclge_cmd_status status;
	struct hclge_desc desc;

	resp_pf_to_vf = (struct hclge_mbx_pf_to_vf_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_PF_TO_VF, false);

	resp_pf_to_vf->dest_vfid = dest_vfid;
	resp_pf_to_vf->msg_len = msg_len;
	resp_pf_to_vf->msg.code = mbx_opcode;

	memcpy(&resp_pf_to_vf->msg.vf_mbx_msg_code, msg, msg_len);

	trace_hclge_pf_mbx_send(hdev, resp_pf_to_vf);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"failed to send mailbox to VF, status: %d, vfid: %u, opcode: %u\n",
			status, dest_vfid, mbx_opcode);

	return status;
}
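
/* hclge_inform_reset_assert_to_vf: tell the VF which reset level to expect.
 * msg_data layout for HCLGE_MBX_ASSERTING_RESET: bytes 0-1 carry the
 * reset_type copied below.
 */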
int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport)
{
	struct hclge_dev *hdev = vport->back;
	u16 reset_type;
	u8 msg_data[2];
	u8 dest_vfid;

	BUILD_BUG_ON(HNAE3_MAX_RESET > U16_MAX);

	dest_vfid = (u8)vport->vport_id;

	if (hdev->reset_type == HNAE3_FUNC_RESET)
		reset_type = HNAE3_VF_PF_FUNC_RESET;
	else if (hdev->reset_type == HNAE3_FLR_RESET)
		reset_type = HNAE3_VF_FULL_RESET;
	else
		reset_type = HNAE3_VF_FUNC_RESET;

	memcpy(&msg_data[0], &reset_type, sizeof(u16));

	/* send this requested info to VF */
	return hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data),
				  HCLGE_MBX_ASSERTING_RESET, dest_vfid);
}

static void hclge_free_vector_ring_chain(struct hnae3_ring_chain_node *head)
{
	struct hnae3_ring_chain_node *chain_tmp, *chain;

	chain = head->next;

	while (chain) {
		chain_tmp = chain->next;
		kfree_sensitive(chain);
		chain = chain_tmp;
	}
}

/* hclge_get_ring_chain_from_mbx: get ring type & tqp id & int_gl idx
 * from mailbox message
 * msg[0]: opcode
 * msg[1]: <not relevant to this function>
 * msg[2]: ring_num
 * msg[3]: first ring type (TX|RX)
 * msg[4]: first tqp id
 * msg[5]: first int_gl idx
 * msg[6] ~ msg[14]: other ring type, tqp id and int_gl idx
 */
static int hclge_get_ring_chain_from_mbx(
			struct hclge_mbx_vf_to_pf_cmd *req,
			struct hnae3_ring_chain_node *ring_chain,
			struct hclge_vport *vport)
{
	struct hnae3_ring_chain_node *cur_chain, *new_chain;
	struct hclge_dev *hdev = vport->back;
	int ring_num;
	int i;

	ring_num = req->msg.ring_num;

	if (ring_num > HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM)
		return -ENOMEM;

	for (i = 0; i < ring_num; i++) {
		if (req->msg.param[i].tqp_index >= vport->nic.kinfo.rss_size) {
			dev_err(&hdev->pdev->dev, "tqp index(%u) is out of range(0-%u)\n",
				req->msg.param[i].tqp_index,
				vport->nic.kinfo.rss_size - 1);
			return -EINVAL;
		}
	}

	hnae3_set_bit(ring_chain->flag, HNAE3_RING_TYPE_B,
		      req->msg.param[0].ring_type);
	ring_chain->tqp_index =
			hclge_get_queue_id(vport->nic.kinfo.tqp
					   [req->msg.param[0].tqp_index]);
	hnae3_set_field(ring_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
			HNAE3_RING_GL_IDX_S, req->msg.param[0].int_gl_index);

	cur_chain = ring_chain;

	for (i = 1; i < ring_num; i++) {
		new_chain = kzalloc(sizeof(*new_chain), GFP_KERNEL);
		if (!new_chain)
			goto err;

		hnae3_set_bit(new_chain->flag, HNAE3_RING_TYPE_B,
			      req->msg.param[i].ring_type);

		new_chain->tqp_index =
			hclge_get_queue_id(vport->nic.kinfo.tqp
					   [req->msg.param[i].tqp_index]);

		hnae3_set_field(new_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
				HNAE3_RING_GL_IDX_S,
				req->msg.param[i].int_gl_index);

		cur_chain->next = new_chain;
		cur_chain = new_chain;
	}

	return 0;
err:
	hclge_free_vector_ring_chain(ring_chain);
	return -ENOMEM;
}

static int hclge_map_unmap_ring_to_vf_vector(struct hclge_vport *vport, bool en,
					     struct hclge_mbx_vf_to_pf_cmd *req)
{
	struct hnae3_ring_chain_node ring_chain;
	int vector_id = req->msg.vector_id;
	int ret;

	memset(&ring_chain, 0, sizeof(ring_chain));
	ret = hclge_get_ring_chain_from_mbx(req, &ring_chain, vport);
	if (ret)
		return ret;

	ret = hclge_bind_ring_with_vector(vport, vector_id, en, &ring_chain);

	hclge_free_vector_ring_chain(&ring_chain);

	return ret;
}
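
/* Record the promiscuous modes requested by the VF; the change is applied
 * later by the service task scheduled below.
 */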
static void hclge_set_vf_promisc_mode(struct hclge_vport *vport,
				      struct hclge_mbx_vf_to_pf_cmd *req)
{
	struct hnae3_handle *handle = &vport->nic;
	struct hclge_dev *hdev = vport->back;

	vport->vf_info.request_uc_en = req->msg.en_uc;
	vport->vf_info.request_mc_en = req->msg.en_mc;
	vport->vf_info.request_bc_en = req->msg.en_bc;

	if (req->msg.en_limit_promisc)
		set_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->priv_flags);
	else
		clear_bit(HNAE3_PFLAG_LIMIT_PROMISC,
			  &handle->priv_flags);

	set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
	hclge_task_schedule(hdev, 0);
}

static int hclge_set_vf_uc_mac_addr(struct hclge_vport *vport,
				    struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
#define HCLGE_MBX_VF_OLD_MAC_ADDR_OFFSET	6

	const u8 *mac_addr = (const u8 *)(mbx_req->msg.data);
	struct hclge_dev *hdev = vport->back;
	int status;

	if (mbx_req->msg.subcode == HCLGE_MBX_MAC_VLAN_UC_MODIFY) {
		const u8 *old_addr = (const u8 *)
		(&mbx_req->msg.data[HCLGE_MBX_VF_OLD_MAC_ADDR_OFFSET]);

		/* If VF MAC has been configured by the host then it
		 * cannot be overridden by the MAC specified by the VM.
		 */
		if (!is_zero_ether_addr(vport->vf_info.mac) &&
		    !ether_addr_equal(mac_addr, vport->vf_info.mac))
			return -EPERM;

		if (!is_valid_ether_addr(mac_addr))
			return -EINVAL;

		spin_lock_bh(&vport->mac_list_lock);
		status = hclge_update_mac_node_for_dev_addr(vport, old_addr,
							    mac_addr);
		spin_unlock_bh(&vport->mac_list_lock);
		hclge_task_schedule(hdev, 0);
	} else if (mbx_req->msg.subcode == HCLGE_MBX_MAC_VLAN_UC_ADD) {
		status = hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD,
					       HCLGE_MAC_ADDR_UC, mac_addr);
	} else if (mbx_req->msg.subcode == HCLGE_MBX_MAC_VLAN_UC_REMOVE) {
		status = hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL,
					       HCLGE_MAC_ADDR_UC, mac_addr);
	} else {
		dev_err(&hdev->pdev->dev,
			"failed to set unicast mac addr, unknown subcode %u\n",
			mbx_req->msg.subcode);
		return -EIO;
	}

	return status;
}

static int hclge_set_vf_mc_mac_addr(struct hclge_vport *vport,
				    struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
	const u8 *mac_addr = (const u8 *)(mbx_req->msg.data);
	struct hclge_dev *hdev = vport->back;

	if (mbx_req->msg.subcode == HCLGE_MBX_MAC_VLAN_MC_ADD) {
		hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD,
				      HCLGE_MAC_ADDR_MC, mac_addr);
	} else if (mbx_req->msg.subcode == HCLGE_MBX_MAC_VLAN_MC_REMOVE) {
		hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL,
				      HCLGE_MAC_ADDR_MC, mac_addr);
	} else {
		dev_err(&hdev->pdev->dev,
			"failed to set mcast mac addr, unknown subcode %u\n",
			mbx_req->msg.subcode);
		return -EIO;
	}

	return 0;
}

int hclge_push_vf_port_base_vlan_info(struct hclge_vport *vport, u8 vfid,
				      u16 state,
				      struct hclge_vlan_info *vlan_info)
{
#define MSG_DATA_SIZE	8

	u8 msg_data[MSG_DATA_SIZE];

	memcpy(&msg_data[0], &state, sizeof(u16));
	memcpy(&msg_data[2], &vlan_info->vlan_proto, sizeof(u16));
	memcpy(&msg_data[4], &vlan_info->qos, sizeof(u16));
	memcpy(&msg_data[6], &vlan_info->vlan_tag, sizeof(u16));

	return hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data),
				  HCLGE_MBX_PUSH_VLAN_INFO, vfid);
}
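
/* Dispatch the VF's VLAN requests by subcode: VLAN filter add/remove,
 * RX VLAN tag stripping, port based VLAN state query, and VLAN filter
 * enable/disable.
 */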
static int hclge_set_vf_vlan_cfg(struct hclge_vport *vport,
				 struct hclge_mbx_vf_to_pf_cmd *mbx_req,
				 struct hclge_respond_to_vf_msg *resp_msg)
{
#define HCLGE_MBX_VLAN_STATE_OFFSET	0
#define HCLGE_MBX_VLAN_INFO_OFFSET	2

	struct hnae3_handle *handle = &vport->nic;
	struct hclge_dev *hdev = vport->back;
	struct hclge_vf_vlan_cfg *msg_cmd;

	msg_cmd = (struct hclge_vf_vlan_cfg *)&mbx_req->msg;
	switch (msg_cmd->subcode) {
	case HCLGE_MBX_VLAN_FILTER:
		return hclge_set_vlan_filter(handle,
					     cpu_to_be16(msg_cmd->proto),
					     msg_cmd->vlan, msg_cmd->is_kill);
	case HCLGE_MBX_VLAN_RX_OFF_CFG:
		return hclge_en_hw_strip_rxvtag(handle, msg_cmd->enable);
	case HCLGE_MBX_GET_PORT_BASE_VLAN_STATE:
		/* The VF does not need to know the port based VLAN state on
		 * a HNAE3_DEVICE_VERSION_V3 device, so always report it as
		 * disabled there when the VF queries it.
		 */
		resp_msg->data[0] =
			hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3 ?
			HNAE3_PORT_BASE_VLAN_DISABLE :
			vport->port_base_vlan_cfg.state;
		resp_msg->len = sizeof(u8);
		return 0;
	case HCLGE_MBX_ENABLE_VLAN_FILTER:
		return hclge_enable_vport_vlan_filter(vport, msg_cmd->enable);
	default:
		return 0;
	}
}

static int hclge_set_vf_alive(struct hclge_vport *vport,
			      struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
	bool alive = !!mbx_req->msg.data[0];
	int ret = 0;

	if (alive)
		ret = hclge_vport_start(vport);
	else
		hclge_vport_stop(vport);

	return ret;
}

static void hclge_get_basic_info(struct hclge_vport *vport,
				 struct hclge_respond_to_vf_msg *resp_msg)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hnae3_ae_dev *ae_dev = vport->back->ae_dev;
	struct hclge_basic_info *basic_info;
	unsigned int i;

	basic_info = (struct hclge_basic_info *)resp_msg->data;
	for (i = 0; i < kinfo->tc_info.num_tc; i++)
		basic_info->hw_tc_map |= BIT(i);

	if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps))
		hnae3_set_bit(basic_info->pf_caps,
			      HNAE3_PF_SUPPORT_VLAN_FLTR_MDF_B, 1);

	resp_msg->len = HCLGE_MBX_MAX_RESP_DATA_SIZE;
}

static void hclge_get_vf_queue_info(struct hclge_vport *vport,
				    struct hclge_respond_to_vf_msg *resp_msg)
{
#define HCLGE_TQPS_RSS_INFO_LEN		6
#define HCLGE_TQPS_ALLOC_OFFSET		0
#define HCLGE_TQPS_RSS_SIZE_OFFSET	2
#define HCLGE_TQPS_RX_BUFFER_LEN_OFFSET	4

	struct hclge_dev *hdev = vport->back;

	/* get the queue related info */
	memcpy(&resp_msg->data[HCLGE_TQPS_ALLOC_OFFSET],
	       &vport->alloc_tqps, sizeof(u16));
	memcpy(&resp_msg->data[HCLGE_TQPS_RSS_SIZE_OFFSET],
	       &vport->nic.kinfo.rss_size, sizeof(u16));
	memcpy(&resp_msg->data[HCLGE_TQPS_RX_BUFFER_LEN_OFFSET],
	       &hdev->rx_buf_len, sizeof(u16));
	resp_msg->len = HCLGE_TQPS_RSS_INFO_LEN;
}

static void hclge_get_vf_mac_addr(struct hclge_vport *vport,
				  struct hclge_respond_to_vf_msg *resp_msg)
{
	ether_addr_copy(resp_msg->data, vport->vf_info.mac);
	resp_msg->len = ETH_ALEN;
}

static void hclge_get_vf_queue_depth(struct hclge_vport *vport,
				     struct hclge_respond_to_vf_msg *resp_msg)
{
#define HCLGE_TQPS_DEPTH_INFO_LEN	4
#define HCLGE_TQPS_NUM_TX_DESC_OFFSET	0
#define HCLGE_TQPS_NUM_RX_DESC_OFFSET	2

	struct hclge_dev *hdev = vport->back;

	/* get the queue depth info */
	memcpy(&resp_msg->data[HCLGE_TQPS_NUM_TX_DESC_OFFSET],
	       &hdev->num_tx_desc, sizeof(u16));
	memcpy(&resp_msg->data[HCLGE_TQPS_NUM_RX_DESC_OFFSET],
	       &hdev->num_rx_desc, sizeof(u16));
	resp_msg->len = HCLGE_TQPS_DEPTH_INFO_LEN;
}
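
/* The hclge_get_vf_* query helpers above and below only fill resp_msg;
 * the common handler hclge_mbx_handler() sends the actual reply via
 * hclge_gen_resp_to_vf() when the VF has requested a response.
 */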
static void hclge_get_vf_media_type(struct hclge_vport *vport,
				    struct hclge_respond_to_vf_msg *resp_msg)
{
#define HCLGE_VF_MEDIA_TYPE_OFFSET	0
#define HCLGE_VF_MODULE_TYPE_OFFSET	1
#define HCLGE_VF_MEDIA_TYPE_LENGTH	2

	struct hclge_dev *hdev = vport->back;

	resp_msg->data[HCLGE_VF_MEDIA_TYPE_OFFSET] =
		hdev->hw.mac.media_type;
	resp_msg->data[HCLGE_VF_MODULE_TYPE_OFFSET] =
		hdev->hw.mac.module_type;
	resp_msg->len = HCLGE_VF_MEDIA_TYPE_LENGTH;
}

int hclge_push_vf_link_status(struct hclge_vport *vport)
{
#define HCLGE_VF_LINK_STATE_UP		1U
#define HCLGE_VF_LINK_STATE_DOWN	0U

	struct hclge_dev *hdev = vport->back;
	u16 link_status;
	u8 msg_data[9];
	u16 duplex;

	/* mac.link can only be 0 or 1 */
	switch (vport->vf_info.link_state) {
	case IFLA_VF_LINK_STATE_ENABLE:
		link_status = HCLGE_VF_LINK_STATE_UP;
		break;
	case IFLA_VF_LINK_STATE_DISABLE:
		link_status = HCLGE_VF_LINK_STATE_DOWN;
		break;
	case IFLA_VF_LINK_STATE_AUTO:
	default:
		link_status = (u16)hdev->hw.mac.link;
		break;
	}

	duplex = hdev->hw.mac.duplex;
	memcpy(&msg_data[0], &link_status, sizeof(u16));
	memcpy(&msg_data[2], &hdev->hw.mac.speed, sizeof(u32));
	memcpy(&msg_data[6], &duplex, sizeof(u16));
	msg_data[8] = HCLGE_MBX_PUSH_LINK_STATUS_EN;

	/* send this requested info to VF */
	return hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data),
				  HCLGE_MBX_LINK_STAT_CHANGE, vport->vport_id);
}

static void hclge_get_link_mode(struct hclge_vport *vport,
				struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
#define HCLGE_SUPPORTED	1
	struct hclge_dev *hdev = vport->back;
	unsigned long advertising;
	unsigned long supported;
	unsigned long send_data;
	u8 msg_data[10] = {};
	u8 dest_vfid;

	advertising = hdev->hw.mac.advertising[0];
	supported = hdev->hw.mac.supported[0];
	dest_vfid = mbx_req->mbx_src_vfid;
	msg_data[0] = mbx_req->msg.data[0];

	send_data = msg_data[0] == HCLGE_SUPPORTED ? supported : advertising;

	memcpy(&msg_data[2], &send_data, sizeof(unsigned long));
	hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data),
			   HCLGE_MBX_LINK_STAT_MODE, dest_vfid);
}

static int hclge_mbx_reset_vf_queue(struct hclge_vport *vport,
				    struct hclge_mbx_vf_to_pf_cmd *mbx_req,
				    struct hclge_respond_to_vf_msg *resp_msg)
{
#define HCLGE_RESET_ALL_QUEUE_DONE	1U
	struct hnae3_handle *handle = &vport->nic;
	struct hclge_dev *hdev = vport->back;
	u16 queue_id;
	int ret;

	memcpy(&queue_id, mbx_req->msg.data, sizeof(queue_id));
	resp_msg->data[0] = HCLGE_RESET_ALL_QUEUE_DONE;
	resp_msg->len = sizeof(u8);

	/* The PF resets all of the VF's queues at once, so there is no need
	 * to reset them again when queue_id > 0; just return success.
	 */
	if (queue_id > 0)
		return 0;

	ret = hclge_reset_tqp(handle);
	if (ret)
		dev_err(&hdev->pdev->dev, "failed to reset vf %u queue, ret = %d\n",
			vport->vport_id - HCLGE_VF_VPORT_START_NUM, ret);

	return ret;
}

static int hclge_reset_vf(struct hclge_vport *vport)
{
	struct hclge_dev *hdev = vport->back;

	dev_warn(&hdev->pdev->dev, "PF received VF reset request from VF %u!",
		 vport->vport_id);

	return hclge_func_reset_cmd(hdev, vport->vport_id);
}

static void hclge_vf_keep_alive(struct hclge_vport *vport)
{
	vport->last_active_jiffies = jiffies;
}

static int hclge_set_vf_mtu(struct hclge_vport *vport,
			    struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
	u32 mtu;

	memcpy(&mtu, mbx_req->msg.data, sizeof(mtu));

	return hclge_set_vport_mtu(vport, mtu);
}

static void hclge_get_queue_id_in_pf(struct hclge_vport *vport,
				     struct hclge_mbx_vf_to_pf_cmd *mbx_req,
				     struct hclge_respond_to_vf_msg *resp_msg)
{
	u16 queue_id, qid_in_pf;

	memcpy(&queue_id, mbx_req->msg.data, sizeof(queue_id));
	qid_in_pf = hclge_covert_handle_qid_global(&vport->nic, queue_id);
	memcpy(resp_msg->data, &qid_in_pf, sizeof(qid_in_pf));
	resp_msg->len = sizeof(qid_in_pf);
}

static void hclge_get_rss_key(struct hclge_vport *vport,
			      struct hclge_mbx_vf_to_pf_cmd *mbx_req,
			      struct hclge_respond_to_vf_msg *resp_msg)
{
#define HCLGE_RSS_MBX_RESP_LEN	8
	struct hclge_dev *hdev = vport->back;
	u8 index;

	index = mbx_req->msg.data[0];

	/* Check the query index of rss_hash_key from VF; it must not exceed
	 * the size of rss_hash_key.
	 */
	if (((index + 1) * HCLGE_RSS_MBX_RESP_LEN) >
	      sizeof(vport[0].rss_hash_key)) {
		dev_warn(&hdev->pdev->dev,
			 "failed to get the rss hash key, the index(%u) is invalid\n",
			 index);
		return;
	}

	memcpy(resp_msg->data,
	       &hdev->vport[0].rss_hash_key[index * HCLGE_RSS_MBX_RESP_LEN],
	       HCLGE_RSS_MBX_RESP_LEN);
	resp_msg->len = HCLGE_RSS_MBX_RESP_LEN;
}

static void hclge_link_fail_parse(struct hclge_dev *hdev, u8 link_fail_code)
{
	switch (link_fail_code) {
	case HCLGE_LF_REF_CLOCK_LOST:
		dev_warn(&hdev->pdev->dev, "Reference clock lost!\n");
		break;
	case HCLGE_LF_XSFP_TX_DISABLE:
		dev_warn(&hdev->pdev->dev, "SFP tx is disabled!\n");
		break;
	case HCLGE_LF_XSFP_ABSENT:
		dev_warn(&hdev->pdev->dev, "SFP is absent!\n");
		break;
	default:
		break;
	}
}

static void hclge_handle_link_change_event(struct hclge_dev *hdev,
					   struct hclge_mbx_vf_to_pf_cmd *req)
{
	hclge_task_schedule(hdev, 0);

	if (!req->msg.subcode)
		hclge_link_fail_parse(hdev, req->msg.data[0]);
}

static bool hclge_cmd_crq_empty(struct hclge_hw *hw)
{
	u32 tail = hclge_read_dev(hw, HCLGE_NIC_CRQ_TAIL_REG);

	return tail == hw->cmq.crq.next_to_use;
}

static void hclge_handle_ncsi_error(struct hclge_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = hdev->ae_dev;

	ae_dev->ops->set_default_reset_request(ae_dev, HNAE3_GLOBAL_RESET);
	dev_warn(&hdev->pdev->dev, "requesting reset due to NCSI error\n");
	ae_dev->ops->reset_event(hdev->pdev, NULL);
}
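
/* HCLGE_MBX_HANDLE_VF_TBL with subcode HCLGE_MBX_VPORT_LIST_CLEAR asks the
 * PF to drop all unicast/multicast MAC entries and VLAN entries recorded
 * for this vport.
 */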
static void hclge_handle_vf_tbl(struct hclge_vport *vport,
				struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_vf_vlan_cfg *msg_cmd;

	msg_cmd = (struct hclge_vf_vlan_cfg *)&mbx_req->msg;
	if (msg_cmd->subcode == HCLGE_MBX_VPORT_LIST_CLEAR) {
		hclge_rm_vport_all_mac_table(vport, true, HCLGE_MAC_ADDR_UC);
		hclge_rm_vport_all_mac_table(vport, true, HCLGE_MAC_ADDR_MC);
		hclge_rm_vport_all_vlan_table(vport, true);
	} else {
		dev_warn(&hdev->pdev->dev, "Invalid cmd(%u)\n",
			 msg_cmd->subcode);
	}
}

void hclge_mbx_handler(struct hclge_dev *hdev)
{
	struct hclge_cmq_ring *crq = &hdev->hw.cmq.crq;
	struct hclge_respond_to_vf_msg resp_msg;
	struct hclge_mbx_vf_to_pf_cmd *req;
	struct hclge_vport *vport;
	struct hclge_desc *desc;
	bool is_del = false;
	unsigned int flag;
	int ret = 0;

	/* handle all the mailbox requests in the queue */
	while (!hclge_cmd_crq_empty(&hdev->hw)) {
		if (test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state)) {
			dev_warn(&hdev->pdev->dev,
				 "command queue needs re-initializing\n");
			return;
		}

		desc = &crq->desc[crq->next_to_use];
		req = (struct hclge_mbx_vf_to_pf_cmd *)desc->data;

		flag = le16_to_cpu(crq->desc[crq->next_to_use].flag);
		if (unlikely(!hnae3_get_bit(flag, HCLGE_CMDQ_RX_OUTVLD_B))) {
			dev_warn(&hdev->pdev->dev,
				 "dropped invalid mailbox message, code = %u\n",
				 req->msg.code);

			/* dropping/not processing this invalid message */
			crq->desc[crq->next_to_use].flag = 0;
			hclge_mbx_ring_ptr_move_crq(crq);
			continue;
		}

		vport = &hdev->vport[req->mbx_src_vfid];

		trace_hclge_pf_mbx_get(hdev, req);

		/* clear the resp_msg before processing every mailbox message */
		memset(&resp_msg, 0, sizeof(resp_msg));

		switch (req->msg.code) {
		case HCLGE_MBX_MAP_RING_TO_VECTOR:
			ret = hclge_map_unmap_ring_to_vf_vector(vport, true,
								req);
			break;
		case HCLGE_MBX_UNMAP_RING_TO_VECTOR:
			ret = hclge_map_unmap_ring_to_vf_vector(vport, false,
								req);
			break;
		case HCLGE_MBX_SET_PROMISC_MODE:
			hclge_set_vf_promisc_mode(vport, req);
			break;
		case HCLGE_MBX_SET_UNICAST:
			ret = hclge_set_vf_uc_mac_addr(vport, req);
			if (ret)
				dev_err(&hdev->pdev->dev,
					"PF fail(%d) to set VF UC MAC Addr\n",
					ret);
			break;
		case HCLGE_MBX_SET_MULTICAST:
			ret = hclge_set_vf_mc_mac_addr(vport, req);
			if (ret)
				dev_err(&hdev->pdev->dev,
					"PF fail(%d) to set VF MC MAC Addr\n",
					ret);
			break;
		case HCLGE_MBX_SET_VLAN:
			ret = hclge_set_vf_vlan_cfg(vport, req, &resp_msg);
			if (ret)
				dev_err(&hdev->pdev->dev,
					"PF failed(%d) to config VF's VLAN\n",
					ret);
			break;
		case HCLGE_MBX_SET_ALIVE:
			ret = hclge_set_vf_alive(vport, req);
			if (ret)
				dev_err(&hdev->pdev->dev,
					"PF failed(%d) to set VF's ALIVE\n",
					ret);
			break;
		case HCLGE_MBX_GET_QINFO:
			hclge_get_vf_queue_info(vport, &resp_msg);
			break;
		case HCLGE_MBX_GET_QDEPTH:
			hclge_get_vf_queue_depth(vport, &resp_msg);
			break;
		case HCLGE_MBX_GET_BASIC_INFO:
			hclge_get_basic_info(vport, &resp_msg);
			break;
		case HCLGE_MBX_GET_LINK_STATUS:
			ret = hclge_push_vf_link_status(vport);
			if (ret)
				dev_err(&hdev->pdev->dev,
					"failed to inform link stat to VF, ret = %d\n",
					ret);
			break;
		case HCLGE_MBX_QUEUE_RESET:
			ret = hclge_mbx_reset_vf_queue(vport, req, &resp_msg);
			break;
		case HCLGE_MBX_RESET:
			ret = hclge_reset_vf(vport);
			break;
		case HCLGE_MBX_KEEP_ALIVE:
			hclge_vf_keep_alive(vport);
			break;
		case HCLGE_MBX_SET_MTU:
			ret = hclge_set_vf_mtu(vport, req);
			if (ret)
				dev_err(&hdev->pdev->dev,
					"VF fail(%d) to set mtu\n", ret);
			break;
		case HCLGE_MBX_GET_QID_IN_PF:
			hclge_get_queue_id_in_pf(vport, req, &resp_msg);
			break;
		case HCLGE_MBX_GET_RSS_KEY:
			hclge_get_rss_key(vport, req, &resp_msg);
			break;
		case HCLGE_MBX_GET_LINK_MODE:
			hclge_get_link_mode(vport, req);
			break;
		case HCLGE_MBX_GET_VF_FLR_STATUS:
		case HCLGE_MBX_VF_UNINIT:
			is_del = req->msg.code == HCLGE_MBX_VF_UNINIT;
			hclge_rm_vport_all_mac_table(vport, is_del,
						     HCLGE_MAC_ADDR_UC);
			hclge_rm_vport_all_mac_table(vport, is_del,
						     HCLGE_MAC_ADDR_MC);
			hclge_rm_vport_all_vlan_table(vport, is_del);
			break;
		case HCLGE_MBX_GET_MEDIA_TYPE:
			hclge_get_vf_media_type(vport, &resp_msg);
			break;
		case HCLGE_MBX_PUSH_LINK_STATUS:
			hclge_handle_link_change_event(hdev, req);
			break;
		case HCLGE_MBX_GET_MAC_ADDR:
			hclge_get_vf_mac_addr(vport, &resp_msg);
			break;
		case HCLGE_MBX_NCSI_ERROR:
			hclge_handle_ncsi_error(hdev);
			break;
		case HCLGE_MBX_HANDLE_VF_TBL:
			hclge_handle_vf_tbl(vport, req);
			break;
		default:
			dev_err(&hdev->pdev->dev,
				"un-supported mailbox message, code = %u\n",
				req->msg.code);
			break;
		}

		/* PF driver should not reply to IMP */
		if (hnae3_get_bit(req->mbx_need_resp, HCLGE_MBX_NEED_RESP_B) &&
		    req->msg.code < HCLGE_MBX_GET_VF_FLR_STATUS) {
			resp_msg.status = ret;
			hclge_gen_resp_to_vf(vport, req, &resp_msg);
		}

		crq->desc[crq->next_to_use].flag = 0;
		hclge_mbx_ring_ptr_move_crq(crq);

		/* reinitialize ret after completing the mbx message processing */
		ret = 0;
	}

	/* Write back CMDQ_RQ header pointer, M7 needs this pointer */
	hclge_write_dev(&hdev->hw, HCLGE_NIC_CRQ_HEAD_REG, crq->next_to_use);
}