// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include "hclge_main.h"
#include "hclge_mbx.h"
#include "hnae3.h"

#define CREATE_TRACE_POINTS
#include "hclge_trace.h"

static u16 hclge_errno_to_resp(int errno)
{
	return abs(errno);
}

/* hclge_gen_resp_to_vf: used to generate a synchronous response to VF when PF
 * receives a mailbox message from VF.
 * @vport: pointer to struct hclge_vport
 * @vf_to_pf_req: pointer to hclge_mbx_vf_to_pf_cmd of the original mailbox
 *		  message
 * @resp_msg: pointer to hclge_respond_to_vf_msg carrying the response data
 *	      and the request status (0 on success, an error code otherwise)
 */
static int hclge_gen_resp_to_vf(struct hclge_vport *vport,
				struct hclge_mbx_vf_to_pf_cmd *vf_to_pf_req,
				struct hclge_respond_to_vf_msg *resp_msg)
{
	struct hclge_mbx_pf_to_vf_cmd *resp_pf_to_vf;
	struct hclge_dev *hdev = vport->back;
	enum hclge_cmd_status status;
	struct hclge_desc desc;
	u16 resp;

	resp_pf_to_vf = (struct hclge_mbx_pf_to_vf_cmd *)desc.data;

	if (resp_msg->len > HCLGE_MBX_MAX_RESP_DATA_SIZE) {
		dev_err(&hdev->pdev->dev,
			"PF fail to gen resp to VF len %u exceeds max len %u\n",
			resp_msg->len,
			HCLGE_MBX_MAX_RESP_DATA_SIZE);
		/* If resp_msg->len is too long, set the value to max length
		 * and return the msg to VF
		 */
		resp_msg->len = HCLGE_MBX_MAX_RESP_DATA_SIZE;
	}

	hclge_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_PF_TO_VF, false);

	resp_pf_to_vf->dest_vfid = vf_to_pf_req->mbx_src_vfid;
	resp_pf_to_vf->msg_len = vf_to_pf_req->msg_len;
	resp_pf_to_vf->match_id = vf_to_pf_req->match_id;

	resp_pf_to_vf->msg.code = HCLGE_MBX_PF_VF_RESP;
	resp_pf_to_vf->msg.vf_mbx_msg_code = vf_to_pf_req->msg.code;
	resp_pf_to_vf->msg.vf_mbx_msg_subcode = vf_to_pf_req->msg.subcode;
	resp = hclge_errno_to_resp(resp_msg->status);
	if (resp < SHRT_MAX) {
		resp_pf_to_vf->msg.resp_status = resp;
	} else {
		dev_warn(&hdev->pdev->dev,
			 "failed to send response to VF, response status %u is out-of-bound\n",
			 resp);
		resp_pf_to_vf->msg.resp_status = EIO;
	}

	if (resp_msg->len > 0)
		memcpy(resp_pf_to_vf->msg.resp_data, resp_msg->data,
		       resp_msg->len);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"failed to send response to VF, status: %d, vfid: %u, code: %u, subcode: %u.\n",
			status, vf_to_pf_req->mbx_src_vfid,
			vf_to_pf_req->msg.code, vf_to_pf_req->msg.subcode);

	return status;
}

static int hclge_send_mbx_msg(struct hclge_vport *vport, u8 *msg, u16 msg_len,
			      u16 mbx_opcode, u8 dest_vfid)
{
	struct hclge_mbx_pf_to_vf_cmd *resp_pf_to_vf;
	struct hclge_dev *hdev = vport->back;
	enum hclge_cmd_status status;
	struct hclge_desc desc;

	resp_pf_to_vf = (struct hclge_mbx_pf_to_vf_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_PF_TO_VF, false);

	resp_pf_to_vf->dest_vfid = dest_vfid;
	resp_pf_to_vf->msg_len = msg_len;
	resp_pf_to_vf->msg.code = mbx_opcode;

	memcpy(&resp_pf_to_vf->msg.vf_mbx_msg_code, msg, msg_len);

	trace_hclge_pf_mbx_send(hdev, resp_pf_to_vf);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"failed to send mailbox to VF, status: %d, vfid: %u, opcode: %u\n",
			status, dest_vfid, mbx_opcode);

	return status;
}

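/* Payload layout of the HCLGE_MBX_ASSERTING_RESET message built below:
 * msg_data[0-1]: reset type the VF should take (u16, copied in CPU byte
 *		  order), one of the HNAE3_VF_*_RESET values.
 */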
int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport)
{
	struct hclge_dev *hdev = vport->back;
	u16 reset_type;
	u8 msg_data[2];
	u8 dest_vfid;

	BUILD_BUG_ON(HNAE3_MAX_RESET > U16_MAX);

	dest_vfid = (u8)vport->vport_id;

	if (hdev->reset_type == HNAE3_FUNC_RESET)
		reset_type = HNAE3_VF_PF_FUNC_RESET;
	else if (hdev->reset_type == HNAE3_FLR_RESET)
		reset_type = HNAE3_VF_FULL_RESET;
	else
		reset_type = HNAE3_VF_FUNC_RESET;

	memcpy(&msg_data[0], &reset_type, sizeof(u16));

	/* send this requested info to VF */
	return hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data),
				  HCLGE_MBX_ASSERTING_RESET, dest_vfid);
}

static void hclge_free_vector_ring_chain(struct hnae3_ring_chain_node *head)
{
	struct hnae3_ring_chain_node *chain_tmp, *chain;

	chain = head->next;

	while (chain) {
		chain_tmp = chain->next;
		kfree_sensitive(chain);
		chain = chain_tmp;
	}
}

/* hclge_get_ring_chain_from_mbx: get ring type & tqp id & int_gl idx
 * from mailbox message
 * msg[0]: opcode
 * msg[1]: <not relevant to this function>
 * msg[2]: ring_num
 * msg[3]: first ring type (TX|RX)
 * msg[4]: first tqp id
 * msg[5]: first int_gl idx
 * msg[6] ~ msg[14]: other ring type, tqp id and int_gl idx
 */
static int hclge_get_ring_chain_from_mbx(
			struct hclge_mbx_vf_to_pf_cmd *req,
			struct hnae3_ring_chain_node *ring_chain,
			struct hclge_vport *vport)
{
	struct hnae3_ring_chain_node *cur_chain, *new_chain;
	struct hclge_dev *hdev = vport->back;
	int ring_num;
	int i;

	ring_num = req->msg.ring_num;

	if (ring_num > HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM)
		return -ENOMEM;

	for (i = 0; i < ring_num; i++) {
		if (req->msg.param[i].tqp_index >= vport->nic.kinfo.rss_size) {
			dev_err(&hdev->pdev->dev, "tqp index(%u) is out of range(0-%u)\n",
				req->msg.param[i].tqp_index,
				vport->nic.kinfo.rss_size - 1);
			return -EINVAL;
		}
	}

	hnae3_set_bit(ring_chain->flag, HNAE3_RING_TYPE_B,
		      req->msg.param[0].ring_type);
	ring_chain->tqp_index =
		hclge_get_queue_id(vport->nic.kinfo.tqp
				   [req->msg.param[0].tqp_index]);
	hnae3_set_field(ring_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
			HNAE3_RING_GL_IDX_S, req->msg.param[0].int_gl_index);

	cur_chain = ring_chain;

	for (i = 1; i < ring_num; i++) {
		new_chain = kzalloc(sizeof(*new_chain), GFP_KERNEL);
		if (!new_chain)
			goto err;

		hnae3_set_bit(new_chain->flag, HNAE3_RING_TYPE_B,
			      req->msg.param[i].ring_type);

		new_chain->tqp_index =
			hclge_get_queue_id(vport->nic.kinfo.tqp
					   [req->msg.param[i].tqp_index]);

		hnae3_set_field(new_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
				HNAE3_RING_GL_IDX_S,
				req->msg.param[i].int_gl_index);

		cur_chain->next = new_chain;
		cur_chain = new_chain;
	}

	return 0;
err:
	hclge_free_vector_ring_chain(ring_chain);
	return -ENOMEM;
}

static int hclge_map_unmap_ring_to_vf_vector(struct hclge_vport *vport, bool en,
					     struct hclge_mbx_vf_to_pf_cmd *req)
{
	struct hnae3_ring_chain_node ring_chain;
	int vector_id = req->msg.vector_id;
	int ret;

	memset(&ring_chain, 0, sizeof(ring_chain));
	ret = hclge_get_ring_chain_from_mbx(req, &ring_chain, vport);
	if (ret)
		return ret;

	ret = hclge_bind_ring_with_vector(vport, vector_id, en, &ring_chain);

	hclge_free_vector_ring_chain(&ring_chain);

	return ret;
}

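/* The requested promiscuous settings are only recorded here and flagged via
 * HCLGE_VPORT_STATE_PROMISC_CHANGE; the service task scheduled below applies
 * them to hardware asynchronously.
 */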
static void hclge_set_vf_promisc_mode(struct hclge_vport *vport,
				      struct hclge_mbx_vf_to_pf_cmd *req)
{
	struct hnae3_handle *handle = &vport->nic;
	struct hclge_dev *hdev = vport->back;

	vport->vf_info.request_uc_en = req->msg.en_uc;
	vport->vf_info.request_mc_en = req->msg.en_mc;
	vport->vf_info.request_bc_en = req->msg.en_bc;

	if (req->msg.en_limit_promisc)
		set_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->priv_flags);
	else
		clear_bit(HNAE3_PFLAG_LIMIT_PROMISC,
			  &handle->priv_flags);

	set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
	hclge_task_schedule(hdev, 0);
}

static int hclge_set_vf_uc_mac_addr(struct hclge_vport *vport,
				    struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
#define HCLGE_MBX_VF_OLD_MAC_ADDR_OFFSET	6

	const u8 *mac_addr = (const u8 *)(mbx_req->msg.data);
	struct hclge_dev *hdev = vport->back;
	int status;

	if (mbx_req->msg.subcode == HCLGE_MBX_MAC_VLAN_UC_MODIFY) {
		const u8 *old_addr = (const u8 *)
			(&mbx_req->msg.data[HCLGE_MBX_VF_OLD_MAC_ADDR_OFFSET]);

		/* If VF MAC has been configured by the host then it
		 * cannot be overridden by the MAC specified by the VM.
		 */
		if (!is_zero_ether_addr(vport->vf_info.mac) &&
		    !ether_addr_equal(mac_addr, vport->vf_info.mac))
			return -EPERM;

		if (!is_valid_ether_addr(mac_addr))
			return -EINVAL;

		spin_lock_bh(&vport->mac_list_lock);
		status = hclge_update_mac_node_for_dev_addr(vport, old_addr,
							    mac_addr);
		spin_unlock_bh(&vport->mac_list_lock);
		hclge_task_schedule(hdev, 0);
	} else if (mbx_req->msg.subcode == HCLGE_MBX_MAC_VLAN_UC_ADD) {
		status = hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD,
					       HCLGE_MAC_ADDR_UC, mac_addr);
	} else if (mbx_req->msg.subcode == HCLGE_MBX_MAC_VLAN_UC_REMOVE) {
		status = hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL,
					       HCLGE_MAC_ADDR_UC, mac_addr);
	} else {
		dev_err(&hdev->pdev->dev,
			"failed to set unicast mac addr, unknown subcode %u\n",
			mbx_req->msg.subcode);
		return -EIO;
	}

	return status;
}

static int hclge_set_vf_mc_mac_addr(struct hclge_vport *vport,
				    struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
	const u8 *mac_addr = (const u8 *)(mbx_req->msg.data);
	struct hclge_dev *hdev = vport->back;

	if (mbx_req->msg.subcode == HCLGE_MBX_MAC_VLAN_MC_ADD) {
		hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD,
				      HCLGE_MAC_ADDR_MC, mac_addr);
	} else if (mbx_req->msg.subcode == HCLGE_MBX_MAC_VLAN_MC_REMOVE) {
		hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL,
				      HCLGE_MAC_ADDR_MC, mac_addr);
	} else {
		dev_err(&hdev->pdev->dev,
			"failed to set mcast mac addr, unknown subcode %u\n",
			mbx_req->msg.subcode);
		return -EIO;
	}

	return 0;
}

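/* Payload layout of the HCLGE_MBX_PUSH_VLAN_INFO message built below
 * (each field is a u16, copied in CPU byte order):
 * msg_data[0-1]: port based VLAN state
 * msg_data[2-3]: VLAN protocol
 * msg_data[4-5]: QoS
 * msg_data[6-7]: VLAN tag
 */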
int hclge_push_vf_port_base_vlan_info(struct hclge_vport *vport, u8 vfid,
				      u16 state,
				      struct hclge_vlan_info *vlan_info)
{
#define MSG_DATA_SIZE	8

	u8 msg_data[MSG_DATA_SIZE];

	memcpy(&msg_data[0], &state, sizeof(u16));
	memcpy(&msg_data[2], &vlan_info->vlan_proto, sizeof(u16));
	memcpy(&msg_data[4], &vlan_info->qos, sizeof(u16));
	memcpy(&msg_data[6], &vlan_info->vlan_tag, sizeof(u16));

	return hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data),
				  HCLGE_MBX_PUSH_VLAN_INFO, vfid);
}

static int hclge_set_vf_vlan_cfg(struct hclge_vport *vport,
				 struct hclge_mbx_vf_to_pf_cmd *mbx_req,
				 struct hclge_respond_to_vf_msg *resp_msg)
{
#define HCLGE_MBX_VLAN_STATE_OFFSET	0
#define HCLGE_MBX_VLAN_INFO_OFFSET	2

	struct hnae3_handle *handle = &vport->nic;
	struct hclge_dev *hdev = vport->back;
	struct hclge_vf_vlan_cfg *msg_cmd;

	msg_cmd = (struct hclge_vf_vlan_cfg *)&mbx_req->msg;
	switch (msg_cmd->subcode) {
	case HCLGE_MBX_VLAN_FILTER:
		return hclge_set_vlan_filter(handle,
					     cpu_to_be16(msg_cmd->proto),
					     msg_cmd->vlan, msg_cmd->is_kill);
	case HCLGE_MBX_VLAN_RX_OFF_CFG:
		return hclge_en_hw_strip_rxvtag(handle, msg_cmd->enable);
	case HCLGE_MBX_GET_PORT_BASE_VLAN_STATE:
		/* vf does not need to know about the port based VLAN state
		 * on device HNAE3_DEVICE_VERSION_V3. So always return disable
		 * on device HNAE3_DEVICE_VERSION_V3 if vf queries the port
		 * based VLAN state.
		 */
		resp_msg->data[0] =
			hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3 ?
				HNAE3_PORT_BASE_VLAN_DISABLE :
				vport->port_base_vlan_cfg.state;
		resp_msg->len = sizeof(u8);
		return 0;
	case HCLGE_MBX_ENABLE_VLAN_FILTER:
		return hclge_enable_vport_vlan_filter(vport, msg_cmd->enable);
	default:
		return 0;
	}
}

static int hclge_set_vf_alive(struct hclge_vport *vport,
			      struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
	bool alive = !!mbx_req->msg.data[0];
	int ret = 0;

	if (alive)
		ret = hclge_vport_start(vport);
	else
		hclge_vport_stop(vport);

	return ret;
}

static void hclge_get_basic_info(struct hclge_vport *vport,
				 struct hclge_respond_to_vf_msg *resp_msg)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hnae3_ae_dev *ae_dev = vport->back->ae_dev;
	struct hclge_basic_info *basic_info;
	unsigned int i;

	basic_info = (struct hclge_basic_info *)resp_msg->data;
	for (i = 0; i < kinfo->tc_info.num_tc; i++)
		basic_info->hw_tc_map |= BIT(i);

	if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps))
		hnae3_set_bit(basic_info->pf_caps,
			      HNAE3_PF_SUPPORT_VLAN_FLTR_MDF_B, 1);

	resp_msg->len = HCLGE_MBX_MAX_RESP_DATA_SIZE;
}

static void hclge_get_vf_queue_info(struct hclge_vport *vport,
				    struct hclge_respond_to_vf_msg *resp_msg)
{
#define HCLGE_TQPS_RSS_INFO_LEN		6
#define HCLGE_TQPS_ALLOC_OFFSET		0
#define HCLGE_TQPS_RSS_SIZE_OFFSET	2
#define HCLGE_TQPS_RX_BUFFER_LEN_OFFSET	4

	struct hclge_dev *hdev = vport->back;

	/* get the queue related info */
	memcpy(&resp_msg->data[HCLGE_TQPS_ALLOC_OFFSET],
	       &vport->alloc_tqps, sizeof(u16));
	memcpy(&resp_msg->data[HCLGE_TQPS_RSS_SIZE_OFFSET],
	       &vport->nic.kinfo.rss_size, sizeof(u16));
	memcpy(&resp_msg->data[HCLGE_TQPS_RX_BUFFER_LEN_OFFSET],
	       &hdev->rx_buf_len, sizeof(u16));
	resp_msg->len = HCLGE_TQPS_RSS_INFO_LEN;
}

static void hclge_get_vf_mac_addr(struct hclge_vport *vport,
				  struct hclge_respond_to_vf_msg *resp_msg)
{
	ether_addr_copy(resp_msg->data, vport->vf_info.mac);
	resp_msg->len = ETH_ALEN;
}

static void hclge_get_vf_queue_depth(struct hclge_vport *vport,
				     struct hclge_respond_to_vf_msg *resp_msg)
{
#define HCLGE_TQPS_DEPTH_INFO_LEN	4
#define HCLGE_TQPS_NUM_TX_DESC_OFFSET	0
#define HCLGE_TQPS_NUM_RX_DESC_OFFSET	2

	struct hclge_dev *hdev = vport->back;

	/* get the queue depth info */
	memcpy(&resp_msg->data[HCLGE_TQPS_NUM_TX_DESC_OFFSET],
	       &hdev->num_tx_desc, sizeof(u16));
	memcpy(&resp_msg->data[HCLGE_TQPS_NUM_RX_DESC_OFFSET],
	       &hdev->num_rx_desc, sizeof(u16));
	resp_msg->len = HCLGE_TQPS_DEPTH_INFO_LEN;
}

static void hclge_get_vf_media_type(struct hclge_vport *vport,
				    struct hclge_respond_to_vf_msg *resp_msg)
{
#define HCLGE_VF_MEDIA_TYPE_OFFSET	0
#define HCLGE_VF_MODULE_TYPE_OFFSET	1
#define HCLGE_VF_MEDIA_TYPE_LENGTH	2

	struct hclge_dev *hdev = vport->back;

	resp_msg->data[HCLGE_VF_MEDIA_TYPE_OFFSET] =
		hdev->hw.mac.media_type;
	resp_msg->data[HCLGE_VF_MODULE_TYPE_OFFSET] =
		hdev->hw.mac.module_type;
	resp_msg->len = HCLGE_VF_MEDIA_TYPE_LENGTH;
}

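/* Payload layout of the HCLGE_MBX_LINK_STAT_CHANGE message built below:
 * msg_data[0-1]: link status (u16)
 * msg_data[2-5]: link speed (u32)
 * msg_data[6-7]: duplex (u16)
 * msg_data[8]:   HCLGE_MBX_PUSH_LINK_STATUS_EN flag
 */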
int hclge_push_vf_link_status(struct hclge_vport *vport)
{
#define HCLGE_VF_LINK_STATE_UP		1U
#define HCLGE_VF_LINK_STATE_DOWN	0U

	struct hclge_dev *hdev = vport->back;
	u16 link_status;
	u8 msg_data[9];
	u16 duplex;

	/* mac.link can only be 0 or 1 */
	switch (vport->vf_info.link_state) {
	case IFLA_VF_LINK_STATE_ENABLE:
		link_status = HCLGE_VF_LINK_STATE_UP;
		break;
	case IFLA_VF_LINK_STATE_DISABLE:
		link_status = HCLGE_VF_LINK_STATE_DOWN;
		break;
	case IFLA_VF_LINK_STATE_AUTO:
	default:
		link_status = (u16)hdev->hw.mac.link;
		break;
	}

	duplex = hdev->hw.mac.duplex;
	memcpy(&msg_data[0], &link_status, sizeof(u16));
	memcpy(&msg_data[2], &hdev->hw.mac.speed, sizeof(u32));
	memcpy(&msg_data[6], &duplex, sizeof(u16));
	msg_data[8] = HCLGE_MBX_PUSH_LINK_STATUS_EN;

	/* send this requested info to VF */
	return hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data),
				  HCLGE_MBX_LINK_STAT_CHANGE, vport->vport_id);
}

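/* Reply to a VF query for link modes (HCLGE_MBX_LINK_STAT_MODE):
 * msg_data[0]: echoes the VF's request; HCLGE_SUPPORTED selects the
 *		supported mask, anything else the advertised mask.
 * msg_data[2]: start of the first word of the selected link mode bitmap,
 *		sizeof(unsigned long) bytes.
 */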
static void hclge_get_link_mode(struct hclge_vport *vport,
				struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
#define HCLGE_SUPPORTED	1
	struct hclge_dev *hdev = vport->back;
	unsigned long advertising;
	unsigned long supported;
	unsigned long send_data;
	u8 msg_data[10] = {};
	u8 dest_vfid;

	advertising = hdev->hw.mac.advertising[0];
	supported = hdev->hw.mac.supported[0];
	dest_vfid = mbx_req->mbx_src_vfid;
	msg_data[0] = mbx_req->msg.data[0];

	send_data = msg_data[0] == HCLGE_SUPPORTED ? supported : advertising;

	memcpy(&msg_data[2], &send_data, sizeof(unsigned long));
	hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data),
			   HCLGE_MBX_LINK_STAT_MODE, dest_vfid);
}

static int hclge_mbx_reset_vf_queue(struct hclge_vport *vport,
				    struct hclge_mbx_vf_to_pf_cmd *mbx_req,
				    struct hclge_respond_to_vf_msg *resp_msg)
{
#define HCLGE_RESET_ALL_QUEUE_DONE	1U
	struct hnae3_handle *handle = &vport->nic;
	struct hclge_dev *hdev = vport->back;
	u16 queue_id;
	int ret;

	memcpy(&queue_id, mbx_req->msg.data, sizeof(queue_id));
	resp_msg->data[0] = HCLGE_RESET_ALL_QUEUE_DONE;
	resp_msg->len = sizeof(u8);

	/* PF resets all of a VF's queues at once, so it is unnecessary
	 * to reset queues when queue_id > 0; just return success.
	 */
	if (queue_id > 0)
		return 0;

	ret = hclge_reset_tqp(handle);
	if (ret)
		dev_err(&hdev->pdev->dev, "failed to reset vf %u queue, ret = %d\n",
			vport->vport_id - HCLGE_VF_VPORT_START_NUM, ret);

	return ret;
}

static int hclge_reset_vf(struct hclge_vport *vport)
{
	struct hclge_dev *hdev = vport->back;

	dev_warn(&hdev->pdev->dev, "PF received VF reset request from VF %u!",
		 vport->vport_id);

	return hclge_func_reset_cmd(hdev, vport->vport_id);
}

static void hclge_vf_keep_alive(struct hclge_vport *vport)
{
	vport->last_active_jiffies = jiffies;
}

static int hclge_set_vf_mtu(struct hclge_vport *vport,
			    struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
	u32 mtu;

	memcpy(&mtu, mbx_req->msg.data, sizeof(mtu));

	return hclge_set_vport_mtu(vport, mtu);
}

static void hclge_get_queue_id_in_pf(struct hclge_vport *vport,
				     struct hclge_mbx_vf_to_pf_cmd *mbx_req,
				     struct hclge_respond_to_vf_msg *resp_msg)
{
	u16 queue_id, qid_in_pf;

	memcpy(&queue_id, mbx_req->msg.data, sizeof(queue_id));
	qid_in_pf = hclge_covert_handle_qid_global(&vport->nic, queue_id);
	memcpy(resp_msg->data, &qid_in_pf, sizeof(qid_in_pf));
	resp_msg->len = sizeof(qid_in_pf);
}

static void hclge_get_rss_key(struct hclge_vport *vport,
			      struct hclge_mbx_vf_to_pf_cmd *mbx_req,
			      struct hclge_respond_to_vf_msg *resp_msg)
{
#define HCLGE_RSS_MBX_RESP_LEN	8
	struct hclge_dev *hdev = vport->back;
	u8 index;

	index = mbx_req->msg.data[0];

	/* Check the query index of rss_hash_key from VF, make sure it does
	 * not exceed the size of rss_hash_key.
	 */
	if (((index + 1) * HCLGE_RSS_MBX_RESP_LEN) >
	      sizeof(vport[0].rss_hash_key)) {
		dev_warn(&hdev->pdev->dev,
			 "failed to get the rss hash key, the index(%u) invalid !\n",
			 index);
		return;
	}

	memcpy(resp_msg->data,
	       &hdev->vport[0].rss_hash_key[index * HCLGE_RSS_MBX_RESP_LEN],
	       HCLGE_RSS_MBX_RESP_LEN);
	resp_msg->len = HCLGE_RSS_MBX_RESP_LEN;
}

static void hclge_link_fail_parse(struct hclge_dev *hdev, u8 link_fail_code)
{
	switch (link_fail_code) {
	case HCLGE_LF_REF_CLOCK_LOST:
		dev_warn(&hdev->pdev->dev, "Reference clock lost!\n");
		break;
	case HCLGE_LF_XSFP_TX_DISABLE:
		dev_warn(&hdev->pdev->dev, "SFP tx is disabled!\n");
		break;
	case HCLGE_LF_XSFP_ABSENT:
		dev_warn(&hdev->pdev->dev, "SFP is absent!\n");
		break;
	default:
		break;
	}
}

static void hclge_handle_link_change_event(struct hclge_dev *hdev,
					   struct hclge_mbx_vf_to_pf_cmd *req)
{
	hclge_task_schedule(hdev, 0);

	if (!req->msg.subcode)
		hclge_link_fail_parse(hdev, req->msg.data[0]);
}

static bool hclge_cmd_crq_empty(struct hclge_hw *hw)
{
	u32 tail = hclge_read_dev(hw, HCLGE_NIC_CRQ_TAIL_REG);

	return tail == hw->cmq.crq.next_to_use;
}

static void hclge_handle_ncsi_error(struct hclge_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = hdev->ae_dev;

	ae_dev->ops->set_default_reset_request(ae_dev, HNAE3_GLOBAL_RESET);
	dev_warn(&hdev->pdev->dev, "requesting reset due to NCSI error\n");
	ae_dev->ops->reset_event(hdev->pdev, NULL);
}

static void hclge_handle_vf_tbl(struct hclge_vport *vport,
				struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_vf_vlan_cfg *msg_cmd;

	msg_cmd = (struct hclge_vf_vlan_cfg *)&mbx_req->msg;
	if (msg_cmd->subcode == HCLGE_MBX_VPORT_LIST_CLEAR) {
		hclge_rm_vport_all_mac_table(vport, true, HCLGE_MAC_ADDR_UC);
		hclge_rm_vport_all_mac_table(vport, true, HCLGE_MAC_ADDR_MC);
		hclge_rm_vport_all_vlan_table(vport, true);
	} else {
		dev_warn(&hdev->pdev->dev, "Invalid cmd(%u)\n",
			 msg_cmd->subcode);
	}
}

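/* hclge_mbx_handler: drain the VF-to-PF command receive queue (CRQ).
 * Each valid descriptor is dispatched on req->msg.code; when the VF asked
 * for a response and the code is below HCLGE_MBX_GET_VF_FLR_STATUS, a
 * synchronous reply is generated via hclge_gen_resp_to_vf().
 */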
void hclge_mbx_handler(struct hclge_dev *hdev)
{
	struct hclge_cmq_ring *crq = &hdev->hw.cmq.crq;
	struct hclge_respond_to_vf_msg resp_msg;
	struct hclge_mbx_vf_to_pf_cmd *req;
	struct hclge_vport *vport;
	struct hclge_desc *desc;
	bool is_del = false;
	unsigned int flag;
	int ret = 0;

	/* handle all the mailbox requests in the queue */
	while (!hclge_cmd_crq_empty(&hdev->hw)) {
		if (test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state)) {
			dev_warn(&hdev->pdev->dev,
				 "command queue needs re-initializing\n");
			return;
		}

		desc = &crq->desc[crq->next_to_use];
		req = (struct hclge_mbx_vf_to_pf_cmd *)desc->data;

		flag = le16_to_cpu(crq->desc[crq->next_to_use].flag);
		if (unlikely(!hnae3_get_bit(flag, HCLGE_CMDQ_RX_OUTVLD_B))) {
			dev_warn(&hdev->pdev->dev,
				 "dropped invalid mailbox message, code = %u\n",
				 req->msg.code);

			/* dropping/not processing this invalid message */
			crq->desc[crq->next_to_use].flag = 0;
			hclge_mbx_ring_ptr_move_crq(crq);
			continue;
		}

		vport = &hdev->vport[req->mbx_src_vfid];

		trace_hclge_pf_mbx_get(hdev, req);

		/* clear the resp_msg before processing every mailbox message */
		memset(&resp_msg, 0, sizeof(resp_msg));

		switch (req->msg.code) {
		case HCLGE_MBX_MAP_RING_TO_VECTOR:
			ret = hclge_map_unmap_ring_to_vf_vector(vport, true,
								req);
			break;
		case HCLGE_MBX_UNMAP_RING_TO_VECTOR:
			ret = hclge_map_unmap_ring_to_vf_vector(vport, false,
								req);
			break;
		case HCLGE_MBX_SET_PROMISC_MODE:
			hclge_set_vf_promisc_mode(vport, req);
			break;
		case HCLGE_MBX_SET_UNICAST:
			ret = hclge_set_vf_uc_mac_addr(vport, req);
			if (ret)
				dev_err(&hdev->pdev->dev,
					"PF fail(%d) to set VF UC MAC Addr\n",
					ret);
			break;
		case HCLGE_MBX_SET_MULTICAST:
			ret = hclge_set_vf_mc_mac_addr(vport, req);
			if (ret)
				dev_err(&hdev->pdev->dev,
					"PF fail(%d) to set VF MC MAC Addr\n",
					ret);
			break;
		case HCLGE_MBX_SET_VLAN:
			ret = hclge_set_vf_vlan_cfg(vport, req, &resp_msg);
			if (ret)
				dev_err(&hdev->pdev->dev,
					"PF failed(%d) to config VF's VLAN\n",
					ret);
			break;
		case HCLGE_MBX_SET_ALIVE:
			ret = hclge_set_vf_alive(vport, req);
			if (ret)
				dev_err(&hdev->pdev->dev,
					"PF failed(%d) to set VF's ALIVE\n",
					ret);
			break;
		case HCLGE_MBX_GET_QINFO:
			hclge_get_vf_queue_info(vport, &resp_msg);
			break;
		case HCLGE_MBX_GET_QDEPTH:
			hclge_get_vf_queue_depth(vport, &resp_msg);
			break;
		case HCLGE_MBX_GET_BASIC_INFO:
			hclge_get_basic_info(vport, &resp_msg);
			break;
		case HCLGE_MBX_GET_LINK_STATUS:
			ret = hclge_push_vf_link_status(vport);
			if (ret)
				dev_err(&hdev->pdev->dev,
					"failed to inform link stat to VF, ret = %d\n",
					ret);
			break;
		case HCLGE_MBX_QUEUE_RESET:
			ret = hclge_mbx_reset_vf_queue(vport, req, &resp_msg);
			break;
		case HCLGE_MBX_RESET:
			ret = hclge_reset_vf(vport);
			break;
		case HCLGE_MBX_KEEP_ALIVE:
			hclge_vf_keep_alive(vport);
			break;
		case HCLGE_MBX_SET_MTU:
			ret = hclge_set_vf_mtu(vport, req);
			if (ret)
				dev_err(&hdev->pdev->dev,
					"VF fail(%d) to set mtu\n", ret);
			break;
		case HCLGE_MBX_GET_QID_IN_PF:
			hclge_get_queue_id_in_pf(vport, req, &resp_msg);
			break;
		case HCLGE_MBX_GET_RSS_KEY:
			hclge_get_rss_key(vport, req, &resp_msg);
			break;
		case HCLGE_MBX_GET_LINK_MODE:
			hclge_get_link_mode(vport, req);
			break;
		case HCLGE_MBX_GET_VF_FLR_STATUS:
		case HCLGE_MBX_VF_UNINIT:
			is_del = req->msg.code == HCLGE_MBX_VF_UNINIT;
			hclge_rm_vport_all_mac_table(vport, is_del,
						     HCLGE_MAC_ADDR_UC);
			hclge_rm_vport_all_mac_table(vport, is_del,
						     HCLGE_MAC_ADDR_MC);
			hclge_rm_vport_all_vlan_table(vport, is_del);
			break;
		case HCLGE_MBX_GET_MEDIA_TYPE:
			hclge_get_vf_media_type(vport, &resp_msg);
			break;
		case HCLGE_MBX_PUSH_LINK_STATUS:
			hclge_handle_link_change_event(hdev, req);
			break;
		case HCLGE_MBX_GET_MAC_ADDR:
			hclge_get_vf_mac_addr(vport, &resp_msg);
			break;
		case HCLGE_MBX_NCSI_ERROR:
			hclge_handle_ncsi_error(hdev);
			break;
		case HCLGE_MBX_HANDLE_VF_TBL:
			hclge_handle_vf_tbl(vport, req);
			break;
		default:
			dev_err(&hdev->pdev->dev,
				"un-supported mailbox message, code = %u\n",
				req->msg.code);
			break;
		}

		/* PF driver should not reply IMP */
		if (hnae3_get_bit(req->mbx_need_resp, HCLGE_MBX_NEED_RESP_B) &&
		    req->msg.code < HCLGE_MBX_GET_VF_FLR_STATUS) {
			resp_msg.status = ret;
			hclge_gen_resp_to_vf(vport, req, &resp_msg);
		}

		crq->desc[crq->next_to_use].flag = 0;
		hclge_mbx_ring_ptr_move_crq(crq);

		/* reinitialize ret after completing the mbx message processing */
		ret = 0;
	}

	/* Write back CMDQ_RQ header pointer, M7 needs this pointer */
	hclge_write_dev(&hdev->hw, HCLGE_NIC_CRQ_HEAD_REG, crq->next_to_use);
}