// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2021-2021 Hisilicon Limited.

#include "hnae3.h"
#include "hclge_comm_cmd.h"

static void hclge_comm_cmd_config_regs(struct hclge_comm_hw *hw,
				       struct hclge_comm_cmq_ring *ring)
{
	dma_addr_t dma = ring->desc_dma_addr;
	u32 reg_val;

	if (ring->ring_type == HCLGE_COMM_TYPE_CSQ) {
		hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_BASEADDR_L_REG,
				     lower_32_bits(dma));
		hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_BASEADDR_H_REG,
				     upper_32_bits(dma));
		reg_val = hclge_comm_read_dev(hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG);
		reg_val &= HCLGE_COMM_NIC_SW_RST_RDY;
		reg_val |= ring->desc_num >> HCLGE_COMM_NIC_CMQ_DESC_NUM_S;
		hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG, reg_val);
		hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_HEAD_REG, 0);
		hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_TAIL_REG, 0);
	} else {
		hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_BASEADDR_L_REG,
				     lower_32_bits(dma));
		hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_BASEADDR_H_REG,
				     upper_32_bits(dma));
		reg_val = ring->desc_num >> HCLGE_COMM_NIC_CMQ_DESC_NUM_S;
		hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_DEPTH_REG, reg_val);
		hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_HEAD_REG, 0);
		hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_TAIL_REG, 0);
	}
}

void hclge_comm_cmd_init_regs(struct hclge_comm_hw *hw)
{
	hclge_comm_cmd_config_regs(hw, &hw->cmq.csq);
	hclge_comm_cmd_config_regs(hw, &hw->cmq.crq);
}

void hclge_comm_cmd_reuse_desc(struct hclge_desc *desc, bool is_read)
{
	desc->flag = cpu_to_le16(HCLGE_COMM_CMD_FLAG_NO_INTR |
				 HCLGE_COMM_CMD_FLAG_IN);
	if (is_read)
		desc->flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_WR);
	else
		desc->flag &= cpu_to_le16(~HCLGE_COMM_CMD_FLAG_WR);
}

static void hclge_comm_set_default_capability(struct hnae3_ae_dev *ae_dev,
					      bool is_pf)
{
	set_bit(HNAE3_DEV_SUPPORT_GRO_B, ae_dev->caps);
	if (is_pf) {
		set_bit(HNAE3_DEV_SUPPORT_FD_B, ae_dev->caps);
		set_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps);
		set_bit(HNAE3_DEV_SUPPORT_PAUSE_B, ae_dev->caps);
	}
}

void hclge_comm_cmd_setup_basic_desc(struct hclge_desc *desc,
				     enum hclge_opcode_type opcode,
				     bool is_read)
{
	memset(desc, 0, sizeof(*desc));
	desc->opcode = cpu_to_le16(opcode);
	desc->flag = cpu_to_le16(HCLGE_COMM_CMD_FLAG_NO_INTR |
				 HCLGE_COMM_CMD_FLAG_IN);

	if (is_read)
		desc->flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_WR);
}

int hclge_comm_firmware_compat_config(struct hnae3_ae_dev *ae_dev,
				      struct hclge_comm_hw *hw, bool en)
{
	struct hclge_comm_firmware_compat_cmd *req;
	struct hclge_desc desc;
	u32 compat = 0;

	hclge_comm_cmd_setup_basic_desc(&desc, HCLGE_OPC_IMP_COMPAT_CFG, false);

	if (en) {
		req = (struct hclge_comm_firmware_compat_cmd *)desc.data;

		hnae3_set_bit(compat, HCLGE_COMM_LINK_EVENT_REPORT_EN_B, 1);
		hnae3_set_bit(compat, HCLGE_COMM_NCSI_ERROR_REPORT_EN_B, 1);
		if (hclge_comm_dev_phy_imp_supported(ae_dev))
			hnae3_set_bit(compat, HCLGE_COMM_PHY_IMP_EN_B, 1);
		hnae3_set_bit(compat, HCLGE_COMM_MAC_STATS_EXT_EN_B, 1);
		hnae3_set_bit(compat, HCLGE_COMM_SYNC_RX_RING_HEAD_EN_B, 1);
		hnae3_set_bit(compat, HCLGE_COMM_LLRS_FEC_EN_B, 1);

		req->compat = cpu_to_le32(compat);
	}

	return hclge_comm_cmd_send(hw, &desc, 1);
}

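/* Free the DMA-coherent descriptor array of a command queue ring, if it
 * has been allocated.
 */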
void hclge_comm_free_cmd_desc(struct hclge_comm_cmq_ring *ring)
{
	int size = ring->desc_num * sizeof(struct hclge_desc);

	if (!ring->desc)
		return;

	dma_free_coherent(&ring->pdev->dev, size,
			  ring->desc, ring->desc_dma_addr);
	ring->desc = NULL;
}

static int hclge_comm_alloc_cmd_desc(struct hclge_comm_cmq_ring *ring)
{
	int size = ring->desc_num * sizeof(struct hclge_desc);

	ring->desc = dma_alloc_coherent(&ring->pdev->dev,
					size, &ring->desc_dma_addr, GFP_KERNEL);
	if (!ring->desc)
		return -ENOMEM;

	return 0;
}

static __le32 hclge_comm_build_api_caps(void)
{
	u32 api_caps = 0;

	hnae3_set_bit(api_caps, HCLGE_COMM_API_CAP_FLEX_RSS_TBL_B, 1);

	return cpu_to_le32(api_caps);
}

static const struct hclge_comm_caps_bit_map hclge_pf_cmd_caps[] = {
	{HCLGE_COMM_CAP_UDP_GSO_B, HNAE3_DEV_SUPPORT_UDP_GSO_B},
	{HCLGE_COMM_CAP_PTP_B, HNAE3_DEV_SUPPORT_PTP_B},
	{HCLGE_COMM_CAP_INT_QL_B, HNAE3_DEV_SUPPORT_INT_QL_B},
	{HCLGE_COMM_CAP_TQP_TXRX_INDEP_B, HNAE3_DEV_SUPPORT_TQP_TXRX_INDEP_B},
	{HCLGE_COMM_CAP_HW_TX_CSUM_B, HNAE3_DEV_SUPPORT_HW_TX_CSUM_B},
	{HCLGE_COMM_CAP_UDP_TUNNEL_CSUM_B, HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B},
	{HCLGE_COMM_CAP_FD_FORWARD_TC_B, HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B},
	{HCLGE_COMM_CAP_FEC_B, HNAE3_DEV_SUPPORT_FEC_B},
	{HCLGE_COMM_CAP_PAUSE_B, HNAE3_DEV_SUPPORT_PAUSE_B},
	{HCLGE_COMM_CAP_PHY_IMP_B, HNAE3_DEV_SUPPORT_PHY_IMP_B},
	{HCLGE_COMM_CAP_QB_B, HNAE3_DEV_SUPPORT_QB_B},
	{HCLGE_COMM_CAP_TX_PUSH_B, HNAE3_DEV_SUPPORT_TX_PUSH_B},
	{HCLGE_COMM_CAP_RAS_IMP_B, HNAE3_DEV_SUPPORT_RAS_IMP_B},
	{HCLGE_COMM_CAP_RXD_ADV_LAYOUT_B, HNAE3_DEV_SUPPORT_RXD_ADV_LAYOUT_B},
	{HCLGE_COMM_CAP_PORT_VLAN_BYPASS_B,
	 HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B},
	{HCLGE_COMM_CAP_PORT_VLAN_BYPASS_B, HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B},
	{HCLGE_COMM_CAP_CQ_B, HNAE3_DEV_SUPPORT_CQ_B},
	{HCLGE_COMM_CAP_GRO_B, HNAE3_DEV_SUPPORT_GRO_B},
	{HCLGE_COMM_CAP_FD_B, HNAE3_DEV_SUPPORT_FD_B},
	{HCLGE_COMM_CAP_FEC_STATS_B, HNAE3_DEV_SUPPORT_FEC_STATS_B},
	{HCLGE_COMM_CAP_LANE_NUM_B, HNAE3_DEV_SUPPORT_LANE_NUM_B},
};

static const struct hclge_comm_caps_bit_map hclge_vf_cmd_caps[] = {
	{HCLGE_COMM_CAP_UDP_GSO_B, HNAE3_DEV_SUPPORT_UDP_GSO_B},
	{HCLGE_COMM_CAP_INT_QL_B, HNAE3_DEV_SUPPORT_INT_QL_B},
	{HCLGE_COMM_CAP_TQP_TXRX_INDEP_B, HNAE3_DEV_SUPPORT_TQP_TXRX_INDEP_B},
	{HCLGE_COMM_CAP_HW_TX_CSUM_B, HNAE3_DEV_SUPPORT_HW_TX_CSUM_B},
	{HCLGE_COMM_CAP_UDP_TUNNEL_CSUM_B, HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B},
	{HCLGE_COMM_CAP_QB_B, HNAE3_DEV_SUPPORT_QB_B},
	{HCLGE_COMM_CAP_TX_PUSH_B, HNAE3_DEV_SUPPORT_TX_PUSH_B},
	{HCLGE_COMM_CAP_RXD_ADV_LAYOUT_B, HNAE3_DEV_SUPPORT_RXD_ADV_LAYOUT_B},
	{HCLGE_COMM_CAP_CQ_B, HNAE3_DEV_SUPPORT_CQ_B},
	{HCLGE_COMM_CAP_GRO_B, HNAE3_DEV_SUPPORT_GRO_B},
};

static void
hclge_comm_parse_capability(struct hnae3_ae_dev *ae_dev, bool is_pf,
			    struct hclge_comm_query_version_cmd *cmd)
{
	const struct hclge_comm_caps_bit_map *caps_map =
		is_pf ? hclge_pf_cmd_caps : hclge_vf_cmd_caps;
	u32 size = is_pf ? ARRAY_SIZE(hclge_pf_cmd_caps) :
			   ARRAY_SIZE(hclge_vf_cmd_caps);
	u32 caps, i;

	caps = le32_to_cpu(cmd->caps[0]);
	for (i = 0; i < size; i++)
		if (hnae3_get_bit(caps, caps_map[i].imp_bit))
			set_bit(caps_map[i].local_bit, ae_dev->caps);
}

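/* Allocate the descriptor ring for the CSQ (command send queue) or the
 * CRQ (command receive queue).
 */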
int hclge_comm_alloc_cmd_queue(struct hclge_comm_hw *hw, int ring_type)
{
	struct hclge_comm_cmq_ring *ring =
		(ring_type == HCLGE_COMM_TYPE_CSQ) ? &hw->cmq.csq :
						     &hw->cmq.crq;
	int ret;

	ring->ring_type = ring_type;

	ret = hclge_comm_alloc_cmd_desc(ring);
	if (ret)
		dev_err(&ring->pdev->dev, "descriptor %s alloc error %d\n",
			(ring_type == HCLGE_COMM_TYPE_CSQ) ? "CSQ" : "CRQ",
			ret);

	return ret;
}

int hclge_comm_cmd_query_version_and_capability(struct hnae3_ae_dev *ae_dev,
						struct hclge_comm_hw *hw,
						u32 *fw_version, bool is_pf)
{
	struct hclge_comm_query_version_cmd *resp;
	struct hclge_desc desc;
	int ret;

	hclge_comm_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FW_VER, true);
	resp = (struct hclge_comm_query_version_cmd *)desc.data;
	resp->api_caps = hclge_comm_build_api_caps();

	ret = hclge_comm_cmd_send(hw, &desc, 1);
	if (ret)
		return ret;

	*fw_version = le32_to_cpu(resp->firmware);

	ae_dev->dev_version = le32_to_cpu(resp->hardware) <<
			      HNAE3_PCI_REVISION_BIT_SIZE;
	ae_dev->dev_version |= ae_dev->pdev->revision;

	if (ae_dev->dev_version == HNAE3_DEVICE_VERSION_V2) {
		hclge_comm_set_default_capability(ae_dev, is_pf);
		return 0;
	}

	hclge_comm_parse_capability(ae_dev, is_pf, resp);

	return 0;
}

static const u16 spec_opcode[] = { HCLGE_OPC_STATS_64_BIT,
				   HCLGE_OPC_STATS_32_BIT,
				   HCLGE_OPC_STATS_MAC,
				   HCLGE_OPC_STATS_MAC_ALL,
				   HCLGE_OPC_QUERY_32_BIT_REG,
				   HCLGE_OPC_QUERY_64_BIT_REG,
				   HCLGE_QUERY_CLEAR_MPF_RAS_INT,
				   HCLGE_QUERY_CLEAR_PF_RAS_INT,
				   HCLGE_QUERY_CLEAR_ALL_MPF_MSIX_INT,
				   HCLGE_QUERY_CLEAR_ALL_PF_MSIX_INT,
				   HCLGE_QUERY_ALL_ERR_INFO };

static bool hclge_comm_is_special_opcode(u16 opcode)
{
	/* these commands have several descriptors,
	 * and use the first one to save opcode and return value
	 */
	u32 i;

	for (i = 0; i < ARRAY_SIZE(spec_opcode); i++)
		if (spec_opcode[i] == opcode)
			return true;

	return false;
}

static int hclge_comm_ring_space(struct hclge_comm_cmq_ring *ring)
{
	int ntc = ring->next_to_clean;
	int ntu = ring->next_to_use;
	int used = (ntu - ntc + ring->desc_num) % ring->desc_num;

	return ring->desc_num - used - 1;
}

static void hclge_comm_cmd_copy_desc(struct hclge_comm_hw *hw,
				     struct hclge_desc *desc, int num)
{
	struct hclge_desc *desc_to_use;
	int handle = 0;

	while (handle < num) {
		desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use];
		*desc_to_use = desc[handle];
		(hw->cmq.csq.next_to_use)++;
		if (hw->cmq.csq.next_to_use >= hw->cmq.csq.desc_num)
			hw->cmq.csq.next_to_use = 0;
		handle++;
	}
}

static int hclge_comm_is_valid_csq_clean_head(struct hclge_comm_cmq_ring *ring,
					      int head)
{
	int ntc = ring->next_to_clean;
	int ntu = ring->next_to_use;

	if (ntu > ntc)
		return head >= ntc && head <= ntu;

	return head >= ntc || head <= ntu;
}

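/* Reclaim CSQ descriptors that the firmware has consumed. Returns the
 * number of cleaned descriptors, or -EIO and disables the command queue
 * when the hardware head pointer is outside the valid window.
 */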
static int hclge_comm_cmd_csq_clean(struct hclge_comm_hw *hw)
{
	struct hclge_comm_cmq_ring *csq = &hw->cmq.csq;
	int clean;
	u32 head;

	head = hclge_comm_read_dev(hw, HCLGE_COMM_NIC_CSQ_HEAD_REG);
	rmb(); /* Make sure head is ready before touching any data */

	if (!hclge_comm_is_valid_csq_clean_head(csq, head)) {
		dev_warn(&hw->cmq.csq.pdev->dev, "wrong cmd head (%u, %d-%d)\n",
			 head, csq->next_to_use, csq->next_to_clean);
		dev_warn(&hw->cmq.csq.pdev->dev,
			 "Disabling any further commands to IMP firmware\n");
		set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hw->comm_state);
		dev_warn(&hw->cmq.csq.pdev->dev,
			 "IMP firmware watchdog reset soon expected!\n");
		return -EIO;
	}

	clean = (head - csq->next_to_clean + csq->desc_num) % csq->desc_num;
	csq->next_to_clean = head;

	return clean;
}

static int hclge_comm_cmd_csq_done(struct hclge_comm_hw *hw)
{
	u32 head = hclge_comm_read_dev(hw, HCLGE_COMM_NIC_CSQ_HEAD_REG);

	return head == hw->cmq.csq.next_to_use;
}

static void hclge_comm_wait_for_resp(struct hclge_comm_hw *hw,
				     bool *is_completed)
{
	u32 timeout = 0;

	do {
		if (hclge_comm_cmd_csq_done(hw)) {
			*is_completed = true;
			break;
		}
		udelay(1);
		timeout++;
	} while (timeout < hw->cmq.tx_timeout);
}

static int hclge_comm_cmd_convert_err_code(u16 desc_ret)
{
	struct hclge_comm_errcode hclge_comm_cmd_errcode[] = {
		{ HCLGE_COMM_CMD_EXEC_SUCCESS, 0 },
		{ HCLGE_COMM_CMD_NO_AUTH, -EPERM },
		{ HCLGE_COMM_CMD_NOT_SUPPORTED, -EOPNOTSUPP },
		{ HCLGE_COMM_CMD_QUEUE_FULL, -EXFULL },
		{ HCLGE_COMM_CMD_NEXT_ERR, -ENOSR },
		{ HCLGE_COMM_CMD_UNEXE_ERR, -ENOTBLK },
		{ HCLGE_COMM_CMD_PARA_ERR, -EINVAL },
		{ HCLGE_COMM_CMD_RESULT_ERR, -ERANGE },
		{ HCLGE_COMM_CMD_TIMEOUT, -ETIME },
		{ HCLGE_COMM_CMD_HILINK_ERR, -ENOLINK },
		{ HCLGE_COMM_CMD_QUEUE_ILLEGAL, -ENXIO },
		{ HCLGE_COMM_CMD_INVALID, -EBADR },
	};
	u32 errcode_count = ARRAY_SIZE(hclge_comm_cmd_errcode);
	u32 i;

	for (i = 0; i < errcode_count; i++)
		if (hclge_comm_cmd_errcode[i].imp_errcode == desc_ret)
			return hclge_comm_cmd_errcode[i].common_errno;

	return -EIO;
}

static int hclge_comm_cmd_check_retval(struct hclge_comm_hw *hw,
				       struct hclge_desc *desc, int num,
				       int ntc)
{
	u16 opcode, desc_ret;
	int handle;

	opcode = le16_to_cpu(desc[0].opcode);
	for (handle = 0; handle < num; handle++) {
		desc[handle] = hw->cmq.csq.desc[ntc];
		ntc++;
		if (ntc >= hw->cmq.csq.desc_num)
			ntc = 0;
	}
	if (likely(!hclge_comm_is_special_opcode(opcode)))
		desc_ret = le16_to_cpu(desc[num - 1].retval);
	else
		desc_ret = le16_to_cpu(desc[0].retval);

	hw->cmq.last_status = desc_ret;

	return hclge_comm_cmd_convert_err_code(desc_ret);
}

static int hclge_comm_cmd_check_result(struct hclge_comm_hw *hw,
				       struct hclge_desc *desc,
				       int num, int ntc)
{
	bool is_completed = false;
	int handle, ret;

	/* If the command is sync, wait for the firmware to write back;
	 * if multiple descriptors are to be sent, use the first one to check.
	 */
	if (HCLGE_COMM_SEND_SYNC(le16_to_cpu(desc->flag)))
		hclge_comm_wait_for_resp(hw, &is_completed);

	if (!is_completed)
		ret = -EBADE;
	else
		ret = hclge_comm_cmd_check_retval(hw, desc, num, ntc);

	/* Clean the command send queue */
	handle = hclge_comm_cmd_csq_clean(hw);
	if (handle < 0)
		ret = handle;
	else if (handle != num)
		dev_warn(&hw->cmq.csq.pdev->dev,
			 "cleaned %d, need to clean %d\n", handle, num);

	return ret;
}

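/* A minimal usage sketch for hclge_comm_cmd_send() (illustrative only;
 * it assumes a populated struct hclge_comm_hw and reuses the firmware
 * version query opcode already used in this file):
 *
 *	struct hclge_desc desc;
 *	int ret;
 *
 *	hclge_comm_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FW_VER, true);
 *	ret = hclge_comm_cmd_send(hw, &desc, 1);
 *	if (ret)
 *		dev_err(&hw->cmq.csq.pdev->dev, "cmd send failed %d\n", ret);
 */
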
/**
 * hclge_comm_cmd_send - send command to command queue
 * @hw: pointer to the hw struct
 * @desc: prefilled descriptor for describing the command
 * @num: the number of descriptors to be sent
 *
 * This is the main send command for the command queue. It copies the
 * descriptors into the CSQ, rings the doorbell, waits for completion
 * and cleans the queue.
 */
int hclge_comm_cmd_send(struct hclge_comm_hw *hw, struct hclge_desc *desc,
			int num)
{
	struct hclge_comm_cmq_ring *csq = &hw->cmq.csq;
	int ret;
	int ntc;

	spin_lock_bh(&csq->lock);

	if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hw->comm_state)) {
		spin_unlock_bh(&csq->lock);
		return -EBUSY;
	}

	if (num > hclge_comm_ring_space(csq)) {
		/* If the CMDQ ring is full, the SW HEAD and HW HEAD may
		 * differ, so the SW HEAD pointer csq->next_to_clean needs
		 * to be updated.
		 */
		csq->next_to_clean =
			hclge_comm_read_dev(hw, HCLGE_COMM_NIC_CSQ_HEAD_REG);
		spin_unlock_bh(&csq->lock);
		return -EBUSY;
	}

	/* Record the location of desc in the ring for this time
	 * which will be used for hardware to write back
	 */
	ntc = csq->next_to_use;

	hclge_comm_cmd_copy_desc(hw, desc, num);

	/* Write to hardware */
	hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_TAIL_REG,
			     csq->next_to_use);

	ret = hclge_comm_cmd_check_result(hw, desc, num, ntc);

	spin_unlock_bh(&csq->lock);

	return ret;
}

static void hclge_comm_cmd_uninit_regs(struct hclge_comm_hw *hw)
{
	hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_BASEADDR_L_REG, 0);
	hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_BASEADDR_H_REG, 0);
	hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG, 0);
	hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_HEAD_REG, 0);
	hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_TAIL_REG, 0);
	hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_BASEADDR_L_REG, 0);
	hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_BASEADDR_H_REG, 0);
	hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_DEPTH_REG, 0);
	hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_HEAD_REG, 0);
	hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_TAIL_REG, 0);
}

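/* Disable the command queue, reset its registers and release the
 * descriptor memory of both rings; the firmware compatible features are
 * switched off first.
 */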
void hclge_comm_cmd_uninit(struct hnae3_ae_dev *ae_dev,
			   struct hclge_comm_hw *hw)
{
	struct hclge_comm_cmq *cmdq = &hw->cmq;

	hclge_comm_firmware_compat_config(ae_dev, hw, false);
	set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hw->comm_state);

	/* wait to ensure that the firmware completes any possible
	 * left-over commands.
	 */
	msleep(HCLGE_COMM_CMDQ_CLEAR_WAIT_TIME);
	spin_lock_bh(&cmdq->csq.lock);
	spin_lock(&cmdq->crq.lock);
	hclge_comm_cmd_uninit_regs(hw);
	spin_unlock(&cmdq->crq.lock);
	spin_unlock_bh(&cmdq->csq.lock);

	hclge_comm_free_cmd_desc(&cmdq->csq);
	hclge_comm_free_cmd_desc(&cmdq->crq);
}

int hclge_comm_cmd_queue_init(struct pci_dev *pdev, struct hclge_comm_hw *hw)
{
	struct hclge_comm_cmq *cmdq = &hw->cmq;
	int ret;

	/* Setup the lock for the command queue */
	spin_lock_init(&cmdq->csq.lock);
	spin_lock_init(&cmdq->crq.lock);

	cmdq->csq.pdev = pdev;
	cmdq->crq.pdev = pdev;

	/* Setup the queue entries for the command queue */
	cmdq->csq.desc_num = HCLGE_COMM_NIC_CMQ_DESC_NUM;
	cmdq->crq.desc_num = HCLGE_COMM_NIC_CMQ_DESC_NUM;

	/* Setup Tx write back timeout */
	cmdq->tx_timeout = HCLGE_COMM_CMDQ_TX_TIMEOUT;

	/* Setup queue rings */
	ret = hclge_comm_alloc_cmd_queue(hw, HCLGE_COMM_TYPE_CSQ);
	if (ret) {
		dev_err(&pdev->dev, "CSQ ring setup error %d\n", ret);
		return ret;
	}

	ret = hclge_comm_alloc_cmd_queue(hw, HCLGE_COMM_TYPE_CRQ);
	if (ret) {
		dev_err(&pdev->dev, "CRQ ring setup error %d\n", ret);
		goto err_csq;
	}

	return 0;

err_csq:
	hclge_comm_free_cmd_desc(&hw->cmq.csq);
	return ret;
}

int hclge_comm_cmd_init(struct hnae3_ae_dev *ae_dev, struct hclge_comm_hw *hw,
			u32 *fw_version, bool is_pf,
			unsigned long reset_pending)
{
	struct hclge_comm_cmq *cmdq = &hw->cmq;
	int ret;

	spin_lock_bh(&cmdq->csq.lock);
	spin_lock(&cmdq->crq.lock);

	cmdq->csq.next_to_clean = 0;
	cmdq->csq.next_to_use = 0;
	cmdq->crq.next_to_clean = 0;
	cmdq->crq.next_to_use = 0;

	hclge_comm_cmd_init_regs(hw);

	spin_unlock(&cmdq->crq.lock);
	spin_unlock_bh(&cmdq->csq.lock);

	clear_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hw->comm_state);

	/* Check if there is a new reset pending, because the higher level
	 * reset may happen when the lower level reset is being processed.
	 */
	if (reset_pending) {
		ret = -EBUSY;
		goto err_cmd_init;
	}

	/* get version and device capabilities */
	ret = hclge_comm_cmd_query_version_and_capability(ae_dev, hw,
							  fw_version, is_pf);
	if (ret) {
		dev_err(&ae_dev->pdev->dev,
			"failed to query version and capabilities, ret = %d\n",
			ret);
		goto err_cmd_init;
	}

	dev_info(&ae_dev->pdev->dev,
		 "The firmware version is %lu.%lu.%lu.%lu\n",
		 hnae3_get_field(*fw_version, HNAE3_FW_VERSION_BYTE3_MASK,
				 HNAE3_FW_VERSION_BYTE3_SHIFT),
		 hnae3_get_field(*fw_version, HNAE3_FW_VERSION_BYTE2_MASK,
				 HNAE3_FW_VERSION_BYTE2_SHIFT),
		 hnae3_get_field(*fw_version, HNAE3_FW_VERSION_BYTE1_MASK,
				 HNAE3_FW_VERSION_BYTE1_SHIFT),
		 hnae3_get_field(*fw_version, HNAE3_FW_VERSION_BYTE0_MASK,
				 HNAE3_FW_VERSION_BYTE0_SHIFT));

	if (!is_pf && ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3)
		return 0;

	/* Ask the firmware to enable some features; the driver can work
	 * without them.
	 */
	ret = hclge_comm_firmware_compat_config(ae_dev, hw, true);
	if (ret)
		dev_warn(&ae_dev->pdev->dev,
			 "Firmware compatible features not enabled(%d).\n",
			 ret);

	return 0;

err_cmd_init:
	set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hw->comm_state);

	return ret;
}