// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2021 Intel Corporation. */

#include "i40e.h"
#include "i40e_type.h"
#include "i40e_adminq.h"
#include "i40e_prototype.h"
#include <linux/avf/virtchnl.h>

/**
 * i40e_set_mac_type - Sets MAC type
 * @hw: pointer to the HW structure
 *
 * This function sets the mac type of the adapter based on the
 * vendor ID and device ID stored in the hw structure.
 **/
i40e_status i40e_set_mac_type(struct i40e_hw *hw)
{
	i40e_status status = 0;

	if (hw->vendor_id == PCI_VENDOR_ID_INTEL) {
		switch (hw->device_id) {
		case I40E_DEV_ID_SFP_XL710:
		case I40E_DEV_ID_QEMU:
		case I40E_DEV_ID_KX_B:
		case I40E_DEV_ID_KX_C:
		case I40E_DEV_ID_QSFP_A:
		case I40E_DEV_ID_QSFP_B:
		case I40E_DEV_ID_QSFP_C:
		case I40E_DEV_ID_1G_BASE_T_BC:
		case I40E_DEV_ID_5G_BASE_T_BC:
		case I40E_DEV_ID_10G_BASE_T:
		case I40E_DEV_ID_10G_BASE_T4:
		case I40E_DEV_ID_10G_BASE_T_BC:
		case I40E_DEV_ID_10G_B:
		case I40E_DEV_ID_10G_SFP:
		case I40E_DEV_ID_20G_KR2:
		case I40E_DEV_ID_20G_KR2_A:
		case I40E_DEV_ID_25G_B:
		case I40E_DEV_ID_25G_SFP28:
		case I40E_DEV_ID_X710_N3000:
		case I40E_DEV_ID_XXV710_N3000:
			hw->mac.type = I40E_MAC_XL710;
			break;
		case I40E_DEV_ID_KX_X722:
		case I40E_DEV_ID_QSFP_X722:
		case I40E_DEV_ID_SFP_X722:
		case I40E_DEV_ID_1G_BASE_T_X722:
		case I40E_DEV_ID_10G_BASE_T_X722:
		case I40E_DEV_ID_SFP_I_X722:
		case I40E_DEV_ID_SFP_X722_A:
			hw->mac.type = I40E_MAC_X722;
			break;
		default:
			hw->mac.type = I40E_MAC_GENERIC;
			break;
		}
	} else {
		status = I40E_ERR_DEVICE_NOT_SUPPORTED;
	}

	hw_dbg(hw, "i40e_set_mac_type found mac: %d, returns: %d\n",
	       hw->mac.type, status);
	return status;
}

/**
 * i40e_aq_str - convert AQ err code to a string
 * @hw: pointer to the HW structure
 * @aq_err: the AQ error code to convert
 **/
const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err)
{
	switch (aq_err) {
	case I40E_AQ_RC_OK:
		return "OK";
	case I40E_AQ_RC_EPERM:
		return "I40E_AQ_RC_EPERM";
	case I40E_AQ_RC_ENOENT:
		return "I40E_AQ_RC_ENOENT";
	case I40E_AQ_RC_ESRCH:
		return "I40E_AQ_RC_ESRCH";
	case I40E_AQ_RC_EINTR:
		return "I40E_AQ_RC_EINTR";
	case I40E_AQ_RC_EIO:
		return "I40E_AQ_RC_EIO";
	case I40E_AQ_RC_ENXIO:
		return "I40E_AQ_RC_ENXIO";
	case I40E_AQ_RC_E2BIG:
		return "I40E_AQ_RC_E2BIG";
	case I40E_AQ_RC_EAGAIN:
		return "I40E_AQ_RC_EAGAIN";
	case I40E_AQ_RC_ENOMEM:
		return "I40E_AQ_RC_ENOMEM";
	case I40E_AQ_RC_EACCES:
		return "I40E_AQ_RC_EACCES";
	case I40E_AQ_RC_EFAULT:
		return "I40E_AQ_RC_EFAULT";
	case I40E_AQ_RC_EBUSY:
		return "I40E_AQ_RC_EBUSY";
	case I40E_AQ_RC_EEXIST:
		return "I40E_AQ_RC_EEXIST";
	case I40E_AQ_RC_EINVAL:
		return "I40E_AQ_RC_EINVAL";
	case I40E_AQ_RC_ENOTTY:
		return "I40E_AQ_RC_ENOTTY";
	case I40E_AQ_RC_ENOSPC:
		return "I40E_AQ_RC_ENOSPC";
	case I40E_AQ_RC_ENOSYS:
		return "I40E_AQ_RC_ENOSYS";
	case I40E_AQ_RC_ERANGE:
		return "I40E_AQ_RC_ERANGE";
	case I40E_AQ_RC_EFLUSHED:
		return "I40E_AQ_RC_EFLUSHED";
	case I40E_AQ_RC_BAD_ADDR:
		return "I40E_AQ_RC_BAD_ADDR";
	case I40E_AQ_RC_EMODE:
		return "I40E_AQ_RC_EMODE";
	case I40E_AQ_RC_EFBIG:
		return "I40E_AQ_RC_EFBIG";
	}

	snprintf(hw->err_str, sizeof(hw->err_str), "%d", aq_err);
	return hw->err_str;
}
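
/* Error codes not covered by the switch in i40e_aq_str() above or in
 * i40e_stat_str() below share the same fallback: the numeric value is
 * formatted into hw->err_str and that buffer is returned instead.
 */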

/**
 * i40e_stat_str - convert status err code to a string
 * @hw: pointer to the HW structure
 * @stat_err: the status error code to convert
 **/
const char *i40e_stat_str(struct i40e_hw *hw, i40e_status stat_err)
{
	switch (stat_err) {
	case 0:
		return "OK";
	case I40E_ERR_NVM:
		return "I40E_ERR_NVM";
	case I40E_ERR_NVM_CHECKSUM:
		return "I40E_ERR_NVM_CHECKSUM";
	case I40E_ERR_PHY:
		return "I40E_ERR_PHY";
	case I40E_ERR_CONFIG:
		return "I40E_ERR_CONFIG";
	case I40E_ERR_PARAM:
		return "I40E_ERR_PARAM";
	case I40E_ERR_MAC_TYPE:
		return "I40E_ERR_MAC_TYPE";
	case I40E_ERR_UNKNOWN_PHY:
		return "I40E_ERR_UNKNOWN_PHY";
	case I40E_ERR_LINK_SETUP:
		return "I40E_ERR_LINK_SETUP";
	case I40E_ERR_ADAPTER_STOPPED:
		return "I40E_ERR_ADAPTER_STOPPED";
	case I40E_ERR_INVALID_MAC_ADDR:
		return "I40E_ERR_INVALID_MAC_ADDR";
	case I40E_ERR_DEVICE_NOT_SUPPORTED:
		return "I40E_ERR_DEVICE_NOT_SUPPORTED";
	case I40E_ERR_PRIMARY_REQUESTS_PENDING:
		return "I40E_ERR_PRIMARY_REQUESTS_PENDING";
	case I40E_ERR_INVALID_LINK_SETTINGS:
		return "I40E_ERR_INVALID_LINK_SETTINGS";
	case I40E_ERR_AUTONEG_NOT_COMPLETE:
		return "I40E_ERR_AUTONEG_NOT_COMPLETE";
	case I40E_ERR_RESET_FAILED:
		return "I40E_ERR_RESET_FAILED";
	case I40E_ERR_SWFW_SYNC:
		return "I40E_ERR_SWFW_SYNC";
	case I40E_ERR_NO_AVAILABLE_VSI:
		return "I40E_ERR_NO_AVAILABLE_VSI";
	case I40E_ERR_NO_MEMORY:
		return "I40E_ERR_NO_MEMORY";
	case I40E_ERR_BAD_PTR:
		return "I40E_ERR_BAD_PTR";
	case I40E_ERR_RING_FULL:
		return "I40E_ERR_RING_FULL";
	case I40E_ERR_INVALID_PD_ID:
		return "I40E_ERR_INVALID_PD_ID";
	case I40E_ERR_INVALID_QP_ID:
		return "I40E_ERR_INVALID_QP_ID";
	case I40E_ERR_INVALID_CQ_ID:
		return "I40E_ERR_INVALID_CQ_ID";
	case I40E_ERR_INVALID_CEQ_ID:
		return "I40E_ERR_INVALID_CEQ_ID";
	case I40E_ERR_INVALID_AEQ_ID:
		return "I40E_ERR_INVALID_AEQ_ID";
	case I40E_ERR_INVALID_SIZE:
		return "I40E_ERR_INVALID_SIZE";
	case I40E_ERR_INVALID_ARP_INDEX:
		return "I40E_ERR_INVALID_ARP_INDEX";
	case I40E_ERR_INVALID_FPM_FUNC_ID:
		return "I40E_ERR_INVALID_FPM_FUNC_ID";
	case I40E_ERR_QP_INVALID_MSG_SIZE:
		return "I40E_ERR_QP_INVALID_MSG_SIZE";
	case I40E_ERR_QP_TOOMANY_WRS_POSTED:
		return "I40E_ERR_QP_TOOMANY_WRS_POSTED";
	case I40E_ERR_INVALID_FRAG_COUNT:
		return "I40E_ERR_INVALID_FRAG_COUNT";
	case I40E_ERR_QUEUE_EMPTY:
		return "I40E_ERR_QUEUE_EMPTY";
	case I40E_ERR_INVALID_ALIGNMENT:
		return "I40E_ERR_INVALID_ALIGNMENT";
	case I40E_ERR_FLUSHED_QUEUE:
		return "I40E_ERR_FLUSHED_QUEUE";
	case I40E_ERR_INVALID_PUSH_PAGE_INDEX:
		return "I40E_ERR_INVALID_PUSH_PAGE_INDEX";
	case I40E_ERR_INVALID_IMM_DATA_SIZE:
		return "I40E_ERR_INVALID_IMM_DATA_SIZE";
	case I40E_ERR_TIMEOUT:
		return "I40E_ERR_TIMEOUT";
	case I40E_ERR_OPCODE_MISMATCH:
		return "I40E_ERR_OPCODE_MISMATCH";
	case I40E_ERR_CQP_COMPL_ERROR:
		return "I40E_ERR_CQP_COMPL_ERROR";
	case I40E_ERR_INVALID_VF_ID:
		return "I40E_ERR_INVALID_VF_ID";
	case I40E_ERR_INVALID_HMCFN_ID:
		return "I40E_ERR_INVALID_HMCFN_ID";
	case I40E_ERR_BACKING_PAGE_ERROR:
		return "I40E_ERR_BACKING_PAGE_ERROR";
	case I40E_ERR_NO_PBLCHUNKS_AVAILABLE:
		return "I40E_ERR_NO_PBLCHUNKS_AVAILABLE";
	case I40E_ERR_INVALID_PBLE_INDEX:
		return "I40E_ERR_INVALID_PBLE_INDEX";
	case I40E_ERR_INVALID_SD_INDEX:
		return "I40E_ERR_INVALID_SD_INDEX";
	case I40E_ERR_INVALID_PAGE_DESC_INDEX:
		return "I40E_ERR_INVALID_PAGE_DESC_INDEX";
	case I40E_ERR_INVALID_SD_TYPE:
		return "I40E_ERR_INVALID_SD_TYPE";
	case I40E_ERR_MEMCPY_FAILED:
		return "I40E_ERR_MEMCPY_FAILED";
	case I40E_ERR_INVALID_HMC_OBJ_INDEX:
		return "I40E_ERR_INVALID_HMC_OBJ_INDEX";
	case I40E_ERR_INVALID_HMC_OBJ_COUNT:
		return "I40E_ERR_INVALID_HMC_OBJ_COUNT";
	case I40E_ERR_INVALID_SRQ_ARM_LIMIT:
		return "I40E_ERR_INVALID_SRQ_ARM_LIMIT";
	case I40E_ERR_SRQ_ENABLED:
		return "I40E_ERR_SRQ_ENABLED";
	case I40E_ERR_ADMIN_QUEUE_ERROR:
		return "I40E_ERR_ADMIN_QUEUE_ERROR";
	case I40E_ERR_ADMIN_QUEUE_TIMEOUT:
		return "I40E_ERR_ADMIN_QUEUE_TIMEOUT";
	case I40E_ERR_BUF_TOO_SHORT:
		return "I40E_ERR_BUF_TOO_SHORT";
	case I40E_ERR_ADMIN_QUEUE_FULL:
		return "I40E_ERR_ADMIN_QUEUE_FULL";
	case I40E_ERR_ADMIN_QUEUE_NO_WORK:
		return "I40E_ERR_ADMIN_QUEUE_NO_WORK";
	case I40E_ERR_BAD_IWARP_CQE:
		return "I40E_ERR_BAD_IWARP_CQE";
	case I40E_ERR_NVM_BLANK_MODE:
		return "I40E_ERR_NVM_BLANK_MODE";
	case I40E_ERR_NOT_IMPLEMENTED:
		return "I40E_ERR_NOT_IMPLEMENTED";
	case I40E_ERR_PE_DOORBELL_NOT_ENABLED:
		return "I40E_ERR_PE_DOORBELL_NOT_ENABLED";
	case I40E_ERR_DIAG_TEST_FAILED:
		return "I40E_ERR_DIAG_TEST_FAILED";
	case I40E_ERR_NOT_READY:
		return "I40E_ERR_NOT_READY";
	case I40E_NOT_SUPPORTED:
		return "I40E_NOT_SUPPORTED";
	case I40E_ERR_FIRMWARE_API_VERSION:
		return "I40E_ERR_FIRMWARE_API_VERSION";
	case I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR:
		return "I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR";
	}

	snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err);
	return hw->err_str;
}

/**
 * i40e_debug_aq
 * @hw: pointer to the hw struct
 * @mask: debug mask
 * @desc: pointer to admin queue descriptor
 * @buffer: pointer to command buffer
 * @buf_len: max length of buffer
 *
 * Dumps debug log about adminq command with descriptor contents.
 **/
void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
		   void *buffer, u16 buf_len)
{
	struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc;
	u32 effective_mask = hw->debug_mask & mask;
	char prefix[27];
	u16 len;
	u8 *buf = (u8 *)buffer;

	if (!effective_mask || !desc)
		return;

	len = le16_to_cpu(aq_desc->datalen);

	i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
		   "AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
		   le16_to_cpu(aq_desc->opcode),
		   le16_to_cpu(aq_desc->flags),
		   le16_to_cpu(aq_desc->datalen),
		   le16_to_cpu(aq_desc->retval));
	i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
		   "\tcookie (h,l) 0x%08X 0x%08X\n",
		   le32_to_cpu(aq_desc->cookie_high),
		   le32_to_cpu(aq_desc->cookie_low));
	i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
		   "\tparam (0,1) 0x%08X 0x%08X\n",
		   le32_to_cpu(aq_desc->params.internal.param0),
		   le32_to_cpu(aq_desc->params.internal.param1));
	i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
		   "\taddr (h,l) 0x%08X 0x%08X\n",
		   le32_to_cpu(aq_desc->params.external.addr_high),
		   le32_to_cpu(aq_desc->params.external.addr_low));

	if (buffer && buf_len != 0 && len != 0 &&
	    (effective_mask & I40E_DEBUG_AQ_DESC_BUFFER)) {
		i40e_debug(hw, mask, "AQ CMD Buffer:\n");
		if (buf_len < len)
			len = buf_len;

		snprintf(prefix, sizeof(prefix),
			 "i40e %02x:%02x.%x: \t0x",
			 hw->bus.bus_id,
			 hw->bus.device,
			 hw->bus.func);

		print_hex_dump(KERN_INFO, prefix, DUMP_PREFIX_OFFSET,
			       16, 1, buf, len, false);
	}
}

/**
 * i40e_check_asq_alive
 * @hw: pointer to the hw struct
 *
 * Returns true if the admin send queue is enabled, else false.
 **/
bool i40e_check_asq_alive(struct i40e_hw *hw)
{
	if (hw->aq.asq.len)
		return !!(rd32(hw, hw->aq.asq.len) &
			  I40E_PF_ATQLEN_ATQENABLE_MASK);
	else
		return false;
}

/**
 * i40e_aq_queue_shutdown
 * @hw: pointer to the hw struct
 * @unloading: is the driver unloading itself
 *
 * Tell the Firmware that we're shutting down the AdminQ and whether
 * or not the driver is unloading as well.
 **/
i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw,
				   bool unloading)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_queue_shutdown *cmd =
		(struct i40e_aqc_queue_shutdown *)&desc.params.raw;
	i40e_status status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_queue_shutdown);

	if (unloading)
		cmd->driver_unloading = cpu_to_le32(I40E_AQ_DRIVER_UNLOADING);
	status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);

	return status;
}
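
/* The RSS LUT and RSS key helpers below are indirect admin queue commands:
 * the descriptor carries only the control fields (VSI id, table type), while
 * the lookup table or key itself travels in the separate buffer handed to
 * i40e_asq_send_command().
 */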

/**
 * i40e_aq_get_set_rss_lut
 * @hw: pointer to the hardware structure
 * @vsi_id: vsi fw index
 * @pf_lut: for PF table set true, for VSI table set false
 * @lut: pointer to the lut buffer provided by the caller
 * @lut_size: size of the lut buffer
 * @set: set true to set the table, false to get the table
 *
 * Internal function to get or set RSS look up table
 **/
static i40e_status i40e_aq_get_set_rss_lut(struct i40e_hw *hw,
					   u16 vsi_id, bool pf_lut,
					   u8 *lut, u16 lut_size,
					   bool set)
{
	i40e_status status;
	struct i40e_aq_desc desc;
	struct i40e_aqc_get_set_rss_lut *cmd_resp =
		(struct i40e_aqc_get_set_rss_lut *)&desc.params.raw;

	if (set)
		i40e_fill_default_direct_cmd_desc(&desc,
						  i40e_aqc_opc_set_rss_lut);
	else
		i40e_fill_default_direct_cmd_desc(&desc,
						  i40e_aqc_opc_get_rss_lut);

	/* Indirect command */
	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);

	cmd_resp->vsi_id =
		cpu_to_le16((u16)((vsi_id <<
				   I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT) &
				  I40E_AQC_SET_RSS_LUT_VSI_ID_MASK));
	cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_LUT_VSI_VALID);

	if (pf_lut)
		cmd_resp->flags |= cpu_to_le16((u16)
				((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF <<
				  I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
				 I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
	else
		cmd_resp->flags |= cpu_to_le16((u16)
				((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI <<
				  I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
				 I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));

	status = i40e_asq_send_command(hw, &desc, lut, lut_size, NULL);

	return status;
}

/**
 * i40e_aq_get_rss_lut
 * @hw: pointer to the hardware structure
 * @vsi_id: vsi fw index
 * @pf_lut: for PF table set true, for VSI table set false
 * @lut: pointer to the lut buffer provided by the caller
 * @lut_size: size of the lut buffer
 *
 * get the RSS lookup table, PF or VSI type
 **/
i40e_status i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 vsi_id,
				bool pf_lut, u8 *lut, u16 lut_size)
{
	return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size,
				       false);
}

/**
 * i40e_aq_set_rss_lut
 * @hw: pointer to the hardware structure
 * @vsi_id: vsi fw index
 * @pf_lut: for PF table set true, for VSI table set false
 * @lut: pointer to the lut buffer provided by the caller
 * @lut_size: size of the lut buffer
 *
 * set the RSS lookup table, PF or VSI type
 **/
i40e_status i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 vsi_id,
				bool pf_lut, u8 *lut, u16 lut_size)
{
	return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, true);
}

/**
 * i40e_aq_get_set_rss_key
 * @hw: pointer to the hw struct
 * @vsi_id: vsi fw index
 * @key: pointer to key info struct
 * @set: set true to set the key, false to get the key
 *
 * Internal function to get or set the RSS key per VSI
 **/
static i40e_status i40e_aq_get_set_rss_key(struct i40e_hw *hw,
					   u16 vsi_id,
					   struct i40e_aqc_get_set_rss_key_data *key,
					   bool set)
{
	i40e_status status;
	struct i40e_aq_desc desc;
	struct i40e_aqc_get_set_rss_key *cmd_resp =
		(struct i40e_aqc_get_set_rss_key *)&desc.params.raw;
	u16 key_size = sizeof(struct i40e_aqc_get_set_rss_key_data);

	if (set)
		i40e_fill_default_direct_cmd_desc(&desc,
						  i40e_aqc_opc_set_rss_key);
	else
		i40e_fill_default_direct_cmd_desc(&desc,
						  i40e_aqc_opc_get_rss_key);

	/* Indirect command */
	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);

	cmd_resp->vsi_id =
		cpu_to_le16((u16)((vsi_id <<
				   I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) &
				  I40E_AQC_SET_RSS_KEY_VSI_ID_MASK));
	cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_KEY_VSI_VALID);

	status = i40e_asq_send_command(hw, &desc, key, key_size, NULL);

	return status;
}

/**
 * i40e_aq_get_rss_key
 * @hw: pointer to the hw struct
 * @vsi_id: vsi fw index
 * @key: pointer to key info struct
 *
 * get the RSS key per VSI
 **/
i40e_status i40e_aq_get_rss_key(struct i40e_hw *hw,
				u16 vsi_id,
				struct i40e_aqc_get_set_rss_key_data *key)
{
	return i40e_aq_get_set_rss_key(hw, vsi_id, key, false);
}

/**
 * i40e_aq_set_rss_key
 * @hw: pointer to the hw struct
 * @vsi_id: vsi fw index
 * @key: pointer to key info struct
 *
 * set the RSS key per VSI
 **/
i40e_status i40e_aq_set_rss_key(struct i40e_hw *hw,
				u16 vsi_id,
				struct i40e_aqc_get_set_rss_key_data *key)
{
	return i40e_aq_get_set_rss_key(hw, vsi_id, key, true);
}

/* The i40e_ptype_lookup table is used to convert from the 8-bit ptype in the
 * hardware to a bit-field that can be used by SW to more easily determine the
 * packet type.
 *
 * Macros are used to shorten the table lines and make this table human
 * readable.
 *
 * We store the PTYPE in the top byte of the bit field - this is just so that
 * we can check that the table doesn't have a row missing, as the index into
 * the table should be the PTYPE.
 *
 * Typical work flow:
 *
 * IF NOT i40e_ptype_lookup[ptype].known
 * THEN
 *	Packet is unknown
 * ELSE IF i40e_ptype_lookup[ptype].outer_ip == I40E_RX_PTYPE_OUTER_IP
 *	Use the rest of the fields to look at the tunnels, inner protocols, etc
 * ELSE
 *	Use the enum i40e_rx_l2_ptype to decode the packet type
 * ENDIF
 */

/* macro to make the table lines short, use explicit indexing with [PTYPE] */
#define I40E_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\
	[PTYPE] = { \
		1, \
		I40E_RX_PTYPE_OUTER_##OUTER_IP, \
		I40E_RX_PTYPE_OUTER_##OUTER_IP_VER, \
		I40E_RX_PTYPE_##OUTER_FRAG, \
		I40E_RX_PTYPE_TUNNEL_##T, \
		I40E_RX_PTYPE_TUNNEL_END_##TE, \
		I40E_RX_PTYPE_##TEF, \
		I40E_RX_PTYPE_INNER_PROT_##I, \
		I40E_RX_PTYPE_PAYLOAD_LAYER_##PL }

#define I40E_PTT_UNUSED_ENTRY(PTYPE) [PTYPE] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 }

/* shorter macros make the table lines fit but are terse */
#define I40E_RX_PTYPE_NOF		I40E_RX_PTYPE_NOT_FRAG
#define I40E_RX_PTYPE_FRG		I40E_RX_PTYPE_FRAG
#define I40E_RX_PTYPE_INNER_PROT_TS	I40E_RX_PTYPE_INNER_PROT_TIMESYNC

/* Lookup table mapping in the 8-bit HW PTYPE to the bit field for decoding */
struct i40e_rx_ptype_decoded i40e_ptype_lookup[BIT(8)] = {
	/* L2 Packet types */
	I40E_PTT_UNUSED_ENTRY(0),
	I40E_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
	I40E_PTT(2, L2, NONE, NOF, NONE, NONE, NOF, TS, PAY2),
	I40E_PTT(3, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
	I40E_PTT_UNUSED_ENTRY(4),
	I40E_PTT_UNUSED_ENTRY(5),
	I40E_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
	I40E_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
	I40E_PTT_UNUSED_ENTRY(8),
	I40E_PTT_UNUSED_ENTRY(9),
	I40E_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
	I40E_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
	I40E_PTT(12, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(13, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(14, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(15, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(16, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(17, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(18, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(19, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(20, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(21, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),

	/* Non Tunneled IPv4 */
	I40E_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(25),
	I40E_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4),
	I40E_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4),
	I40E_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4),

	/* IPv4 --> IPv4 */
	I40E_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
	I40E_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
	I40E_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(32),
	I40E_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
	I40E_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
	I40E_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),

	/* IPv4 --> IPv6 */
	I40E_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
	I40E_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
	I40E_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(39),
	I40E_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
	I40E_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
	I40E_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),

	/* IPv4 --> GRE/NAT */
	I40E_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),

	/* IPv4 --> GRE/NAT --> IPv4 */
	I40E_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
	I40E_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
	I40E_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(47),
	I40E_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
	I40E_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
	I40E_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),

	/* IPv4 --> GRE/NAT --> IPv6 */
	I40E_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
	I40E_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
	I40E_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(54),
	I40E_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
	I40E_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
	I40E_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),

	/* IPv4 --> GRE/NAT --> MAC */
	I40E_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),

	/* IPv4 --> GRE/NAT --> MAC --> IPv4 */
	I40E_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
	I40E_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
	I40E_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(62),
	I40E_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
	I40E_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
	I40E_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),

	/* IPv4 --> GRE/NAT -> MAC --> IPv6 */
	I40E_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
	I40E_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
	I40E_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(69),
	I40E_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
	I40E_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
	I40E_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),

	/* IPv4 --> GRE/NAT --> MAC/VLAN */
	I40E_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),

	/* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */
	I40E_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
	I40E_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
	I40E_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(77),
	I40E_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
	I40E_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
	I40E_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),

	/* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */
	I40E_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
	I40E_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
	I40E_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(84),
	I40E_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
	I40E_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
	I40E_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),

	/* Non Tunneled IPv6 */
	I40E_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(91),
	I40E_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4),
	I40E_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4),
	I40E_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4),

	/* IPv6 --> IPv4 */
	I40E_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
	I40E_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
	I40E_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(98),
	I40E_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
	I40E_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
	I40E_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),

	/* IPv6 --> IPv6 */
	I40E_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
	I40E_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
	I40E_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(105),
	I40E_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
	I40E_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
	I40E_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),

	/* IPv6 --> GRE/NAT */
	I40E_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),

	/* IPv6 --> GRE/NAT -> IPv4 */
	I40E_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
	I40E_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
	I40E_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(113),
	I40E_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
	I40E_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
	I40E_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),

	/* IPv6 --> GRE/NAT -> IPv6 */
	I40E_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
	I40E_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
	I40E_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(120),
	I40E_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
	I40E_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
	I40E_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),

	/* IPv6 --> GRE/NAT -> MAC */
	I40E_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),

	/* IPv6 --> GRE/NAT -> MAC -> IPv4 */
	I40E_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
	I40E_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
	I40E_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(128),
	I40E_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
	I40E_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
	I40E_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),

	/* IPv6 --> GRE/NAT -> MAC -> IPv6 */
	I40E_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
	I40E_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
	I40E_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(135),
	I40E_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
	I40E_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
	I40E_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),

	/* IPv6 --> GRE/NAT -> MAC/VLAN */
	I40E_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),

	/* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */
	I40E_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
	I40E_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
	I40E_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(143),
	I40E_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
	I40E_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
	I40E_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),

	/* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */
	I40E_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
	I40E_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
	I40E_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(150),
	I40E_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
	I40E_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
	I40E_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),

	/* unused entries */
	[154 ... 255] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 }
};
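
/* Example decode: ptype 26 is a non-tunneled IPv4 frame carrying TCP, so its
 * table entry is marked known, outer IP/IPv4, not fragmented, no tunnel,
 * inner protocol TCP, payload layer PAY4.
 */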

/**
 * i40e_init_shared_code - Initialize the shared code
 * @hw: pointer to hardware structure
 *
 * This assigns the MAC type and PHY code and inits the NVM.
 * Does not touch the hardware. This function must be called prior to any
 * other function in the shared code. The i40e_hw structure should be
 * memset to 0 prior to calling this function. The following fields in
 * hw structure should be filled in prior to calling this function:
 * hw_addr, back, device_id, vendor_id, subsystem_device_id,
 * subsystem_vendor_id, and revision_id
 **/
i40e_status i40e_init_shared_code(struct i40e_hw *hw)
{
	i40e_status status = 0;
	u32 port, ari, func_rid;

	i40e_set_mac_type(hw);

	switch (hw->mac.type) {
	case I40E_MAC_XL710:
	case I40E_MAC_X722:
		break;
	default:
		return I40E_ERR_DEVICE_NOT_SUPPORTED;
	}

	hw->phy.get_link_info = true;

	/* Determine port number and PF number */
	port = (rd32(hw, I40E_PFGEN_PORTNUM) & I40E_PFGEN_PORTNUM_PORT_NUM_MASK)
					   >> I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT;
	hw->port = (u8)port;
	ari = (rd32(hw, I40E_GLPCI_CAPSUP) & I40E_GLPCI_CAPSUP_ARI_EN_MASK) >>
					     I40E_GLPCI_CAPSUP_ARI_EN_SHIFT;
	func_rid = rd32(hw, I40E_PF_FUNC_RID);
	if (ari)
		hw->pf_id = (u8)(func_rid & 0xff);
	else
		hw->pf_id = (u8)(func_rid & 0x7);

	status = i40e_init_nvm(hw);
	return status;
}

/**
 * i40e_aq_mac_address_read - Retrieve the MAC addresses
 * @hw: pointer to the hw struct
 * @flags: a return indicator of what addresses were added to the addr store
 * @addrs: the requestor's mac addr store
 * @cmd_details: pointer to command details structure or NULL
 **/
static i40e_status i40e_aq_mac_address_read(struct i40e_hw *hw,
					    u16 *flags,
					    struct i40e_aqc_mac_address_read_data *addrs,
					    struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_mac_address_read *cmd_data =
		(struct i40e_aqc_mac_address_read *)&desc.params.raw;
	i40e_status status;

	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_mac_address_read);
	desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF);

	status = i40e_asq_send_command(hw, &desc, addrs,
				       sizeof(*addrs), cmd_details);
	*flags = le16_to_cpu(cmd_data->command_flags);

	return status;
}
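
/* The write command below packs the MAC address big-endian into two
 * descriptor fields: mac_sah carries bytes 0-1 and mac_sal bytes 2-5, so
 * 00:11:22:33:44:55 is sent as mac_sah 0x0011 and mac_sal 0x22334455.
 */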

/**
 * i40e_aq_mac_address_write - Change the MAC addresses
 * @hw: pointer to the hw struct
 * @flags: indicates which MAC to be written
 * @mac_addr: address to write
 * @cmd_details: pointer to command details structure or NULL
 **/
i40e_status i40e_aq_mac_address_write(struct i40e_hw *hw,
				      u16 flags, u8 *mac_addr,
				      struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_mac_address_write *cmd_data =
		(struct i40e_aqc_mac_address_write *)&desc.params.raw;
	i40e_status status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_mac_address_write);
	cmd_data->command_flags = cpu_to_le16(flags);
	cmd_data->mac_sah = cpu_to_le16((u16)mac_addr[0] << 8 | mac_addr[1]);
	cmd_data->mac_sal = cpu_to_le32(((u32)mac_addr[2] << 24) |
					((u32)mac_addr[3] << 16) |
					((u32)mac_addr[4] << 8) |
					mac_addr[5]);

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	return status;
}

/**
 * i40e_get_mac_addr - get MAC address
 * @hw: pointer to the HW structure
 * @mac_addr: pointer to MAC address
 *
 * Reads the adapter's MAC address from the firmware via an admin queue command
 **/
i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
{
	struct i40e_aqc_mac_address_read_data addrs;
	i40e_status status;
	u16 flags = 0;

	status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);

	if (flags & I40E_AQC_LAN_ADDR_VALID)
		ether_addr_copy(mac_addr, addrs.pf_lan_mac);

	return status;
}

/**
 * i40e_get_port_mac_addr - get Port MAC address
 * @hw: pointer to the HW structure
 * @mac_addr: pointer to Port MAC address
 *
 * Reads the adapter's Port MAC address
 **/
i40e_status i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
{
	struct i40e_aqc_mac_address_read_data addrs;
	i40e_status status;
	u16 flags = 0;

	status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);
	if (status)
		return status;

	if (flags & I40E_AQC_PORT_ADDR_VALID)
		ether_addr_copy(mac_addr, addrs.port_mac);
	else
		status = I40E_ERR_INVALID_MAC_ADDR;

	return status;
}
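
/* Each GLLAN_TXPRE_QDIS register covers 128 queues, so an absolute queue
 * index is split into a register block (index / 128) and an in-block index
 * (index % 128); absolute queue 300, for example, lands in
 * I40E_GLLAN_TXPRE_QDIS(2) with in-block index 44.
 */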

/**
 * i40e_pre_tx_queue_cfg - pre tx queue configure
 * @hw: pointer to the HW structure
 * @queue: target PF queue index
 * @enable: state change request
 *
 * Handles hw requirement to indicate intention to enable
 * or disable target queue.
 **/
void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable)
{
	u32 abs_queue_idx = hw->func_caps.base_queue + queue;
	u32 reg_block = 0;
	u32 reg_val;

	if (abs_queue_idx >= 128) {
		reg_block = abs_queue_idx / 128;
		abs_queue_idx %= 128;
	}

	reg_val = rd32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block));
	reg_val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;
	reg_val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);

	if (enable)
		reg_val |= I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK;
	else
		reg_val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK;

	wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), reg_val);
}

/**
 * i40e_read_pba_string - Reads part number string from EEPROM
 * @hw: pointer to hardware structure
 * @pba_num: stores the part number string from the EEPROM
 * @pba_num_size: part number string buffer length
 *
 * Reads the part number string from the EEPROM.
 **/
i40e_status i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num,
				 u32 pba_num_size)
{
	i40e_status status = 0;
	u16 pba_word = 0;
	u16 pba_size = 0;
	u16 pba_ptr = 0;
	u16 i = 0;

	status = i40e_read_nvm_word(hw, I40E_SR_PBA_FLAGS, &pba_word);
	if (status || (pba_word != 0xFAFA)) {
		hw_dbg(hw, "Failed to read PBA flags or flag is invalid.\n");
		return status;
	}

	status = i40e_read_nvm_word(hw, I40E_SR_PBA_BLOCK_PTR, &pba_ptr);
	if (status) {
		hw_dbg(hw, "Failed to read PBA Block pointer.\n");
		return status;
	}

	status = i40e_read_nvm_word(hw, pba_ptr, &pba_size);
	if (status) {
		hw_dbg(hw, "Failed to read PBA Block size.\n");
		return status;
	}

	/* Subtract one to get PBA word count (PBA Size word is included in
	 * total size)
	 */
	pba_size--;
	if (pba_num_size < (((u32)pba_size * 2) + 1)) {
		hw_dbg(hw, "Buffer too small for PBA data.\n");
		return I40E_ERR_PARAM;
	}

	for (i = 0; i < pba_size; i++) {
		status = i40e_read_nvm_word(hw, (pba_ptr + 1) + i, &pba_word);
		if (status) {
			hw_dbg(hw, "Failed to read PBA Block word %d.\n", i);
			return status;
		}

		pba_num[(i * 2)] = (pba_word >> 8) & 0xFF;
		pba_num[(i * 2) + 1] = pba_word & 0xFF;
	}
	pba_num[(pba_size * 2)] = '\0';

	return status;
}

/**
 * i40e_get_media_type - Gets media type
 * @hw: pointer to the hardware structure
 **/
static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
{
	enum i40e_media_type media;

	switch (hw->phy.link_info.phy_type) {
	case I40E_PHY_TYPE_10GBASE_SR:
	case I40E_PHY_TYPE_10GBASE_LR:
	case I40E_PHY_TYPE_1000BASE_SX:
	case I40E_PHY_TYPE_1000BASE_LX:
	case I40E_PHY_TYPE_40GBASE_SR4:
	case I40E_PHY_TYPE_40GBASE_LR4:
	case I40E_PHY_TYPE_25GBASE_LR:
	case I40E_PHY_TYPE_25GBASE_SR:
		media = I40E_MEDIA_TYPE_FIBER;
		break;
	case I40E_PHY_TYPE_100BASE_TX:
	case I40E_PHY_TYPE_1000BASE_T:
	case I40E_PHY_TYPE_2_5GBASE_T_LINK_STATUS:
	case I40E_PHY_TYPE_5GBASE_T_LINK_STATUS:
	case I40E_PHY_TYPE_10GBASE_T:
		media = I40E_MEDIA_TYPE_BASET;
		break;
	case I40E_PHY_TYPE_10GBASE_CR1_CU:
	case I40E_PHY_TYPE_40GBASE_CR4_CU:
	case I40E_PHY_TYPE_10GBASE_CR1:
	case I40E_PHY_TYPE_40GBASE_CR4:
	case I40E_PHY_TYPE_10GBASE_SFPP_CU:
	case I40E_PHY_TYPE_40GBASE_AOC:
	case I40E_PHY_TYPE_10GBASE_AOC:
	case I40E_PHY_TYPE_25GBASE_CR:
	case I40E_PHY_TYPE_25GBASE_AOC:
	case I40E_PHY_TYPE_25GBASE_ACC:
		media = I40E_MEDIA_TYPE_DA;
		break;
	case I40E_PHY_TYPE_1000BASE_KX:
	case I40E_PHY_TYPE_10GBASE_KX4:
	case I40E_PHY_TYPE_10GBASE_KR:
	case I40E_PHY_TYPE_40GBASE_KR4:
	case I40E_PHY_TYPE_20GBASE_KR2:
	case I40E_PHY_TYPE_25GBASE_KR:
		media = I40E_MEDIA_TYPE_BACKPLANE;
		break;
	case I40E_PHY_TYPE_SGMII:
	case I40E_PHY_TYPE_XAUI:
	case I40E_PHY_TYPE_XFI:
	case I40E_PHY_TYPE_XLAUI:
	case I40E_PHY_TYPE_XLPPI:
	default:
		media = I40E_MEDIA_TYPE_UNKNOWN;
		break;
	}

	return media;
}

/**
 * i40e_poll_globr - Poll for Global Reset completion
 * @hw: pointer to the hardware structure
 * @retry_limit: how many times to retry before failure
 **/
static i40e_status i40e_poll_globr(struct i40e_hw *hw,
				   u32 retry_limit)
{
	u32 cnt, reg = 0;

	for (cnt = 0; cnt < retry_limit; cnt++) {
		reg = rd32(hw, I40E_GLGEN_RSTAT);
		if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
			return 0;
		msleep(100);
	}

	hw_dbg(hw, "Global reset failed.\n");
	hw_dbg(hw, "I40E_GLGEN_RSTAT = 0x%x\n", reg);

	return I40E_ERR_RESET_FAILED;
}

#define I40E_PF_RESET_WAIT_COUNT_A0	200
#define I40E_PF_RESET_WAIT_COUNT	200
/**
 * i40e_pf_reset - Reset the PF
 * @hw: pointer to the hardware structure
 *
 * Assuming someone else has triggered a global reset,
 * ensure the global reset is complete and then reset the PF
 **/
i40e_status i40e_pf_reset(struct i40e_hw *hw)
{
	u32 cnt = 0;
	u32 cnt1 = 0;
	u32 reg = 0;
	u32 grst_del;

	/* Poll for Global Reset steady state in case of recent GRST.
	 * The grst delay value is in 100ms units, and we'll wait a
	 * couple counts longer to be sure we don't just miss the end.
	 */
	grst_del = (rd32(hw, I40E_GLGEN_RSTCTL) &
		    I40E_GLGEN_RSTCTL_GRSTDEL_MASK) >>
		    I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;

	/* It can take up to 15 secs for GRST steady state.
	 * Bump it to 16 secs max to be safe.
	 */
	grst_del = grst_del * 20;

	for (cnt = 0; cnt < grst_del; cnt++) {
		reg = rd32(hw, I40E_GLGEN_RSTAT);
		if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
			break;
		msleep(100);
	}
	if (reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
		hw_dbg(hw, "Global reset polling failed to complete.\n");
		return I40E_ERR_RESET_FAILED;
	}

	/* Now Wait for the FW to be ready */
	for (cnt1 = 0; cnt1 < I40E_PF_RESET_WAIT_COUNT; cnt1++) {
		reg = rd32(hw, I40E_GLNVM_ULD);
		reg &= (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
			I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK);
		if (reg == (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
			    I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK)) {
			hw_dbg(hw, "Core and Global modules ready %d\n", cnt1);
			break;
		}
		usleep_range(10000, 20000);
	}
	if (!(reg & (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
		     I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))) {
		hw_dbg(hw, "wait for FW Reset complete timed out\n");
		hw_dbg(hw, "I40E_GLNVM_ULD = 0x%x\n", reg);
		return I40E_ERR_RESET_FAILED;
	}

	/* If there was a Global Reset in progress when we got here,
	 * we don't need to do the PF Reset
	 */
	if (!cnt) {
		u32 reg2 = 0;

		if (hw->revision_id == 0)
			cnt = I40E_PF_RESET_WAIT_COUNT_A0;
		else
			cnt = I40E_PF_RESET_WAIT_COUNT;
		reg = rd32(hw, I40E_PFGEN_CTRL);
		wr32(hw, I40E_PFGEN_CTRL,
		     (reg | I40E_PFGEN_CTRL_PFSWR_MASK));
		for (; cnt; cnt--) {
			reg = rd32(hw, I40E_PFGEN_CTRL);
			if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK))
				break;
			reg2 = rd32(hw, I40E_GLGEN_RSTAT);
			if (reg2 & I40E_GLGEN_RSTAT_DEVSTATE_MASK)
				break;
			usleep_range(1000, 2000);
		}
		if (reg2 & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
			if (i40e_poll_globr(hw, grst_del))
				return I40E_ERR_RESET_FAILED;
		} else if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) {
			hw_dbg(hw, "PF reset polling failed to complete.\n");
			return I40E_ERR_RESET_FAILED;
		}
	}

	i40e_clear_pxe_mode(hw);

	return 0;
}
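
/* i40e_clear_hw() below sizes its loops from registers rather than driver
 * state: I40E_GLPCI_CNF2 gives the PF and VF MSI-X vector counts, while
 * I40E_PFLAN_QALLOC and I40E_PF_VT_PFALLOC report inclusive first/last queue
 * and VF indices (hence the (last - first) + 1 counts); a clear VALID bit
 * means nothing is allocated to this PF.
 */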

/**
 * i40e_clear_hw - clear out any left over hw state
 * @hw: pointer to the hw struct
 *
 * Clear queues and interrupts, typically called at init time,
 * but after the capabilities have been found so we know how many
 * queues and msix vectors have been allocated.
 **/
void i40e_clear_hw(struct i40e_hw *hw)
{
	u32 num_queues, base_queue;
	u32 num_pf_int;
	u32 num_vf_int;
	u32 num_vfs;
	u32 i, j;
	u32 val;
	u32 eol = 0x7ff;

	/* get number of interrupts, queues, and VFs */
	val = rd32(hw, I40E_GLPCI_CNF2);
	num_pf_int = (val & I40E_GLPCI_CNF2_MSI_X_PF_N_MASK) >>
		     I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT;
	num_vf_int = (val & I40E_GLPCI_CNF2_MSI_X_VF_N_MASK) >>
		     I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT;

	val = rd32(hw, I40E_PFLAN_QALLOC);
	base_queue = (val & I40E_PFLAN_QALLOC_FIRSTQ_MASK) >>
		     I40E_PFLAN_QALLOC_FIRSTQ_SHIFT;
	j = (val & I40E_PFLAN_QALLOC_LASTQ_MASK) >>
	    I40E_PFLAN_QALLOC_LASTQ_SHIFT;
	if (val & I40E_PFLAN_QALLOC_VALID_MASK)
		num_queues = (j - base_queue) + 1;
	else
		num_queues = 0;

	val = rd32(hw, I40E_PF_VT_PFALLOC);
	i = (val & I40E_PF_VT_PFALLOC_FIRSTVF_MASK) >>
	    I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT;
	j = (val & I40E_PF_VT_PFALLOC_LASTVF_MASK) >>
	    I40E_PF_VT_PFALLOC_LASTVF_SHIFT;
	if (val & I40E_PF_VT_PFALLOC_VALID_MASK)
		num_vfs = (j - i) + 1;
	else
		num_vfs = 0;

	/* stop all the interrupts */
	wr32(hw, I40E_PFINT_ICR0_ENA, 0);
	val = 0x3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
	for (i = 0; i < num_pf_int - 2; i++)
		wr32(hw, I40E_PFINT_DYN_CTLN(i), val);

	/* Set the FIRSTQ_INDX field to 0x7FF in PFINT_LNKLSTx */
	val = eol << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
	wr32(hw, I40E_PFINT_LNKLST0, val);
	for (i = 0; i < num_pf_int - 2; i++)
		wr32(hw, I40E_PFINT_LNKLSTN(i), val);
	val = eol << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT;
	for (i = 0; i < num_vfs; i++)
		wr32(hw, I40E_VPINT_LNKLST0(i), val);
	for (i = 0; i < num_vf_int - 2; i++)
		wr32(hw, I40E_VPINT_LNKLSTN(i), val);

	/* warn the HW of the coming Tx disables */
	for (i = 0; i < num_queues; i++) {
		u32 abs_queue_idx = base_queue + i;
		u32 reg_block = 0;

		if (abs_queue_idx >= 128) {
			reg_block = abs_queue_idx / 128;
			abs_queue_idx %= 128;
		}

		val = rd32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block));
		val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;
		val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
		val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK;

		wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), val);
	}
	udelay(400);

	/* stop all the queues */
	for (i = 0; i < num_queues; i++) {
		wr32(hw, I40E_QINT_TQCTL(i), 0);
		wr32(hw, I40E_QTX_ENA(i), 0);
		wr32(hw, I40E_QINT_RQCTL(i), 0);
		wr32(hw, I40E_QRX_ENA(i), 0);
	}

	/* short wait for all queue disables to settle */
	udelay(50);
}

/**
 * i40e_clear_pxe_mode - clear pxe operations mode
 * @hw: pointer to the hw struct
 *
 * Make sure all PXE mode settings are cleared, including things
 * like descriptor fetch/write-back mode.
 **/
void i40e_clear_pxe_mode(struct i40e_hw *hw)
{
	u32 reg;

	if (i40e_check_asq_alive(hw))
		i40e_aq_clear_pxe_mode(hw, NULL);

	/* Clear single descriptor fetch/write-back mode */
	reg = rd32(hw, I40E_GLLAN_RCTL_0);

	if (hw->revision_id == 0) {
		/* As a workaround, clear PXE_MODE instead of setting it */
		wr32(hw, I40E_GLLAN_RCTL_0, (reg & (~I40E_GLLAN_RCTL_0_PXE_MODE_MASK)));
	} else {
		wr32(hw, I40E_GLLAN_RCTL_0, (reg | I40E_GLLAN_RCTL_0_PXE_MODE_MASK));
	}
}

/**
 * i40e_led_is_mine - helper to find matching led
 * @hw: pointer to the hw struct
 * @idx: index into GPIO registers
 *
 * returns: 0 if no match, otherwise the value of the GPIO_CTL register
 */
static u32 i40e_led_is_mine(struct i40e_hw *hw, int idx)
{
	u32 gpio_val = 0;
	u32 port;

	if (!I40E_IS_X710TL_DEVICE(hw->device_id) &&
	    !hw->func_caps.led[idx])
		return 0;
	gpio_val = rd32(hw, I40E_GLGEN_GPIO_CTL(idx));
	port = (gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK) >>
		I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT;

	/* if PRT_NUM_NA is 1 then this LED is not port specific, OR
	 * if it is not our port then ignore
	 */
	if ((gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_MASK) ||
	    (port != hw->port))
		return 0;

	return gpio_val;
}

#define I40E_FW_LED		BIT(4)
#define I40E_LED_MODE_VALID	(I40E_GLGEN_GPIO_CTL_LED_MODE_MASK >> \
				 I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT)

#define I40E_LED0		22

#define I40E_PIN_FUNC_SDP	0x0
#define I40E_PIN_FUNC_LED	0x1

/**
 * i40e_led_get - return current on/off mode
 * @hw: pointer to the hw struct
 *
 * The value returned is the 'mode' field as defined in the
 * GPIO register definitions: 0x0 = off, 0xf = on, and other
 * values are variations of possible behaviors relating to
 * blink, link, and wire.
 **/
u32 i40e_led_get(struct i40e_hw *hw)
{
	u32 mode = 0;
	int i;

	/* as per the documentation GPIO 22-29 are the LED
	 * GPIO pins named LED0..LED7
	 */
	for (i = I40E_LED0; i <= I40E_GLGEN_GPIO_CTL_MAX_INDEX; i++) {
		u32 gpio_val = i40e_led_is_mine(hw, i);

		if (!gpio_val)
			continue;

		mode = (gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK) >>
		       I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT;
		break;
	}

	return mode;
}

/**
 * i40e_led_set - set new on/off mode
 * @hw: pointer to the hw struct
 * @mode: 0=off, 0xf=on (else see manual for mode details)
 * @blink: true if the LED should blink when on, false if steady
 *
 * If this function is used to turn the blink on, it should also
 * be used to disable the blink when restoring the original state.
 **/
void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
{
	int i;

	if (mode & ~I40E_LED_MODE_VALID) {
		hw_dbg(hw, "invalid mode passed in %X\n", mode);
		return;
	}

	/* as per the documentation GPIO 22-29 are the LED
	 * GPIO pins named LED0..LED7
	 */
	for (i = I40E_LED0; i <= I40E_GLGEN_GPIO_CTL_MAX_INDEX; i++) {
		u32 gpio_val = i40e_led_is_mine(hw, i);

		if (!gpio_val)
			continue;

		if (I40E_IS_X710TL_DEVICE(hw->device_id)) {
			u32 pin_func = 0;

			if (mode & I40E_FW_LED)
				pin_func = I40E_PIN_FUNC_SDP;
			else
				pin_func = I40E_PIN_FUNC_LED;

			gpio_val &= ~I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK;
			gpio_val |= ((pin_func <<
				     I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT) &
				     I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK);
		}
		gpio_val &= ~I40E_GLGEN_GPIO_CTL_LED_MODE_MASK;
		/* this & is a bit of paranoia, but serves as a range check */
		gpio_val |= ((mode << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) &
			     I40E_GLGEN_GPIO_CTL_LED_MODE_MASK);

		if (blink)
			gpio_val |= BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
		else
			gpio_val &= ~BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);

		wr32(hw, I40E_GLGEN_GPIO_CTL(i), gpio_val);
		break;
	}
}

/* Admin command wrappers */

/**
 * i40e_aq_get_phy_capabilities
 * @hw: pointer to the hw struct
 * @abilities: structure for PHY capabilities to be filled
 * @qualified_modules: report Qualified Modules
 * @report_init: report init capabilities (active are default)
 * @cmd_details: pointer to command details structure or NULL
 *
 * Returns the various PHY abilities supported on the Port.
 **/
i40e_status i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
					 bool qualified_modules, bool report_init,
					 struct i40e_aq_get_phy_abilities_resp *abilities,
					 struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	i40e_status status;
	u16 abilities_size = sizeof(struct i40e_aq_get_phy_abilities_resp);
	u16 max_delay = I40E_MAX_PHY_TIMEOUT, total_delay = 0;

	if (!abilities)
		return I40E_ERR_PARAM;

	do {
		i40e_fill_default_direct_cmd_desc(&desc,
						  i40e_aqc_opc_get_phy_abilities);

		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
		if (abilities_size > I40E_AQ_LARGE_BUF)
			desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);

		if (qualified_modules)
			desc.params.external.param0 |=
			cpu_to_le32(I40E_AQ_PHY_REPORT_QUALIFIED_MODULES);

		if (report_init)
			desc.params.external.param0 |=
			cpu_to_le32(I40E_AQ_PHY_REPORT_INITIAL_VALUES);

		status = i40e_asq_send_command(hw, &desc, abilities,
					       abilities_size, cmd_details);

		switch (hw->aq.asq_last_status) {
		case I40E_AQ_RC_EIO:
			status = I40E_ERR_UNKNOWN_PHY;
			break;
		case I40E_AQ_RC_EAGAIN:
			usleep_range(1000, 2000);
			total_delay++;
			status = I40E_ERR_TIMEOUT;
			break;
		/* also covers I40E_AQ_RC_OK */
		default:
			break;
		}

	} while ((hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN) &&
		 (total_delay < max_delay));

	if (status)
		return status;

	if (report_init) {
		if (hw->mac.type == I40E_MAC_XL710 &&
		    hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
		    hw->aq.api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710) {
			status = i40e_aq_get_link_info(hw, true, NULL, NULL);
		} else {
			hw->phy.phy_types = le32_to_cpu(abilities->phy_type);
			hw->phy.phy_types |=
					((u64)abilities->phy_type_ext << 32);
		}
	}

	return status;
}

/**
 * i40e_aq_set_phy_config
 * @hw: pointer to the hw struct
 * @config: structure with PHY configuration to be set
 * @cmd_details: pointer to command details structure or NULL
 *
 * Set the various PHY configuration parameters
 * supported on the Port. One or more of the Set PHY config parameters may be
 * ignored in an MFP mode as the PF may not have the privilege to set some
 * of the PHY Config parameters. This status will be indicated by the
 * command response.
 **/
enum i40e_status_code i40e_aq_set_phy_config(struct i40e_hw *hw,
				struct i40e_aq_set_phy_config *config,
				struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aq_set_phy_config *cmd =
		(struct i40e_aq_set_phy_config *)&desc.params.raw;
	enum i40e_status_code status;

	if (!config)
		return I40E_ERR_PARAM;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_set_phy_config);

	*cmd = *config;

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	return status;
}

static noinline_for_stack enum i40e_status_code
i40e_set_fc_status(struct i40e_hw *hw,
		   struct i40e_aq_get_phy_abilities_resp *abilities,
		   bool atomic_restart)
{
	struct i40e_aq_set_phy_config config;
	enum i40e_fc_mode fc_mode = hw->fc.requested_mode;
	u8 pause_mask = 0x0;

	switch (fc_mode) {
	case I40E_FC_FULL:
		pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_TX;
		pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_RX;
		break;
	case I40E_FC_RX_PAUSE:
		pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_RX;
		break;
	case I40E_FC_TX_PAUSE:
		pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_TX;
		break;
	default:
		break;
	}

	memset(&config, 0, sizeof(struct i40e_aq_set_phy_config));
	/* clear the old pause settings */
	config.abilities = abilities->abilities & ~(I40E_AQ_PHY_FLAG_PAUSE_TX) &
			   ~(I40E_AQ_PHY_FLAG_PAUSE_RX);
	/* set the new abilities */
	config.abilities |= pause_mask;
	/* If the abilities have not changed, there is nothing to set */
	if (config.abilities == abilities->abilities)
		return 0;

	/* Auto restart link so settings take effect */
	if (atomic_restart)
		config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
	/* Copy over all the old settings */
	config.phy_type = abilities->phy_type;
	config.phy_type_ext = abilities->phy_type_ext;
	config.link_speed = abilities->link_speed;
	config.eee_capability = abilities->eee_capability;
	config.eeer = abilities->eeer_val;
	config.low_power_ctrl = abilities->d3_lpan;
	config.fec_config = abilities->fec_cfg_curr_mod_ext_info &
			    I40E_AQ_PHY_FEC_CONFIG_MASK;

	return i40e_aq_set_phy_config(hw, &config, NULL);
}

/**
 * i40e_set_fc
 * @hw: pointer to the hw struct
 * @aq_failures: buffer to return AdminQ failure information
 * @atomic_restart: whether to enable atomic link restart
 *
 * Set the requested flow control mode using set_phy_config.
 **/
enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
				  bool atomic_restart)
{
	struct i40e_aq_get_phy_abilities_resp abilities;
	enum i40e_status_code status;

	*aq_failures = 0x0;

	/* Get the current phy config */
	status = i40e_aq_get_phy_capabilities(hw, false, false, &abilities,
					      NULL);
	if (status) {
		*aq_failures |= I40E_SET_FC_AQ_FAIL_GET;
		return status;
	}

	status = i40e_set_fc_status(hw, &abilities, atomic_restart);
	if (status)
		*aq_failures |= I40E_SET_FC_AQ_FAIL_SET;

	/* Update the link info */
	status = i40e_update_link_info(hw);
	if (status) {
		/* Wait a little bit (on 40G cards it sometimes takes a really
		 * long time for link to come back from the atomic reset)
		 * and try once more
		 */
		msleep(1000);
		status = i40e_update_link_info(hw);
	}
	if (status)
		*aq_failures |= I40E_SET_FC_AQ_FAIL_UPDATE;

	return status;
}

/**
 * i40e_aq_clear_pxe_mode
 * @hw: pointer to the hw struct
 * @cmd_details: pointer to command details structure or NULL
 *
 * Tell the firmware that the driver is taking over from PXE
 **/
i40e_status i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
				   struct i40e_asq_cmd_details *cmd_details)
{
	i40e_status status;
	struct i40e_aq_desc desc;
	struct i40e_aqc_clear_pxe *cmd =
		(struct i40e_aqc_clear_pxe *)&desc.params.raw;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_clear_pxe_mode);

	cmd->rx_cnt = 0x2;

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	wr32(hw, I40E_GLLAN_RCTL_0, 0x1);

	return status;
}

/**
 * i40e_aq_set_link_restart_an
 * @hw: pointer to the hw struct
 * @enable_link: if true: enable link, if false: disable link
 * @cmd_details: pointer to command details structure or NULL
 *
 * Sets up the link and restarts the Auto-Negotiation over the link.
 **/
i40e_status i40e_aq_set_link_restart_an(struct i40e_hw *hw,
					bool enable_link,
					struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_set_link_restart_an *cmd =
		(struct i40e_aqc_set_link_restart_an *)&desc.params.raw;
	i40e_status status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_set_link_restart_an);

	cmd->command = I40E_AQ_PHY_RESTART_AN;
	if (enable_link)
		cmd->command |= I40E_AQ_PHY_LINK_ENABLE;
	else
		cmd->command &= ~I40E_AQ_PHY_LINK_ENABLE;

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	return status;
}

/**
 * i40e_aq_get_link_info
 * @hw: pointer to the hw struct
 * @enable_lse: enable/disable LinkStatusEvent reporting
 * @link: pointer to link status structure - optional
 * @cmd_details: pointer to command details structure or NULL
 *
 * Returns the link status of the adapter.
1717 **/ 1718 i40e_status i40e_aq_get_link_info(struct i40e_hw *hw, 1719 bool enable_lse, struct i40e_link_status *link, 1720 struct i40e_asq_cmd_details *cmd_details) 1721 { 1722 struct i40e_aq_desc desc; 1723 struct i40e_aqc_get_link_status *resp = 1724 (struct i40e_aqc_get_link_status *)&desc.params.raw; 1725 struct i40e_link_status *hw_link_info = &hw->phy.link_info; 1726 i40e_status status; 1727 bool tx_pause, rx_pause; 1728 u16 command_flags; 1729 1730 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status); 1731 1732 if (enable_lse) 1733 command_flags = I40E_AQ_LSE_ENABLE; 1734 else 1735 command_flags = I40E_AQ_LSE_DISABLE; 1736 resp->command_flags = cpu_to_le16(command_flags); 1737 1738 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 1739 1740 if (status) 1741 goto aq_get_link_info_exit; 1742 1743 /* save off old link status information */ 1744 hw->phy.link_info_old = *hw_link_info; 1745 1746 /* update link status */ 1747 hw_link_info->phy_type = (enum i40e_aq_phy_type)resp->phy_type; 1748 hw->phy.media_type = i40e_get_media_type(hw); 1749 hw_link_info->link_speed = (enum i40e_aq_link_speed)resp->link_speed; 1750 hw_link_info->link_info = resp->link_info; 1751 hw_link_info->an_info = resp->an_info; 1752 hw_link_info->fec_info = resp->config & (I40E_AQ_CONFIG_FEC_KR_ENA | 1753 I40E_AQ_CONFIG_FEC_RS_ENA); 1754 hw_link_info->ext_info = resp->ext_info; 1755 hw_link_info->loopback = resp->loopback & I40E_AQ_LOOPBACK_MASK; 1756 hw_link_info->max_frame_size = le16_to_cpu(resp->max_frame_size); 1757 hw_link_info->pacing = resp->config & I40E_AQ_CONFIG_PACING_MASK; 1758 1759 /* update fc info */ 1760 tx_pause = !!(resp->an_info & I40E_AQ_LINK_PAUSE_TX); 1761 rx_pause = !!(resp->an_info & I40E_AQ_LINK_PAUSE_RX); 1762 if (tx_pause & rx_pause) 1763 hw->fc.current_mode = I40E_FC_FULL; 1764 else if (tx_pause) 1765 hw->fc.current_mode = I40E_FC_TX_PAUSE; 1766 else if (rx_pause) 1767 hw->fc.current_mode = I40E_FC_RX_PAUSE; 1768 else 1769 hw->fc.current_mode = I40E_FC_NONE; 1770 1771 if (resp->config & I40E_AQ_CONFIG_CRC_ENA) 1772 hw_link_info->crc_enable = true; 1773 else 1774 hw_link_info->crc_enable = false; 1775 1776 if (resp->command_flags & cpu_to_le16(I40E_AQ_LSE_IS_ENABLED)) 1777 hw_link_info->lse_enable = true; 1778 else 1779 hw_link_info->lse_enable = false; 1780 1781 if ((hw->mac.type == I40E_MAC_XL710) && 1782 (hw->aq.fw_maj_ver < 4 || (hw->aq.fw_maj_ver == 4 && 1783 hw->aq.fw_min_ver < 40)) && hw_link_info->phy_type == 0xE) 1784 hw_link_info->phy_type = I40E_PHY_TYPE_10GBASE_SFPP_CU; 1785 1786 if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE && 1787 hw->mac.type != I40E_MAC_X722) { 1788 __le32 tmp; 1789 1790 memcpy(&tmp, resp->link_type, sizeof(tmp)); 1791 hw->phy.phy_types = le32_to_cpu(tmp); 1792 hw->phy.phy_types |= ((u64)resp->link_type_ext << 32); 1793 } 1794 1795 /* save link status information */ 1796 if (link) 1797 *link = *hw_link_info; 1798 1799 /* flag cleared so helper functions don't call AQ again */ 1800 hw->phy.get_link_info = false; 1801 1802 aq_get_link_info_exit: 1803 return status; 1804 } 1805 1806 /** 1807 * i40e_aq_set_phy_int_mask 1808 * @hw: pointer to the hw struct 1809 * @mask: interrupt mask to be set 1810 * @cmd_details: pointer to command details structure or NULL 1811 * 1812 * Set link interrupt mask. 
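 *
 * Illustrative sketch (not part of the driver), assuming the I40E_AQ_EVENT_*
 * bits defined in i40e_adminq_cmd.h: the PF usually arms only the events it
 * actually handles, e.g.
 *
 *	i40e_status err;
 *
 *	err = i40e_aq_set_phy_int_mask(hw,
 *				       ~(I40E_AQ_EVENT_LINK_UPDOWN |
 *					 I40E_AQ_EVENT_MEDIA_NA), NULL);
 *	if (err)
 *		hw_dbg(hw, "set phy int mask failed: %d\n", err);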
 **/
i40e_status i40e_aq_set_phy_int_mask(struct i40e_hw *hw,
				     u16 mask,
				     struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_set_phy_int_mask *cmd =
		(struct i40e_aqc_set_phy_int_mask *)&desc.params.raw;
	i40e_status status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_set_phy_int_mask);

	cmd->event_mask = cpu_to_le16(mask);

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	return status;
}

/**
 * i40e_aq_set_phy_debug
 * @hw: pointer to the hw struct
 * @cmd_flags: debug command flags
 * @cmd_details: pointer to command details structure or NULL
 *
 * Set PHY debug command flags, e.g. to reset the external PHY.
 **/
i40e_status i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
				  struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_set_phy_debug *cmd =
		(struct i40e_aqc_set_phy_debug *)&desc.params.raw;
	i40e_status status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_set_phy_debug);

	cmd->command_flags = cmd_flags;

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	return status;
}

/**
 * i40e_is_aq_api_ver_ge
 * @aq: pointer to AdminQ info containing HW API version to compare
 * @maj: API major value
 * @min: API minor value
 *
 * Return true if the current HW API version is greater than or equal to
 * the provided major/minor version.
 **/
static bool i40e_is_aq_api_ver_ge(struct i40e_adminq_info *aq, u16 maj,
				  u16 min)
{
	return (aq->api_maj_ver > maj ||
		(aq->api_maj_ver == maj && aq->api_min_ver >= min));
}

/**
 * i40e_aq_add_vsi
 * @hw: pointer to the hw struct
 * @vsi_ctx: pointer to a vsi context struct
 * @cmd_details: pointer to command details structure or NULL
 *
 * Add a VSI context to the hardware.
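 *
 * Illustrative sketch (not part of the driver), assuming the
 * I40E_AQ_VSI_CONN_TYPE_NORMAL and I40E_AQ_VSI_TYPE_PF defines from
 * i40e_adminq_cmd.h; "uplink" is a hypothetical uplink SEID:
 *
 *	struct i40e_vsi_context ctxt = {};
 *
 *	ctxt.uplink_seid = uplink;
 *	ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
 *	ctxt.flags = I40E_AQ_VSI_TYPE_PF;
 *	if (!i40e_aq_add_vsi(hw, &ctxt, NULL))
 *		hw_dbg(hw, "added VSI, seid %d\n", ctxt.seid);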
1881 **/ 1882 i40e_status i40e_aq_add_vsi(struct i40e_hw *hw, 1883 struct i40e_vsi_context *vsi_ctx, 1884 struct i40e_asq_cmd_details *cmd_details) 1885 { 1886 struct i40e_aq_desc desc; 1887 struct i40e_aqc_add_get_update_vsi *cmd = 1888 (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw; 1889 struct i40e_aqc_add_get_update_vsi_completion *resp = 1890 (struct i40e_aqc_add_get_update_vsi_completion *) 1891 &desc.params.raw; 1892 i40e_status status; 1893 1894 i40e_fill_default_direct_cmd_desc(&desc, 1895 i40e_aqc_opc_add_vsi); 1896 1897 cmd->uplink_seid = cpu_to_le16(vsi_ctx->uplink_seid); 1898 cmd->connection_type = vsi_ctx->connection_type; 1899 cmd->vf_id = vsi_ctx->vf_num; 1900 cmd->vsi_flags = cpu_to_le16(vsi_ctx->flags); 1901 1902 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); 1903 1904 status = i40e_asq_send_command_atomic(hw, &desc, &vsi_ctx->info, 1905 sizeof(vsi_ctx->info), 1906 cmd_details, true); 1907 1908 if (status) 1909 goto aq_add_vsi_exit; 1910 1911 vsi_ctx->seid = le16_to_cpu(resp->seid); 1912 vsi_ctx->vsi_number = le16_to_cpu(resp->vsi_number); 1913 vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used); 1914 vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free); 1915 1916 aq_add_vsi_exit: 1917 return status; 1918 } 1919 1920 /** 1921 * i40e_aq_set_default_vsi 1922 * @hw: pointer to the hw struct 1923 * @seid: vsi number 1924 * @cmd_details: pointer to command details structure or NULL 1925 **/ 1926 i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw, 1927 u16 seid, 1928 struct i40e_asq_cmd_details *cmd_details) 1929 { 1930 struct i40e_aq_desc desc; 1931 struct i40e_aqc_set_vsi_promiscuous_modes *cmd = 1932 (struct i40e_aqc_set_vsi_promiscuous_modes *) 1933 &desc.params.raw; 1934 i40e_status status; 1935 1936 i40e_fill_default_direct_cmd_desc(&desc, 1937 i40e_aqc_opc_set_vsi_promiscuous_modes); 1938 1939 cmd->promiscuous_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT); 1940 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT); 1941 cmd->seid = cpu_to_le16(seid); 1942 1943 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 1944 1945 return status; 1946 } 1947 1948 /** 1949 * i40e_aq_clear_default_vsi 1950 * @hw: pointer to the hw struct 1951 * @seid: vsi number 1952 * @cmd_details: pointer to command details structure or NULL 1953 **/ 1954 i40e_status i40e_aq_clear_default_vsi(struct i40e_hw *hw, 1955 u16 seid, 1956 struct i40e_asq_cmd_details *cmd_details) 1957 { 1958 struct i40e_aq_desc desc; 1959 struct i40e_aqc_set_vsi_promiscuous_modes *cmd = 1960 (struct i40e_aqc_set_vsi_promiscuous_modes *) 1961 &desc.params.raw; 1962 i40e_status status; 1963 1964 i40e_fill_default_direct_cmd_desc(&desc, 1965 i40e_aqc_opc_set_vsi_promiscuous_modes); 1966 1967 cmd->promiscuous_flags = cpu_to_le16(0); 1968 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT); 1969 cmd->seid = cpu_to_le16(seid); 1970 1971 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 1972 1973 return status; 1974 } 1975 1976 /** 1977 * i40e_aq_set_vsi_unicast_promiscuous 1978 * @hw: pointer to the hw struct 1979 * @seid: vsi number 1980 * @set: set unicast promiscuous enable/disable 1981 * @cmd_details: pointer to command details structure or NULL 1982 * @rx_only_promisc: flag to decide if egress traffic gets mirrored in promisc 1983 **/ 1984 i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw, 1985 u16 seid, bool set, 1986 struct i40e_asq_cmd_details *cmd_details, 1987 bool rx_only_promisc) 1988 { 1989 struct i40e_aq_desc desc; 1990 
	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
		(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
	i40e_status status;
	u16 flags = 0;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_set_vsi_promiscuous_modes);

	if (set) {
		flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
		if (rx_only_promisc && i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
			flags |= I40E_AQC_SET_VSI_PROMISC_RX_ONLY;
	}

	cmd->promiscuous_flags = cpu_to_le16(flags);

	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
	if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
		cmd->valid_flags |=
			cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_RX_ONLY);

	cmd->seid = cpu_to_le16(seid);
	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	return status;
}

/**
 * i40e_aq_set_vsi_multicast_promiscuous
 * @hw: pointer to the hw struct
 * @seid: vsi number
 * @set: set multicast promiscuous enable/disable
 * @cmd_details: pointer to command details structure or NULL
 **/
i40e_status i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
				u16 seid, bool set,
				struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
		(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
	i40e_status status;
	u16 flags = 0;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_set_vsi_promiscuous_modes);

	if (set)
		flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST;

	cmd->promiscuous_flags = cpu_to_le16(flags);

	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_MULTICAST);

	cmd->seid = cpu_to_le16(seid);
	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	return status;
}

/**
 * i40e_aq_set_vsi_mc_promisc_on_vlan
 * @hw: pointer to the hw struct
 * @seid: vsi number
 * @enable: set MAC L2 layer multicast promiscuous enable/disable for a given VLAN
 * @vid: The VLAN tag filter - capture any multicast packet with this VLAN tag
 * @cmd_details: pointer to command details structure or NULL
 **/
enum i40e_status_code i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
							 u16 seid, bool enable,
							 u16 vid,
				struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
		(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
	enum i40e_status_code status;
	u16 flags = 0;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_set_vsi_promiscuous_modes);

	if (enable)
		flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST;

	cmd->promiscuous_flags = cpu_to_le16(flags);
	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_MULTICAST);
	cmd->seid = cpu_to_le16(seid);
	cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID);

	status = i40e_asq_send_command_atomic(hw, &desc, NULL, 0,
					      cmd_details, true);

	return status;
}

/**
 * i40e_aq_set_vsi_uc_promisc_on_vlan
 * @hw: pointer to the hw struct
 * @seid: vsi number
 * @enable: set MAC L2 layer unicast promiscuous enable/disable for a given VLAN
 * @vid: The VLAN tag filter - capture any unicast packet with this VLAN tag
 * @cmd_details: pointer to command details structure or NULL
 **/
enum i40e_status_code
i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw, 2094 u16 seid, bool enable, 2095 u16 vid, 2096 struct i40e_asq_cmd_details *cmd_details) 2097 { 2098 struct i40e_aq_desc desc; 2099 struct i40e_aqc_set_vsi_promiscuous_modes *cmd = 2100 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; 2101 enum i40e_status_code status; 2102 u16 flags = 0; 2103 2104 i40e_fill_default_direct_cmd_desc(&desc, 2105 i40e_aqc_opc_set_vsi_promiscuous_modes); 2106 2107 if (enable) { 2108 flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST; 2109 if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5)) 2110 flags |= I40E_AQC_SET_VSI_PROMISC_RX_ONLY; 2111 } 2112 2113 cmd->promiscuous_flags = cpu_to_le16(flags); 2114 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST); 2115 if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5)) 2116 cmd->valid_flags |= 2117 cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_RX_ONLY); 2118 cmd->seid = cpu_to_le16(seid); 2119 cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID); 2120 2121 status = i40e_asq_send_command_atomic(hw, &desc, NULL, 0, 2122 cmd_details, true); 2123 2124 return status; 2125 } 2126 2127 /** 2128 * i40e_aq_set_vsi_bc_promisc_on_vlan 2129 * @hw: pointer to the hw struct 2130 * @seid: vsi number 2131 * @enable: set broadcast promiscuous enable/disable for a given VLAN 2132 * @vid: The VLAN tag filter - capture any broadcast packet with this VLAN tag 2133 * @cmd_details: pointer to command details structure or NULL 2134 **/ 2135 i40e_status i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw, 2136 u16 seid, bool enable, u16 vid, 2137 struct i40e_asq_cmd_details *cmd_details) 2138 { 2139 struct i40e_aq_desc desc; 2140 struct i40e_aqc_set_vsi_promiscuous_modes *cmd = 2141 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; 2142 i40e_status status; 2143 u16 flags = 0; 2144 2145 i40e_fill_default_direct_cmd_desc(&desc, 2146 i40e_aqc_opc_set_vsi_promiscuous_modes); 2147 2148 if (enable) 2149 flags |= I40E_AQC_SET_VSI_PROMISC_BROADCAST; 2150 2151 cmd->promiscuous_flags = cpu_to_le16(flags); 2152 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST); 2153 cmd->seid = cpu_to_le16(seid); 2154 cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID); 2155 2156 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2157 2158 return status; 2159 } 2160 2161 /** 2162 * i40e_aq_set_vsi_broadcast 2163 * @hw: pointer to the hw struct 2164 * @seid: vsi number 2165 * @set_filter: true to set filter, false to clear filter 2166 * @cmd_details: pointer to command details structure or NULL 2167 * 2168 * Set or clear the broadcast promiscuous flag (filter) for a given VSI. 
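 *
 * Illustrative sketch (not part of the driver): temporarily accepting all
 * broadcast traffic on a VSI whose SEID is "seid" (hypothetical), then
 * restoring normal filtering:
 *
 *	i40e_aq_set_vsi_broadcast(hw, seid, true, NULL);
 *	... temporary broadcast-promiscuous window ...
 *	i40e_aq_set_vsi_broadcast(hw, seid, false, NULL);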
 **/
i40e_status i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
				u16 seid, bool set_filter,
				struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
		(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
	i40e_status status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_set_vsi_promiscuous_modes);

	if (set_filter)
		cmd->promiscuous_flags
			    |= cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
	else
		cmd->promiscuous_flags
			    &= cpu_to_le16(~I40E_AQC_SET_VSI_PROMISC_BROADCAST);

	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
	cmd->seid = cpu_to_le16(seid);
	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	return status;
}

/**
 * i40e_aq_set_vsi_vlan_promisc - control the VLAN promiscuous setting
 * @hw: pointer to the hw struct
 * @seid: vsi number
 * @enable: set VLAN promiscuous enable/disable for the given VSI
 * @cmd_details: pointer to command details structure or NULL
 **/
i40e_status i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw,
					 u16 seid, bool enable,
					 struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
		(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
	i40e_status status;
	u16 flags = 0;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_set_vsi_promiscuous_modes);
	if (enable)
		flags |= I40E_AQC_SET_VSI_PROMISC_VLAN;

	cmd->promiscuous_flags = cpu_to_le16(flags);
	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_VLAN);
	cmd->seid = cpu_to_le16(seid);

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	return status;
}

/**
 * i40e_aq_get_vsi_params - get VSI configuration info
 * @hw: pointer to the hw struct
 * @vsi_ctx: pointer to a vsi context struct
 * @cmd_details: pointer to command details structure or NULL
 **/
i40e_status i40e_aq_get_vsi_params(struct i40e_hw *hw,
				struct i40e_vsi_context *vsi_ctx,
				struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_add_get_update_vsi *cmd =
		(struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
	struct i40e_aqc_add_get_update_vsi_completion *resp =
		(struct i40e_aqc_add_get_update_vsi_completion *)
		&desc.params.raw;
	i40e_status status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_get_vsi_parameters);

	cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid);

	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);

	status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
				       sizeof(vsi_ctx->info), NULL);

	if (status)
		goto aq_get_vsi_params_exit;

	vsi_ctx->seid = le16_to_cpu(resp->seid);
	vsi_ctx->vsi_number = le16_to_cpu(resp->vsi_number);
	vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used);
	vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);

aq_get_vsi_params_exit:
	return status;
}

/**
 * i40e_aq_update_vsi_params
 * @hw: pointer to the hw struct
 * @vsi_ctx: pointer to a vsi context struct
 * @cmd_details: pointer to command details structure or NULL
 *
 * Update a VSI context.
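 *
 * Illustrative sketch (not part of the driver): the usual pattern is
 * read-modify-write - fetch the current context, adjust the relevant section
 * of vsi_ctx->info along with its valid_sections bits, then write it back;
 * "seid" is hypothetical:
 *
 *	struct i40e_vsi_context ctxt = {};
 *
 *	ctxt.seid = seid;
 *	if (!i40e_aq_get_vsi_params(hw, &ctxt, NULL)) {
 *		... adjust ctxt.info and ctxt.info.valid_sections ...
 *		i40e_aq_update_vsi_params(hw, &ctxt, NULL);
 *	}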
 **/
i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw,
				struct i40e_vsi_context *vsi_ctx,
				struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_add_get_update_vsi *cmd =
		(struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
	struct i40e_aqc_add_get_update_vsi_completion *resp =
		(struct i40e_aqc_add_get_update_vsi_completion *)
		&desc.params.raw;
	i40e_status status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_update_vsi_parameters);
	cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid);

	desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));

	status = i40e_asq_send_command_atomic(hw, &desc, &vsi_ctx->info,
					      sizeof(vsi_ctx->info),
					      cmd_details, true);

	vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used);
	vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);

	return status;
}

/**
 * i40e_aq_get_switch_config
 * @hw: pointer to the hardware structure
 * @buf: pointer to the result buffer
 * @buf_size: length of input buffer
 * @start_seid: seid to start for the report, 0 == beginning
 * @cmd_details: pointer to command details structure or NULL
 *
 * Fill the buf with switch configuration returned from AdminQ command
 **/
i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw,
				struct i40e_aqc_get_switch_config_resp *buf,
				u16 buf_size, u16 *start_seid,
				struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_switch_seid *scfg =
		(struct i40e_aqc_switch_seid *)&desc.params.raw;
	i40e_status status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_get_switch_config);
	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
	if (buf_size > I40E_AQ_LARGE_BUF)
		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
	scfg->seid = cpu_to_le16(*start_seid);

	status = i40e_asq_send_command(hw, &desc, buf, buf_size, cmd_details);
	*start_seid = le16_to_cpu(scfg->seid);

	return status;
}

/**
 * i40e_aq_set_switch_config
 * @hw: pointer to the hardware structure
 * @flags: bit flag values to set
 * @valid_flags: which bit flags to set
 * @mode: cloud filter mode
 * @cmd_details: pointer to command details structure or NULL
 *
 * Set switch configuration bits
 **/
enum i40e_status_code i40e_aq_set_switch_config(struct i40e_hw *hw,
						u16 flags,
						u16 valid_flags, u8 mode,
				struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_set_switch_config *scfg =
		(struct i40e_aqc_set_switch_config *)&desc.params.raw;
	enum i40e_status_code status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_set_switch_config);
	scfg->flags = cpu_to_le16(flags);
	scfg->valid_flags = cpu_to_le16(valid_flags);
	scfg->mode = mode;
	if (hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) {
		scfg->switch_tag = cpu_to_le16(hw->switch_tag);
		scfg->first_tag = cpu_to_le16(hw->first_tag);
		scfg->second_tag = cpu_to_le16(hw->second_tag);
	}
	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	return status;
}

/**
 * i40e_aq_get_firmware_version
 * @hw: pointer to the hw struct
 * @fw_major_version: firmware major version
 *
@fw_minor_version: firmware minor version 2377 * @fw_build: firmware build number 2378 * @api_major_version: major queue version 2379 * @api_minor_version: minor queue version 2380 * @cmd_details: pointer to command details structure or NULL 2381 * 2382 * Get the firmware version from the admin queue commands 2383 **/ 2384 i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw, 2385 u16 *fw_major_version, u16 *fw_minor_version, 2386 u32 *fw_build, 2387 u16 *api_major_version, u16 *api_minor_version, 2388 struct i40e_asq_cmd_details *cmd_details) 2389 { 2390 struct i40e_aq_desc desc; 2391 struct i40e_aqc_get_version *resp = 2392 (struct i40e_aqc_get_version *)&desc.params.raw; 2393 i40e_status status; 2394 2395 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_version); 2396 2397 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2398 2399 if (!status) { 2400 if (fw_major_version) 2401 *fw_major_version = le16_to_cpu(resp->fw_major); 2402 if (fw_minor_version) 2403 *fw_minor_version = le16_to_cpu(resp->fw_minor); 2404 if (fw_build) 2405 *fw_build = le32_to_cpu(resp->fw_build); 2406 if (api_major_version) 2407 *api_major_version = le16_to_cpu(resp->api_major); 2408 if (api_minor_version) 2409 *api_minor_version = le16_to_cpu(resp->api_minor); 2410 } 2411 2412 return status; 2413 } 2414 2415 /** 2416 * i40e_aq_send_driver_version 2417 * @hw: pointer to the hw struct 2418 * @dv: driver's major, minor version 2419 * @cmd_details: pointer to command details structure or NULL 2420 * 2421 * Send the driver version to the firmware 2422 **/ 2423 i40e_status i40e_aq_send_driver_version(struct i40e_hw *hw, 2424 struct i40e_driver_version *dv, 2425 struct i40e_asq_cmd_details *cmd_details) 2426 { 2427 struct i40e_aq_desc desc; 2428 struct i40e_aqc_driver_version *cmd = 2429 (struct i40e_aqc_driver_version *)&desc.params.raw; 2430 i40e_status status; 2431 u16 len; 2432 2433 if (dv == NULL) 2434 return I40E_ERR_PARAM; 2435 2436 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_driver_version); 2437 2438 desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD); 2439 cmd->driver_major_ver = dv->major_version; 2440 cmd->driver_minor_ver = dv->minor_version; 2441 cmd->driver_build_ver = dv->build_version; 2442 cmd->driver_subbuild_ver = dv->subbuild_version; 2443 2444 len = 0; 2445 while (len < sizeof(dv->driver_string) && 2446 (dv->driver_string[len] < 0x80) && 2447 dv->driver_string[len]) 2448 len++; 2449 status = i40e_asq_send_command(hw, &desc, dv->driver_string, 2450 len, cmd_details); 2451 2452 return status; 2453 } 2454 2455 /** 2456 * i40e_get_link_status - get status of the HW network link 2457 * @hw: pointer to the hw struct 2458 * @link_up: pointer to bool (true/false = linkup/linkdown) 2459 * 2460 * Variable link_up true if link is up, false if link is down. 
2461 * The variable link_up is invalid if returned value of status != 0 2462 * 2463 * Side effect: LinkStatusEvent reporting becomes enabled 2464 **/ 2465 i40e_status i40e_get_link_status(struct i40e_hw *hw, bool *link_up) 2466 { 2467 i40e_status status = 0; 2468 2469 if (hw->phy.get_link_info) { 2470 status = i40e_update_link_info(hw); 2471 2472 if (status) 2473 i40e_debug(hw, I40E_DEBUG_LINK, "get link failed: status %d\n", 2474 status); 2475 } 2476 2477 *link_up = hw->phy.link_info.link_info & I40E_AQ_LINK_UP; 2478 2479 return status; 2480 } 2481 2482 /** 2483 * i40e_update_link_info - update status of the HW network link 2484 * @hw: pointer to the hw struct 2485 **/ 2486 noinline_for_stack i40e_status i40e_update_link_info(struct i40e_hw *hw) 2487 { 2488 struct i40e_aq_get_phy_abilities_resp abilities; 2489 i40e_status status = 0; 2490 2491 status = i40e_aq_get_link_info(hw, true, NULL, NULL); 2492 if (status) 2493 return status; 2494 2495 /* extra checking needed to ensure link info to user is timely */ 2496 if ((hw->phy.link_info.link_info & I40E_AQ_MEDIA_AVAILABLE) && 2497 ((hw->phy.link_info.link_info & I40E_AQ_LINK_UP) || 2498 !(hw->phy.link_info_old.link_info & I40E_AQ_LINK_UP))) { 2499 status = i40e_aq_get_phy_capabilities(hw, false, false, 2500 &abilities, NULL); 2501 if (status) 2502 return status; 2503 2504 if (abilities.fec_cfg_curr_mod_ext_info & 2505 I40E_AQ_ENABLE_FEC_AUTO) 2506 hw->phy.link_info.req_fec_info = 2507 (I40E_AQ_REQUEST_FEC_KR | 2508 I40E_AQ_REQUEST_FEC_RS); 2509 else 2510 hw->phy.link_info.req_fec_info = 2511 abilities.fec_cfg_curr_mod_ext_info & 2512 (I40E_AQ_REQUEST_FEC_KR | 2513 I40E_AQ_REQUEST_FEC_RS); 2514 2515 memcpy(hw->phy.link_info.module_type, &abilities.module_type, 2516 sizeof(hw->phy.link_info.module_type)); 2517 } 2518 2519 return status; 2520 } 2521 2522 /** 2523 * i40e_aq_add_veb - Insert a VEB between the VSI and the MAC 2524 * @hw: pointer to the hw struct 2525 * @uplink_seid: the MAC or other gizmo SEID 2526 * @downlink_seid: the VSI SEID 2527 * @enabled_tc: bitmap of TCs to be enabled 2528 * @default_port: true for default port VSI, false for control port 2529 * @veb_seid: pointer to where to put the resulting VEB SEID 2530 * @enable_stats: true to turn on VEB stats 2531 * @cmd_details: pointer to command details structure or NULL 2532 * 2533 * This asks the FW to add a VEB between the uplink and downlink 2534 * elements. If the uplink SEID is 0, this will be a floating VEB. 
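 *
 * Illustrative sketch (not part of the driver): creating a data-port VEB with
 * statistics enabled between a MAC uplink and a VSI downlink; the SEIDs and
 * TC bitmap are hypothetical:
 *
 *	u16 veb_seid = 0;
 *
 *	if (!i40e_aq_add_veb(hw, uplink_seid, vsi_seid, enabled_tc,
 *			     false, &veb_seid, true, NULL))
 *		hw_dbg(hw, "added VEB, seid %d\n", veb_seid);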
2535 **/ 2536 i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid, 2537 u16 downlink_seid, u8 enabled_tc, 2538 bool default_port, u16 *veb_seid, 2539 bool enable_stats, 2540 struct i40e_asq_cmd_details *cmd_details) 2541 { 2542 struct i40e_aq_desc desc; 2543 struct i40e_aqc_add_veb *cmd = 2544 (struct i40e_aqc_add_veb *)&desc.params.raw; 2545 struct i40e_aqc_add_veb_completion *resp = 2546 (struct i40e_aqc_add_veb_completion *)&desc.params.raw; 2547 i40e_status status; 2548 u16 veb_flags = 0; 2549 2550 /* SEIDs need to either both be set or both be 0 for floating VEB */ 2551 if (!!uplink_seid != !!downlink_seid) 2552 return I40E_ERR_PARAM; 2553 2554 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_veb); 2555 2556 cmd->uplink_seid = cpu_to_le16(uplink_seid); 2557 cmd->downlink_seid = cpu_to_le16(downlink_seid); 2558 cmd->enable_tcs = enabled_tc; 2559 if (!uplink_seid) 2560 veb_flags |= I40E_AQC_ADD_VEB_FLOATING; 2561 if (default_port) 2562 veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT; 2563 else 2564 veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DATA; 2565 2566 /* reverse logic here: set the bitflag to disable the stats */ 2567 if (!enable_stats) 2568 veb_flags |= I40E_AQC_ADD_VEB_ENABLE_DISABLE_STATS; 2569 2570 cmd->veb_flags = cpu_to_le16(veb_flags); 2571 2572 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2573 2574 if (!status && veb_seid) 2575 *veb_seid = le16_to_cpu(resp->veb_seid); 2576 2577 return status; 2578 } 2579 2580 /** 2581 * i40e_aq_get_veb_parameters - Retrieve VEB parameters 2582 * @hw: pointer to the hw struct 2583 * @veb_seid: the SEID of the VEB to query 2584 * @switch_id: the uplink switch id 2585 * @floating: set to true if the VEB is floating 2586 * @statistic_index: index of the stats counter block for this VEB 2587 * @vebs_used: number of VEB's used by function 2588 * @vebs_free: total VEB's not reserved by any function 2589 * @cmd_details: pointer to command details structure or NULL 2590 * 2591 * This retrieves the parameters for a particular VEB, specified by 2592 * uplink_seid, and returns them to the caller. 
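 *
 * Illustrative sketch (not part of the driver): any output pointer that the
 * caller does not need may be NULL; "veb_seid" is hypothetical:
 *
 *	u16 stats_idx, vebs_used, vebs_free;
 *	bool floating;
 *
 *	i40e_aq_get_veb_parameters(hw, veb_seid, NULL, &floating, &stats_idx,
 *				   &vebs_used, &vebs_free, NULL);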
2593 **/ 2594 i40e_status i40e_aq_get_veb_parameters(struct i40e_hw *hw, 2595 u16 veb_seid, u16 *switch_id, 2596 bool *floating, u16 *statistic_index, 2597 u16 *vebs_used, u16 *vebs_free, 2598 struct i40e_asq_cmd_details *cmd_details) 2599 { 2600 struct i40e_aq_desc desc; 2601 struct i40e_aqc_get_veb_parameters_completion *cmd_resp = 2602 (struct i40e_aqc_get_veb_parameters_completion *) 2603 &desc.params.raw; 2604 i40e_status status; 2605 2606 if (veb_seid == 0) 2607 return I40E_ERR_PARAM; 2608 2609 i40e_fill_default_direct_cmd_desc(&desc, 2610 i40e_aqc_opc_get_veb_parameters); 2611 cmd_resp->seid = cpu_to_le16(veb_seid); 2612 2613 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2614 if (status) 2615 goto get_veb_exit; 2616 2617 if (switch_id) 2618 *switch_id = le16_to_cpu(cmd_resp->switch_id); 2619 if (statistic_index) 2620 *statistic_index = le16_to_cpu(cmd_resp->statistic_index); 2621 if (vebs_used) 2622 *vebs_used = le16_to_cpu(cmd_resp->vebs_used); 2623 if (vebs_free) 2624 *vebs_free = le16_to_cpu(cmd_resp->vebs_free); 2625 if (floating) { 2626 u16 flags = le16_to_cpu(cmd_resp->veb_flags); 2627 2628 if (flags & I40E_AQC_ADD_VEB_FLOATING) 2629 *floating = true; 2630 else 2631 *floating = false; 2632 } 2633 2634 get_veb_exit: 2635 return status; 2636 } 2637 2638 /** 2639 * i40e_prepare_add_macvlan 2640 * @mv_list: list of macvlans to be added 2641 * @desc: pointer to AQ descriptor structure 2642 * @count: length of the list 2643 * @seid: VSI for the mac address 2644 * 2645 * Internal helper function that prepares the add macvlan request 2646 * and returns the buffer size. 2647 **/ 2648 static u16 2649 i40e_prepare_add_macvlan(struct i40e_aqc_add_macvlan_element_data *mv_list, 2650 struct i40e_aq_desc *desc, u16 count, u16 seid) 2651 { 2652 struct i40e_aqc_macvlan *cmd = 2653 (struct i40e_aqc_macvlan *)&desc->params.raw; 2654 u16 buf_size; 2655 int i; 2656 2657 buf_size = count * sizeof(*mv_list); 2658 2659 /* prep the rest of the request */ 2660 i40e_fill_default_direct_cmd_desc(desc, i40e_aqc_opc_add_macvlan); 2661 cmd->num_addresses = cpu_to_le16(count); 2662 cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid); 2663 cmd->seid[1] = 0; 2664 cmd->seid[2] = 0; 2665 2666 for (i = 0; i < count; i++) 2667 if (is_multicast_ether_addr(mv_list[i].mac_addr)) 2668 mv_list[i].flags |= 2669 cpu_to_le16(I40E_AQC_MACVLAN_ADD_USE_SHARED_MAC); 2670 2671 desc->flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); 2672 if (buf_size > I40E_AQ_LARGE_BUF) 2673 desc->flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 2674 2675 return buf_size; 2676 } 2677 2678 /** 2679 * i40e_aq_add_macvlan 2680 * @hw: pointer to the hw struct 2681 * @seid: VSI for the mac address 2682 * @mv_list: list of macvlans to be added 2683 * @count: length of the list 2684 * @cmd_details: pointer to command details structure or NULL 2685 * 2686 * Add MAC/VLAN addresses to the HW filtering 2687 **/ 2688 i40e_status 2689 i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid, 2690 struct i40e_aqc_add_macvlan_element_data *mv_list, 2691 u16 count, struct i40e_asq_cmd_details *cmd_details) 2692 { 2693 struct i40e_aq_desc desc; 2694 u16 buf_size; 2695 2696 if (count == 0 || !mv_list || !hw) 2697 return I40E_ERR_PARAM; 2698 2699 buf_size = i40e_prepare_add_macvlan(mv_list, &desc, count, seid); 2700 2701 return i40e_asq_send_command_atomic(hw, &desc, mv_list, buf_size, 2702 cmd_details, true); 2703 } 2704 2705 /** 2706 * i40e_aq_add_macvlan_v2 2707 * @hw: pointer to the hw struct 2708 * @seid: VSI for the mac 
address 2709 * @mv_list: list of macvlans to be added 2710 * @count: length of the list 2711 * @cmd_details: pointer to command details structure or NULL 2712 * @aq_status: pointer to Admin Queue status return value 2713 * 2714 * Add MAC/VLAN addresses to the HW filtering. 2715 * The _v2 version returns the last Admin Queue status in aq_status 2716 * to avoid race conditions in access to hw->aq.asq_last_status. 2717 * It also calls _v2 versions of asq_send_command functions to 2718 * get the aq_status on the stack. 2719 **/ 2720 i40e_status 2721 i40e_aq_add_macvlan_v2(struct i40e_hw *hw, u16 seid, 2722 struct i40e_aqc_add_macvlan_element_data *mv_list, 2723 u16 count, struct i40e_asq_cmd_details *cmd_details, 2724 enum i40e_admin_queue_err *aq_status) 2725 { 2726 struct i40e_aq_desc desc; 2727 u16 buf_size; 2728 2729 if (count == 0 || !mv_list || !hw) 2730 return I40E_ERR_PARAM; 2731 2732 buf_size = i40e_prepare_add_macvlan(mv_list, &desc, count, seid); 2733 2734 return i40e_asq_send_command_atomic_v2(hw, &desc, mv_list, buf_size, 2735 cmd_details, true, aq_status); 2736 } 2737 2738 /** 2739 * i40e_aq_remove_macvlan 2740 * @hw: pointer to the hw struct 2741 * @seid: VSI for the mac address 2742 * @mv_list: list of macvlans to be removed 2743 * @count: length of the list 2744 * @cmd_details: pointer to command details structure or NULL 2745 * 2746 * Remove MAC/VLAN addresses from the HW filtering 2747 **/ 2748 i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid, 2749 struct i40e_aqc_remove_macvlan_element_data *mv_list, 2750 u16 count, struct i40e_asq_cmd_details *cmd_details) 2751 { 2752 struct i40e_aq_desc desc; 2753 struct i40e_aqc_macvlan *cmd = 2754 (struct i40e_aqc_macvlan *)&desc.params.raw; 2755 i40e_status status; 2756 u16 buf_size; 2757 2758 if (count == 0 || !mv_list || !hw) 2759 return I40E_ERR_PARAM; 2760 2761 buf_size = count * sizeof(*mv_list); 2762 2763 /* prep the rest of the request */ 2764 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_macvlan); 2765 cmd->num_addresses = cpu_to_le16(count); 2766 cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid); 2767 cmd->seid[1] = 0; 2768 cmd->seid[2] = 0; 2769 2770 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); 2771 if (buf_size > I40E_AQ_LARGE_BUF) 2772 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 2773 2774 status = i40e_asq_send_command_atomic(hw, &desc, mv_list, buf_size, 2775 cmd_details, true); 2776 2777 return status; 2778 } 2779 2780 /** 2781 * i40e_aq_remove_macvlan_v2 2782 * @hw: pointer to the hw struct 2783 * @seid: VSI for the mac address 2784 * @mv_list: list of macvlans to be removed 2785 * @count: length of the list 2786 * @cmd_details: pointer to command details structure or NULL 2787 * @aq_status: pointer to Admin Queue status return value 2788 * 2789 * Remove MAC/VLAN addresses from the HW filtering. 2790 * The _v2 version returns the last Admin Queue status in aq_status 2791 * to avoid race conditions in access to hw->aq.asq_last_status. 2792 * It also calls _v2 versions of asq_send_command functions to 2793 * get the aq_status on the stack. 
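 *
 * Illustrative sketch (not part of the driver), assuming the
 * I40E_AQC_MACVLAN_DEL_PERFECT_MATCH flag from i40e_adminq_cmd.h: removing a
 * single perfect-match filter and checking the per-command AQ status; "mac"
 * and "vsi_seid" are hypothetical:
 *
 *	struct i40e_aqc_remove_macvlan_element_data el = {};
 *	enum i40e_admin_queue_err aq_status;
 *
 *	ether_addr_copy(el.mac_addr, mac);
 *	el.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
 *	i40e_aq_remove_macvlan_v2(hw, vsi_seid, &el, 1, NULL, &aq_status);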
2794 **/ 2795 i40e_status 2796 i40e_aq_remove_macvlan_v2(struct i40e_hw *hw, u16 seid, 2797 struct i40e_aqc_remove_macvlan_element_data *mv_list, 2798 u16 count, struct i40e_asq_cmd_details *cmd_details, 2799 enum i40e_admin_queue_err *aq_status) 2800 { 2801 struct i40e_aqc_macvlan *cmd; 2802 struct i40e_aq_desc desc; 2803 u16 buf_size; 2804 2805 if (count == 0 || !mv_list || !hw) 2806 return I40E_ERR_PARAM; 2807 2808 buf_size = count * sizeof(*mv_list); 2809 2810 /* prep the rest of the request */ 2811 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_macvlan); 2812 cmd = (struct i40e_aqc_macvlan *)&desc.params.raw; 2813 cmd->num_addresses = cpu_to_le16(count); 2814 cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid); 2815 cmd->seid[1] = 0; 2816 cmd->seid[2] = 0; 2817 2818 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); 2819 if (buf_size > I40E_AQ_LARGE_BUF) 2820 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 2821 2822 return i40e_asq_send_command_atomic_v2(hw, &desc, mv_list, buf_size, 2823 cmd_details, true, aq_status); 2824 } 2825 2826 /** 2827 * i40e_mirrorrule_op - Internal helper function to add/delete mirror rule 2828 * @hw: pointer to the hw struct 2829 * @opcode: AQ opcode for add or delete mirror rule 2830 * @sw_seid: Switch SEID (to which rule refers) 2831 * @rule_type: Rule Type (ingress/egress/VLAN) 2832 * @id: Destination VSI SEID or Rule ID 2833 * @count: length of the list 2834 * @mr_list: list of mirrored VSI SEIDs or VLAN IDs 2835 * @cmd_details: pointer to command details structure or NULL 2836 * @rule_id: Rule ID returned from FW 2837 * @rules_used: Number of rules used in internal switch 2838 * @rules_free: Number of rules free in internal switch 2839 * 2840 * Add/Delete a mirror rule to a specific switch. 
Mirror rules are supported for 2841 * VEBs/VEPA elements only 2842 **/ 2843 static i40e_status i40e_mirrorrule_op(struct i40e_hw *hw, 2844 u16 opcode, u16 sw_seid, u16 rule_type, u16 id, 2845 u16 count, __le16 *mr_list, 2846 struct i40e_asq_cmd_details *cmd_details, 2847 u16 *rule_id, u16 *rules_used, u16 *rules_free) 2848 { 2849 struct i40e_aq_desc desc; 2850 struct i40e_aqc_add_delete_mirror_rule *cmd = 2851 (struct i40e_aqc_add_delete_mirror_rule *)&desc.params.raw; 2852 struct i40e_aqc_add_delete_mirror_rule_completion *resp = 2853 (struct i40e_aqc_add_delete_mirror_rule_completion *)&desc.params.raw; 2854 i40e_status status; 2855 u16 buf_size; 2856 2857 buf_size = count * sizeof(*mr_list); 2858 2859 /* prep the rest of the request */ 2860 i40e_fill_default_direct_cmd_desc(&desc, opcode); 2861 cmd->seid = cpu_to_le16(sw_seid); 2862 cmd->rule_type = cpu_to_le16(rule_type & 2863 I40E_AQC_MIRROR_RULE_TYPE_MASK); 2864 cmd->num_entries = cpu_to_le16(count); 2865 /* Dest VSI for add, rule_id for delete */ 2866 cmd->destination = cpu_to_le16(id); 2867 if (mr_list) { 2868 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | 2869 I40E_AQ_FLAG_RD)); 2870 if (buf_size > I40E_AQ_LARGE_BUF) 2871 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 2872 } 2873 2874 status = i40e_asq_send_command(hw, &desc, mr_list, buf_size, 2875 cmd_details); 2876 if (!status || 2877 hw->aq.asq_last_status == I40E_AQ_RC_ENOSPC) { 2878 if (rule_id) 2879 *rule_id = le16_to_cpu(resp->rule_id); 2880 if (rules_used) 2881 *rules_used = le16_to_cpu(resp->mirror_rules_used); 2882 if (rules_free) 2883 *rules_free = le16_to_cpu(resp->mirror_rules_free); 2884 } 2885 return status; 2886 } 2887 2888 /** 2889 * i40e_aq_add_mirrorrule - add a mirror rule 2890 * @hw: pointer to the hw struct 2891 * @sw_seid: Switch SEID (to which rule refers) 2892 * @rule_type: Rule Type (ingress/egress/VLAN) 2893 * @dest_vsi: SEID of VSI to which packets will be mirrored 2894 * @count: length of the list 2895 * @mr_list: list of mirrored VSI SEIDs or VLAN IDs 2896 * @cmd_details: pointer to command details structure or NULL 2897 * @rule_id: Rule ID returned from FW 2898 * @rules_used: Number of rules used in internal switch 2899 * @rules_free: Number of rules free in internal switch 2900 * 2901 * Add mirror rule. Mirror rules are supported for VEBs or VEPA elements only 2902 **/ 2903 i40e_status i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid, 2904 u16 rule_type, u16 dest_vsi, u16 count, __le16 *mr_list, 2905 struct i40e_asq_cmd_details *cmd_details, 2906 u16 *rule_id, u16 *rules_used, u16 *rules_free) 2907 { 2908 if (!(rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS || 2909 rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS)) { 2910 if (count == 0 || !mr_list) 2911 return I40E_ERR_PARAM; 2912 } 2913 2914 return i40e_mirrorrule_op(hw, i40e_aqc_opc_add_mirror_rule, sw_seid, 2915 rule_type, dest_vsi, count, mr_list, 2916 cmd_details, rule_id, rules_used, rules_free); 2917 } 2918 2919 /** 2920 * i40e_aq_delete_mirrorrule - delete a mirror rule 2921 * @hw: pointer to the hw struct 2922 * @sw_seid: Switch SEID (to which rule refers) 2923 * @rule_type: Rule Type (ingress/egress/VLAN) 2924 * @count: length of the list 2925 * @rule_id: Rule ID that is returned in the receive desc as part of 2926 * add_mirrorrule. 
2927 * @mr_list: list of mirrored VLAN IDs to be removed 2928 * @cmd_details: pointer to command details structure or NULL 2929 * @rules_used: Number of rules used in internal switch 2930 * @rules_free: Number of rules free in internal switch 2931 * 2932 * Delete a mirror rule. Mirror rules are supported for VEBs/VEPA elements only 2933 **/ 2934 i40e_status i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid, 2935 u16 rule_type, u16 rule_id, u16 count, __le16 *mr_list, 2936 struct i40e_asq_cmd_details *cmd_details, 2937 u16 *rules_used, u16 *rules_free) 2938 { 2939 /* Rule ID has to be valid except rule_type: INGRESS VLAN mirroring */ 2940 if (rule_type == I40E_AQC_MIRROR_RULE_TYPE_VLAN) { 2941 /* count and mr_list shall be valid for rule_type INGRESS VLAN 2942 * mirroring. For other rule_type, count and rule_type should 2943 * not matter. 2944 */ 2945 if (count == 0 || !mr_list) 2946 return I40E_ERR_PARAM; 2947 } 2948 2949 return i40e_mirrorrule_op(hw, i40e_aqc_opc_delete_mirror_rule, sw_seid, 2950 rule_type, rule_id, count, mr_list, 2951 cmd_details, NULL, rules_used, rules_free); 2952 } 2953 2954 /** 2955 * i40e_aq_send_msg_to_vf 2956 * @hw: pointer to the hardware structure 2957 * @vfid: VF id to send msg 2958 * @v_opcode: opcodes for VF-PF communication 2959 * @v_retval: return error code 2960 * @msg: pointer to the msg buffer 2961 * @msglen: msg length 2962 * @cmd_details: pointer to command details 2963 * 2964 * send msg to vf 2965 **/ 2966 i40e_status i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid, 2967 u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen, 2968 struct i40e_asq_cmd_details *cmd_details) 2969 { 2970 struct i40e_aq_desc desc; 2971 struct i40e_aqc_pf_vf_message *cmd = 2972 (struct i40e_aqc_pf_vf_message *)&desc.params.raw; 2973 i40e_status status; 2974 2975 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_send_msg_to_vf); 2976 cmd->id = cpu_to_le32(vfid); 2977 desc.cookie_high = cpu_to_le32(v_opcode); 2978 desc.cookie_low = cpu_to_le32(v_retval); 2979 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_SI); 2980 if (msglen) { 2981 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | 2982 I40E_AQ_FLAG_RD)); 2983 if (msglen > I40E_AQ_LARGE_BUF) 2984 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 2985 desc.datalen = cpu_to_le16(msglen); 2986 } 2987 status = i40e_asq_send_command(hw, &desc, msg, msglen, cmd_details); 2988 2989 return status; 2990 } 2991 2992 /** 2993 * i40e_aq_debug_read_register 2994 * @hw: pointer to the hw struct 2995 * @reg_addr: register address 2996 * @reg_val: register value 2997 * @cmd_details: pointer to command details structure or NULL 2998 * 2999 * Read the register using the admin queue commands 3000 **/ 3001 i40e_status i40e_aq_debug_read_register(struct i40e_hw *hw, 3002 u32 reg_addr, u64 *reg_val, 3003 struct i40e_asq_cmd_details *cmd_details) 3004 { 3005 struct i40e_aq_desc desc; 3006 struct i40e_aqc_debug_reg_read_write *cmd_resp = 3007 (struct i40e_aqc_debug_reg_read_write *)&desc.params.raw; 3008 i40e_status status; 3009 3010 if (reg_val == NULL) 3011 return I40E_ERR_PARAM; 3012 3013 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_read_reg); 3014 3015 cmd_resp->address = cpu_to_le32(reg_addr); 3016 3017 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3018 3019 if (!status) { 3020 *reg_val = ((u64)le32_to_cpu(cmd_resp->value_high) << 32) | 3021 (u64)le32_to_cpu(cmd_resp->value_low); 3022 } 3023 3024 return status; 3025 } 3026 3027 /** 3028 * i40e_aq_debug_write_register 3029 * @hw: pointer to the hw struct 
3030 * @reg_addr: register address 3031 * @reg_val: register value 3032 * @cmd_details: pointer to command details structure or NULL 3033 * 3034 * Write to a register using the admin queue commands 3035 **/ 3036 i40e_status i40e_aq_debug_write_register(struct i40e_hw *hw, 3037 u32 reg_addr, u64 reg_val, 3038 struct i40e_asq_cmd_details *cmd_details) 3039 { 3040 struct i40e_aq_desc desc; 3041 struct i40e_aqc_debug_reg_read_write *cmd = 3042 (struct i40e_aqc_debug_reg_read_write *)&desc.params.raw; 3043 i40e_status status; 3044 3045 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_write_reg); 3046 3047 cmd->address = cpu_to_le32(reg_addr); 3048 cmd->value_high = cpu_to_le32((u32)(reg_val >> 32)); 3049 cmd->value_low = cpu_to_le32((u32)(reg_val & 0xFFFFFFFF)); 3050 3051 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3052 3053 return status; 3054 } 3055 3056 /** 3057 * i40e_aq_request_resource 3058 * @hw: pointer to the hw struct 3059 * @resource: resource id 3060 * @access: access type 3061 * @sdp_number: resource number 3062 * @timeout: the maximum time in ms that the driver may hold the resource 3063 * @cmd_details: pointer to command details structure or NULL 3064 * 3065 * requests common resource using the admin queue commands 3066 **/ 3067 i40e_status i40e_aq_request_resource(struct i40e_hw *hw, 3068 enum i40e_aq_resources_ids resource, 3069 enum i40e_aq_resource_access_type access, 3070 u8 sdp_number, u64 *timeout, 3071 struct i40e_asq_cmd_details *cmd_details) 3072 { 3073 struct i40e_aq_desc desc; 3074 struct i40e_aqc_request_resource *cmd_resp = 3075 (struct i40e_aqc_request_resource *)&desc.params.raw; 3076 i40e_status status; 3077 3078 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_request_resource); 3079 3080 cmd_resp->resource_id = cpu_to_le16(resource); 3081 cmd_resp->access_type = cpu_to_le16(access); 3082 cmd_resp->resource_number = cpu_to_le32(sdp_number); 3083 3084 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3085 /* The completion specifies the maximum time in ms that the driver 3086 * may hold the resource in the Timeout field. 3087 * If the resource is held by someone else, the command completes with 3088 * busy return value and the timeout field indicates the maximum time 3089 * the current owner of the resource has to free it. 
3090 */ 3091 if (!status || hw->aq.asq_last_status == I40E_AQ_RC_EBUSY) 3092 *timeout = le32_to_cpu(cmd_resp->timeout); 3093 3094 return status; 3095 } 3096 3097 /** 3098 * i40e_aq_release_resource 3099 * @hw: pointer to the hw struct 3100 * @resource: resource id 3101 * @sdp_number: resource number 3102 * @cmd_details: pointer to command details structure or NULL 3103 * 3104 * release common resource using the admin queue commands 3105 **/ 3106 i40e_status i40e_aq_release_resource(struct i40e_hw *hw, 3107 enum i40e_aq_resources_ids resource, 3108 u8 sdp_number, 3109 struct i40e_asq_cmd_details *cmd_details) 3110 { 3111 struct i40e_aq_desc desc; 3112 struct i40e_aqc_request_resource *cmd = 3113 (struct i40e_aqc_request_resource *)&desc.params.raw; 3114 i40e_status status; 3115 3116 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_release_resource); 3117 3118 cmd->resource_id = cpu_to_le16(resource); 3119 cmd->resource_number = cpu_to_le32(sdp_number); 3120 3121 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3122 3123 return status; 3124 } 3125 3126 /** 3127 * i40e_aq_read_nvm 3128 * @hw: pointer to the hw struct 3129 * @module_pointer: module pointer location in words from the NVM beginning 3130 * @offset: byte offset from the module beginning 3131 * @length: length of the section to be read (in bytes from the offset) 3132 * @data: command buffer (size [bytes] = length) 3133 * @last_command: tells if this is the last command in a series 3134 * @cmd_details: pointer to command details structure or NULL 3135 * 3136 * Read the NVM using the admin queue commands 3137 **/ 3138 i40e_status i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer, 3139 u32 offset, u16 length, void *data, 3140 bool last_command, 3141 struct i40e_asq_cmd_details *cmd_details) 3142 { 3143 struct i40e_aq_desc desc; 3144 struct i40e_aqc_nvm_update *cmd = 3145 (struct i40e_aqc_nvm_update *)&desc.params.raw; 3146 i40e_status status; 3147 3148 /* In offset the highest byte must be zeroed. */ 3149 if (offset & 0xFF000000) { 3150 status = I40E_ERR_PARAM; 3151 goto i40e_aq_read_nvm_exit; 3152 } 3153 3154 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_read); 3155 3156 /* If this is the last command in a series, set the proper flag. 
*/ 3157 if (last_command) 3158 cmd->command_flags |= I40E_AQ_NVM_LAST_CMD; 3159 cmd->module_pointer = module_pointer; 3160 cmd->offset = cpu_to_le32(offset); 3161 cmd->length = cpu_to_le16(length); 3162 3163 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 3164 if (length > I40E_AQ_LARGE_BUF) 3165 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 3166 3167 status = i40e_asq_send_command(hw, &desc, data, length, cmd_details); 3168 3169 i40e_aq_read_nvm_exit: 3170 return status; 3171 } 3172 3173 /** 3174 * i40e_aq_erase_nvm 3175 * @hw: pointer to the hw struct 3176 * @module_pointer: module pointer location in words from the NVM beginning 3177 * @offset: offset in the module (expressed in 4 KB from module's beginning) 3178 * @length: length of the section to be erased (expressed in 4 KB) 3179 * @last_command: tells if this is the last command in a series 3180 * @cmd_details: pointer to command details structure or NULL 3181 * 3182 * Erase the NVM sector using the admin queue commands 3183 **/ 3184 i40e_status i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer, 3185 u32 offset, u16 length, bool last_command, 3186 struct i40e_asq_cmd_details *cmd_details) 3187 { 3188 struct i40e_aq_desc desc; 3189 struct i40e_aqc_nvm_update *cmd = 3190 (struct i40e_aqc_nvm_update *)&desc.params.raw; 3191 i40e_status status; 3192 3193 /* In offset the highest byte must be zeroed. */ 3194 if (offset & 0xFF000000) { 3195 status = I40E_ERR_PARAM; 3196 goto i40e_aq_erase_nvm_exit; 3197 } 3198 3199 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_erase); 3200 3201 /* If this is the last command in a series, set the proper flag. */ 3202 if (last_command) 3203 cmd->command_flags |= I40E_AQ_NVM_LAST_CMD; 3204 cmd->module_pointer = module_pointer; 3205 cmd->offset = cpu_to_le32(offset); 3206 cmd->length = cpu_to_le16(length); 3207 3208 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3209 3210 i40e_aq_erase_nvm_exit: 3211 return status; 3212 } 3213 3214 /** 3215 * i40e_parse_discover_capabilities 3216 * @hw: pointer to the hw struct 3217 * @buff: pointer to a buffer containing device/function capability records 3218 * @cap_count: number of capability records in the list 3219 * @list_type_opc: type of capabilities list to parse 3220 * 3221 * Parse the device/function capabilities list. 
3222 **/ 3223 static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff, 3224 u32 cap_count, 3225 enum i40e_admin_queue_opc list_type_opc) 3226 { 3227 struct i40e_aqc_list_capabilities_element_resp *cap; 3228 u32 valid_functions, num_functions; 3229 u32 number, logical_id, phys_id; 3230 struct i40e_hw_capabilities *p; 3231 u16 id, ocp_cfg_word0; 3232 i40e_status status; 3233 u8 major_rev; 3234 u32 i = 0; 3235 3236 cap = (struct i40e_aqc_list_capabilities_element_resp *) buff; 3237 3238 if (list_type_opc == i40e_aqc_opc_list_dev_capabilities) 3239 p = &hw->dev_caps; 3240 else if (list_type_opc == i40e_aqc_opc_list_func_capabilities) 3241 p = &hw->func_caps; 3242 else 3243 return; 3244 3245 for (i = 0; i < cap_count; i++, cap++) { 3246 id = le16_to_cpu(cap->id); 3247 number = le32_to_cpu(cap->number); 3248 logical_id = le32_to_cpu(cap->logical_id); 3249 phys_id = le32_to_cpu(cap->phys_id); 3250 major_rev = cap->major_rev; 3251 3252 switch (id) { 3253 case I40E_AQ_CAP_ID_SWITCH_MODE: 3254 p->switch_mode = number; 3255 break; 3256 case I40E_AQ_CAP_ID_MNG_MODE: 3257 p->management_mode = number; 3258 if (major_rev > 1) { 3259 p->mng_protocols_over_mctp = logical_id; 3260 i40e_debug(hw, I40E_DEBUG_INIT, 3261 "HW Capability: Protocols over MCTP = %d\n", 3262 p->mng_protocols_over_mctp); 3263 } else { 3264 p->mng_protocols_over_mctp = 0; 3265 } 3266 break; 3267 case I40E_AQ_CAP_ID_NPAR_ACTIVE: 3268 p->npar_enable = number; 3269 break; 3270 case I40E_AQ_CAP_ID_OS2BMC_CAP: 3271 p->os2bmc = number; 3272 break; 3273 case I40E_AQ_CAP_ID_FUNCTIONS_VALID: 3274 p->valid_functions = number; 3275 break; 3276 case I40E_AQ_CAP_ID_SRIOV: 3277 if (number == 1) 3278 p->sr_iov_1_1 = true; 3279 break; 3280 case I40E_AQ_CAP_ID_VF: 3281 p->num_vfs = number; 3282 p->vf_base_id = logical_id; 3283 break; 3284 case I40E_AQ_CAP_ID_VMDQ: 3285 if (number == 1) 3286 p->vmdq = true; 3287 break; 3288 case I40E_AQ_CAP_ID_8021QBG: 3289 if (number == 1) 3290 p->evb_802_1_qbg = true; 3291 break; 3292 case I40E_AQ_CAP_ID_8021QBR: 3293 if (number == 1) 3294 p->evb_802_1_qbh = true; 3295 break; 3296 case I40E_AQ_CAP_ID_VSI: 3297 p->num_vsis = number; 3298 break; 3299 case I40E_AQ_CAP_ID_DCB: 3300 if (number == 1) { 3301 p->dcb = true; 3302 p->enabled_tcmap = logical_id; 3303 p->maxtc = phys_id; 3304 } 3305 break; 3306 case I40E_AQ_CAP_ID_FCOE: 3307 if (number == 1) 3308 p->fcoe = true; 3309 break; 3310 case I40E_AQ_CAP_ID_ISCSI: 3311 if (number == 1) 3312 p->iscsi = true; 3313 break; 3314 case I40E_AQ_CAP_ID_RSS: 3315 p->rss = true; 3316 p->rss_table_size = number; 3317 p->rss_table_entry_width = logical_id; 3318 break; 3319 case I40E_AQ_CAP_ID_RXQ: 3320 p->num_rx_qp = number; 3321 p->base_queue = phys_id; 3322 break; 3323 case I40E_AQ_CAP_ID_TXQ: 3324 p->num_tx_qp = number; 3325 p->base_queue = phys_id; 3326 break; 3327 case I40E_AQ_CAP_ID_MSIX: 3328 p->num_msix_vectors = number; 3329 i40e_debug(hw, I40E_DEBUG_INIT, 3330 "HW Capability: MSIX vector count = %d\n", 3331 p->num_msix_vectors); 3332 break; 3333 case I40E_AQ_CAP_ID_VF_MSIX: 3334 p->num_msix_vectors_vf = number; 3335 break; 3336 case I40E_AQ_CAP_ID_FLEX10: 3337 if (major_rev == 1) { 3338 if (number == 1) { 3339 p->flex10_enable = true; 3340 p->flex10_capable = true; 3341 } 3342 } else { 3343 /* Capability revision >= 2 */ 3344 if (number & 1) 3345 p->flex10_enable = true; 3346 if (number & 2) 3347 p->flex10_capable = true; 3348 } 3349 p->flex10_mode = logical_id; 3350 p->flex10_status = phys_id; 3351 break; 3352 case I40E_AQ_CAP_ID_CEM: 3353 if (number == 
1) 3354 p->mgmt_cem = true; 3355 break; 3356 case I40E_AQ_CAP_ID_IWARP: 3357 if (number == 1) 3358 p->iwarp = true; 3359 break; 3360 case I40E_AQ_CAP_ID_LED: 3361 if (phys_id < I40E_HW_CAP_MAX_GPIO) 3362 p->led[phys_id] = true; 3363 break; 3364 case I40E_AQ_CAP_ID_SDP: 3365 if (phys_id < I40E_HW_CAP_MAX_GPIO) 3366 p->sdp[phys_id] = true; 3367 break; 3368 case I40E_AQ_CAP_ID_MDIO: 3369 if (number == 1) { 3370 p->mdio_port_num = phys_id; 3371 p->mdio_port_mode = logical_id; 3372 } 3373 break; 3374 case I40E_AQ_CAP_ID_1588: 3375 if (number == 1) 3376 p->ieee_1588 = true; 3377 break; 3378 case I40E_AQ_CAP_ID_FLOW_DIRECTOR: 3379 p->fd = true; 3380 p->fd_filters_guaranteed = number; 3381 p->fd_filters_best_effort = logical_id; 3382 break; 3383 case I40E_AQ_CAP_ID_WSR_PROT: 3384 p->wr_csr_prot = (u64)number; 3385 p->wr_csr_prot |= (u64)logical_id << 32; 3386 break; 3387 case I40E_AQ_CAP_ID_NVM_MGMT: 3388 if (number & I40E_NVM_MGMT_SEC_REV_DISABLED) 3389 p->sec_rev_disabled = true; 3390 if (number & I40E_NVM_MGMT_UPDATE_DISABLED) 3391 p->update_disabled = true; 3392 break; 3393 default: 3394 break; 3395 } 3396 } 3397 3398 if (p->fcoe) 3399 i40e_debug(hw, I40E_DEBUG_ALL, "device is FCoE capable\n"); 3400 3401 /* Software override ensuring FCoE is disabled if npar or mfp 3402 * mode because it is not supported in these modes. 3403 */ 3404 if (p->npar_enable || p->flex10_enable) 3405 p->fcoe = false; 3406 3407 /* count the enabled ports (aka the "not disabled" ports) */ 3408 hw->num_ports = 0; 3409 for (i = 0; i < 4; i++) { 3410 u32 port_cfg_reg = I40E_PRTGEN_CNF + (4 * i); 3411 u64 port_cfg = 0; 3412 3413 /* use AQ read to get the physical register offset instead 3414 * of the port relative offset 3415 */ 3416 i40e_aq_debug_read_register(hw, port_cfg_reg, &port_cfg, NULL); 3417 if (!(port_cfg & I40E_PRTGEN_CNF_PORT_DIS_MASK)) 3418 hw->num_ports++; 3419 } 3420 3421 /* OCP cards case: if a mezz is removed the Ethernet port is at 3422 * disabled state in PRTGEN_CNF register. Additional NVM read is 3423 * needed in order to check if we are dealing with OCP card. 3424 * Those cards have 4 PFs at minimum, so using PRTGEN_CNF for counting 3425 * physical ports results in wrong partition id calculation and thus 3426 * not supporting WoL. 
3427 */ 3428 if (hw->mac.type == I40E_MAC_X722) { 3429 if (!i40e_acquire_nvm(hw, I40E_RESOURCE_READ)) { 3430 status = i40e_aq_read_nvm(hw, I40E_SR_EMP_MODULE_PTR, 3431 2 * I40E_SR_OCP_CFG_WORD0, 3432 sizeof(ocp_cfg_word0), 3433 &ocp_cfg_word0, true, NULL); 3434 if (!status && 3435 (ocp_cfg_word0 & I40E_SR_OCP_ENABLED)) 3436 hw->num_ports = 4; 3437 i40e_release_nvm(hw); 3438 } 3439 } 3440 3441 valid_functions = p->valid_functions; 3442 num_functions = 0; 3443 while (valid_functions) { 3444 if (valid_functions & 1) 3445 num_functions++; 3446 valid_functions >>= 1; 3447 } 3448 3449 /* partition id is 1-based, and functions are evenly spread 3450 * across the ports as partitions 3451 */ 3452 if (hw->num_ports != 0) { 3453 hw->partition_id = (hw->pf_id / hw->num_ports) + 1; 3454 hw->num_partitions = num_functions / hw->num_ports; 3455 } 3456 3457 /* additional HW specific goodies that might 3458 * someday be HW version specific 3459 */ 3460 p->rx_buf_chain_len = I40E_MAX_CHAINED_RX_BUFFERS; 3461 } 3462 3463 /** 3464 * i40e_aq_discover_capabilities 3465 * @hw: pointer to the hw struct 3466 * @buff: a virtual buffer to hold the capabilities 3467 * @buff_size: Size of the virtual buffer 3468 * @data_size: Size of the returned data, or buff size needed if AQ err==ENOMEM 3469 * @list_type_opc: capabilities type to discover - pass in the command opcode 3470 * @cmd_details: pointer to command details structure or NULL 3471 * 3472 * Get the device capabilities descriptions from the firmware 3473 **/ 3474 i40e_status i40e_aq_discover_capabilities(struct i40e_hw *hw, 3475 void *buff, u16 buff_size, u16 *data_size, 3476 enum i40e_admin_queue_opc list_type_opc, 3477 struct i40e_asq_cmd_details *cmd_details) 3478 { 3479 struct i40e_aqc_list_capabilites *cmd; 3480 struct i40e_aq_desc desc; 3481 i40e_status status = 0; 3482 3483 cmd = (struct i40e_aqc_list_capabilites *)&desc.params.raw; 3484 3485 if (list_type_opc != i40e_aqc_opc_list_func_capabilities && 3486 list_type_opc != i40e_aqc_opc_list_dev_capabilities) { 3487 status = I40E_ERR_PARAM; 3488 goto exit; 3489 } 3490 3491 i40e_fill_default_direct_cmd_desc(&desc, list_type_opc); 3492 3493 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 3494 if (buff_size > I40E_AQ_LARGE_BUF) 3495 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 3496 3497 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); 3498 *data_size = le16_to_cpu(desc.datalen); 3499 3500 if (status) 3501 goto exit; 3502 3503 i40e_parse_discover_capabilities(hw, buff, le32_to_cpu(cmd->count), 3504 list_type_opc); 3505 3506 exit: 3507 return status; 3508 } 3509 3510 /** 3511 * i40e_aq_update_nvm 3512 * @hw: pointer to the hw struct 3513 * @module_pointer: module pointer location in words from the NVM beginning 3514 * @offset: byte offset from the module beginning 3515 * @length: length of the section to be written (in bytes from the offset) 3516 * @data: command buffer (size [bytes] = length) 3517 * @last_command: tells if this is the last command in a series 3518 * @preservation_flags: Preservation mode flags 3519 * @cmd_details: pointer to command details structure or NULL 3520 * 3521 * Update the NVM using the admin queue commands 3522 **/ 3523 i40e_status i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer, 3524 u32 offset, u16 length, void *data, 3525 bool last_command, u8 preservation_flags, 3526 struct i40e_asq_cmd_details *cmd_details) 3527 { 3528 struct i40e_aq_desc desc; 3529 struct i40e_aqc_nvm_update *cmd = 3530 (struct i40e_aqc_nvm_update *)&desc.params.raw; 
3531 i40e_status status; 3532 3533 /* In offset the highest byte must be zeroed. */ 3534 if (offset & 0xFF000000) { 3535 status = I40E_ERR_PARAM; 3536 goto i40e_aq_update_nvm_exit; 3537 } 3538 3539 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_update); 3540 3541 /* If this is the last command in a series, set the proper flag. */ 3542 if (last_command) 3543 cmd->command_flags |= I40E_AQ_NVM_LAST_CMD; 3544 if (hw->mac.type == I40E_MAC_X722) { 3545 if (preservation_flags == I40E_NVM_PRESERVATION_FLAGS_SELECTED) 3546 cmd->command_flags |= 3547 (I40E_AQ_NVM_PRESERVATION_FLAGS_SELECTED << 3548 I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT); 3549 else if (preservation_flags == I40E_NVM_PRESERVATION_FLAGS_ALL) 3550 cmd->command_flags |= 3551 (I40E_AQ_NVM_PRESERVATION_FLAGS_ALL << 3552 I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT); 3553 } 3554 cmd->module_pointer = module_pointer; 3555 cmd->offset = cpu_to_le32(offset); 3556 cmd->length = cpu_to_le16(length); 3557 3558 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); 3559 if (length > I40E_AQ_LARGE_BUF) 3560 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 3561 3562 status = i40e_asq_send_command(hw, &desc, data, length, cmd_details); 3563 3564 i40e_aq_update_nvm_exit: 3565 return status; 3566 } 3567 3568 /** 3569 * i40e_aq_rearrange_nvm 3570 * @hw: pointer to the hw struct 3571 * @rearrange_nvm: defines direction of rearrangement 3572 * @cmd_details: pointer to command details structure or NULL 3573 * 3574 * Rearrange NVM structure, available only for transition FW 3575 **/ 3576 i40e_status i40e_aq_rearrange_nvm(struct i40e_hw *hw, 3577 u8 rearrange_nvm, 3578 struct i40e_asq_cmd_details *cmd_details) 3579 { 3580 struct i40e_aqc_nvm_update *cmd; 3581 i40e_status status; 3582 struct i40e_aq_desc desc; 3583 3584 cmd = (struct i40e_aqc_nvm_update *)&desc.params.raw; 3585 3586 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_update); 3587 3588 rearrange_nvm &= (I40E_AQ_NVM_REARRANGE_TO_FLAT | 3589 I40E_AQ_NVM_REARRANGE_TO_STRUCT); 3590 3591 if (!rearrange_nvm) { 3592 status = I40E_ERR_PARAM; 3593 goto i40e_aq_rearrange_nvm_exit; 3594 } 3595 3596 cmd->command_flags |= rearrange_nvm; 3597 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3598 3599 i40e_aq_rearrange_nvm_exit: 3600 return status; 3601 } 3602 3603 /** 3604 * i40e_aq_get_lldp_mib 3605 * @hw: pointer to the hw struct 3606 * @bridge_type: type of bridge requested 3607 * @mib_type: Local, Remote or both Local and Remote MIBs 3608 * @buff: pointer to a user supplied buffer to store the MIB block 3609 * @buff_size: size of the buffer (in bytes) 3610 * @local_len : length of the returned Local LLDP MIB 3611 * @remote_len: length of the returned Remote LLDP MIB 3612 * @cmd_details: pointer to command details structure or NULL 3613 * 3614 * Requests the complete LLDP MIB (entire packet). 
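 *
 * Illustrative usage (sketch only, not code from this file): a caller that
 * only needs the local MIB may pass NULL for remote_len; the buffer size and
 * the I40E_AQ_LLDP_MIB_LOCAL / I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE names
 * are assumed to come from the admin queue command definitions.
 *
 *	u8 mib[1024];
 *	u16 local_len = 0;
 *	i40e_status ret;
 *
 *	ret = i40e_aq_get_lldp_mib(hw, I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
 *				   I40E_AQ_LLDP_MIB_LOCAL, mib, sizeof(mib),
 *				   &local_len, NULL, NULL);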
3615 **/ 3616 i40e_status i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type, 3617 u8 mib_type, void *buff, u16 buff_size, 3618 u16 *local_len, u16 *remote_len, 3619 struct i40e_asq_cmd_details *cmd_details) 3620 { 3621 struct i40e_aq_desc desc; 3622 struct i40e_aqc_lldp_get_mib *cmd = 3623 (struct i40e_aqc_lldp_get_mib *)&desc.params.raw; 3624 struct i40e_aqc_lldp_get_mib *resp = 3625 (struct i40e_aqc_lldp_get_mib *)&desc.params.raw; 3626 i40e_status status; 3627 3628 if (buff_size == 0 || !buff) 3629 return I40E_ERR_PARAM; 3630 3631 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_get_mib); 3632 /* Indirect Command */ 3633 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 3634 3635 cmd->type = mib_type & I40E_AQ_LLDP_MIB_TYPE_MASK; 3636 cmd->type |= ((bridge_type << I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) & 3637 I40E_AQ_LLDP_BRIDGE_TYPE_MASK); 3638 3639 desc.datalen = cpu_to_le16(buff_size); 3640 3641 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 3642 if (buff_size > I40E_AQ_LARGE_BUF) 3643 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 3644 3645 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); 3646 if (!status) { 3647 if (local_len != NULL) 3648 *local_len = le16_to_cpu(resp->local_len); 3649 if (remote_len != NULL) 3650 *remote_len = le16_to_cpu(resp->remote_len); 3651 } 3652 3653 return status; 3654 } 3655 3656 /** 3657 * i40e_aq_set_lldp_mib - Set the LLDP MIB 3658 * @hw: pointer to the hw struct 3659 * @mib_type: Local, Remote or both Local and Remote MIBs 3660 * @buff: pointer to a user supplied buffer to store the MIB block 3661 * @buff_size: size of the buffer (in bytes) 3662 * @cmd_details: pointer to command details structure or NULL 3663 * 3664 * Set the LLDP MIB. 3665 **/ 3666 enum i40e_status_code 3667 i40e_aq_set_lldp_mib(struct i40e_hw *hw, 3668 u8 mib_type, void *buff, u16 buff_size, 3669 struct i40e_asq_cmd_details *cmd_details) 3670 { 3671 struct i40e_aqc_lldp_set_local_mib *cmd; 3672 enum i40e_status_code status; 3673 struct i40e_aq_desc desc; 3674 3675 cmd = (struct i40e_aqc_lldp_set_local_mib *)&desc.params.raw; 3676 if (buff_size == 0 || !buff) 3677 return I40E_ERR_PARAM; 3678 3679 i40e_fill_default_direct_cmd_desc(&desc, 3680 i40e_aqc_opc_lldp_set_local_mib); 3681 /* Indirect Command */ 3682 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); 3683 if (buff_size > I40E_AQ_LARGE_BUF) 3684 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 3685 desc.datalen = cpu_to_le16(buff_size); 3686 3687 cmd->type = mib_type; 3688 cmd->length = cpu_to_le16(buff_size); 3689 cmd->address_high = cpu_to_le32(upper_32_bits((uintptr_t)buff)); 3690 cmd->address_low = cpu_to_le32(lower_32_bits((uintptr_t)buff)); 3691 3692 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); 3693 return status; 3694 } 3695 3696 /** 3697 * i40e_aq_cfg_lldp_mib_change_event 3698 * @hw: pointer to the hw struct 3699 * @enable_update: Enable or Disable event posting 3700 * @cmd_details: pointer to command details structure or NULL 3701 * 3702 * Enable or Disable posting of an event on ARQ when LLDP MIB 3703 * associated with the interface changes 3704 **/ 3705 i40e_status i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw, 3706 bool enable_update, 3707 struct i40e_asq_cmd_details *cmd_details) 3708 { 3709 struct i40e_aq_desc desc; 3710 struct i40e_aqc_lldp_update_mib *cmd = 3711 (struct i40e_aqc_lldp_update_mib *)&desc.params.raw; 3712 i40e_status status; 3713 3714 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_update_mib); 
3715 3716 if (!enable_update) 3717 cmd->command |= I40E_AQ_LLDP_MIB_UPDATE_DISABLE; 3718 3719 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3720 3721 return status; 3722 } 3723 3724 /** 3725 * i40e_aq_restore_lldp 3726 * @hw: pointer to the hw struct 3727 * @setting: pointer to factory setting variable or NULL 3728 * @restore: True if factory settings should be restored 3729 * @cmd_details: pointer to command details structure or NULL 3730 * 3731 * Restore LLDP Agent factory settings if @restore set to True. In other case 3732 * only returns factory setting in AQ response. 3733 **/ 3734 enum i40e_status_code 3735 i40e_aq_restore_lldp(struct i40e_hw *hw, u8 *setting, bool restore, 3736 struct i40e_asq_cmd_details *cmd_details) 3737 { 3738 struct i40e_aq_desc desc; 3739 struct i40e_aqc_lldp_restore *cmd = 3740 (struct i40e_aqc_lldp_restore *)&desc.params.raw; 3741 i40e_status status; 3742 3743 if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT)) { 3744 i40e_debug(hw, I40E_DEBUG_ALL, 3745 "Restore LLDP not supported by current FW version.\n"); 3746 return I40E_ERR_DEVICE_NOT_SUPPORTED; 3747 } 3748 3749 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_restore); 3750 3751 if (restore) 3752 cmd->command |= I40E_AQ_LLDP_AGENT_RESTORE; 3753 3754 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3755 3756 if (setting) 3757 *setting = cmd->command & 1; 3758 3759 return status; 3760 } 3761 3762 /** 3763 * i40e_aq_stop_lldp 3764 * @hw: pointer to the hw struct 3765 * @shutdown_agent: True if LLDP Agent needs to be Shutdown 3766 * @persist: True if stop of LLDP should be persistent across power cycles 3767 * @cmd_details: pointer to command details structure or NULL 3768 * 3769 * Stop or Shutdown the embedded LLDP Agent 3770 **/ 3771 i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent, 3772 bool persist, 3773 struct i40e_asq_cmd_details *cmd_details) 3774 { 3775 struct i40e_aq_desc desc; 3776 struct i40e_aqc_lldp_stop *cmd = 3777 (struct i40e_aqc_lldp_stop *)&desc.params.raw; 3778 i40e_status status; 3779 3780 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_stop); 3781 3782 if (shutdown_agent) 3783 cmd->command |= I40E_AQ_LLDP_AGENT_SHUTDOWN; 3784 3785 if (persist) { 3786 if (hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT) 3787 cmd->command |= I40E_AQ_LLDP_AGENT_STOP_PERSIST; 3788 else 3789 i40e_debug(hw, I40E_DEBUG_ALL, 3790 "Persistent Stop LLDP not supported by current FW version.\n"); 3791 } 3792 3793 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3794 3795 return status; 3796 } 3797 3798 /** 3799 * i40e_aq_start_lldp 3800 * @hw: pointer to the hw struct 3801 * @persist: True if start of LLDP should be persistent across power cycles 3802 * @cmd_details: pointer to command details structure or NULL 3803 * 3804 * Start the embedded LLDP Agent on all ports. 
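 *
 * Illustrative usage (sketch only): restarting the agent after a prior
 * i40e_aq_stop_lldp(), requesting persistence only when the firmware
 * advertises support for it:
 *
 *	bool persist = !!(hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT);
 *
 *	status = i40e_aq_start_lldp(hw, persist, NULL);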
3805 **/ 3806 i40e_status i40e_aq_start_lldp(struct i40e_hw *hw, bool persist, 3807 struct i40e_asq_cmd_details *cmd_details) 3808 { 3809 struct i40e_aq_desc desc; 3810 struct i40e_aqc_lldp_start *cmd = 3811 (struct i40e_aqc_lldp_start *)&desc.params.raw; 3812 i40e_status status; 3813 3814 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_start); 3815 3816 cmd->command = I40E_AQ_LLDP_AGENT_START; 3817 3818 if (persist) { 3819 if (hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT) 3820 cmd->command |= I40E_AQ_LLDP_AGENT_START_PERSIST; 3821 else 3822 i40e_debug(hw, I40E_DEBUG_ALL, 3823 "Persistent Start LLDP not supported by current FW version.\n"); 3824 } 3825 3826 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3827 3828 return status; 3829 } 3830 3831 /** 3832 * i40e_aq_set_dcb_parameters 3833 * @hw: pointer to the hw struct 3834 * @cmd_details: pointer to command details structure or NULL 3835 * @dcb_enable: True if DCB configuration needs to be applied 3836 * 3837 **/ 3838 enum i40e_status_code 3839 i40e_aq_set_dcb_parameters(struct i40e_hw *hw, bool dcb_enable, 3840 struct i40e_asq_cmd_details *cmd_details) 3841 { 3842 struct i40e_aq_desc desc; 3843 struct i40e_aqc_set_dcb_parameters *cmd = 3844 (struct i40e_aqc_set_dcb_parameters *)&desc.params.raw; 3845 i40e_status status; 3846 3847 if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE)) 3848 return I40E_ERR_DEVICE_NOT_SUPPORTED; 3849 3850 i40e_fill_default_direct_cmd_desc(&desc, 3851 i40e_aqc_opc_set_dcb_parameters); 3852 3853 if (dcb_enable) { 3854 cmd->valid_flags = I40E_DCB_VALID; 3855 cmd->command = I40E_AQ_DCB_SET_AGENT; 3856 } 3857 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3858 3859 return status; 3860 } 3861 3862 /** 3863 * i40e_aq_get_cee_dcb_config 3864 * @hw: pointer to the hw struct 3865 * @buff: response buffer that stores CEE operational configuration 3866 * @buff_size: size of the buffer passed 3867 * @cmd_details: pointer to command details structure or NULL 3868 * 3869 * Get CEE DCBX mode operational configuration from firmware 3870 **/ 3871 i40e_status i40e_aq_get_cee_dcb_config(struct i40e_hw *hw, 3872 void *buff, u16 buff_size, 3873 struct i40e_asq_cmd_details *cmd_details) 3874 { 3875 struct i40e_aq_desc desc; 3876 i40e_status status; 3877 3878 if (buff_size == 0 || !buff) 3879 return I40E_ERR_PARAM; 3880 3881 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_cee_dcb_cfg); 3882 3883 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 3884 status = i40e_asq_send_command(hw, &desc, (void *)buff, buff_size, 3885 cmd_details); 3886 3887 return status; 3888 } 3889 3890 /** 3891 * i40e_aq_add_udp_tunnel 3892 * @hw: pointer to the hw struct 3893 * @udp_port: the UDP port to add in Host byte order 3894 * @protocol_index: protocol index type 3895 * @filter_index: pointer to filter index 3896 * @cmd_details: pointer to command details structure or NULL 3897 * 3898 * Note: Firmware expects the udp_port value to be in Little Endian format, 3899 * and this function will call cpu_to_le16 to convert from Host byte order to 3900 * Little Endian order. 
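 *
 * Illustrative usage (sketch only; the IANA VXLAN port number and the
 * I40E_AQC_TUNNEL_TYPE_VXLAN name are assumptions, not taken from this file):
 *
 *	u8 filter_index;
 *
 *	status = i40e_aq_add_udp_tunnel(hw, 4789, I40E_AQC_TUNNEL_TYPE_VXLAN,
 *					&filter_index, NULL);
 *
 * On success, filter_index is what i40e_aq_del_udp_tunnel() below expects.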
3901 **/ 3902 i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw, 3903 u16 udp_port, u8 protocol_index, 3904 u8 *filter_index, 3905 struct i40e_asq_cmd_details *cmd_details) 3906 { 3907 struct i40e_aq_desc desc; 3908 struct i40e_aqc_add_udp_tunnel *cmd = 3909 (struct i40e_aqc_add_udp_tunnel *)&desc.params.raw; 3910 struct i40e_aqc_del_udp_tunnel_completion *resp = 3911 (struct i40e_aqc_del_udp_tunnel_completion *)&desc.params.raw; 3912 i40e_status status; 3913 3914 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_udp_tunnel); 3915 3916 cmd->udp_port = cpu_to_le16(udp_port); 3917 cmd->protocol_type = protocol_index; 3918 3919 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3920 3921 if (!status && filter_index) 3922 *filter_index = resp->index; 3923 3924 return status; 3925 } 3926 3927 /** 3928 * i40e_aq_del_udp_tunnel 3929 * @hw: pointer to the hw struct 3930 * @index: filter index 3931 * @cmd_details: pointer to command details structure or NULL 3932 **/ 3933 i40e_status i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index, 3934 struct i40e_asq_cmd_details *cmd_details) 3935 { 3936 struct i40e_aq_desc desc; 3937 struct i40e_aqc_remove_udp_tunnel *cmd = 3938 (struct i40e_aqc_remove_udp_tunnel *)&desc.params.raw; 3939 i40e_status status; 3940 3941 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_del_udp_tunnel); 3942 3943 cmd->index = index; 3944 3945 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3946 3947 return status; 3948 } 3949 3950 /** 3951 * i40e_aq_delete_element - Delete switch element 3952 * @hw: pointer to the hw struct 3953 * @seid: the SEID to delete from the switch 3954 * @cmd_details: pointer to command details structure or NULL 3955 * 3956 * This deletes a switch element from the switch. 3957 **/ 3958 i40e_status i40e_aq_delete_element(struct i40e_hw *hw, u16 seid, 3959 struct i40e_asq_cmd_details *cmd_details) 3960 { 3961 struct i40e_aq_desc desc; 3962 struct i40e_aqc_switch_seid *cmd = 3963 (struct i40e_aqc_switch_seid *)&desc.params.raw; 3964 i40e_status status; 3965 3966 if (seid == 0) 3967 return I40E_ERR_PARAM; 3968 3969 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_delete_element); 3970 3971 cmd->seid = cpu_to_le16(seid); 3972 3973 status = i40e_asq_send_command_atomic(hw, &desc, NULL, 0, 3974 cmd_details, true); 3975 3976 return status; 3977 } 3978 3979 /** 3980 * i40e_aq_dcb_updated - DCB Updated Command 3981 * @hw: pointer to the hw struct 3982 * @cmd_details: pointer to command details structure or NULL 3983 * 3984 * EMP will return when the shared RPB settings have been 3985 * recomputed and modified. The retval field in the descriptor 3986 * will be set to 0 when RPB is modified. 
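 *
 * Illustrative usage (sketch only): callers typically issue this right after
 * pushing a new DCB configuration and check the admin queue return code:
 *
 *	status = i40e_aq_dcb_updated(hw, NULL);
 *	if (status)
 *		hw_dbg(hw, "DCB update notification failed, aq err %d\n",
 *		       hw->aq.asq_last_status);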
3987 **/ 3988 i40e_status i40e_aq_dcb_updated(struct i40e_hw *hw, 3989 struct i40e_asq_cmd_details *cmd_details) 3990 { 3991 struct i40e_aq_desc desc; 3992 i40e_status status; 3993 3994 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_dcb_updated); 3995 3996 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3997 3998 return status; 3999 } 4000 4001 /** 4002 * i40e_aq_tx_sched_cmd - generic Tx scheduler AQ command handler 4003 * @hw: pointer to the hw struct 4004 * @seid: seid for the physical port/switching component/vsi 4005 * @buff: Indirect buffer to hold data parameters and response 4006 * @buff_size: Indirect buffer size 4007 * @opcode: Tx scheduler AQ command opcode 4008 * @cmd_details: pointer to command details structure or NULL 4009 * 4010 * Generic command handler for Tx scheduler AQ commands 4011 **/ 4012 static i40e_status i40e_aq_tx_sched_cmd(struct i40e_hw *hw, u16 seid, 4013 void *buff, u16 buff_size, 4014 enum i40e_admin_queue_opc opcode, 4015 struct i40e_asq_cmd_details *cmd_details) 4016 { 4017 struct i40e_aq_desc desc; 4018 struct i40e_aqc_tx_sched_ind *cmd = 4019 (struct i40e_aqc_tx_sched_ind *)&desc.params.raw; 4020 i40e_status status; 4021 bool cmd_param_flag = false; 4022 4023 switch (opcode) { 4024 case i40e_aqc_opc_configure_vsi_ets_sla_bw_limit: 4025 case i40e_aqc_opc_configure_vsi_tc_bw: 4026 case i40e_aqc_opc_enable_switching_comp_ets: 4027 case i40e_aqc_opc_modify_switching_comp_ets: 4028 case i40e_aqc_opc_disable_switching_comp_ets: 4029 case i40e_aqc_opc_configure_switching_comp_ets_bw_limit: 4030 case i40e_aqc_opc_configure_switching_comp_bw_config: 4031 cmd_param_flag = true; 4032 break; 4033 case i40e_aqc_opc_query_vsi_bw_config: 4034 case i40e_aqc_opc_query_vsi_ets_sla_config: 4035 case i40e_aqc_opc_query_switching_comp_ets_config: 4036 case i40e_aqc_opc_query_port_ets_config: 4037 case i40e_aqc_opc_query_switching_comp_bw_config: 4038 cmd_param_flag = false; 4039 break; 4040 default: 4041 return I40E_ERR_PARAM; 4042 } 4043 4044 i40e_fill_default_direct_cmd_desc(&desc, opcode); 4045 4046 /* Indirect command */ 4047 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 4048 if (cmd_param_flag) 4049 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD); 4050 if (buff_size > I40E_AQ_LARGE_BUF) 4051 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 4052 4053 desc.datalen = cpu_to_le16(buff_size); 4054 4055 cmd->vsi_seid = cpu_to_le16(seid); 4056 4057 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); 4058 4059 return status; 4060 } 4061 4062 /** 4063 * i40e_aq_config_vsi_bw_limit - Configure VSI BW Limit 4064 * @hw: pointer to the hw struct 4065 * @seid: VSI seid 4066 * @credit: BW limit credits (0 = disabled) 4067 * @max_credit: Max BW limit credits 4068 * @cmd_details: pointer to command details structure or NULL 4069 **/ 4070 i40e_status i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw, 4071 u16 seid, u16 credit, u8 max_credit, 4072 struct i40e_asq_cmd_details *cmd_details) 4073 { 4074 struct i40e_aq_desc desc; 4075 struct i40e_aqc_configure_vsi_bw_limit *cmd = 4076 (struct i40e_aqc_configure_vsi_bw_limit *)&desc.params.raw; 4077 i40e_status status; 4078 4079 i40e_fill_default_direct_cmd_desc(&desc, 4080 i40e_aqc_opc_configure_vsi_bw_limit); 4081 4082 cmd->vsi_seid = cpu_to_le16(seid); 4083 cmd->credit = cpu_to_le16(credit); 4084 cmd->max_credit = max_credit; 4085 4086 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 4087 4088 return status; 4089 } 4090 4091 /** 4092 * i40e_aq_config_vsi_tc_bw - Config VSI BW 
Allocation per TC 4093 * @hw: pointer to the hw struct 4094 * @seid: VSI seid 4095 * @bw_data: Buffer holding enabled TCs, relative TC BW limit/credits 4096 * @cmd_details: pointer to command details structure or NULL 4097 **/ 4098 i40e_status i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw, 4099 u16 seid, 4100 struct i40e_aqc_configure_vsi_tc_bw_data *bw_data, 4101 struct i40e_asq_cmd_details *cmd_details) 4102 { 4103 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), 4104 i40e_aqc_opc_configure_vsi_tc_bw, 4105 cmd_details); 4106 } 4107 4108 /** 4109 * i40e_aq_config_switch_comp_ets - Enable/Disable/Modify ETS on the port 4110 * @hw: pointer to the hw struct 4111 * @seid: seid of the switching component connected to Physical Port 4112 * @ets_data: Buffer holding ETS parameters 4113 * @opcode: Tx scheduler AQ command opcode 4114 * @cmd_details: pointer to command details structure or NULL 4115 **/ 4116 i40e_status i40e_aq_config_switch_comp_ets(struct i40e_hw *hw, 4117 u16 seid, 4118 struct i40e_aqc_configure_switching_comp_ets_data *ets_data, 4119 enum i40e_admin_queue_opc opcode, 4120 struct i40e_asq_cmd_details *cmd_details) 4121 { 4122 return i40e_aq_tx_sched_cmd(hw, seid, (void *)ets_data, 4123 sizeof(*ets_data), opcode, cmd_details); 4124 } 4125 4126 /** 4127 * i40e_aq_config_switch_comp_bw_config - Config Switch comp BW Alloc per TC 4128 * @hw: pointer to the hw struct 4129 * @seid: seid of the switching component 4130 * @bw_data: Buffer holding enabled TCs, relative/absolute TC BW limit/credits 4131 * @cmd_details: pointer to command details structure or NULL 4132 **/ 4133 i40e_status i40e_aq_config_switch_comp_bw_config(struct i40e_hw *hw, 4134 u16 seid, 4135 struct i40e_aqc_configure_switching_comp_bw_config_data *bw_data, 4136 struct i40e_asq_cmd_details *cmd_details) 4137 { 4138 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), 4139 i40e_aqc_opc_configure_switching_comp_bw_config, 4140 cmd_details); 4141 } 4142 4143 /** 4144 * i40e_aq_query_vsi_bw_config - Query VSI BW configuration 4145 * @hw: pointer to the hw struct 4146 * @seid: seid of the VSI 4147 * @bw_data: Buffer to hold VSI BW configuration 4148 * @cmd_details: pointer to command details structure or NULL 4149 **/ 4150 i40e_status i40e_aq_query_vsi_bw_config(struct i40e_hw *hw, 4151 u16 seid, 4152 struct i40e_aqc_query_vsi_bw_config_resp *bw_data, 4153 struct i40e_asq_cmd_details *cmd_details) 4154 { 4155 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), 4156 i40e_aqc_opc_query_vsi_bw_config, 4157 cmd_details); 4158 } 4159 4160 /** 4161 * i40e_aq_query_vsi_ets_sla_config - Query VSI BW configuration per TC 4162 * @hw: pointer to the hw struct 4163 * @seid: seid of the VSI 4164 * @bw_data: Buffer to hold VSI BW configuration per TC 4165 * @cmd_details: pointer to command details structure or NULL 4166 **/ 4167 i40e_status i40e_aq_query_vsi_ets_sla_config(struct i40e_hw *hw, 4168 u16 seid, 4169 struct i40e_aqc_query_vsi_ets_sla_config_resp *bw_data, 4170 struct i40e_asq_cmd_details *cmd_details) 4171 { 4172 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), 4173 i40e_aqc_opc_query_vsi_ets_sla_config, 4174 cmd_details); 4175 } 4176 4177 /** 4178 * i40e_aq_query_switch_comp_ets_config - Query Switch comp BW config per TC 4179 * @hw: pointer to the hw struct 4180 * @seid: seid of the switching component 4181 * @bw_data: Buffer to hold switching component's per TC BW config 4182 * @cmd_details: pointer to command details structure or NULL 
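 *
 * Illustrative usage (sketch only), where seid identifies the switching
 * component attached to the physical port:
 *
 *	struct i40e_aqc_query_switching_comp_ets_config_resp ets_cfg = {0};
 *
 *	status = i40e_aq_query_switch_comp_ets_config(hw, seid, &ets_cfg, NULL);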
4183 **/ 4184 i40e_status i40e_aq_query_switch_comp_ets_config(struct i40e_hw *hw, 4185 u16 seid, 4186 struct i40e_aqc_query_switching_comp_ets_config_resp *bw_data, 4187 struct i40e_asq_cmd_details *cmd_details) 4188 { 4189 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), 4190 i40e_aqc_opc_query_switching_comp_ets_config, 4191 cmd_details); 4192 } 4193 4194 /** 4195 * i40e_aq_query_port_ets_config - Query Physical Port ETS configuration 4196 * @hw: pointer to the hw struct 4197 * @seid: seid of the VSI or switching component connected to Physical Port 4198 * @bw_data: Buffer to hold current ETS configuration for the Physical Port 4199 * @cmd_details: pointer to command details structure or NULL 4200 **/ 4201 i40e_status i40e_aq_query_port_ets_config(struct i40e_hw *hw, 4202 u16 seid, 4203 struct i40e_aqc_query_port_ets_config_resp *bw_data, 4204 struct i40e_asq_cmd_details *cmd_details) 4205 { 4206 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), 4207 i40e_aqc_opc_query_port_ets_config, 4208 cmd_details); 4209 } 4210 4211 /** 4212 * i40e_aq_query_switch_comp_bw_config - Query Switch comp BW configuration 4213 * @hw: pointer to the hw struct 4214 * @seid: seid of the switching component 4215 * @bw_data: Buffer to hold switching component's BW configuration 4216 * @cmd_details: pointer to command details structure or NULL 4217 **/ 4218 i40e_status i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw, 4219 u16 seid, 4220 struct i40e_aqc_query_switching_comp_bw_config_resp *bw_data, 4221 struct i40e_asq_cmd_details *cmd_details) 4222 { 4223 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), 4224 i40e_aqc_opc_query_switching_comp_bw_config, 4225 cmd_details); 4226 } 4227 4228 /** 4229 * i40e_validate_filter_settings 4230 * @hw: pointer to the hardware structure 4231 * @settings: Filter control settings 4232 * 4233 * Check and validate the filter control settings passed. 4234 * The function checks for the valid filter/context sizes being 4235 * passed for FCoE and PE. 4236 * 4237 * Returns 0 if the values passed are valid and within 4238 * range else returns an error. 
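 *
 * For example (illustrative only), a settings block like the following would
 * pass these checks, assuming the combined FCoE filter and context sizes also
 * fit under the PMFCOEFMAX limit read from hardware below:
 *
 *	struct i40e_filter_control_settings settings = {
 *		.fcoe_filt_num = I40E_HASH_FILTER_SIZE_1K,
 *		.fcoe_cntx_num = I40E_DMA_CNTX_SIZE_512,
 *		.pe_filt_num = I40E_HASH_FILTER_SIZE_1K,
 *		.pe_cntx_num = I40E_DMA_CNTX_SIZE_512,
 *	};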
4239 **/ 4240 static i40e_status i40e_validate_filter_settings(struct i40e_hw *hw, 4241 struct i40e_filter_control_settings *settings) 4242 { 4243 u32 fcoe_cntx_size, fcoe_filt_size; 4244 u32 fcoe_fmax; 4245 u32 val; 4246 4247 /* Validate FCoE settings passed */ 4248 switch (settings->fcoe_filt_num) { 4249 case I40E_HASH_FILTER_SIZE_1K: 4250 case I40E_HASH_FILTER_SIZE_2K: 4251 case I40E_HASH_FILTER_SIZE_4K: 4252 case I40E_HASH_FILTER_SIZE_8K: 4253 case I40E_HASH_FILTER_SIZE_16K: 4254 case I40E_HASH_FILTER_SIZE_32K: 4255 fcoe_filt_size = I40E_HASH_FILTER_BASE_SIZE; 4256 fcoe_filt_size <<= (u32)settings->fcoe_filt_num; 4257 break; 4258 default: 4259 return I40E_ERR_PARAM; 4260 } 4261 4262 switch (settings->fcoe_cntx_num) { 4263 case I40E_DMA_CNTX_SIZE_512: 4264 case I40E_DMA_CNTX_SIZE_1K: 4265 case I40E_DMA_CNTX_SIZE_2K: 4266 case I40E_DMA_CNTX_SIZE_4K: 4267 fcoe_cntx_size = I40E_DMA_CNTX_BASE_SIZE; 4268 fcoe_cntx_size <<= (u32)settings->fcoe_cntx_num; 4269 break; 4270 default: 4271 return I40E_ERR_PARAM; 4272 } 4273 4274 /* Validate PE settings passed */ 4275 switch (settings->pe_filt_num) { 4276 case I40E_HASH_FILTER_SIZE_1K: 4277 case I40E_HASH_FILTER_SIZE_2K: 4278 case I40E_HASH_FILTER_SIZE_4K: 4279 case I40E_HASH_FILTER_SIZE_8K: 4280 case I40E_HASH_FILTER_SIZE_16K: 4281 case I40E_HASH_FILTER_SIZE_32K: 4282 case I40E_HASH_FILTER_SIZE_64K: 4283 case I40E_HASH_FILTER_SIZE_128K: 4284 case I40E_HASH_FILTER_SIZE_256K: 4285 case I40E_HASH_FILTER_SIZE_512K: 4286 case I40E_HASH_FILTER_SIZE_1M: 4287 break; 4288 default: 4289 return I40E_ERR_PARAM; 4290 } 4291 4292 switch (settings->pe_cntx_num) { 4293 case I40E_DMA_CNTX_SIZE_512: 4294 case I40E_DMA_CNTX_SIZE_1K: 4295 case I40E_DMA_CNTX_SIZE_2K: 4296 case I40E_DMA_CNTX_SIZE_4K: 4297 case I40E_DMA_CNTX_SIZE_8K: 4298 case I40E_DMA_CNTX_SIZE_16K: 4299 case I40E_DMA_CNTX_SIZE_32K: 4300 case I40E_DMA_CNTX_SIZE_64K: 4301 case I40E_DMA_CNTX_SIZE_128K: 4302 case I40E_DMA_CNTX_SIZE_256K: 4303 break; 4304 default: 4305 return I40E_ERR_PARAM; 4306 } 4307 4308 /* FCHSIZE + FCDSIZE should not be greater than PMFCOEFMAX */ 4309 val = rd32(hw, I40E_GLHMC_FCOEFMAX); 4310 fcoe_fmax = (val & I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_MASK) 4311 >> I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT; 4312 if (fcoe_filt_size + fcoe_cntx_size > fcoe_fmax) 4313 return I40E_ERR_INVALID_SIZE; 4314 4315 return 0; 4316 } 4317 4318 /** 4319 * i40e_set_filter_control 4320 * @hw: pointer to the hardware structure 4321 * @settings: Filter control settings 4322 * 4323 * Set the Queue Filters for PE/FCoE and enable filters required 4324 * for a single PF. It is expected that these settings are programmed 4325 * at the driver initialization time. 
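 *
 * Illustrative init-time usage (sketch only; the choices below are
 * assumptions, not taken from this file), with the FCoE/PE size fields
 * filled in as in the example above i40e_validate_filter_settings():
 *
 *	settings.hash_lut_size = I40E_HASH_LUT_SIZE_512;
 *	settings.enable_fdir = true;
 *	settings.enable_ethtype = true;
 *	settings.enable_macvlan = true;
 *	if (i40e_set_filter_control(hw, &settings))
 *		hw_dbg(hw, "Failed to program PF queue filter control\n");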
4326 **/ 4327 i40e_status i40e_set_filter_control(struct i40e_hw *hw, 4328 struct i40e_filter_control_settings *settings) 4329 { 4330 i40e_status ret = 0; 4331 u32 hash_lut_size = 0; 4332 u32 val; 4333 4334 if (!settings) 4335 return I40E_ERR_PARAM; 4336 4337 /* Validate the input settings */ 4338 ret = i40e_validate_filter_settings(hw, settings); 4339 if (ret) 4340 return ret; 4341 4342 /* Read the PF Queue Filter control register */ 4343 val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0); 4344 4345 /* Program required PE hash buckets for the PF */ 4346 val &= ~I40E_PFQF_CTL_0_PEHSIZE_MASK; 4347 val |= ((u32)settings->pe_filt_num << I40E_PFQF_CTL_0_PEHSIZE_SHIFT) & 4348 I40E_PFQF_CTL_0_PEHSIZE_MASK; 4349 /* Program required PE contexts for the PF */ 4350 val &= ~I40E_PFQF_CTL_0_PEDSIZE_MASK; 4351 val |= ((u32)settings->pe_cntx_num << I40E_PFQF_CTL_0_PEDSIZE_SHIFT) & 4352 I40E_PFQF_CTL_0_PEDSIZE_MASK; 4353 4354 /* Program required FCoE hash buckets for the PF */ 4355 val &= ~I40E_PFQF_CTL_0_PFFCHSIZE_MASK; 4356 val |= ((u32)settings->fcoe_filt_num << 4357 I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT) & 4358 I40E_PFQF_CTL_0_PFFCHSIZE_MASK; 4359 /* Program required FCoE DDP contexts for the PF */ 4360 val &= ~I40E_PFQF_CTL_0_PFFCDSIZE_MASK; 4361 val |= ((u32)settings->fcoe_cntx_num << 4362 I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT) & 4363 I40E_PFQF_CTL_0_PFFCDSIZE_MASK; 4364 4365 /* Program Hash LUT size for the PF */ 4366 val &= ~I40E_PFQF_CTL_0_HASHLUTSIZE_MASK; 4367 if (settings->hash_lut_size == I40E_HASH_LUT_SIZE_512) 4368 hash_lut_size = 1; 4369 val |= (hash_lut_size << I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT) & 4370 I40E_PFQF_CTL_0_HASHLUTSIZE_MASK; 4371 4372 /* Enable FDIR, Ethertype and MACVLAN filters for PF and VFs */ 4373 if (settings->enable_fdir) 4374 val |= I40E_PFQF_CTL_0_FD_ENA_MASK; 4375 if (settings->enable_ethtype) 4376 val |= I40E_PFQF_CTL_0_ETYPE_ENA_MASK; 4377 if (settings->enable_macvlan) 4378 val |= I40E_PFQF_CTL_0_MACVLAN_ENA_MASK; 4379 4380 i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, val); 4381 4382 return 0; 4383 } 4384 4385 /** 4386 * i40e_aq_add_rem_control_packet_filter - Add or Remove Control Packet Filter 4387 * @hw: pointer to the hw struct 4388 * @mac_addr: MAC address to use in the filter 4389 * @ethtype: Ethertype to use in the filter 4390 * @flags: Flags that needs to be applied to the filter 4391 * @vsi_seid: seid of the control VSI 4392 * @queue: VSI queue number to send the packet to 4393 * @is_add: Add control packet filter if True else remove 4394 * @stats: Structure to hold information on control filter counts 4395 * @cmd_details: pointer to command details structure or NULL 4396 * 4397 * This command will Add or Remove control packet filter for a control VSI. 4398 * In return it will update the total number of perfect filter count in 4399 * the stats member. 
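 *
 * For a concrete in-driver example of the "add" direction see
 * i40e_add_filter_to_drop_tx_flow_control_frames() below; removing the same
 * filter later would look roughly like this (sketch only):
 *
 *	i40e_aq_add_rem_control_packet_filter(hw, NULL, ethtype, flag,
 *					      seid, 0, false, NULL, NULL);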
4400 **/ 4401 i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw, 4402 u8 *mac_addr, u16 ethtype, u16 flags, 4403 u16 vsi_seid, u16 queue, bool is_add, 4404 struct i40e_control_filter_stats *stats, 4405 struct i40e_asq_cmd_details *cmd_details) 4406 { 4407 struct i40e_aq_desc desc; 4408 struct i40e_aqc_add_remove_control_packet_filter *cmd = 4409 (struct i40e_aqc_add_remove_control_packet_filter *) 4410 &desc.params.raw; 4411 struct i40e_aqc_add_remove_control_packet_filter_completion *resp = 4412 (struct i40e_aqc_add_remove_control_packet_filter_completion *) 4413 &desc.params.raw; 4414 i40e_status status; 4415 4416 if (vsi_seid == 0) 4417 return I40E_ERR_PARAM; 4418 4419 if (is_add) { 4420 i40e_fill_default_direct_cmd_desc(&desc, 4421 i40e_aqc_opc_add_control_packet_filter); 4422 cmd->queue = cpu_to_le16(queue); 4423 } else { 4424 i40e_fill_default_direct_cmd_desc(&desc, 4425 i40e_aqc_opc_remove_control_packet_filter); 4426 } 4427 4428 if (mac_addr) 4429 ether_addr_copy(cmd->mac, mac_addr); 4430 4431 cmd->etype = cpu_to_le16(ethtype); 4432 cmd->flags = cpu_to_le16(flags); 4433 cmd->seid = cpu_to_le16(vsi_seid); 4434 4435 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 4436 4437 if (!status && stats) { 4438 stats->mac_etype_used = le16_to_cpu(resp->mac_etype_used); 4439 stats->etype_used = le16_to_cpu(resp->etype_used); 4440 stats->mac_etype_free = le16_to_cpu(resp->mac_etype_free); 4441 stats->etype_free = le16_to_cpu(resp->etype_free); 4442 } 4443 4444 return status; 4445 } 4446 4447 /** 4448 * i40e_add_filter_to_drop_tx_flow_control_frames- filter to drop flow control 4449 * @hw: pointer to the hw struct 4450 * @seid: VSI seid to add ethertype filter from 4451 **/ 4452 void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw, 4453 u16 seid) 4454 { 4455 #define I40E_FLOW_CONTROL_ETHTYPE 0x8808 4456 u16 flag = I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC | 4457 I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP | 4458 I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX; 4459 u16 ethtype = I40E_FLOW_CONTROL_ETHTYPE; 4460 i40e_status status; 4461 4462 status = i40e_aq_add_rem_control_packet_filter(hw, NULL, ethtype, flag, 4463 seid, 0, true, NULL, 4464 NULL); 4465 if (status) 4466 hw_dbg(hw, "Ethtype Filter Add failed: Error pruning Tx flow control frames\n"); 4467 } 4468 4469 /** 4470 * i40e_aq_alternate_read 4471 * @hw: pointer to the hardware structure 4472 * @reg_addr0: address of first dword to be read 4473 * @reg_val0: pointer for data read from 'reg_addr0' 4474 * @reg_addr1: address of second dword to be read 4475 * @reg_val1: pointer for data read from 'reg_addr1' 4476 * 4477 * Read one or two dwords from alternate structure. Fields are indicated 4478 * by 'reg_addr0' and 'reg_addr1' register numbers. If 'reg_val1' pointer 4479 * is not passed then only register at 'reg_addr0' is read. 
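 *
 * Illustrative usage (sketch): i40e_read_bw_from_alt_ram() later in this file
 * uses the two-dword form to fetch a PF's max and min bandwidth words in a
 * single command:
 *
 *	status = i40e_aq_alternate_read(hw, max_bw_addr, max_bw,
 *					min_bw_addr, min_bw);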
4480 * 4481 **/ 4482 static i40e_status i40e_aq_alternate_read(struct i40e_hw *hw, 4483 u32 reg_addr0, u32 *reg_val0, 4484 u32 reg_addr1, u32 *reg_val1) 4485 { 4486 struct i40e_aq_desc desc; 4487 struct i40e_aqc_alternate_write *cmd_resp = 4488 (struct i40e_aqc_alternate_write *)&desc.params.raw; 4489 i40e_status status; 4490 4491 if (!reg_val0) 4492 return I40E_ERR_PARAM; 4493 4494 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_alternate_read); 4495 cmd_resp->address0 = cpu_to_le32(reg_addr0); 4496 cmd_resp->address1 = cpu_to_le32(reg_addr1); 4497 4498 status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL); 4499 4500 if (!status) { 4501 *reg_val0 = le32_to_cpu(cmd_resp->data0); 4502 4503 if (reg_val1) 4504 *reg_val1 = le32_to_cpu(cmd_resp->data1); 4505 } 4506 4507 return status; 4508 } 4509 4510 /** 4511 * i40e_aq_suspend_port_tx 4512 * @hw: pointer to the hardware structure 4513 * @seid: port seid 4514 * @cmd_details: pointer to command details structure or NULL 4515 * 4516 * Suspend port's Tx traffic 4517 **/ 4518 i40e_status i40e_aq_suspend_port_tx(struct i40e_hw *hw, u16 seid, 4519 struct i40e_asq_cmd_details *cmd_details) 4520 { 4521 struct i40e_aqc_tx_sched_ind *cmd; 4522 struct i40e_aq_desc desc; 4523 i40e_status status; 4524 4525 cmd = (struct i40e_aqc_tx_sched_ind *)&desc.params.raw; 4526 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_suspend_port_tx); 4527 cmd->vsi_seid = cpu_to_le16(seid); 4528 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 4529 4530 return status; 4531 } 4532 4533 /** 4534 * i40e_aq_resume_port_tx 4535 * @hw: pointer to the hardware structure 4536 * @cmd_details: pointer to command details structure or NULL 4537 * 4538 * Resume port's Tx traffic 4539 **/ 4540 i40e_status i40e_aq_resume_port_tx(struct i40e_hw *hw, 4541 struct i40e_asq_cmd_details *cmd_details) 4542 { 4543 struct i40e_aq_desc desc; 4544 i40e_status status; 4545 4546 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_resume_port_tx); 4547 4548 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 4549 4550 return status; 4551 } 4552 4553 /** 4554 * i40e_set_pci_config_data - store PCI bus info 4555 * @hw: pointer to hardware structure 4556 * @link_status: the link status word from PCI config space 4557 * 4558 * Stores the PCI bus info (speed, width, type) within the i40e_hw structure 4559 **/ 4560 void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status) 4561 { 4562 hw->bus.type = i40e_bus_type_pci_express; 4563 4564 switch (link_status & PCI_EXP_LNKSTA_NLW) { 4565 case PCI_EXP_LNKSTA_NLW_X1: 4566 hw->bus.width = i40e_bus_width_pcie_x1; 4567 break; 4568 case PCI_EXP_LNKSTA_NLW_X2: 4569 hw->bus.width = i40e_bus_width_pcie_x2; 4570 break; 4571 case PCI_EXP_LNKSTA_NLW_X4: 4572 hw->bus.width = i40e_bus_width_pcie_x4; 4573 break; 4574 case PCI_EXP_LNKSTA_NLW_X8: 4575 hw->bus.width = i40e_bus_width_pcie_x8; 4576 break; 4577 default: 4578 hw->bus.width = i40e_bus_width_unknown; 4579 break; 4580 } 4581 4582 switch (link_status & PCI_EXP_LNKSTA_CLS) { 4583 case PCI_EXP_LNKSTA_CLS_2_5GB: 4584 hw->bus.speed = i40e_bus_speed_2500; 4585 break; 4586 case PCI_EXP_LNKSTA_CLS_5_0GB: 4587 hw->bus.speed = i40e_bus_speed_5000; 4588 break; 4589 case PCI_EXP_LNKSTA_CLS_8_0GB: 4590 hw->bus.speed = i40e_bus_speed_8000; 4591 break; 4592 default: 4593 hw->bus.speed = i40e_bus_speed_unknown; 4594 break; 4595 } 4596 } 4597 4598 /** 4599 * i40e_aq_debug_dump 4600 * @hw: pointer to the hardware structure 4601 * @cluster_id: specific cluster to dump 4602 * @table_id: 
table id within cluster 4603 * @start_index: index of line in the block to read 4604 * @buff_size: dump buffer size 4605 * @buff: dump buffer 4606 * @ret_buff_size: actual buffer size returned 4607 * @ret_next_table: next block to read 4608 * @ret_next_index: next index to read 4609 * @cmd_details: pointer to command details structure or NULL 4610 * 4611 * Dump internal FW/HW data for debug purposes. 4612 * 4613 **/ 4614 i40e_status i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id, 4615 u8 table_id, u32 start_index, u16 buff_size, 4616 void *buff, u16 *ret_buff_size, 4617 u8 *ret_next_table, u32 *ret_next_index, 4618 struct i40e_asq_cmd_details *cmd_details) 4619 { 4620 struct i40e_aq_desc desc; 4621 struct i40e_aqc_debug_dump_internals *cmd = 4622 (struct i40e_aqc_debug_dump_internals *)&desc.params.raw; 4623 struct i40e_aqc_debug_dump_internals *resp = 4624 (struct i40e_aqc_debug_dump_internals *)&desc.params.raw; 4625 i40e_status status; 4626 4627 if (buff_size == 0 || !buff) 4628 return I40E_ERR_PARAM; 4629 4630 i40e_fill_default_direct_cmd_desc(&desc, 4631 i40e_aqc_opc_debug_dump_internals); 4632 /* Indirect Command */ 4633 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 4634 if (buff_size > I40E_AQ_LARGE_BUF) 4635 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 4636 4637 cmd->cluster_id = cluster_id; 4638 cmd->table_id = table_id; 4639 cmd->idx = cpu_to_le32(start_index); 4640 4641 desc.datalen = cpu_to_le16(buff_size); 4642 4643 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); 4644 if (!status) { 4645 if (ret_buff_size) 4646 *ret_buff_size = le16_to_cpu(desc.datalen); 4647 if (ret_next_table) 4648 *ret_next_table = resp->table_id; 4649 if (ret_next_index) 4650 *ret_next_index = le32_to_cpu(resp->idx); 4651 } 4652 4653 return status; 4654 } 4655 4656 /** 4657 * i40e_read_bw_from_alt_ram 4658 * @hw: pointer to the hardware structure 4659 * @max_bw: pointer for max_bw read 4660 * @min_bw: pointer for min_bw read 4661 * @min_valid: pointer for bool that is true if min_bw is a valid value 4662 * @max_valid: pointer for bool that is true if max_bw is a valid value 4663 * 4664 * Read bw from the alternate ram for the given pf 4665 **/ 4666 i40e_status i40e_read_bw_from_alt_ram(struct i40e_hw *hw, 4667 u32 *max_bw, u32 *min_bw, 4668 bool *min_valid, bool *max_valid) 4669 { 4670 i40e_status status; 4671 u32 max_bw_addr, min_bw_addr; 4672 4673 /* Calculate the address of the min/max bw registers */ 4674 max_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET + 4675 I40E_ALT_STRUCT_MAX_BW_OFFSET + 4676 (I40E_ALT_STRUCT_DWORDS_PER_PF * hw->pf_id); 4677 min_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET + 4678 I40E_ALT_STRUCT_MIN_BW_OFFSET + 4679 (I40E_ALT_STRUCT_DWORDS_PER_PF * hw->pf_id); 4680 4681 /* Read the bandwidths from alt ram */ 4682 status = i40e_aq_alternate_read(hw, max_bw_addr, max_bw, 4683 min_bw_addr, min_bw); 4684 4685 if (*min_bw & I40E_ALT_BW_VALID_MASK) 4686 *min_valid = true; 4687 else 4688 *min_valid = false; 4689 4690 if (*max_bw & I40E_ALT_BW_VALID_MASK) 4691 *max_valid = true; 4692 else 4693 *max_valid = false; 4694 4695 return status; 4696 } 4697 4698 /** 4699 * i40e_aq_configure_partition_bw 4700 * @hw: pointer to the hardware structure 4701 * @bw_data: Buffer holding valid pfs and bw limits 4702 * @cmd_details: pointer to command details 4703 * 4704 * Configure partitions guaranteed/max bw 4705 **/ 4706 i40e_status i40e_aq_configure_partition_bw(struct i40e_hw *hw, 4707 struct i40e_aqc_configure_partition_bw_data *bw_data, 4708 struct 
i40e_asq_cmd_details *cmd_details) 4709 { 4710 i40e_status status; 4711 struct i40e_aq_desc desc; 4712 u16 bwd_size = sizeof(*bw_data); 4713 4714 i40e_fill_default_direct_cmd_desc(&desc, 4715 i40e_aqc_opc_configure_partition_bw); 4716 4717 /* Indirect command */ 4718 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 4719 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD); 4720 4721 if (bwd_size > I40E_AQ_LARGE_BUF) 4722 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 4723 4724 desc.datalen = cpu_to_le16(bwd_size); 4725 4726 status = i40e_asq_send_command(hw, &desc, bw_data, bwd_size, 4727 cmd_details); 4728 4729 return status; 4730 } 4731 4732 /** 4733 * i40e_read_phy_register_clause22 4734 * @hw: pointer to the HW structure 4735 * @reg: register address in the page 4736 * @phy_addr: PHY address on MDIO interface 4737 * @value: PHY register value 4738 * 4739 * Reads specified PHY register value 4740 **/ 4741 i40e_status i40e_read_phy_register_clause22(struct i40e_hw *hw, 4742 u16 reg, u8 phy_addr, u16 *value) 4743 { 4744 i40e_status status = I40E_ERR_TIMEOUT; 4745 u8 port_num = (u8)hw->func_caps.mdio_port_num; 4746 u32 command = 0; 4747 u16 retry = 1000; 4748 4749 command = (reg << I40E_GLGEN_MSCA_DEVADD_SHIFT) | 4750 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | 4751 (I40E_MDIO_CLAUSE22_OPCODE_READ_MASK) | 4752 (I40E_MDIO_CLAUSE22_STCODE_MASK) | 4753 (I40E_GLGEN_MSCA_MDICMD_MASK); 4754 wr32(hw, I40E_GLGEN_MSCA(port_num), command); 4755 do { 4756 command = rd32(hw, I40E_GLGEN_MSCA(port_num)); 4757 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { 4758 status = 0; 4759 break; 4760 } 4761 udelay(10); 4762 retry--; 4763 } while (retry); 4764 4765 if (status) { 4766 i40e_debug(hw, I40E_DEBUG_PHY, 4767 "PHY: Can't write command to external PHY.\n"); 4768 } else { 4769 command = rd32(hw, I40E_GLGEN_MSRWD(port_num)); 4770 *value = (command & I40E_GLGEN_MSRWD_MDIRDDATA_MASK) >> 4771 I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT; 4772 } 4773 4774 return status; 4775 } 4776 4777 /** 4778 * i40e_write_phy_register_clause22 4779 * @hw: pointer to the HW structure 4780 * @reg: register address in the page 4781 * @phy_addr: PHY address on MDIO interface 4782 * @value: PHY register value 4783 * 4784 * Writes specified PHY register value 4785 **/ 4786 i40e_status i40e_write_phy_register_clause22(struct i40e_hw *hw, 4787 u16 reg, u8 phy_addr, u16 value) 4788 { 4789 i40e_status status = I40E_ERR_TIMEOUT; 4790 u8 port_num = (u8)hw->func_caps.mdio_port_num; 4791 u32 command = 0; 4792 u16 retry = 1000; 4793 4794 command = value << I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT; 4795 wr32(hw, I40E_GLGEN_MSRWD(port_num), command); 4796 4797 command = (reg << I40E_GLGEN_MSCA_DEVADD_SHIFT) | 4798 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | 4799 (I40E_MDIO_CLAUSE22_OPCODE_WRITE_MASK) | 4800 (I40E_MDIO_CLAUSE22_STCODE_MASK) | 4801 (I40E_GLGEN_MSCA_MDICMD_MASK); 4802 4803 wr32(hw, I40E_GLGEN_MSCA(port_num), command); 4804 do { 4805 command = rd32(hw, I40E_GLGEN_MSCA(port_num)); 4806 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { 4807 status = 0; 4808 break; 4809 } 4810 udelay(10); 4811 retry--; 4812 } while (retry); 4813 4814 return status; 4815 } 4816 4817 /** 4818 * i40e_read_phy_register_clause45 4819 * @hw: pointer to the HW structure 4820 * @page: registers page number 4821 * @reg: register address in the page 4822 * @phy_addr: PHY address on MDIO interface 4823 * @value: PHY register value 4824 * 4825 * Reads specified PHY register value 4826 **/ 4827 i40e_status i40e_read_phy_register_clause45(struct i40e_hw *hw, 4828 u8 page, u16 reg, u8 
phy_addr, u16 *value) 4829 { 4830 i40e_status status = I40E_ERR_TIMEOUT; 4831 u32 command = 0; 4832 u16 retry = 1000; 4833 u8 port_num = hw->func_caps.mdio_port_num; 4834 4835 command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) | 4836 (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) | 4837 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | 4838 (I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK) | 4839 (I40E_MDIO_CLAUSE45_STCODE_MASK) | 4840 (I40E_GLGEN_MSCA_MDICMD_MASK) | 4841 (I40E_GLGEN_MSCA_MDIINPROGEN_MASK); 4842 wr32(hw, I40E_GLGEN_MSCA(port_num), command); 4843 do { 4844 command = rd32(hw, I40E_GLGEN_MSCA(port_num)); 4845 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { 4846 status = 0; 4847 break; 4848 } 4849 usleep_range(10, 20); 4850 retry--; 4851 } while (retry); 4852 4853 if (status) { 4854 i40e_debug(hw, I40E_DEBUG_PHY, 4855 "PHY: Can't write command to external PHY.\n"); 4856 goto phy_read_end; 4857 } 4858 4859 command = (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) | 4860 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | 4861 (I40E_MDIO_CLAUSE45_OPCODE_READ_MASK) | 4862 (I40E_MDIO_CLAUSE45_STCODE_MASK) | 4863 (I40E_GLGEN_MSCA_MDICMD_MASK) | 4864 (I40E_GLGEN_MSCA_MDIINPROGEN_MASK); 4865 status = I40E_ERR_TIMEOUT; 4866 retry = 1000; 4867 wr32(hw, I40E_GLGEN_MSCA(port_num), command); 4868 do { 4869 command = rd32(hw, I40E_GLGEN_MSCA(port_num)); 4870 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { 4871 status = 0; 4872 break; 4873 } 4874 usleep_range(10, 20); 4875 retry--; 4876 } while (retry); 4877 4878 if (!status) { 4879 command = rd32(hw, I40E_GLGEN_MSRWD(port_num)); 4880 *value = (command & I40E_GLGEN_MSRWD_MDIRDDATA_MASK) >> 4881 I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT; 4882 } else { 4883 i40e_debug(hw, I40E_DEBUG_PHY, 4884 "PHY: Can't read register value from external PHY.\n"); 4885 } 4886 4887 phy_read_end: 4888 return status; 4889 } 4890 4891 /** 4892 * i40e_write_phy_register_clause45 4893 * @hw: pointer to the HW structure 4894 * @page: registers page number 4895 * @reg: register address in the page 4896 * @phy_addr: PHY address on MDIO interface 4897 * @value: PHY register value 4898 * 4899 * Writes value to specified PHY register 4900 **/ 4901 i40e_status i40e_write_phy_register_clause45(struct i40e_hw *hw, 4902 u8 page, u16 reg, u8 phy_addr, u16 value) 4903 { 4904 i40e_status status = I40E_ERR_TIMEOUT; 4905 u32 command = 0; 4906 u16 retry = 1000; 4907 u8 port_num = hw->func_caps.mdio_port_num; 4908 4909 command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) | 4910 (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) | 4911 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | 4912 (I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK) | 4913 (I40E_MDIO_CLAUSE45_STCODE_MASK) | 4914 (I40E_GLGEN_MSCA_MDICMD_MASK) | 4915 (I40E_GLGEN_MSCA_MDIINPROGEN_MASK); 4916 wr32(hw, I40E_GLGEN_MSCA(port_num), command); 4917 do { 4918 command = rd32(hw, I40E_GLGEN_MSCA(port_num)); 4919 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { 4920 status = 0; 4921 break; 4922 } 4923 usleep_range(10, 20); 4924 retry--; 4925 } while (retry); 4926 if (status) { 4927 i40e_debug(hw, I40E_DEBUG_PHY, 4928 "PHY: Can't write command to external PHY.\n"); 4929 goto phy_write_end; 4930 } 4931 4932 command = value << I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT; 4933 wr32(hw, I40E_GLGEN_MSRWD(port_num), command); 4934 4935 command = (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) | 4936 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | 4937 (I40E_MDIO_CLAUSE45_OPCODE_WRITE_MASK) | 4938 (I40E_MDIO_CLAUSE45_STCODE_MASK) | 4939 (I40E_GLGEN_MSCA_MDICMD_MASK) | 4940 (I40E_GLGEN_MSCA_MDIINPROGEN_MASK); 4941 status = 
I40E_ERR_TIMEOUT; 4942 retry = 1000; 4943 wr32(hw, I40E_GLGEN_MSCA(port_num), command); 4944 do { 4945 command = rd32(hw, I40E_GLGEN_MSCA(port_num)); 4946 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { 4947 status = 0; 4948 break; 4949 } 4950 usleep_range(10, 20); 4951 retry--; 4952 } while (retry); 4953 4954 phy_write_end: 4955 return status; 4956 } 4957 4958 /** 4959 * i40e_write_phy_register 4960 * @hw: pointer to the HW structure 4961 * @page: registers page number 4962 * @reg: register address in the page 4963 * @phy_addr: PHY address on MDIO interface 4964 * @value: PHY register value 4965 * 4966 * Writes value to specified PHY register 4967 **/ 4968 i40e_status i40e_write_phy_register(struct i40e_hw *hw, 4969 u8 page, u16 reg, u8 phy_addr, u16 value) 4970 { 4971 i40e_status status; 4972 4973 switch (hw->device_id) { 4974 case I40E_DEV_ID_1G_BASE_T_X722: 4975 status = i40e_write_phy_register_clause22(hw, reg, phy_addr, 4976 value); 4977 break; 4978 case I40E_DEV_ID_1G_BASE_T_BC: 4979 case I40E_DEV_ID_5G_BASE_T_BC: 4980 case I40E_DEV_ID_10G_BASE_T: 4981 case I40E_DEV_ID_10G_BASE_T4: 4982 case I40E_DEV_ID_10G_BASE_T_BC: 4983 case I40E_DEV_ID_10G_BASE_T_X722: 4984 case I40E_DEV_ID_25G_B: 4985 case I40E_DEV_ID_25G_SFP28: 4986 status = i40e_write_phy_register_clause45(hw, page, reg, 4987 phy_addr, value); 4988 break; 4989 default: 4990 status = I40E_ERR_UNKNOWN_PHY; 4991 break; 4992 } 4993 4994 return status; 4995 } 4996 4997 /** 4998 * i40e_read_phy_register 4999 * @hw: pointer to the HW structure 5000 * @page: registers page number 5001 * @reg: register address in the page 5002 * @phy_addr: PHY address on MDIO interface 5003 * @value: PHY register value 5004 * 5005 * Reads specified PHY register value 5006 **/ 5007 i40e_status i40e_read_phy_register(struct i40e_hw *hw, 5008 u8 page, u16 reg, u8 phy_addr, u16 *value) 5009 { 5010 i40e_status status; 5011 5012 switch (hw->device_id) { 5013 case I40E_DEV_ID_1G_BASE_T_X722: 5014 status = i40e_read_phy_register_clause22(hw, reg, phy_addr, 5015 value); 5016 break; 5017 case I40E_DEV_ID_1G_BASE_T_BC: 5018 case I40E_DEV_ID_5G_BASE_T_BC: 5019 case I40E_DEV_ID_10G_BASE_T: 5020 case I40E_DEV_ID_10G_BASE_T4: 5021 case I40E_DEV_ID_10G_BASE_T_BC: 5022 case I40E_DEV_ID_10G_BASE_T_X722: 5023 case I40E_DEV_ID_25G_B: 5024 case I40E_DEV_ID_25G_SFP28: 5025 status = i40e_read_phy_register_clause45(hw, page, reg, 5026 phy_addr, value); 5027 break; 5028 default: 5029 status = I40E_ERR_UNKNOWN_PHY; 5030 break; 5031 } 5032 5033 return status; 5034 } 5035 5036 /** 5037 * i40e_get_phy_address 5038 * @hw: pointer to the HW structure 5039 * @dev_num: PHY port num that address we want 5040 * 5041 * Gets PHY address for current port 5042 **/ 5043 u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num) 5044 { 5045 u8 port_num = hw->func_caps.mdio_port_num; 5046 u32 reg_val = rd32(hw, I40E_GLGEN_MDIO_I2C_SEL(port_num)); 5047 5048 return (u8)(reg_val >> ((dev_num + 1) * 5)) & 0x1f; 5049 } 5050 5051 /** 5052 * i40e_blink_phy_link_led 5053 * @hw: pointer to the HW structure 5054 * @time: time how long led will blinks in secs 5055 * @interval: gap between LED on and off in msecs 5056 * 5057 * Blinks PHY link LED 5058 **/ 5059 i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw, 5060 u32 time, u32 interval) 5061 { 5062 i40e_status status = 0; 5063 u32 i; 5064 u16 led_ctl; 5065 u16 gpio_led_port; 5066 u16 led_reg; 5067 u16 led_addr = I40E_PHY_LED_PROV_REG_1; 5068 u8 phy_addr = 0; 5069 u8 port_num; 5070 5071 i = rd32(hw, I40E_PFGEN_PORTNUM); 5072 port_num = (u8)(i & 
I40E_PFGEN_PORTNUM_PORT_NUM_MASK); 5073 phy_addr = i40e_get_phy_address(hw, port_num); 5074 5075 for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++, 5076 led_addr++) { 5077 status = i40e_read_phy_register_clause45(hw, 5078 I40E_PHY_COM_REG_PAGE, 5079 led_addr, phy_addr, 5080 &led_reg); 5081 if (status) 5082 goto phy_blinking_end; 5083 led_ctl = led_reg; 5084 if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) { 5085 led_reg = 0; 5086 status = i40e_write_phy_register_clause45(hw, 5087 I40E_PHY_COM_REG_PAGE, 5088 led_addr, phy_addr, 5089 led_reg); 5090 if (status) 5091 goto phy_blinking_end; 5092 break; 5093 } 5094 } 5095 5096 if (time > 0 && interval > 0) { 5097 for (i = 0; i < time * 1000; i += interval) { 5098 status = i40e_read_phy_register_clause45(hw, 5099 I40E_PHY_COM_REG_PAGE, 5100 led_addr, phy_addr, &led_reg); 5101 if (status) 5102 goto restore_config; 5103 if (led_reg & I40E_PHY_LED_MANUAL_ON) 5104 led_reg = 0; 5105 else 5106 led_reg = I40E_PHY_LED_MANUAL_ON; 5107 status = i40e_write_phy_register_clause45(hw, 5108 I40E_PHY_COM_REG_PAGE, 5109 led_addr, phy_addr, led_reg); 5110 if (status) 5111 goto restore_config; 5112 msleep(interval); 5113 } 5114 } 5115 5116 restore_config: 5117 status = i40e_write_phy_register_clause45(hw, 5118 I40E_PHY_COM_REG_PAGE, 5119 led_addr, phy_addr, led_ctl); 5120 5121 phy_blinking_end: 5122 return status; 5123 } 5124 5125 /** 5126 * i40e_led_get_reg - read LED register 5127 * @hw: pointer to the HW structure 5128 * @led_addr: LED register address 5129 * @reg_val: read register value 5130 **/ 5131 static enum i40e_status_code i40e_led_get_reg(struct i40e_hw *hw, u16 led_addr, 5132 u32 *reg_val) 5133 { 5134 enum i40e_status_code status; 5135 u8 phy_addr = 0; 5136 u8 port_num; 5137 u32 i; 5138 5139 *reg_val = 0; 5140 if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) { 5141 status = 5142 i40e_aq_get_phy_register(hw, 5143 I40E_AQ_PHY_REG_ACCESS_EXTERNAL, 5144 I40E_PHY_COM_REG_PAGE, true, 5145 I40E_PHY_LED_PROV_REG_1, 5146 reg_val, NULL); 5147 } else { 5148 i = rd32(hw, I40E_PFGEN_PORTNUM); 5149 port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK); 5150 phy_addr = i40e_get_phy_address(hw, port_num); 5151 status = i40e_read_phy_register_clause45(hw, 5152 I40E_PHY_COM_REG_PAGE, 5153 led_addr, phy_addr, 5154 (u16 *)reg_val); 5155 } 5156 return status; 5157 } 5158 5159 /** 5160 * i40e_led_set_reg - write LED register 5161 * @hw: pointer to the HW structure 5162 * @led_addr: LED register address 5163 * @reg_val: register value to write 5164 **/ 5165 static enum i40e_status_code i40e_led_set_reg(struct i40e_hw *hw, u16 led_addr, 5166 u32 reg_val) 5167 { 5168 enum i40e_status_code status; 5169 u8 phy_addr = 0; 5170 u8 port_num; 5171 u32 i; 5172 5173 if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) { 5174 status = 5175 i40e_aq_set_phy_register(hw, 5176 I40E_AQ_PHY_REG_ACCESS_EXTERNAL, 5177 I40E_PHY_COM_REG_PAGE, true, 5178 I40E_PHY_LED_PROV_REG_1, 5179 reg_val, NULL); 5180 } else { 5181 i = rd32(hw, I40E_PFGEN_PORTNUM); 5182 port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK); 5183 phy_addr = i40e_get_phy_address(hw, port_num); 5184 status = i40e_write_phy_register_clause45(hw, 5185 I40E_PHY_COM_REG_PAGE, 5186 led_addr, phy_addr, 5187 (u16)reg_val); 5188 } 5189 5190 return status; 5191 } 5192 5193 /** 5194 * i40e_led_get_phy - return current on/off mode 5195 * @hw: pointer to the hw struct 5196 * @led_addr: address of led register to use 5197 * @val: original value of register to use 5198 * 5199 **/ 5200 i40e_status i40e_led_get_phy(struct i40e_hw *hw, u16 
/**
 * i40e_led_get_phy - return current on/off mode
 * @hw: pointer to the hw struct
 * @led_addr: address of led register to use
 * @val: original value of register to use
 *
 **/
i40e_status i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
			     u16 *val)
{
	i40e_status status = 0;
	u16 gpio_led_port;
	u8 phy_addr = 0;
	u16 reg_val;
	u16 temp_addr;
	u8 port_num;
	u32 i;
	u32 reg_val_aq;

	if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
		status =
		      i40e_aq_get_phy_register(hw,
					       I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
					       I40E_PHY_COM_REG_PAGE, true,
					       I40E_PHY_LED_PROV_REG_1,
					       &reg_val_aq, NULL);
		if (status == I40E_SUCCESS)
			*val = (u16)reg_val_aq;
		return status;
	}
	temp_addr = I40E_PHY_LED_PROV_REG_1;
	i = rd32(hw, I40E_PFGEN_PORTNUM);
	port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
	phy_addr = i40e_get_phy_address(hw, port_num);

	for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++,
	     temp_addr++) {
		status = i40e_read_phy_register_clause45(hw,
							 I40E_PHY_COM_REG_PAGE,
							 temp_addr, phy_addr,
							 &reg_val);
		if (status)
			return status;
		*val = reg_val;
		if (reg_val & I40E_PHY_LED_LINK_MODE_MASK) {
			*led_addr = temp_addr;
			break;
		}
	}
	return status;
}

/**
 * i40e_led_set_phy
 * @hw: pointer to the HW structure
 * @on: true or false
 * @led_addr: address of led register to use
 * @mode: original val plus bit for set or ignore
 *
 * Set LEDs on or off when controlled by the PHY
 *
 **/
i40e_status i40e_led_set_phy(struct i40e_hw *hw, bool on,
			     u16 led_addr, u32 mode)
{
	i40e_status status = 0;
	u32 led_ctl = 0;
	u32 led_reg = 0;

	status = i40e_led_get_reg(hw, led_addr, &led_reg);
	if (status)
		return status;
	led_ctl = led_reg;
	if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) {
		led_reg = 0;
		status = i40e_led_set_reg(hw, led_addr, led_reg);
		if (status)
			return status;
	}
	status = i40e_led_get_reg(hw, led_addr, &led_reg);
	if (status)
		goto restore_config;
	if (on)
		led_reg = I40E_PHY_LED_MANUAL_ON;
	else
		led_reg = 0;

	status = i40e_led_set_reg(hw, led_addr, led_reg);
	if (status)
		goto restore_config;
	if (mode & I40E_PHY_LED_MODE_ORIG) {
		led_ctl = (mode & I40E_PHY_LED_MODE_MASK);
		status = i40e_led_set_reg(hw, led_addr, led_ctl);
	}
	return status;

restore_config:
	status = i40e_led_set_reg(hw, led_addr, led_ctl);
	return status;
}
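/*
 * Illustrative sketch (not part of the original driver): one way a caller,
 * e.g. an ethtool "identify" handler, might force the PHY-controlled LED on
 * or off using the two helpers above.  The i40e_example_* name is
 * hypothetical; mode 0 is passed so no original configuration is restored.
 */
static i40e_status __maybe_unused
i40e_example_force_phy_led(struct i40e_hw *hw, bool on)
{
	u16 led_addr = 0, led_val = 0;
	i40e_status status;

	/* find the LED provisioning register currently in link mode */
	status = i40e_led_get_phy(hw, &led_addr, &led_val);
	if (status)
		return status;

	/* switch that LED to manual control and set it on or off */
	return i40e_led_set_phy(hw, on, led_addr, 0);
}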
/**
 * i40e_aq_rx_ctl_read_register - use FW to read from an Rx control register
 * @hw: pointer to the hw struct
 * @reg_addr: register address
 * @reg_val: ptr to register value
 * @cmd_details: pointer to command details structure or NULL
 *
 * Use the firmware to read the Rx control register,
 * especially useful if the Rx unit is under heavy pressure
 **/
i40e_status i40e_aq_rx_ctl_read_register(struct i40e_hw *hw,
					 u32 reg_addr, u32 *reg_val,
					 struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_rx_ctl_reg_read_write *cmd_resp =
		(struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw;
	i40e_status status;

	if (!reg_val)
		return I40E_ERR_PARAM;

	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_rx_ctl_reg_read);

	cmd_resp->address = cpu_to_le32(reg_addr);

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	if (status == 0)
		*reg_val = le32_to_cpu(cmd_resp->value);

	return status;
}

/**
 * i40e_read_rx_ctl - read from an Rx control register
 * @hw: pointer to the hw struct
 * @reg_addr: register address
 **/
u32 i40e_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr)
{
	i40e_status status = 0;
	bool use_register;
	int retry = 5;
	u32 val = 0;

	use_register = (((hw->aq.api_maj_ver == 1) &&
			 (hw->aq.api_min_ver < 5)) ||
			(hw->mac.type == I40E_MAC_X722));
	if (!use_register) {
do_retry:
		status = i40e_aq_rx_ctl_read_register(hw, reg_addr, &val, NULL);
		if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) {
			usleep_range(1000, 2000);
			retry--;
			goto do_retry;
		}
	}

	/* if the AQ access failed, try the old-fashioned way */
	if (status || use_register)
		val = rd32(hw, reg_addr);

	return val;
}

/**
 * i40e_aq_rx_ctl_write_register
 * @hw: pointer to the hw struct
 * @reg_addr: register address
 * @reg_val: register value
 * @cmd_details: pointer to command details structure or NULL
 *
 * Use the firmware to write to an Rx control register,
 * especially useful if the Rx unit is under heavy pressure
 **/
i40e_status i40e_aq_rx_ctl_write_register(struct i40e_hw *hw,
					  u32 reg_addr, u32 reg_val,
					  struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_rx_ctl_reg_read_write *cmd =
		(struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw;
	i40e_status status;

	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_rx_ctl_reg_write);

	cmd->address = cpu_to_le32(reg_addr);
	cmd->value = cpu_to_le32(reg_val);

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	return status;
}

/**
 * i40e_write_rx_ctl - write to an Rx control register
 * @hw: pointer to the hw struct
 * @reg_addr: register address
 * @reg_val: register value
 **/
void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val)
{
	i40e_status status = 0;
	bool use_register;
	int retry = 5;

	use_register = (((hw->aq.api_maj_ver == 1) &&
			 (hw->aq.api_min_ver < 5)) ||
			(hw->mac.type == I40E_MAC_X722));
	if (!use_register) {
do_retry:
		status = i40e_aq_rx_ctl_write_register(hw, reg_addr,
						       reg_val, NULL);
		if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) {
			usleep_range(1000, 2000);
			retry--;
			goto do_retry;
		}
	}

	/* if the AQ access failed, try the old-fashioned way */
	if (status || use_register)
		wr32(hw, reg_addr, reg_val);
}
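/*
 * Illustrative sketch (not part of the original driver): a read-modify-write
 * on an Rx control register should go through the two wrappers above so the
 * firmware-assisted path is used whenever the AdminQ API supports it.  The
 * i40e_example_* name and the set_bits parameter are placeholders.
 */
static void __maybe_unused
i40e_example_rx_ctl_set_bits(struct i40e_hw *hw, u32 reg_addr, u32 set_bits)
{
	u32 val;

	val = i40e_read_rx_ctl(hw, reg_addr);
	i40e_write_rx_ctl(hw, reg_addr, val | set_bits);
}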
/**
 * i40e_mdio_if_number_selection - MDIO I/F number selection
 * @hw: pointer to the hw struct
 * @set_mdio: use MDIO I/F number specified by mdio_num
 * @mdio_num: MDIO I/F number
 * @cmd: pointer to PHY Register command structure
 **/
static void i40e_mdio_if_number_selection(struct i40e_hw *hw, bool set_mdio,
					  u8 mdio_num,
					  struct i40e_aqc_phy_register_access *cmd)
{
	if (set_mdio && cmd->phy_interface == I40E_AQ_PHY_REG_ACCESS_EXTERNAL) {
		if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_EXTENDED)
			cmd->cmd_flags |=
				I40E_AQ_PHY_REG_ACCESS_SET_MDIO_IF_NUMBER |
				((mdio_num <<
				I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_SHIFT) &
				I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_MASK);
		else
			i40e_debug(hw, I40E_DEBUG_PHY,
				   "MDIO I/F number selection not supported by current FW version.\n");
	}
}

/**
 * i40e_aq_set_phy_register_ext
 * @hw: pointer to the hw struct
 * @phy_select: select which phy should be accessed
 * @dev_addr: PHY device address
 * @page_change: flag to indicate if phy page should be updated
 * @set_mdio: use MDIO I/F number specified by mdio_num
 * @mdio_num: MDIO I/F number
 * @reg_addr: PHY register address
 * @reg_val: new register value
 * @cmd_details: pointer to command details structure or NULL
 *
 * Write the external PHY register.
 * NOTE: In common cases MDIO I/F number should not be changed, that's why you
 * may use the simple wrapper i40e_aq_set_phy_register.
 **/
enum i40e_status_code i40e_aq_set_phy_register_ext(struct i40e_hw *hw,
			     u8 phy_select, u8 dev_addr, bool page_change,
			     bool set_mdio, u8 mdio_num,
			     u32 reg_addr, u32 reg_val,
			     struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_phy_register_access *cmd =
		(struct i40e_aqc_phy_register_access *)&desc.params.raw;
	i40e_status status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_set_phy_register);

	cmd->phy_interface = phy_select;
	cmd->dev_address = dev_addr;
	cmd->reg_address = cpu_to_le32(reg_addr);
	cmd->reg_value = cpu_to_le32(reg_val);

	i40e_mdio_if_number_selection(hw, set_mdio, mdio_num, cmd);

	if (!page_change)
		cmd->cmd_flags = I40E_AQ_PHY_REG_ACCESS_DONT_CHANGE_QSFP_PAGE;

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	return status;
}
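/*
 * Illustrative sketch (not part of the original driver): the "simple wrapper"
 * mentioned in the NOTE above essentially fixes set_mdio/mdio_num; a caller
 * that never overrides the MDIO I/F number could use something like the
 * hypothetical helper below instead of the _ext variant.
 */
static enum i40e_status_code __maybe_unused
i40e_example_set_phy_register(struct i40e_hw *hw, u8 phy_select, u8 dev_addr,
			      bool page_change, u32 reg_addr, u32 reg_val)
{
	return i40e_aq_set_phy_register_ext(hw, phy_select, dev_addr,
					    page_change, false, 0,
					    reg_addr, reg_val, NULL);
}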
/**
 * i40e_aq_get_phy_register_ext
 * @hw: pointer to the hw struct
 * @phy_select: select which phy should be accessed
 * @dev_addr: PHY device address
 * @page_change: flag to indicate if phy page should be updated
 * @set_mdio: use MDIO I/F number specified by mdio_num
 * @mdio_num: MDIO I/F number
 * @reg_addr: PHY register address
 * @reg_val: read register value
 * @cmd_details: pointer to command details structure or NULL
 *
 * Read the external PHY register.
 * NOTE: In common cases MDIO I/F number should not be changed, that's why you
 * may use the simple wrapper i40e_aq_get_phy_register.
 **/
enum i40e_status_code i40e_aq_get_phy_register_ext(struct i40e_hw *hw,
			     u8 phy_select, u8 dev_addr, bool page_change,
			     bool set_mdio, u8 mdio_num,
			     u32 reg_addr, u32 *reg_val,
			     struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_phy_register_access *cmd =
		(struct i40e_aqc_phy_register_access *)&desc.params.raw;
	i40e_status status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_get_phy_register);

	cmd->phy_interface = phy_select;
	cmd->dev_address = dev_addr;
	cmd->reg_address = cpu_to_le32(reg_addr);

	i40e_mdio_if_number_selection(hw, set_mdio, mdio_num, cmd);

	if (!page_change)
		cmd->cmd_flags = I40E_AQ_PHY_REG_ACCESS_DONT_CHANGE_QSFP_PAGE;

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
	if (!status)
		*reg_val = le32_to_cpu(cmd->reg_value);

	return status;
}

/**
 * i40e_aq_write_ddp - Write dynamic device personalization (ddp)
 * @hw: pointer to the hw struct
 * @buff: command buffer (size in bytes = buff_size)
 * @buff_size: buffer size in bytes
 * @track_id: package tracking id
 * @error_offset: returns error offset
 * @error_info: returns error information
 * @cmd_details: pointer to command details structure or NULL
 **/
enum
i40e_status_code i40e_aq_write_ddp(struct i40e_hw *hw, void *buff,
				   u16 buff_size, u32 track_id,
				   u32 *error_offset, u32 *error_info,
				   struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_write_personalization_profile *cmd =
		(struct i40e_aqc_write_personalization_profile *)
		&desc.params.raw;
	struct i40e_aqc_write_ddp_resp *resp;
	i40e_status status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_write_personalization_profile);

	desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD);
	if (buff_size > I40E_AQ_LARGE_BUF)
		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);

	desc.datalen = cpu_to_le16(buff_size);

	cmd->profile_track_id = cpu_to_le32(track_id);

	status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
	if (!status) {
		resp = (struct i40e_aqc_write_ddp_resp *)&desc.params.raw;
		if (error_offset)
			*error_offset = le32_to_cpu(resp->error_offset);
		if (error_info)
			*error_info = le32_to_cpu(resp->error_info);
	}

	return status;
}

/**
 * i40e_aq_get_ddp_list - Read dynamic device personalization (ddp)
 * @hw: pointer to the hw struct
 * @buff: command buffer (size in bytes = buff_size)
 * @buff_size: buffer size in bytes
 * @flags: AdminQ command flags
 * @cmd_details: pointer to command details structure or NULL
 **/
enum
i40e_status_code i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
				      u16 buff_size, u8 flags,
				      struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_get_applied_profiles *cmd =
		(struct i40e_aqc_get_applied_profiles *)&desc.params.raw;
	i40e_status status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_get_personalization_profile_list);

	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
	if (buff_size > I40E_AQ_LARGE_BUF)
		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
	desc.datalen = cpu_to_le16(buff_size);
	cmd->flags = flags;

	status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);

	return status;
}

/**
 * i40e_find_segment_in_package
 * @segment_type: the segment type to search for (i.e., SEGMENT_TYPE_I40E)
 * @pkg_hdr: pointer to the package header to be searched
 *
 * This function searches a package file for a particular segment type. On
 * success it returns a pointer to the segment header, otherwise it will
 * return NULL.
 **/
struct i40e_generic_seg_header *
i40e_find_segment_in_package(u32 segment_type,
			     struct i40e_package_header *pkg_hdr)
{
	struct i40e_generic_seg_header *segment;
	u32 i;

	/* Search all package segments for the requested segment type */
	for (i = 0; i < pkg_hdr->segment_count; i++) {
		segment =
			(struct i40e_generic_seg_header *)((u8 *)pkg_hdr +
			 pkg_hdr->segment_offset[i]);

		if (segment->type == segment_type)
			return segment;
	}

	return NULL;
}
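/*
 * Illustrative sketch (not part of the original driver): a DDP package buffer
 * begins with struct i40e_package_header, and callers typically locate the
 * i40e profile segment with i40e_find_segment_in_package() and cast the
 * result, roughly as below.  The i40e_example_* name is hypothetical.
 */
static struct i40e_profile_segment * __maybe_unused
i40e_example_find_profile_segment(struct i40e_package_header *pkg_hdr)
{
	return (struct i40e_profile_segment *)
		i40e_find_segment_in_package(SEGMENT_TYPE_I40E, pkg_hdr);
}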
/* Get section table in profile */
#define I40E_SECTION_TABLE(profile, sec_tbl)				\
	do {								\
		struct i40e_profile_segment *p = (profile);		\
		u32 count;						\
		u32 *nvm;						\
		count = p->device_table_count;				\
		nvm = (u32 *)&p->device_table[count];			\
		sec_tbl = (struct i40e_section_table *)&nvm[nvm[0] + 1]; \
	} while (0)

/* Get section header in profile */
#define I40E_SECTION_HEADER(profile, offset)				\
	(struct i40e_profile_section_header *)((u8 *)(profile) + (offset))

/**
 * i40e_find_section_in_profile
 * @section_type: the section type to search for (i.e., SECTION_TYPE_NOTE)
 * @profile: pointer to the i40e segment header to be searched
 *
 * This function searches the i40e segment for a particular section type. On
 * success it returns a pointer to the section header, otherwise it will
 * return NULL.
 **/
struct i40e_profile_section_header *
i40e_find_section_in_profile(u32 section_type,
			     struct i40e_profile_segment *profile)
{
	struct i40e_profile_section_header *sec;
	struct i40e_section_table *sec_tbl;
	u32 sec_off;
	u32 i;

	if (profile->header.type != SEGMENT_TYPE_I40E)
		return NULL;

	I40E_SECTION_TABLE(profile, sec_tbl);

	for (i = 0; i < sec_tbl->section_count; i++) {
		sec_off = sec_tbl->section_offset[i];
		sec = I40E_SECTION_HEADER(profile, sec_off);
		if (sec->section.type == section_type)
			return sec;
	}

	return NULL;
}

/**
 * i40e_ddp_exec_aq_section - Execute generic AQ for DDP
 * @hw: pointer to the hw struct
 * @aq: command buffer containing all data to execute AQ
 **/
static enum
i40e_status_code i40e_ddp_exec_aq_section(struct i40e_hw *hw,
					  struct i40e_profile_aq_section *aq)
{
	i40e_status status;
	struct i40e_aq_desc desc;
	u8 *msg = NULL;
	u16 msglen;

	i40e_fill_default_direct_cmd_desc(&desc, aq->opcode);
	desc.flags |= cpu_to_le16(aq->flags);
	memcpy(desc.params.raw, aq->param, sizeof(desc.params.raw));

	msglen = aq->datalen;
	if (msglen) {
		desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF |
						I40E_AQ_FLAG_RD));
		if (msglen > I40E_AQ_LARGE_BUF)
			desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
		desc.datalen = cpu_to_le16(msglen);
		msg = &aq->data[0];
	}

	status = i40e_asq_send_command(hw, &desc, msg, msglen, NULL);

	if (status) {
		i40e_debug(hw, I40E_DEBUG_PACKAGE,
			   "unable to exec DDP AQ opcode %u, error %d\n",
			   aq->opcode, status);
		return status;
	}

	/* copy returned desc to aq_buf */
	memcpy(aq->param, desc.params.raw, sizeof(desc.params.raw));

	return 0;
}
/**
 * i40e_validate_profile
 * @hw: pointer to the hardware structure
 * @profile: pointer to the profile segment of the package to be validated
 * @track_id: package tracking id
 * @rollback: flag if the profile is for rollback.
 *
 * Validates supported devices and profile's sections.
 */
static enum i40e_status_code
i40e_validate_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
		      u32 track_id, bool rollback)
{
	struct i40e_profile_section_header *sec = NULL;
	i40e_status status = 0;
	struct i40e_section_table *sec_tbl;
	u32 vendor_dev_id;
	u32 dev_cnt;
	u32 sec_off;
	u32 i;

	if (track_id == I40E_DDP_TRACKID_INVALID) {
		i40e_debug(hw, I40E_DEBUG_PACKAGE, "Invalid track_id\n");
		return I40E_NOT_SUPPORTED;
	}

	dev_cnt = profile->device_table_count;
	for (i = 0; i < dev_cnt; i++) {
		vendor_dev_id = profile->device_table[i].vendor_dev_id;
		if ((vendor_dev_id >> 16) == PCI_VENDOR_ID_INTEL &&
		    hw->device_id == (vendor_dev_id & 0xFFFF))
			break;
	}
	if (dev_cnt && i == dev_cnt) {
		i40e_debug(hw, I40E_DEBUG_PACKAGE,
			   "Device doesn't support DDP\n");
		return I40E_ERR_DEVICE_NOT_SUPPORTED;
	}

	I40E_SECTION_TABLE(profile, sec_tbl);

	/* Validate sections types */
	for (i = 0; i < sec_tbl->section_count; i++) {
		sec_off = sec_tbl->section_offset[i];
		sec = I40E_SECTION_HEADER(profile, sec_off);
		if (rollback) {
			if (sec->section.type == SECTION_TYPE_MMIO ||
			    sec->section.type == SECTION_TYPE_AQ ||
			    sec->section.type == SECTION_TYPE_RB_AQ) {
				i40e_debug(hw, I40E_DEBUG_PACKAGE,
					   "Not a roll-back package\n");
				return I40E_NOT_SUPPORTED;
			}
		} else {
			if (sec->section.type == SECTION_TYPE_RB_AQ ||
			    sec->section.type == SECTION_TYPE_RB_MMIO) {
				i40e_debug(hw, I40E_DEBUG_PACKAGE,
					   "Not an original package\n");
				return I40E_NOT_SUPPORTED;
			}
		}
	}

	return status;
}
/**
 * i40e_write_profile
 * @hw: pointer to the hardware structure
 * @profile: pointer to the profile segment of the package to be downloaded
 * @track_id: package tracking id
 *
 * Handles the download of a complete package.
 */
enum i40e_status_code
i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
		   u32 track_id)
{
	i40e_status status = 0;
	struct i40e_section_table *sec_tbl;
	struct i40e_profile_section_header *sec = NULL;
	struct i40e_profile_aq_section *ddp_aq;
	u32 section_size = 0;
	u32 offset = 0, info = 0;
	u32 sec_off;
	u32 i;

	status = i40e_validate_profile(hw, profile, track_id, false);
	if (status)
		return status;

	I40E_SECTION_TABLE(profile, sec_tbl);

	for (i = 0; i < sec_tbl->section_count; i++) {
		sec_off = sec_tbl->section_offset[i];
		sec = I40E_SECTION_HEADER(profile, sec_off);
		/* Process generic admin command */
		if (sec->section.type == SECTION_TYPE_AQ) {
			ddp_aq = (struct i40e_profile_aq_section *)&sec[1];
			status = i40e_ddp_exec_aq_section(hw, ddp_aq);
			if (status) {
				i40e_debug(hw, I40E_DEBUG_PACKAGE,
					   "Failed to execute aq: section %d, opcode %u\n",
					   i, ddp_aq->opcode);
				break;
			}
			sec->section.type = SECTION_TYPE_RB_AQ;
		}

		/* Skip any non-mmio sections */
		if (sec->section.type != SECTION_TYPE_MMIO)
			continue;

		section_size = sec->section.size +
			sizeof(struct i40e_profile_section_header);

		/* Write MMIO section */
		status = i40e_aq_write_ddp(hw, (void *)sec, (u16)section_size,
					   track_id, &offset, &info, NULL);
		if (status) {
			i40e_debug(hw, I40E_DEBUG_PACKAGE,
				   "Failed to write profile: section %d, offset %d, info %d\n",
				   i, offset, info);
			break;
		}
	}
	return status;
}

/**
 * i40e_rollback_profile
 * @hw: pointer to the hardware structure
 * @profile: pointer to the profile segment of the package to be removed
 * @track_id: package tracking id
 *
 * Rolls back previously loaded package.
 */
enum i40e_status_code
i40e_rollback_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
		      u32 track_id)
{
	struct i40e_profile_section_header *sec = NULL;
	i40e_status status = 0;
	struct i40e_section_table *sec_tbl;
	u32 offset = 0, info = 0;
	u32 section_size = 0;
	u32 sec_off;
	int i;

	status = i40e_validate_profile(hw, profile, track_id, true);
	if (status)
		return status;

	I40E_SECTION_TABLE(profile, sec_tbl);

	/* For rollback write sections in reverse */
	for (i = sec_tbl->section_count - 1; i >= 0; i--) {
		sec_off = sec_tbl->section_offset[i];
		sec = I40E_SECTION_HEADER(profile, sec_off);

		/* Skip any non-rollback sections */
		if (sec->section.type != SECTION_TYPE_RB_MMIO)
			continue;

		section_size = sec->section.size +
			sizeof(struct i40e_profile_section_header);

		/* Write roll-back MMIO section */
		status = i40e_aq_write_ddp(hw, (void *)sec, (u16)section_size,
					   track_id, &offset, &info, NULL);
		if (status) {
			i40e_debug(hw, I40E_DEBUG_PACKAGE,
				   "Failed to write profile: section %d, offset %d, info %d\n",
				   i, offset, info);
			break;
		}
	}
	return status;
}
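/*
 * Illustrative sketch (not part of the original driver): the typical load
 * sequence a caller follows - locate the i40e profile segment in the package,
 * download it with i40e_write_profile(), then register it with
 * i40e_add_pinfo_to_list() (defined just below, declared in the driver's
 * prototype header).  The i40e_example_* name and the local buffer sizing
 * are assumptions made for the sketch.
 */
static enum i40e_status_code __maybe_unused
i40e_example_load_ddp(struct i40e_hw *hw, struct i40e_package_header *pkg_hdr,
		      u32 track_id)
{
	u8 pinfo_sec[sizeof(struct i40e_profile_section_header) +
		     sizeof(struct i40e_profile_info)];
	struct i40e_profile_segment *profile;
	enum i40e_status_code status;

	profile = (struct i40e_profile_segment *)
		i40e_find_segment_in_package(SEGMENT_TYPE_I40E, pkg_hdr);
	if (!profile)
		return I40E_ERR_PARAM;

	status = i40e_write_profile(hw, profile, track_id);
	if (status)
		return status;

	return i40e_add_pinfo_to_list(hw, profile, pinfo_sec, track_id);
}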
/**
 * i40e_add_pinfo_to_list
 * @hw: pointer to the hardware structure
 * @profile: pointer to the profile segment of the package
 * @profile_info_sec: buffer for information section
 * @track_id: package tracking id
 *
 * Register a profile to the list of loaded profiles.
 */
enum i40e_status_code
i40e_add_pinfo_to_list(struct i40e_hw *hw,
		       struct i40e_profile_segment *profile,
		       u8 *profile_info_sec, u32 track_id)
{
	i40e_status status = 0;
	struct i40e_profile_section_header *sec = NULL;
	struct i40e_profile_info *pinfo;
	u32 offset = 0, info = 0;

	sec = (struct i40e_profile_section_header *)profile_info_sec;
	sec->tbl_size = 1;
	sec->data_end = sizeof(struct i40e_profile_section_header) +
			sizeof(struct i40e_profile_info);
	sec->section.type = SECTION_TYPE_INFO;
	sec->section.offset = sizeof(struct i40e_profile_section_header);
	sec->section.size = sizeof(struct i40e_profile_info);
	pinfo = (struct i40e_profile_info *)(profile_info_sec +
					     sec->section.offset);
	pinfo->track_id = track_id;
	pinfo->version = profile->version;
	pinfo->op = I40E_DDP_ADD_TRACKID;
	memcpy(pinfo->name, profile->name, I40E_DDP_NAME_SIZE);

	status = i40e_aq_write_ddp(hw, (void *)sec, sec->data_end,
				   track_id, &offset, &info, NULL);

	return status;
}

/**
 * i40e_aq_add_cloud_filters
 * @hw: pointer to the hardware structure
 * @seid: VSI seid to which cloud filters are added
 * @filters: Buffer which contains the filters to be added
 * @filter_count: number of filters contained in the buffer
 *
 * Set the cloud filters for a given VSI. The contents of the
 * i40e_aqc_cloud_filters_element_data are filled in by the caller
 * of the function.
 *
 **/
enum i40e_status_code
i40e_aq_add_cloud_filters(struct i40e_hw *hw, u16 seid,
			  struct i40e_aqc_cloud_filters_element_data *filters,
			  u8 filter_count)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_add_remove_cloud_filters *cmd =
		(struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
	enum i40e_status_code status;
	u16 buff_len;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_add_cloud_filters);

	buff_len = filter_count * sizeof(*filters);
	desc.datalen = cpu_to_le16(buff_len);
	desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
	cmd->num_filters = filter_count;
	cmd->seid = cpu_to_le16(seid);

	status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);

	return status;
}
/**
 * i40e_aq_add_cloud_filters_bb
 * @hw: pointer to the hardware structure
 * @seid: VSI seid to which cloud filters are added
 * @filters: Buffer which contains the filters in big buffer to be added
 * @filter_count: number of filters contained in the buffer
 *
 * Set the big buffer cloud filters for a given VSI. The contents of the
 * i40e_aqc_cloud_filters_element_bb are filled in by the caller of the
 * function.
 *
 **/
enum i40e_status_code
i40e_aq_add_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
			     struct i40e_aqc_cloud_filters_element_bb *filters,
			     u8 filter_count)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_add_remove_cloud_filters *cmd =
		(struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
	i40e_status status;
	u16 buff_len;
	int i;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_add_cloud_filters);

	buff_len = filter_count * sizeof(*filters);
	desc.datalen = cpu_to_le16(buff_len);
	desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
	cmd->num_filters = filter_count;
	cmd->seid = cpu_to_le16(seid);
	cmd->big_buffer_flag = I40E_AQC_ADD_CLOUD_CMD_BB;

	for (i = 0; i < filter_count; i++) {
		u16 tnl_type;
		u32 ti;

		tnl_type = (le16_to_cpu(filters[i].element.flags) &
			   I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >>
			   I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT;

		/* Due to hardware eccentricities, the VNI for Geneve is shifted
		 * one more byte further than normally used for Tenant ID in
		 * other tunnel types.
		 */
		if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) {
			ti = le32_to_cpu(filters[i].element.tenant_id);
			filters[i].element.tenant_id = cpu_to_le32(ti << 8);
		}
	}

	status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);

	return status;
}

/**
 * i40e_aq_rem_cloud_filters
 * @hw: pointer to the hardware structure
 * @seid: VSI seid to remove cloud filters from
 * @filters: Buffer which contains the filters to be removed
 * @filter_count: number of filters contained in the buffer
 *
 * Remove the cloud filters for a given VSI. The contents of the
 * i40e_aqc_cloud_filters_element_data are filled in by the caller
 * of the function.
 *
 **/
enum i40e_status_code
i40e_aq_rem_cloud_filters(struct i40e_hw *hw, u16 seid,
			  struct i40e_aqc_cloud_filters_element_data *filters,
			  u8 filter_count)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_add_remove_cloud_filters *cmd =
		(struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
	enum i40e_status_code status;
	u16 buff_len;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_remove_cloud_filters);

	buff_len = filter_count * sizeof(*filters);
	desc.datalen = cpu_to_le16(buff_len);
	desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
	cmd->num_filters = filter_count;
	cmd->seid = cpu_to_le16(seid);

	status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);

	return status;
}
/**
 * i40e_aq_rem_cloud_filters_bb
 * @hw: pointer to the hardware structure
 * @seid: VSI seid to remove cloud filters from
 * @filters: Buffer which contains the filters in big buffer to be removed
 * @filter_count: number of filters contained in the buffer
 *
 * Remove the big buffer cloud filters for a given VSI. The contents of the
 * i40e_aqc_cloud_filters_element_bb are filled in by the caller of the
 * function.
 *
 **/
enum i40e_status_code
i40e_aq_rem_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
			     struct i40e_aqc_cloud_filters_element_bb *filters,
			     u8 filter_count)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_add_remove_cloud_filters *cmd =
		(struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
	i40e_status status;
	u16 buff_len;
	int i;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_remove_cloud_filters);

	buff_len = filter_count * sizeof(*filters);
	desc.datalen = cpu_to_le16(buff_len);
	desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
	cmd->num_filters = filter_count;
	cmd->seid = cpu_to_le16(seid);
	cmd->big_buffer_flag = I40E_AQC_ADD_CLOUD_CMD_BB;

	for (i = 0; i < filter_count; i++) {
		u16 tnl_type;
		u32 ti;

		tnl_type = (le16_to_cpu(filters[i].element.flags) &
			   I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >>
			   I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT;

		/* Due to hardware eccentricities, the VNI for Geneve is shifted
		 * one more byte further than normally used for Tenant ID in
		 * other tunnel types.
		 */
		if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) {
			ti = le32_to_cpu(filters[i].element.tenant_id);
			filters[i].element.tenant_id = cpu_to_le32(ti << 8);
		}
	}

	status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);

	return status;
}
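/*
 * Illustrative sketch (not part of the original driver): building a minimal
 * cloud filter element and handing it to i40e_aq_add_cloud_filters().  Only
 * the tunnel-type and tenant_id fields referenced above are populated; a
 * real caller also fills in the match fields (MACs, VLAN, addresses, queue)
 * required by the chosen filter type.  The i40e_example_* name is
 * hypothetical.
 */
static enum i40e_status_code __maybe_unused
i40e_example_add_geneve_filter(struct i40e_hw *hw, u16 seid, u32 vni)
{
	struct i40e_aqc_cloud_filters_element_data filter = {};

	/* mark the filter as matching a Geneve tunnel with the given VNI */
	filter.flags = cpu_to_le16(I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE <<
				   I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT);
	filter.tenant_id = cpu_to_le32(vni);

	return i40e_aq_add_cloud_filters(hw, seid, &filter, 1);
}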