// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2021 Intel Corporation. */

#include "i40e.h"
#include "i40e_type.h"
#include "i40e_adminq.h"
#include "i40e_prototype.h"
#include <linux/avf/virtchnl.h>

/**
 * i40e_set_mac_type - Sets MAC type
 * @hw: pointer to the HW structure
 *
 * This function sets the mac type of the adapter based on the
 * vendor ID and device ID stored in the hw structure.
 **/
i40e_status i40e_set_mac_type(struct i40e_hw *hw)
{
	i40e_status status = 0;

	if (hw->vendor_id == PCI_VENDOR_ID_INTEL) {
		switch (hw->device_id) {
		case I40E_DEV_ID_SFP_XL710:
		case I40E_DEV_ID_QEMU:
		case I40E_DEV_ID_KX_B:
		case I40E_DEV_ID_KX_C:
		case I40E_DEV_ID_QSFP_A:
		case I40E_DEV_ID_QSFP_B:
		case I40E_DEV_ID_QSFP_C:
		case I40E_DEV_ID_1G_BASE_T_BC:
		case I40E_DEV_ID_5G_BASE_T_BC:
		case I40E_DEV_ID_10G_BASE_T:
		case I40E_DEV_ID_10G_BASE_T4:
		case I40E_DEV_ID_10G_BASE_T_BC:
		case I40E_DEV_ID_10G_B:
		case I40E_DEV_ID_10G_SFP:
		case I40E_DEV_ID_20G_KR2:
		case I40E_DEV_ID_20G_KR2_A:
		case I40E_DEV_ID_25G_B:
		case I40E_DEV_ID_25G_SFP28:
		case I40E_DEV_ID_X710_N3000:
		case I40E_DEV_ID_XXV710_N3000:
			hw->mac.type = I40E_MAC_XL710;
			break;
		case I40E_DEV_ID_KX_X722:
		case I40E_DEV_ID_QSFP_X722:
		case I40E_DEV_ID_SFP_X722:
		case I40E_DEV_ID_1G_BASE_T_X722:
		case I40E_DEV_ID_10G_BASE_T_X722:
		case I40E_DEV_ID_SFP_I_X722:
		case I40E_DEV_ID_SFP_X722_A:
			hw->mac.type = I40E_MAC_X722;
			break;
		default:
			hw->mac.type = I40E_MAC_GENERIC;
			break;
		}
	} else {
		status = I40E_ERR_DEVICE_NOT_SUPPORTED;
	}

	hw_dbg(hw, "i40e_set_mac_type found mac: %d, returns: %d\n",
	       hw->mac.type, status);
	return status;
}

/**
 * i40e_aq_str - convert AQ err code to a string
 * @hw: pointer to the HW structure
 * @aq_err: the AQ error code to convert
 **/
const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err)
{
	switch (aq_err) {
	case I40E_AQ_RC_OK:
		return "OK";
	case I40E_AQ_RC_EPERM:
		return "I40E_AQ_RC_EPERM";
	case I40E_AQ_RC_ENOENT:
		return "I40E_AQ_RC_ENOENT";
	case I40E_AQ_RC_ESRCH:
		return "I40E_AQ_RC_ESRCH";
	case I40E_AQ_RC_EINTR:
		return "I40E_AQ_RC_EINTR";
	case I40E_AQ_RC_EIO:
		return "I40E_AQ_RC_EIO";
	case I40E_AQ_RC_ENXIO:
		return "I40E_AQ_RC_ENXIO";
	case I40E_AQ_RC_E2BIG:
		return "I40E_AQ_RC_E2BIG";
	case I40E_AQ_RC_EAGAIN:
		return "I40E_AQ_RC_EAGAIN";
	case I40E_AQ_RC_ENOMEM:
		return "I40E_AQ_RC_ENOMEM";
	case I40E_AQ_RC_EACCES:
		return "I40E_AQ_RC_EACCES";
	case I40E_AQ_RC_EFAULT:
		return "I40E_AQ_RC_EFAULT";
	case I40E_AQ_RC_EBUSY:
		return "I40E_AQ_RC_EBUSY";
	case I40E_AQ_RC_EEXIST:
		return "I40E_AQ_RC_EEXIST";
	case I40E_AQ_RC_EINVAL:
		return "I40E_AQ_RC_EINVAL";
	case I40E_AQ_RC_ENOTTY:
		return "I40E_AQ_RC_ENOTTY";
	case I40E_AQ_RC_ENOSPC:
		return "I40E_AQ_RC_ENOSPC";
	case I40E_AQ_RC_ENOSYS:
		return "I40E_AQ_RC_ENOSYS";
	case I40E_AQ_RC_ERANGE:
		return "I40E_AQ_RC_ERANGE";
	case I40E_AQ_RC_EFLUSHED:
		return "I40E_AQ_RC_EFLUSHED";
	case I40E_AQ_RC_BAD_ADDR:
		return "I40E_AQ_RC_BAD_ADDR";
	case I40E_AQ_RC_EMODE:
		return "I40E_AQ_RC_EMODE";
	case I40E_AQ_RC_EFBIG:
		return "I40E_AQ_RC_EFBIG";
	}

	snprintf(hw->err_str, sizeof(hw->err_str), "%d", aq_err);
	return hw->err_str;
}

/**
 * i40e_stat_str - convert status err code to a string
 * @hw: pointer to the HW structure
 * @stat_err: the status error code to convert
 **/
const char *i40e_stat_str(struct i40e_hw *hw, i40e_status stat_err)
{
	switch (stat_err) {
	case 0:
		return "OK";
	case I40E_ERR_NVM:
		return "I40E_ERR_NVM";
	case I40E_ERR_NVM_CHECKSUM:
		return "I40E_ERR_NVM_CHECKSUM";
	case I40E_ERR_PHY:
		return "I40E_ERR_PHY";
	case I40E_ERR_CONFIG:
		return "I40E_ERR_CONFIG";
	case I40E_ERR_PARAM:
		return "I40E_ERR_PARAM";
	case I40E_ERR_MAC_TYPE:
		return "I40E_ERR_MAC_TYPE";
	case I40E_ERR_UNKNOWN_PHY:
		return "I40E_ERR_UNKNOWN_PHY";
	case I40E_ERR_LINK_SETUP:
		return "I40E_ERR_LINK_SETUP";
	case I40E_ERR_ADAPTER_STOPPED:
		return "I40E_ERR_ADAPTER_STOPPED";
	case I40E_ERR_INVALID_MAC_ADDR:
		return "I40E_ERR_INVALID_MAC_ADDR";
	case I40E_ERR_DEVICE_NOT_SUPPORTED:
		return "I40E_ERR_DEVICE_NOT_SUPPORTED";
	case I40E_ERR_PRIMARY_REQUESTS_PENDING:
		return "I40E_ERR_PRIMARY_REQUESTS_PENDING";
	case I40E_ERR_INVALID_LINK_SETTINGS:
		return "I40E_ERR_INVALID_LINK_SETTINGS";
	case I40E_ERR_AUTONEG_NOT_COMPLETE:
		return "I40E_ERR_AUTONEG_NOT_COMPLETE";
	case I40E_ERR_RESET_FAILED:
		return "I40E_ERR_RESET_FAILED";
	case I40E_ERR_SWFW_SYNC:
		return "I40E_ERR_SWFW_SYNC";
	case I40E_ERR_NO_AVAILABLE_VSI:
		return "I40E_ERR_NO_AVAILABLE_VSI";
	case I40E_ERR_NO_MEMORY:
		return "I40E_ERR_NO_MEMORY";
	case I40E_ERR_BAD_PTR:
		return "I40E_ERR_BAD_PTR";
	case I40E_ERR_RING_FULL:
		return "I40E_ERR_RING_FULL";
	case I40E_ERR_INVALID_PD_ID:
		return "I40E_ERR_INVALID_PD_ID";
	case I40E_ERR_INVALID_QP_ID:
		return "I40E_ERR_INVALID_QP_ID";
	case I40E_ERR_INVALID_CQ_ID:
		return "I40E_ERR_INVALID_CQ_ID";
	case I40E_ERR_INVALID_CEQ_ID:
		return "I40E_ERR_INVALID_CEQ_ID";
	case I40E_ERR_INVALID_AEQ_ID:
		return "I40E_ERR_INVALID_AEQ_ID";
	case I40E_ERR_INVALID_SIZE:
		return "I40E_ERR_INVALID_SIZE";
	case I40E_ERR_INVALID_ARP_INDEX:
		return "I40E_ERR_INVALID_ARP_INDEX";
	case I40E_ERR_INVALID_FPM_FUNC_ID:
		return "I40E_ERR_INVALID_FPM_FUNC_ID";
	case I40E_ERR_QP_INVALID_MSG_SIZE:
		return "I40E_ERR_QP_INVALID_MSG_SIZE";
	case I40E_ERR_QP_TOOMANY_WRS_POSTED:
		return "I40E_ERR_QP_TOOMANY_WRS_POSTED";
	case I40E_ERR_INVALID_FRAG_COUNT:
		return "I40E_ERR_INVALID_FRAG_COUNT";
	case I40E_ERR_QUEUE_EMPTY:
		return "I40E_ERR_QUEUE_EMPTY";
	case I40E_ERR_INVALID_ALIGNMENT:
		return "I40E_ERR_INVALID_ALIGNMENT";
	case I40E_ERR_FLUSHED_QUEUE:
		return "I40E_ERR_FLUSHED_QUEUE";
	case I40E_ERR_INVALID_PUSH_PAGE_INDEX:
		return "I40E_ERR_INVALID_PUSH_PAGE_INDEX";
	case I40E_ERR_INVALID_IMM_DATA_SIZE:
		return "I40E_ERR_INVALID_IMM_DATA_SIZE";
	case I40E_ERR_TIMEOUT:
		return "I40E_ERR_TIMEOUT";
	case I40E_ERR_OPCODE_MISMATCH:
		return "I40E_ERR_OPCODE_MISMATCH";
	case I40E_ERR_CQP_COMPL_ERROR:
		return "I40E_ERR_CQP_COMPL_ERROR";
	case I40E_ERR_INVALID_VF_ID:
		return "I40E_ERR_INVALID_VF_ID";
	case I40E_ERR_INVALID_HMCFN_ID:
		return "I40E_ERR_INVALID_HMCFN_ID";
	case I40E_ERR_BACKING_PAGE_ERROR:
		return "I40E_ERR_BACKING_PAGE_ERROR";
	case I40E_ERR_NO_PBLCHUNKS_AVAILABLE:
		return "I40E_ERR_NO_PBLCHUNKS_AVAILABLE";
	case I40E_ERR_INVALID_PBLE_INDEX:
		return "I40E_ERR_INVALID_PBLE_INDEX";
	case I40E_ERR_INVALID_SD_INDEX:
		return "I40E_ERR_INVALID_SD_INDEX";
	case I40E_ERR_INVALID_PAGE_DESC_INDEX:
		return "I40E_ERR_INVALID_PAGE_DESC_INDEX";
	case I40E_ERR_INVALID_SD_TYPE:
		return "I40E_ERR_INVALID_SD_TYPE";
	case I40E_ERR_MEMCPY_FAILED:
		return "I40E_ERR_MEMCPY_FAILED";
	case I40E_ERR_INVALID_HMC_OBJ_INDEX:
		return "I40E_ERR_INVALID_HMC_OBJ_INDEX";
	case I40E_ERR_INVALID_HMC_OBJ_COUNT:
		return "I40E_ERR_INVALID_HMC_OBJ_COUNT";
	case I40E_ERR_INVALID_SRQ_ARM_LIMIT:
		return "I40E_ERR_INVALID_SRQ_ARM_LIMIT";
	case I40E_ERR_SRQ_ENABLED:
		return "I40E_ERR_SRQ_ENABLED";
	case I40E_ERR_ADMIN_QUEUE_ERROR:
		return "I40E_ERR_ADMIN_QUEUE_ERROR";
	case I40E_ERR_ADMIN_QUEUE_TIMEOUT:
		return "I40E_ERR_ADMIN_QUEUE_TIMEOUT";
	case I40E_ERR_BUF_TOO_SHORT:
		return "I40E_ERR_BUF_TOO_SHORT";
	case I40E_ERR_ADMIN_QUEUE_FULL:
		return "I40E_ERR_ADMIN_QUEUE_FULL";
	case I40E_ERR_ADMIN_QUEUE_NO_WORK:
		return "I40E_ERR_ADMIN_QUEUE_NO_WORK";
	case I40E_ERR_BAD_IWARP_CQE:
		return "I40E_ERR_BAD_IWARP_CQE";
	case I40E_ERR_NVM_BLANK_MODE:
		return "I40E_ERR_NVM_BLANK_MODE";
	case I40E_ERR_NOT_IMPLEMENTED:
		return "I40E_ERR_NOT_IMPLEMENTED";
	case I40E_ERR_PE_DOORBELL_NOT_ENABLED:
		return "I40E_ERR_PE_DOORBELL_NOT_ENABLED";
	case I40E_ERR_DIAG_TEST_FAILED:
		return "I40E_ERR_DIAG_TEST_FAILED";
	case I40E_ERR_NOT_READY:
		return "I40E_ERR_NOT_READY";
	case I40E_NOT_SUPPORTED:
		return "I40E_NOT_SUPPORTED";
	case I40E_ERR_FIRMWARE_API_VERSION:
		return "I40E_ERR_FIRMWARE_API_VERSION";
	case I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR:
		return "I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR";
	}

	snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err);
	return hw->err_str;
}

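/*
 * Illustrative sketch (not part of the driver): callers typically log both
 * strings from the helpers above when an AdminQ command fails, assuming a
 * preceding i40e_asq_send_command() call whose return value is held in a
 * local 'ret' (used here only for illustration):
 *
 *	if (ret)
 *		hw_dbg(hw, "AQ command failed, err %s aq_err %s\n",
 *		       i40e_stat_str(hw, ret),
 *		       i40e_aq_str(hw, hw->aq.asq_last_status));
 */
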
/**
 * i40e_debug_aq
 * @hw: pointer to the hw struct
 * @mask: debug mask
 * @desc: pointer to admin queue descriptor
 * @buffer: pointer to command buffer
 * @buf_len: max length of buffer
 *
 * Dumps debug log about adminq command with descriptor contents.
 **/
void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
		   void *buffer, u16 buf_len)
{
	struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc;
	u32 effective_mask = hw->debug_mask & mask;
	char prefix[27];
	u16 len;
	u8 *buf = (u8 *)buffer;

	if (!effective_mask || !desc)
		return;

	len = le16_to_cpu(aq_desc->datalen);

	i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
		   "AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
		   le16_to_cpu(aq_desc->opcode),
		   le16_to_cpu(aq_desc->flags),
		   le16_to_cpu(aq_desc->datalen),
		   le16_to_cpu(aq_desc->retval));
	i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
		   "\tcookie (h,l) 0x%08X 0x%08X\n",
		   le32_to_cpu(aq_desc->cookie_high),
		   le32_to_cpu(aq_desc->cookie_low));
	i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
		   "\tparam (0,1) 0x%08X 0x%08X\n",
		   le32_to_cpu(aq_desc->params.internal.param0),
		   le32_to_cpu(aq_desc->params.internal.param1));
	i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
		   "\taddr (h,l) 0x%08X 0x%08X\n",
		   le32_to_cpu(aq_desc->params.external.addr_high),
		   le32_to_cpu(aq_desc->params.external.addr_low));

	if (buffer && buf_len != 0 && len != 0 &&
	    (effective_mask & I40E_DEBUG_AQ_DESC_BUFFER)) {
		i40e_debug(hw, mask, "AQ CMD Buffer:\n");
		if (buf_len < len)
			len = buf_len;

		snprintf(prefix, sizeof(prefix),
			 "i40e %02x:%02x.%x: \t0x",
			 hw->bus.bus_id,
			 hw->bus.device,
			 hw->bus.func);

		print_hex_dump(KERN_INFO, prefix, DUMP_PREFIX_OFFSET,
			       16, 1, buf, len, false);
	}
}

/**
 * i40e_check_asq_alive
 * @hw: pointer to the hw struct
 *
 * Returns true if Queue is enabled else false.
 **/
bool i40e_check_asq_alive(struct i40e_hw *hw)
{
	if (hw->aq.asq.len)
		return !!(rd32(hw, hw->aq.asq.len) &
			  I40E_PF_ATQLEN_ATQENABLE_MASK);
	else
		return false;
}

/**
 * i40e_aq_queue_shutdown
 * @hw: pointer to the hw struct
 * @unloading: is the driver unloading itself
 *
 * Tell the Firmware that we're shutting down the AdminQ and whether
 * or not the driver is unloading as well.
 **/
i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw,
				   bool unloading)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_queue_shutdown *cmd =
		(struct i40e_aqc_queue_shutdown *)&desc.params.raw;
	i40e_status status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_queue_shutdown);

	if (unloading)
		cmd->driver_unloading = cpu_to_le32(I40E_AQ_DRIVER_UNLOADING);
	status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);

	return status;
}

/**
 * i40e_aq_get_set_rss_lut
 * @hw: pointer to the hardware structure
 * @vsi_id: vsi fw index
 * @pf_lut: for PF table set true, for VSI table set false
 * @lut: pointer to the lut buffer provided by the caller
 * @lut_size: size of the lut buffer
 * @set: set true to set the table, false to get the table
 *
 * Internal function to get or set RSS look up table
 **/
static i40e_status i40e_aq_get_set_rss_lut(struct i40e_hw *hw,
					   u16 vsi_id, bool pf_lut,
					   u8 *lut, u16 lut_size,
					   bool set)
{
	i40e_status status;
	struct i40e_aq_desc desc;
	struct i40e_aqc_get_set_rss_lut *cmd_resp =
		(struct i40e_aqc_get_set_rss_lut *)&desc.params.raw;

	if (set)
		i40e_fill_default_direct_cmd_desc(&desc,
						  i40e_aqc_opc_set_rss_lut);
	else
		i40e_fill_default_direct_cmd_desc(&desc,
						  i40e_aqc_opc_get_rss_lut);

	/* Indirect command */
	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);

	cmd_resp->vsi_id =
		cpu_to_le16((u16)((vsi_id <<
				   I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT) &
				   I40E_AQC_SET_RSS_LUT_VSI_ID_MASK));
	cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_LUT_VSI_VALID);

	if (pf_lut)
		cmd_resp->flags |= cpu_to_le16((u16)
				((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF <<
				  I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
				  I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
	else
		cmd_resp->flags |= cpu_to_le16((u16)
				((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI <<
				  I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
				  I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));

	status = i40e_asq_send_command(hw, &desc, lut, lut_size, NULL);

	return status;
}

/**
 * i40e_aq_get_rss_lut
 * @hw: pointer to the hardware structure
 * @vsi_id: vsi fw index
 * @pf_lut: for PF table set true, for VSI table set false
 * @lut: pointer to the lut buffer provided by the caller
 * @lut_size: size of the lut buffer
 *
 * get the RSS lookup table, PF or VSI type
 **/
i40e_status i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 vsi_id,
				bool pf_lut, u8 *lut, u16 lut_size)
{
	return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size,
				       false);
}

/**
 * i40e_aq_set_rss_lut
 * @hw: pointer to the hardware structure
 * @vsi_id: vsi fw index
 * @pf_lut: for PF table set true, for VSI table set false
 * @lut: pointer to the lut buffer provided by the caller
 * @lut_size: size of the lut buffer
 *
 * set the RSS lookup table, PF or VSI type
 **/
i40e_status i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 vsi_id,
				bool pf_lut, u8 *lut, u16 lut_size)
{
	return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, true);
}

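/*
 * Illustrative sketch (not part of the driver): a caller that owns a LUT
 * buffer might program and read back the per-VSI RSS lookup table with the
 * wrappers above. 'vsi_id', 'lut' and 'lut_size' stand in for values the
 * caller already has.
 *
 *	status = i40e_aq_set_rss_lut(hw, vsi_id, false, lut, lut_size);
 *	if (!status)
 *		status = i40e_aq_get_rss_lut(hw, vsi_id, false, lut, lut_size);
 */
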
/**
 * i40e_aq_get_set_rss_key
 * @hw: pointer to the hw struct
 * @vsi_id: vsi fw index
 * @key: pointer to key info struct
 * @set: set true to set the key, false to get the key
 *
 * Internal function to get or set the RSS key per VSI
 **/
static i40e_status i40e_aq_get_set_rss_key(struct i40e_hw *hw,
					   u16 vsi_id,
					   struct i40e_aqc_get_set_rss_key_data *key,
					   bool set)
{
	i40e_status status;
	struct i40e_aq_desc desc;
	struct i40e_aqc_get_set_rss_key *cmd_resp =
		(struct i40e_aqc_get_set_rss_key *)&desc.params.raw;
	u16 key_size = sizeof(struct i40e_aqc_get_set_rss_key_data);

	if (set)
		i40e_fill_default_direct_cmd_desc(&desc,
						  i40e_aqc_opc_set_rss_key);
	else
		i40e_fill_default_direct_cmd_desc(&desc,
						  i40e_aqc_opc_get_rss_key);

	/* Indirect command */
	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);

	cmd_resp->vsi_id =
		cpu_to_le16((u16)((vsi_id <<
				   I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) &
				   I40E_AQC_SET_RSS_KEY_VSI_ID_MASK));
	cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_KEY_VSI_VALID);

	status = i40e_asq_send_command(hw, &desc, key, key_size, NULL);

	return status;
}

/**
 * i40e_aq_get_rss_key
 * @hw: pointer to the hw struct
 * @vsi_id: vsi fw index
 * @key: pointer to key info struct
 *
 * get the RSS key per VSI
 **/
i40e_status i40e_aq_get_rss_key(struct i40e_hw *hw,
				u16 vsi_id,
				struct i40e_aqc_get_set_rss_key_data *key)
{
	return i40e_aq_get_set_rss_key(hw, vsi_id, key, false);
}

/**
 * i40e_aq_set_rss_key
 * @hw: pointer to the hw struct
 * @vsi_id: vsi fw index
 * @key: pointer to key info struct
 *
 * set the RSS key per VSI
 **/
i40e_status i40e_aq_set_rss_key(struct i40e_hw *hw,
				u16 vsi_id,
				struct i40e_aqc_get_set_rss_key_data *key)
{
	return i40e_aq_get_set_rss_key(hw, vsi_id, key, true);
}

/* The i40e_ptype_lookup table is used to convert from the 8-bit ptype in the
 * hardware to a bit-field that can be used by SW to more easily determine the
 * packet type.
 *
 * Macros are used to shorten the table lines and make this table human
 * readable.
 *
 * We store the PTYPE in the top byte of the bit field - this is just so that
 * we can check that the table doesn't have a row missing, as the index into
 * the table should be the PTYPE.
 *
 * Typical work flow:
 *
 * IF NOT i40e_ptype_lookup[ptype].known
 * THEN
 *	Packet is unknown
 * ELSE IF i40e_ptype_lookup[ptype].outer_ip == I40E_RX_PTYPE_OUTER_IP
 *	Use the rest of the fields to look at the tunnels, inner protocols, etc
 * ELSE
 *	Use the enum i40e_rx_l2_ptype to decode the packet type
 * ENDIF
 */

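/*
 * Illustrative sketch (not part of the driver) of the work flow above,
 * assuming an 8-bit 'ptype' pulled from an Rx descriptor:
 *
 *	struct i40e_rx_ptype_decoded decoded = i40e_ptype_lookup[ptype];
 *
 *	if (!decoded.known)
 *		return;		// unknown packet type
 *	if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) {
 *		// look at outer_ip_ver, tunnel and inner protocol fields
 *	} else {
 *		// decode using the L2 ptype information instead
 *	}
 */
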
/* macro to make the table lines short, use explicit indexing with [PTYPE] */
#define I40E_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\
	[PTYPE] = { \
		1, \
		I40E_RX_PTYPE_OUTER_##OUTER_IP, \
		I40E_RX_PTYPE_OUTER_##OUTER_IP_VER, \
		I40E_RX_PTYPE_##OUTER_FRAG, \
		I40E_RX_PTYPE_TUNNEL_##T, \
		I40E_RX_PTYPE_TUNNEL_END_##TE, \
		I40E_RX_PTYPE_##TEF, \
		I40E_RX_PTYPE_INNER_PROT_##I, \
		I40E_RX_PTYPE_PAYLOAD_LAYER_##PL }

#define I40E_PTT_UNUSED_ENTRY(PTYPE) [PTYPE] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 }

/* shorter macros makes the table fit but are terse */
#define I40E_RX_PTYPE_NOF		I40E_RX_PTYPE_NOT_FRAG
#define I40E_RX_PTYPE_FRG		I40E_RX_PTYPE_FRAG
#define I40E_RX_PTYPE_INNER_PROT_TS	I40E_RX_PTYPE_INNER_PROT_TIMESYNC

/* Lookup table mapping in the 8-bit HW PTYPE to the bit field for decoding */
struct i40e_rx_ptype_decoded i40e_ptype_lookup[BIT(8)] = {
	/* L2 Packet types */
	I40E_PTT_UNUSED_ENTRY(0),
	I40E_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
	I40E_PTT(2, L2, NONE, NOF, NONE, NONE, NOF, TS, PAY2),
	I40E_PTT(3, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
	I40E_PTT_UNUSED_ENTRY(4),
	I40E_PTT_UNUSED_ENTRY(5),
	I40E_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
	I40E_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
	I40E_PTT_UNUSED_ENTRY(8),
	I40E_PTT_UNUSED_ENTRY(9),
	I40E_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
	I40E_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
	I40E_PTT(12, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(13, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(14, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(15, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(16, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(17, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(18, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(19, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(20, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(21, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),

	/* Non Tunneled IPv4 */
	I40E_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(25),
	I40E_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4),
	I40E_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4),
	I40E_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4),

	/* IPv4 --> IPv4 */
	I40E_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
	I40E_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
	I40E_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(32),
	I40E_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
	I40E_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
	I40E_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),

	/* IPv4 --> IPv6 */
	I40E_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
	I40E_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
	I40E_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(39),
	I40E_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
	I40E_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
	I40E_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),

	/* IPv4 --> GRE/NAT */
	I40E_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),

	/* IPv4 --> GRE/NAT --> IPv4 */
	I40E_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
	I40E_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
	I40E_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(47),
	I40E_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
	I40E_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
	I40E_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),

	/* IPv4 --> GRE/NAT --> IPv6 */
	I40E_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
	I40E_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
	I40E_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(54),
	I40E_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
	I40E_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
	I40E_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),

	/* IPv4 --> GRE/NAT --> MAC */
	I40E_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),

	/* IPv4 --> GRE/NAT --> MAC --> IPv4 */
	I40E_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
	I40E_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
	I40E_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(62),
	I40E_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
	I40E_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
	I40E_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),

	/* IPv4 --> GRE/NAT -> MAC --> IPv6 */
	I40E_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
	I40E_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
	I40E_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(69),
	I40E_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
	I40E_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
	I40E_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),

	/* IPv4 --> GRE/NAT --> MAC/VLAN */
	I40E_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),

	/* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */
	I40E_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
	I40E_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
	I40E_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(77),
	I40E_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
	I40E_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
	I40E_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),

	/* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */
	I40E_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
	I40E_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
	I40E_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(84),
	I40E_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
	I40E_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
	I40E_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),

	/* Non Tunneled IPv6 */
	I40E_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(91),
	I40E_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4),
	I40E_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4),
	I40E_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4),

	/* IPv6 --> IPv4 */
	I40E_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
	I40E_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
	I40E_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(98),
	I40E_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
	I40E_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
	I40E_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),

	/* IPv6 --> IPv6 */
	I40E_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
	I40E_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
	I40E_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(105),
	I40E_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
	I40E_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
	I40E_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),

	/* IPv6 --> GRE/NAT */
	I40E_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),

	/* IPv6 --> GRE/NAT -> IPv4 */
	I40E_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
	I40E_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
	I40E_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(113),
	I40E_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
	I40E_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
	I40E_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),

	/* IPv6 --> GRE/NAT -> IPv6 */
	I40E_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
	I40E_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
	I40E_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(120),
	I40E_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
	I40E_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
	I40E_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),

	/* IPv6 --> GRE/NAT -> MAC */
	I40E_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),

	/* IPv6 --> GRE/NAT -> MAC -> IPv4 */
	I40E_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
	I40E_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
	I40E_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(128),
	I40E_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
	I40E_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
	I40E_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),

	/* IPv6 --> GRE/NAT -> MAC -> IPv6 */
	I40E_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
	I40E_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
	I40E_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(135),
	I40E_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
	I40E_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
	I40E_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),

	/* IPv6 --> GRE/NAT -> MAC/VLAN */
	I40E_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),

	/* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */
	I40E_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
	I40E_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
	I40E_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(143),
	I40E_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
	I40E_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
	I40E_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),

	/* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */
	I40E_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
	I40E_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
	I40E_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(150),
	I40E_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
	I40E_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
	I40E_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),

	/* unused entries */
	[154 ... 255] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 }
};

/**
 * i40e_init_shared_code - Initialize the shared code
 * @hw: pointer to hardware structure
 *
 * This assigns the MAC type and PHY code and inits the NVM.
 * Does not touch the hardware. This function must be called prior to any
 * other function in the shared code. The i40e_hw structure should be
 * memset to 0 prior to calling this function. The following fields in
 * hw structure should be filled in prior to calling this function:
 * hw_addr, back, device_id, vendor_id, subsystem_device_id,
 * subsystem_vendor_id, and revision_id
 **/
i40e_status i40e_init_shared_code(struct i40e_hw *hw)
{
	i40e_status status = 0;
	u32 port, ari, func_rid;

	i40e_set_mac_type(hw);

	switch (hw->mac.type) {
	case I40E_MAC_XL710:
	case I40E_MAC_X722:
		break;
	default:
		return I40E_ERR_DEVICE_NOT_SUPPORTED;
	}

	hw->phy.get_link_info = true;

	/* Determine port number and PF number */
	port = (rd32(hw, I40E_PFGEN_PORTNUM) & I40E_PFGEN_PORTNUM_PORT_NUM_MASK)
					   >> I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT;
	hw->port = (u8)port;
	ari = (rd32(hw, I40E_GLPCI_CAPSUP) & I40E_GLPCI_CAPSUP_ARI_EN_MASK) >>
						 I40E_GLPCI_CAPSUP_ARI_EN_SHIFT;
	func_rid = rd32(hw, I40E_PF_FUNC_RID);
	if (ari)
		hw->pf_id = (u8)(func_rid & 0xff);
	else
		hw->pf_id = (u8)(func_rid & 0x7);

	status = i40e_init_nvm(hw);
	return status;
}

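/*
 * Illustrative sketch (not part of the driver): the PF probe path is expected
 * to zero the i40e_hw structure and fill in the identification fields listed
 * above before calling i40e_init_shared_code(). 'pdev', 'pf' and 'hw_addr'
 * stand in for values the caller already has.
 *
 *	memset(hw, 0, sizeof(*hw));
 *	hw->back = pf;
 *	hw->hw_addr = hw_addr;
 *	hw->vendor_id = pdev->vendor;
 *	hw->device_id = pdev->device;
 *	hw->subsystem_vendor_id = pdev->subsystem_vendor;
 *	hw->subsystem_device_id = pdev->subsystem_device;
 *	hw->revision_id = pdev->revision;
 *
 *	err = i40e_init_shared_code(hw);
 */
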
/**
 * i40e_aq_mac_address_read - Retrieve the MAC addresses
 * @hw: pointer to the hw struct
 * @flags: a return indicator of what addresses were added to the addr store
 * @addrs: the requestor's mac addr store
 * @cmd_details: pointer to command details structure or NULL
 **/
static i40e_status i40e_aq_mac_address_read(struct i40e_hw *hw,
					    u16 *flags,
					    struct i40e_aqc_mac_address_read_data *addrs,
					    struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_mac_address_read *cmd_data =
		(struct i40e_aqc_mac_address_read *)&desc.params.raw;
	i40e_status status;

	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_mac_address_read);
	desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF);

	status = i40e_asq_send_command(hw, &desc, addrs,
				       sizeof(*addrs), cmd_details);
	*flags = le16_to_cpu(cmd_data->command_flags);

	return status;
}

/**
 * i40e_aq_mac_address_write - Change the MAC addresses
 * @hw: pointer to the hw struct
 * @flags: indicates which MAC to be written
 * @mac_addr: address to write
 * @cmd_details: pointer to command details structure or NULL
 **/
i40e_status i40e_aq_mac_address_write(struct i40e_hw *hw,
				      u16 flags, u8 *mac_addr,
				      struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_mac_address_write *cmd_data =
		(struct i40e_aqc_mac_address_write *)&desc.params.raw;
	i40e_status status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_mac_address_write);
	cmd_data->command_flags = cpu_to_le16(flags);
	cmd_data->mac_sah = cpu_to_le16((u16)mac_addr[0] << 8 | mac_addr[1]);
	cmd_data->mac_sal = cpu_to_le32(((u32)mac_addr[2] << 24) |
					((u32)mac_addr[3] << 16) |
					((u32)mac_addr[4] << 8) |
					mac_addr[5]);

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	return status;
}

/**
 * i40e_get_mac_addr - get MAC address
 * @hw: pointer to the HW structure
 * @mac_addr: pointer to MAC address
 *
 * Reads the adapter's MAC address from register
 **/
i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
{
	struct i40e_aqc_mac_address_read_data addrs;
	i40e_status status;
	u16 flags = 0;

	status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);

	if (flags & I40E_AQC_LAN_ADDR_VALID)
		ether_addr_copy(mac_addr, addrs.pf_lan_mac);

	return status;
}

/**
 * i40e_get_port_mac_addr - get Port MAC address
 * @hw: pointer to the HW structure
 * @mac_addr: pointer to Port MAC address
 *
 * Reads the adapter's Port MAC address
 **/
i40e_status i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
{
	struct i40e_aqc_mac_address_read_data addrs;
	i40e_status status;
	u16 flags = 0;

	status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);
	if (status)
		return status;

	if (flags & I40E_AQC_PORT_ADDR_VALID)
		ether_addr_copy(mac_addr, addrs.port_mac);
	else
		status = I40E_ERR_INVALID_MAC_ADDR;

	return status;
}

/**
 * i40e_pre_tx_queue_cfg - pre tx queue configure
 * @hw: pointer to the HW structure
 * @queue: target PF queue index
 * @enable: state change request
 *
 * Handles hw requirement to indicate intention to enable
 * or disable target queue.
 **/
void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable)
{
	u32 abs_queue_idx = hw->func_caps.base_queue + queue;
	u32 reg_block = 0;
	u32 reg_val;

	if (abs_queue_idx >= 128) {
		reg_block = abs_queue_idx / 128;
		abs_queue_idx %= 128;
	}

	reg_val = rd32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block));
	reg_val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;
	reg_val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);

	if (enable)
		reg_val |= I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK;
	else
		reg_val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK;

	wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), reg_val);
}

/**
 * i40e_read_pba_string - Reads part number string from EEPROM
 * @hw: pointer to hardware structure
 * @pba_num: stores the part number string from the EEPROM
 * @pba_num_size: part number string buffer length
 *
 * Reads the part number string from the EEPROM.
 **/
i40e_status i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num,
				 u32 pba_num_size)
{
	i40e_status status = 0;
	u16 pba_word = 0;
	u16 pba_size = 0;
	u16 pba_ptr = 0;
	u16 i = 0;

	status = i40e_read_nvm_word(hw, I40E_SR_PBA_FLAGS, &pba_word);
	if (status || (pba_word != 0xFAFA)) {
		hw_dbg(hw, "Failed to read PBA flags or flag is invalid.\n");
		return status;
	}

	status = i40e_read_nvm_word(hw, I40E_SR_PBA_BLOCK_PTR, &pba_ptr);
	if (status) {
		hw_dbg(hw, "Failed to read PBA Block pointer.\n");
		return status;
	}

	status = i40e_read_nvm_word(hw, pba_ptr, &pba_size);
	if (status) {
		hw_dbg(hw, "Failed to read PBA Block size.\n");
		return status;
	}

	/* Subtract one to get PBA word count (PBA Size word is included in
	 * total size)
	 */
	pba_size--;
	if (pba_num_size < (((u32)pba_size * 2) + 1)) {
		hw_dbg(hw, "Buffer too small for PBA data.\n");
		return I40E_ERR_PARAM;
	}

	for (i = 0; i < pba_size; i++) {
		status = i40e_read_nvm_word(hw, (pba_ptr + 1) + i, &pba_word);
		if (status) {
			hw_dbg(hw, "Failed to read PBA Block word %d.\n", i);
			return status;
		}

		pba_num[(i * 2)] = (pba_word >> 8) & 0xFF;
		pba_num[(i * 2) + 1] = pba_word & 0xFF;
	}
	pba_num[(pba_size * 2)] = '\0';

	return status;
}

/**
 * i40e_get_media_type - Gets media type
 * @hw: pointer to the hardware structure
 **/
static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
{
	enum i40e_media_type media;

	switch (hw->phy.link_info.phy_type) {
	case I40E_PHY_TYPE_10GBASE_SR:
	case I40E_PHY_TYPE_10GBASE_LR:
	case I40E_PHY_TYPE_1000BASE_SX:
	case I40E_PHY_TYPE_1000BASE_LX:
	case I40E_PHY_TYPE_40GBASE_SR4:
	case I40E_PHY_TYPE_40GBASE_LR4:
	case I40E_PHY_TYPE_25GBASE_LR:
	case I40E_PHY_TYPE_25GBASE_SR:
		media = I40E_MEDIA_TYPE_FIBER;
		break;
	case I40E_PHY_TYPE_100BASE_TX:
	case I40E_PHY_TYPE_1000BASE_T:
	case I40E_PHY_TYPE_2_5GBASE_T_LINK_STATUS:
	case I40E_PHY_TYPE_5GBASE_T_LINK_STATUS:
	case I40E_PHY_TYPE_10GBASE_T:
		media = I40E_MEDIA_TYPE_BASET;
		break;
	case I40E_PHY_TYPE_10GBASE_CR1_CU:
	case I40E_PHY_TYPE_40GBASE_CR4_CU:
	case I40E_PHY_TYPE_10GBASE_CR1:
	case I40E_PHY_TYPE_40GBASE_CR4:
	case I40E_PHY_TYPE_10GBASE_SFPP_CU:
	case I40E_PHY_TYPE_40GBASE_AOC:
	case I40E_PHY_TYPE_10GBASE_AOC:
	case I40E_PHY_TYPE_25GBASE_CR:
	case I40E_PHY_TYPE_25GBASE_AOC:
	case I40E_PHY_TYPE_25GBASE_ACC:
		media = I40E_MEDIA_TYPE_DA;
		break;
	case I40E_PHY_TYPE_1000BASE_KX:
	case I40E_PHY_TYPE_10GBASE_KX4:
	case I40E_PHY_TYPE_10GBASE_KR:
	case I40E_PHY_TYPE_40GBASE_KR4:
	case I40E_PHY_TYPE_20GBASE_KR2:
	case I40E_PHY_TYPE_25GBASE_KR:
		media = I40E_MEDIA_TYPE_BACKPLANE;
		break;
	case I40E_PHY_TYPE_SGMII:
	case I40E_PHY_TYPE_XAUI:
	case I40E_PHY_TYPE_XFI:
	case I40E_PHY_TYPE_XLAUI:
	case I40E_PHY_TYPE_XLPPI:
	default:
		media = I40E_MEDIA_TYPE_UNKNOWN;
		break;
	}

	return media;
}

/**
 * i40e_poll_globr - Poll for Global Reset completion
 * @hw: pointer to the hardware structure
 * @retry_limit: how many times to retry before failure
 **/
static i40e_status i40e_poll_globr(struct i40e_hw *hw,
				   u32 retry_limit)
{
	u32 cnt, reg = 0;

	for (cnt = 0; cnt < retry_limit; cnt++) {
		reg = rd32(hw, I40E_GLGEN_RSTAT);
		if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
			return 0;
		msleep(100);
	}

	hw_dbg(hw, "Global reset failed.\n");
	hw_dbg(hw, "I40E_GLGEN_RSTAT = 0x%x\n", reg);

	return I40E_ERR_RESET_FAILED;
}

#define I40E_PF_RESET_WAIT_COUNT_A0	200
#define I40E_PF_RESET_WAIT_COUNT	200
/**
 * i40e_pf_reset - Reset the PF
 * @hw: pointer to the hardware structure
 *
 * Assuming someone else has triggered a global reset,
 * assure the global reset is complete and then reset the PF
 **/
i40e_status i40e_pf_reset(struct i40e_hw *hw)
{
	u32 cnt = 0;
	u32 cnt1 = 0;
	u32 reg = 0;
	u32 grst_del;

	/* Poll for Global Reset steady state in case of recent GRST.
	 * The grst delay value is in 100ms units, and we'll wait a
	 * couple counts longer to be sure we don't just miss the end.
	 */
	grst_del = (rd32(hw, I40E_GLGEN_RSTCTL) &
		    I40E_GLGEN_RSTCTL_GRSTDEL_MASK) >>
		    I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;

	/* It can take up to 15 secs for GRST steady state.
	 * Bump it to 16 secs max to be safe.
	 */
	grst_del = grst_del * 20;

	for (cnt = 0; cnt < grst_del; cnt++) {
		reg = rd32(hw, I40E_GLGEN_RSTAT);
		if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
			break;
		msleep(100);
	}
	if (reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
		hw_dbg(hw, "Global reset polling failed to complete.\n");
		return I40E_ERR_RESET_FAILED;
	}

	/* Now Wait for the FW to be ready */
	for (cnt1 = 0; cnt1 < I40E_PF_RESET_WAIT_COUNT; cnt1++) {
		reg = rd32(hw, I40E_GLNVM_ULD);
		reg &= (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
			I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK);
		if (reg == (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
			    I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK)) {
			hw_dbg(hw, "Core and Global modules ready %d\n", cnt1);
			break;
		}
		usleep_range(10000, 20000);
	}
	if (!(reg & (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
		     I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))) {
		hw_dbg(hw, "wait for FW Reset complete timed out\n");
		hw_dbg(hw, "I40E_GLNVM_ULD = 0x%x\n", reg);
		return I40E_ERR_RESET_FAILED;
	}

	/* If there was a Global Reset in progress when we got here,
	 * we don't need to do the PF Reset
	 */
	if (!cnt) {
		u32 reg2 = 0;

		if (hw->revision_id == 0)
			cnt = I40E_PF_RESET_WAIT_COUNT_A0;
		else
			cnt = I40E_PF_RESET_WAIT_COUNT;
		reg = rd32(hw, I40E_PFGEN_CTRL);
		wr32(hw, I40E_PFGEN_CTRL,
		     (reg | I40E_PFGEN_CTRL_PFSWR_MASK));
		for (; cnt; cnt--) {
			reg = rd32(hw, I40E_PFGEN_CTRL);
			if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK))
				break;
			reg2 = rd32(hw, I40E_GLGEN_RSTAT);
			if (reg2 & I40E_GLGEN_RSTAT_DEVSTATE_MASK)
				break;
			usleep_range(1000, 2000);
		}
		if (reg2 & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
			if (i40e_poll_globr(hw, grst_del))
				return I40E_ERR_RESET_FAILED;
		} else if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) {
			hw_dbg(hw, "PF reset polling failed to complete.\n");
			return I40E_ERR_RESET_FAILED;
		}
	}

	i40e_clear_pxe_mode(hw);

	return 0;
}

/**
 * i40e_clear_hw - clear out any left over hw state
 * @hw: pointer to the hw struct
 *
 * Clear queues and interrupts, typically called at init time,
 * but after the capabilities have been found so we know how many
 * queues and msix vectors have been allocated.
 **/
void i40e_clear_hw(struct i40e_hw *hw)
{
	u32 num_queues, base_queue;
	u32 num_pf_int;
	u32 num_vf_int;
	u32 num_vfs;
	u32 i, j;
	u32 val;
	u32 eol = 0x7ff;

	/* get number of interrupts, queues, and VFs */
	val = rd32(hw, I40E_GLPCI_CNF2);
	num_pf_int = (val & I40E_GLPCI_CNF2_MSI_X_PF_N_MASK) >>
		     I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT;
	num_vf_int = (val & I40E_GLPCI_CNF2_MSI_X_VF_N_MASK) >>
		     I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT;

	val = rd32(hw, I40E_PFLAN_QALLOC);
	base_queue = (val & I40E_PFLAN_QALLOC_FIRSTQ_MASK) >>
		     I40E_PFLAN_QALLOC_FIRSTQ_SHIFT;
	j = (val & I40E_PFLAN_QALLOC_LASTQ_MASK) >>
	    I40E_PFLAN_QALLOC_LASTQ_SHIFT;
	if (val & I40E_PFLAN_QALLOC_VALID_MASK)
		num_queues = (j - base_queue) + 1;
	else
		num_queues = 0;

	val = rd32(hw, I40E_PF_VT_PFALLOC);
	i = (val & I40E_PF_VT_PFALLOC_FIRSTVF_MASK) >>
	    I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT;
	j = (val & I40E_PF_VT_PFALLOC_LASTVF_MASK) >>
	    I40E_PF_VT_PFALLOC_LASTVF_SHIFT;
	if (val & I40E_PF_VT_PFALLOC_VALID_MASK)
		num_vfs = (j - i) + 1;
	else
		num_vfs = 0;

	/* stop all the interrupts */
	wr32(hw, I40E_PFINT_ICR0_ENA, 0);
	val = 0x3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
	for (i = 0; i < num_pf_int - 2; i++)
		wr32(hw, I40E_PFINT_DYN_CTLN(i), val);

	/* Set the FIRSTQ_INDX field to 0x7FF in PFINT_LNKLSTx */
	val = eol << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
	wr32(hw, I40E_PFINT_LNKLST0, val);
	for (i = 0; i < num_pf_int - 2; i++)
		wr32(hw, I40E_PFINT_LNKLSTN(i), val);
	val = eol << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT;
	for (i = 0; i < num_vfs; i++)
		wr32(hw, I40E_VPINT_LNKLST0(i), val);
	for (i = 0; i < num_vf_int - 2; i++)
		wr32(hw, I40E_VPINT_LNKLSTN(i), val);

	/* warn the HW of the coming Tx disables */
	for (i = 0; i < num_queues; i++) {
		u32 abs_queue_idx = base_queue + i;
		u32 reg_block = 0;

		if (abs_queue_idx >= 128) {
			reg_block = abs_queue_idx / 128;
			abs_queue_idx %= 128;
		}

		val = rd32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block));
		val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;
		val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
		val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK;

		wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), val);
	}
	udelay(400);

	/* stop all the queues */
	for (i = 0; i < num_queues; i++) {
		wr32(hw, I40E_QINT_TQCTL(i), 0);
		wr32(hw, I40E_QTX_ENA(i), 0);
		wr32(hw, I40E_QINT_RQCTL(i), 0);
		wr32(hw, I40E_QRX_ENA(i), 0);
	}

	/* short wait for all queue disables to settle */
	udelay(50);
}

/**
 * i40e_clear_pxe_mode - clear pxe operations mode
 * @hw: pointer to the hw struct
 *
 * Make sure all PXE mode settings are cleared, including things
 * like descriptor fetch/write-back mode.
 **/
void i40e_clear_pxe_mode(struct i40e_hw *hw)
{
	u32 reg;

	if (i40e_check_asq_alive(hw))
		i40e_aq_clear_pxe_mode(hw, NULL);

	/* Clear single descriptor fetch/write-back mode */
	reg = rd32(hw, I40E_GLLAN_RCTL_0);

	if (hw->revision_id == 0) {
		/* As a work around clear PXE_MODE instead of setting it */
		wr32(hw, I40E_GLLAN_RCTL_0, (reg & (~I40E_GLLAN_RCTL_0_PXE_MODE_MASK)));
	} else {
		wr32(hw, I40E_GLLAN_RCTL_0, (reg | I40E_GLLAN_RCTL_0_PXE_MODE_MASK));
	}
}

/**
 * i40e_led_is_mine - helper to find matching led
 * @hw: pointer to the hw struct
 * @idx: index into GPIO registers
 *
 * returns: 0 if no match, otherwise the value of the GPIO_CTL register
 */
static u32 i40e_led_is_mine(struct i40e_hw *hw, int idx)
{
	u32 gpio_val = 0;
	u32 port;

	if (!I40E_IS_X710TL_DEVICE(hw->device_id) &&
	    !hw->func_caps.led[idx])
		return 0;
	gpio_val = rd32(hw, I40E_GLGEN_GPIO_CTL(idx));
	port = (gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK) >>
		I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT;

	/* if PRT_NUM_NA is 1 then this LED is not port specific, OR
	 * if it is not our port then ignore
	 */
	if ((gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_MASK) ||
	    (port != hw->port))
		return 0;

	return gpio_val;
}

#define I40E_FW_LED		BIT(4)
#define I40E_LED_MODE_VALID	(I40E_GLGEN_GPIO_CTL_LED_MODE_MASK >> \
				 I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT)

#define I40E_LED0		22

#define I40E_PIN_FUNC_SDP	0x0
#define I40E_PIN_FUNC_LED	0x1

/**
 * i40e_led_get - return current on/off mode
 * @hw: pointer to the hw struct
 *
 * The value returned is the 'mode' field as defined in the
 * GPIO register definitions: 0x0 = off, 0xf = on, and other
 * values are variations of possible behaviors relating to
 * blink, link, and wire.
 **/
u32 i40e_led_get(struct i40e_hw *hw)
{
	u32 mode = 0;
	int i;

	/* as per the documentation GPIO 22-29 are the LED
	 * GPIO pins named LED0..LED7
	 */
	for (i = I40E_LED0; i <= I40E_GLGEN_GPIO_CTL_MAX_INDEX; i++) {
		u32 gpio_val = i40e_led_is_mine(hw, i);

		if (!gpio_val)
			continue;

		mode = (gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK) >>
		       I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT;
		break;
	}

	return mode;
}

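/*
 * Illustrative sketch (not part of the driver): an identify-LED handler such
 * as ethtool's set_phys_id might save the current mode with i40e_led_get(),
 * blink the LED, and later restore the original state with i40e_led_set().
 * 'saved_mode' is a hypothetical local used only for this example.
 *
 *	saved_mode = i40e_led_get(hw);
 *	i40e_led_set(hw, 0xf, true);		// on, blinking
 *	...
 *	i40e_led_set(hw, saved_mode, false);	// restore, no blink
 */
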
/**
 * i40e_led_set - set new on/off mode
 * @hw: pointer to the hw struct
 * @mode: 0=off, 0xf=on (else see manual for mode details)
 * @blink: true if the LED should blink when on, false if steady
 *
 * If this function is used to turn on the blink, it should also be
 * used to disable the blink when restoring the original state.
 **/
void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
{
	int i;

	if (mode & ~I40E_LED_MODE_VALID) {
		hw_dbg(hw, "invalid mode passed in %X\n", mode);
		return;
	}

	/* as per the documentation GPIO 22-29 are the LED
	 * GPIO pins named LED0..LED7
	 */
	for (i = I40E_LED0; i <= I40E_GLGEN_GPIO_CTL_MAX_INDEX; i++) {
		u32 gpio_val = i40e_led_is_mine(hw, i);

		if (!gpio_val)
			continue;

		if (I40E_IS_X710TL_DEVICE(hw->device_id)) {
			u32 pin_func = 0;

			if (mode & I40E_FW_LED)
				pin_func = I40E_PIN_FUNC_SDP;
			else
				pin_func = I40E_PIN_FUNC_LED;

			gpio_val &= ~I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK;
			gpio_val |= ((pin_func <<
				     I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT) &
				     I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK);
		}
		gpio_val &= ~I40E_GLGEN_GPIO_CTL_LED_MODE_MASK;
		/* this & is a bit of paranoia, but serves as a range check */
		gpio_val |= ((mode << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) &
			     I40E_GLGEN_GPIO_CTL_LED_MODE_MASK);

		if (blink)
			gpio_val |= BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
		else
			gpio_val &= ~BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);

		wr32(hw, I40E_GLGEN_GPIO_CTL(i), gpio_val);
		break;
	}
}

/* Admin command wrappers */

/**
 * i40e_aq_get_phy_capabilities
 * @hw: pointer to the hw struct
 * @abilities: structure for PHY capabilities to be filled
 * @qualified_modules: report Qualified Modules
 * @report_init: report init capabilities (active are default)
 * @cmd_details: pointer to command details structure or NULL
 *
 * Returns the various PHY abilities supported on the Port.
 **/
i40e_status i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
					 bool qualified_modules, bool report_init,
					 struct i40e_aq_get_phy_abilities_resp *abilities,
					 struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	i40e_status status;
	u16 abilities_size = sizeof(struct i40e_aq_get_phy_abilities_resp);
	u16 max_delay = I40E_MAX_PHY_TIMEOUT, total_delay = 0;

	if (!abilities)
		return I40E_ERR_PARAM;

	do {
		i40e_fill_default_direct_cmd_desc(&desc,
						  i40e_aqc_opc_get_phy_abilities);

		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
		if (abilities_size > I40E_AQ_LARGE_BUF)
			desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);

		if (qualified_modules)
			desc.params.external.param0 |=
			cpu_to_le32(I40E_AQ_PHY_REPORT_QUALIFIED_MODULES);

		if (report_init)
			desc.params.external.param0 |=
			cpu_to_le32(I40E_AQ_PHY_REPORT_INITIAL_VALUES);

		status = i40e_asq_send_command(hw, &desc, abilities,
					       abilities_size, cmd_details);

		switch (hw->aq.asq_last_status) {
		case I40E_AQ_RC_EIO:
			status = I40E_ERR_UNKNOWN_PHY;
			break;
		case I40E_AQ_RC_EAGAIN:
			usleep_range(1000, 2000);
			total_delay++;
			status = I40E_ERR_TIMEOUT;
			break;
		/* also covers I40E_AQ_RC_OK */
		default:
			break;
		}

	} while ((hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN) &&
		 (total_delay < max_delay));

	if (status)
		return status;

	if (report_init) {
		if (hw->mac.type == I40E_MAC_XL710 &&
		    hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
		    hw->aq.api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710) {
			status = i40e_aq_get_link_info(hw, true, NULL, NULL);
		} else {
			hw->phy.phy_types = le32_to_cpu(abilities->phy_type);
			hw->phy.phy_types |=
					((u64)abilities->phy_type_ext << 32);
		}
	}

	return status;
}

/**
 * i40e_aq_set_phy_config
 * @hw: pointer to the hw struct
 * @config: structure with PHY configuration to be set
 * @cmd_details: pointer to command details structure or NULL
 *
 * Set the various PHY configuration parameters
 * supported on the Port. One or more of the Set PHY config parameters may be
 * ignored in an MFP mode as the PF may not have the privilege to set some
 * of the PHY Config parameters. This status will be indicated by the
 * command response.
 **/
enum i40e_status_code i40e_aq_set_phy_config(struct i40e_hw *hw,
					     struct i40e_aq_set_phy_config *config,
					     struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aq_set_phy_config *cmd =
		(struct i40e_aq_set_phy_config *)&desc.params.raw;
	enum i40e_status_code status;

	if (!config)
		return I40E_ERR_PARAM;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_set_phy_config);

	*cmd = *config;

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	return status;
}

static noinline_for_stack enum i40e_status_code
i40e_set_fc_status(struct i40e_hw *hw,
		   struct i40e_aq_get_phy_abilities_resp *abilities,
		   bool atomic_restart)
{
	struct i40e_aq_set_phy_config config;
	enum i40e_fc_mode fc_mode = hw->fc.requested_mode;
	u8 pause_mask = 0x0;

	switch (fc_mode) {
	case I40E_FC_FULL:
		pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_TX;
		pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_RX;
		break;
	case I40E_FC_RX_PAUSE:
		pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_RX;
		break;
	case I40E_FC_TX_PAUSE:
		pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_TX;
		break;
	default:
		break;
	}

	memset(&config, 0, sizeof(struct i40e_aq_set_phy_config));
	/* clear the old pause settings */
	config.abilities = abilities->abilities & ~(I40E_AQ_PHY_FLAG_PAUSE_TX) &
			   ~(I40E_AQ_PHY_FLAG_PAUSE_RX);
	/* set the new abilities */
	config.abilities |= pause_mask;
	/* If the abilities have changed, then set the new config */
	if (config.abilities == abilities->abilities)
		return 0;

	/* Auto restart link so settings take effect */
	if (atomic_restart)
		config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
	/* Copy over all the old settings */
	config.phy_type = abilities->phy_type;
	config.phy_type_ext = abilities->phy_type_ext;
	config.link_speed = abilities->link_speed;
	config.eee_capability = abilities->eee_capability;
	config.eeer = abilities->eeer_val;
	config.low_power_ctrl = abilities->d3_lpan;
	config.fec_config = abilities->fec_cfg_curr_mod_ext_info &
			    I40E_AQ_PHY_FEC_CONFIG_MASK;

	return i40e_aq_set_phy_config(hw, &config, NULL);
}

/**
 * i40e_set_fc
 * @hw: pointer to the hw struct
 * @aq_failures: buffer to return AdminQ failure information
 * @atomic_restart: whether to enable atomic link restart
 *
 * Set the requested flow control mode using set_phy_config.
 **/
enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
				  bool atomic_restart)
{
	struct i40e_aq_get_phy_abilities_resp abilities;
	enum i40e_status_code status;

	*aq_failures = 0x0;

	/* Get the current phy config */
	status = i40e_aq_get_phy_capabilities(hw, false, false, &abilities,
					      NULL);
	if (status) {
		*aq_failures |= I40E_SET_FC_AQ_FAIL_GET;
		return status;
	}

	status = i40e_set_fc_status(hw, &abilities, atomic_restart);
	if (status)
		*aq_failures |= I40E_SET_FC_AQ_FAIL_SET;

	/* Update the link info */
	status = i40e_update_link_info(hw);
	if (status) {
		/* Wait a little bit (on 40G cards it sometimes takes a really
		 * long time for link to come back from the atomic reset)
		 * and try once more
		 */
		msleep(1000);
		status = i40e_update_link_info(hw);
	}
	if (status)
		*aq_failures |= I40E_SET_FC_AQ_FAIL_UPDATE;

	return status;
}

/**
 * i40e_aq_clear_pxe_mode
 * @hw: pointer to the hw struct
 * @cmd_details: pointer to command details structure or NULL
 *
 * Tell the firmware that the driver is taking over from PXE
 **/
i40e_status i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
				   struct i40e_asq_cmd_details *cmd_details)
{
	i40e_status status;
	struct i40e_aq_desc desc;
	struct i40e_aqc_clear_pxe *cmd =
		(struct i40e_aqc_clear_pxe *)&desc.params.raw;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_clear_pxe_mode);

	cmd->rx_cnt = 0x2;

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	wr32(hw, I40E_GLLAN_RCTL_0, 0x1);

	return status;
}

/**
 * i40e_aq_set_link_restart_an
 * @hw: pointer to the hw struct
 * @enable_link: if true: enable link, if false: disable link
 * @cmd_details: pointer to command details structure or NULL
 *
 * Sets up the link and restarts the Auto-Negotiation over the link.
 **/
i40e_status i40e_aq_set_link_restart_an(struct i40e_hw *hw,
					bool enable_link,
					struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_set_link_restart_an *cmd =
		(struct i40e_aqc_set_link_restart_an *)&desc.params.raw;
	i40e_status status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_set_link_restart_an);

	cmd->command = I40E_AQ_PHY_RESTART_AN;
	if (enable_link)
		cmd->command |= I40E_AQ_PHY_LINK_ENABLE;
	else
		cmd->command &= ~I40E_AQ_PHY_LINK_ENABLE;

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	return status;
}

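/*
 * Illustrative sketch (not part of the driver): a pause-parameter handler
 * might request a new flow control mode and apply it with i40e_set_fc()
 * above. 'aq_failures' is a local used only for this example.
 *
 *	u8 aq_failures = 0;
 *
 *	hw->fc.requested_mode = I40E_FC_FULL;
 *	status = i40e_set_fc(hw, &aq_failures, true);
 *	if (status)
 *		hw_dbg(hw, "set_fc failed, aq_failures 0x%x\n", aq_failures);
 */
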
1717 **/ 1718 i40e_status i40e_aq_get_link_info(struct i40e_hw *hw, 1719 bool enable_lse, struct i40e_link_status *link, 1720 struct i40e_asq_cmd_details *cmd_details) 1721 { 1722 struct i40e_aq_desc desc; 1723 struct i40e_aqc_get_link_status *resp = 1724 (struct i40e_aqc_get_link_status *)&desc.params.raw; 1725 struct i40e_link_status *hw_link_info = &hw->phy.link_info; 1726 i40e_status status; 1727 bool tx_pause, rx_pause; 1728 u16 command_flags; 1729 1730 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status); 1731 1732 if (enable_lse) 1733 command_flags = I40E_AQ_LSE_ENABLE; 1734 else 1735 command_flags = I40E_AQ_LSE_DISABLE; 1736 resp->command_flags = cpu_to_le16(command_flags); 1737 1738 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 1739 1740 if (status) 1741 goto aq_get_link_info_exit; 1742 1743 /* save off old link status information */ 1744 hw->phy.link_info_old = *hw_link_info; 1745 1746 /* update link status */ 1747 hw_link_info->phy_type = (enum i40e_aq_phy_type)resp->phy_type; 1748 hw->phy.media_type = i40e_get_media_type(hw); 1749 hw_link_info->link_speed = (enum i40e_aq_link_speed)resp->link_speed; 1750 hw_link_info->link_info = resp->link_info; 1751 hw_link_info->an_info = resp->an_info; 1752 hw_link_info->fec_info = resp->config & (I40E_AQ_CONFIG_FEC_KR_ENA | 1753 I40E_AQ_CONFIG_FEC_RS_ENA); 1754 hw_link_info->ext_info = resp->ext_info; 1755 hw_link_info->loopback = resp->loopback & I40E_AQ_LOOPBACK_MASK; 1756 hw_link_info->max_frame_size = le16_to_cpu(resp->max_frame_size); 1757 hw_link_info->pacing = resp->config & I40E_AQ_CONFIG_PACING_MASK; 1758 1759 /* update fc info */ 1760 tx_pause = !!(resp->an_info & I40E_AQ_LINK_PAUSE_TX); 1761 rx_pause = !!(resp->an_info & I40E_AQ_LINK_PAUSE_RX); 1762 if (tx_pause & rx_pause) 1763 hw->fc.current_mode = I40E_FC_FULL; 1764 else if (tx_pause) 1765 hw->fc.current_mode = I40E_FC_TX_PAUSE; 1766 else if (rx_pause) 1767 hw->fc.current_mode = I40E_FC_RX_PAUSE; 1768 else 1769 hw->fc.current_mode = I40E_FC_NONE; 1770 1771 if (resp->config & I40E_AQ_CONFIG_CRC_ENA) 1772 hw_link_info->crc_enable = true; 1773 else 1774 hw_link_info->crc_enable = false; 1775 1776 if (resp->command_flags & cpu_to_le16(I40E_AQ_LSE_IS_ENABLED)) 1777 hw_link_info->lse_enable = true; 1778 else 1779 hw_link_info->lse_enable = false; 1780 1781 if ((hw->mac.type == I40E_MAC_XL710) && 1782 (hw->aq.fw_maj_ver < 4 || (hw->aq.fw_maj_ver == 4 && 1783 hw->aq.fw_min_ver < 40)) && hw_link_info->phy_type == 0xE) 1784 hw_link_info->phy_type = I40E_PHY_TYPE_10GBASE_SFPP_CU; 1785 1786 if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE && 1787 hw->mac.type != I40E_MAC_X722) { 1788 __le32 tmp; 1789 1790 memcpy(&tmp, resp->link_type, sizeof(tmp)); 1791 hw->phy.phy_types = le32_to_cpu(tmp); 1792 hw->phy.phy_types |= ((u64)resp->link_type_ext << 32); 1793 } 1794 1795 /* save link status information */ 1796 if (link) 1797 *link = *hw_link_info; 1798 1799 /* flag cleared so helper functions don't call AQ again */ 1800 hw->phy.get_link_info = false; 1801 1802 aq_get_link_info_exit: 1803 return status; 1804 } 1805 1806 /** 1807 * i40e_aq_set_phy_int_mask 1808 * @hw: pointer to the hw struct 1809 * @mask: interrupt mask to be set 1810 * @cmd_details: pointer to command details structure or NULL 1811 * 1812 * Set link interrupt mask. 
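 *
 * Typical use is to leave only the events the driver services unmasked,
 * e.g. (sketch; the event bit names are those defined in i40e_adminq_cmd.h):
 *
 *	status = i40e_aq_set_phy_int_mask(hw,
 *					  ~(I40E_AQ_EVENT_LINK_UPDOWN |
 *					    I40E_AQ_EVENT_MEDIA_NA |
 *					    I40E_AQ_EVENT_MODULE_QUAL_FAIL),
 *					  NULL);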
1813 **/ 1814 i40e_status i40e_aq_set_phy_int_mask(struct i40e_hw *hw, 1815 u16 mask, 1816 struct i40e_asq_cmd_details *cmd_details) 1817 { 1818 struct i40e_aq_desc desc; 1819 struct i40e_aqc_set_phy_int_mask *cmd = 1820 (struct i40e_aqc_set_phy_int_mask *)&desc.params.raw; 1821 i40e_status status; 1822 1823 i40e_fill_default_direct_cmd_desc(&desc, 1824 i40e_aqc_opc_set_phy_int_mask); 1825 1826 cmd->event_mask = cpu_to_le16(mask); 1827 1828 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 1829 1830 return status; 1831 } 1832 1833 /** 1834 * i40e_aq_set_mac_loopback 1835 * @hw: pointer to the HW struct 1836 * @ena_lpbk: Enable or Disable loopback 1837 * @cmd_details: pointer to command details structure or NULL 1838 * 1839 * Enable/disable loopback on a given port 1840 */ 1841 i40e_status i40e_aq_set_mac_loopback(struct i40e_hw *hw, bool ena_lpbk, 1842 struct i40e_asq_cmd_details *cmd_details) 1843 { 1844 struct i40e_aq_desc desc; 1845 struct i40e_aqc_set_lb_mode *cmd = 1846 (struct i40e_aqc_set_lb_mode *)&desc.params.raw; 1847 1848 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_lb_modes); 1849 if (ena_lpbk) { 1850 if (hw->nvm.version <= I40E_LEGACY_LOOPBACK_NVM_VER) 1851 cmd->lb_mode = cpu_to_le16(I40E_AQ_LB_MAC_LOCAL_LEGACY); 1852 else 1853 cmd->lb_mode = cpu_to_le16(I40E_AQ_LB_MAC_LOCAL); 1854 } 1855 1856 return i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 1857 } 1858 1859 /** 1860 * i40e_aq_set_phy_debug 1861 * @hw: pointer to the hw struct 1862 * @cmd_flags: debug command flags 1863 * @cmd_details: pointer to command details structure or NULL 1864 * 1865 * Reset the external PHY. 1866 **/ 1867 i40e_status i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags, 1868 struct i40e_asq_cmd_details *cmd_details) 1869 { 1870 struct i40e_aq_desc desc; 1871 struct i40e_aqc_set_phy_debug *cmd = 1872 (struct i40e_aqc_set_phy_debug *)&desc.params.raw; 1873 i40e_status status; 1874 1875 i40e_fill_default_direct_cmd_desc(&desc, 1876 i40e_aqc_opc_set_phy_debug); 1877 1878 cmd->command_flags = cmd_flags; 1879 1880 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 1881 1882 return status; 1883 } 1884 1885 /** 1886 * i40e_is_aq_api_ver_ge 1887 * @aq: pointer to AdminQ info containing HW API version to compare 1888 * @maj: API major value 1889 * @min: API minor value 1890 * 1891 * Assert whether current HW API version is greater/equal than provided. 1892 **/ 1893 static bool i40e_is_aq_api_ver_ge(struct i40e_adminq_info *aq, u16 maj, 1894 u16 min) 1895 { 1896 return (aq->api_maj_ver > maj || 1897 (aq->api_maj_ver == maj && aq->api_min_ver >= min)); 1898 } 1899 1900 /** 1901 * i40e_aq_add_vsi 1902 * @hw: pointer to the hw struct 1903 * @vsi_ctx: pointer to a vsi context struct 1904 * @cmd_details: pointer to command details structure or NULL 1905 * 1906 * Add a VSI context to the hardware. 
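 *
 * Sketch of the calling convention (field values are placeholders; the PF
 * driver builds the real context in its VSI setup path):
 *
 *	struct i40e_vsi_context ctxt = {};
 *
 *	ctxt.uplink_seid = uplink_seid;
 *	ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
 *	ctxt.flags = I40E_AQ_VSI_TYPE_PF;
 *	status = i40e_aq_add_vsi(hw, &ctxt, NULL);
 *
 * On success ctxt.seid and ctxt.vsi_number identify the new VSI.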
1907 **/ 1908 i40e_status i40e_aq_add_vsi(struct i40e_hw *hw, 1909 struct i40e_vsi_context *vsi_ctx, 1910 struct i40e_asq_cmd_details *cmd_details) 1911 { 1912 struct i40e_aq_desc desc; 1913 struct i40e_aqc_add_get_update_vsi *cmd = 1914 (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw; 1915 struct i40e_aqc_add_get_update_vsi_completion *resp = 1916 (struct i40e_aqc_add_get_update_vsi_completion *) 1917 &desc.params.raw; 1918 i40e_status status; 1919 1920 i40e_fill_default_direct_cmd_desc(&desc, 1921 i40e_aqc_opc_add_vsi); 1922 1923 cmd->uplink_seid = cpu_to_le16(vsi_ctx->uplink_seid); 1924 cmd->connection_type = vsi_ctx->connection_type; 1925 cmd->vf_id = vsi_ctx->vf_num; 1926 cmd->vsi_flags = cpu_to_le16(vsi_ctx->flags); 1927 1928 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); 1929 1930 status = i40e_asq_send_command_atomic(hw, &desc, &vsi_ctx->info, 1931 sizeof(vsi_ctx->info), 1932 cmd_details, true); 1933 1934 if (status) 1935 goto aq_add_vsi_exit; 1936 1937 vsi_ctx->seid = le16_to_cpu(resp->seid); 1938 vsi_ctx->vsi_number = le16_to_cpu(resp->vsi_number); 1939 vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used); 1940 vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free); 1941 1942 aq_add_vsi_exit: 1943 return status; 1944 } 1945 1946 /** 1947 * i40e_aq_set_default_vsi 1948 * @hw: pointer to the hw struct 1949 * @seid: vsi number 1950 * @cmd_details: pointer to command details structure or NULL 1951 **/ 1952 i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw, 1953 u16 seid, 1954 struct i40e_asq_cmd_details *cmd_details) 1955 { 1956 struct i40e_aq_desc desc; 1957 struct i40e_aqc_set_vsi_promiscuous_modes *cmd = 1958 (struct i40e_aqc_set_vsi_promiscuous_modes *) 1959 &desc.params.raw; 1960 i40e_status status; 1961 1962 i40e_fill_default_direct_cmd_desc(&desc, 1963 i40e_aqc_opc_set_vsi_promiscuous_modes); 1964 1965 cmd->promiscuous_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT); 1966 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT); 1967 cmd->seid = cpu_to_le16(seid); 1968 1969 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 1970 1971 return status; 1972 } 1973 1974 /** 1975 * i40e_aq_clear_default_vsi 1976 * @hw: pointer to the hw struct 1977 * @seid: vsi number 1978 * @cmd_details: pointer to command details structure or NULL 1979 **/ 1980 i40e_status i40e_aq_clear_default_vsi(struct i40e_hw *hw, 1981 u16 seid, 1982 struct i40e_asq_cmd_details *cmd_details) 1983 { 1984 struct i40e_aq_desc desc; 1985 struct i40e_aqc_set_vsi_promiscuous_modes *cmd = 1986 (struct i40e_aqc_set_vsi_promiscuous_modes *) 1987 &desc.params.raw; 1988 i40e_status status; 1989 1990 i40e_fill_default_direct_cmd_desc(&desc, 1991 i40e_aqc_opc_set_vsi_promiscuous_modes); 1992 1993 cmd->promiscuous_flags = cpu_to_le16(0); 1994 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT); 1995 cmd->seid = cpu_to_le16(seid); 1996 1997 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 1998 1999 return status; 2000 } 2001 2002 /** 2003 * i40e_aq_set_vsi_unicast_promiscuous 2004 * @hw: pointer to the hw struct 2005 * @seid: vsi number 2006 * @set: set unicast promiscuous enable/disable 2007 * @cmd_details: pointer to command details structure or NULL 2008 * @rx_only_promisc: flag to decide if egress traffic gets mirrored in promisc 2009 **/ 2010 i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw, 2011 u16 seid, bool set, 2012 struct i40e_asq_cmd_details *cmd_details, 2013 bool rx_only_promisc) 2014 { 2015 struct i40e_aq_desc desc; 2016 
	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
		(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
	i40e_status status;
	u16 flags = 0;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_set_vsi_promiscuous_modes);

	if (set) {
		flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
		if (rx_only_promisc && i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
			flags |= I40E_AQC_SET_VSI_PROMISC_RX_ONLY;
	}

	cmd->promiscuous_flags = cpu_to_le16(flags);

	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
	if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
		cmd->valid_flags |=
			cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_RX_ONLY);

	cmd->seid = cpu_to_le16(seid);
	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	return status;
}

/**
 * i40e_aq_set_vsi_multicast_promiscuous
 * @hw: pointer to the hw struct
 * @seid: vsi number
 * @set: set multicast promiscuous enable/disable
 * @cmd_details: pointer to command details structure or NULL
 **/
i40e_status i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
				u16 seid, bool set,
				struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
		(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
	i40e_status status;
	u16 flags = 0;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_set_vsi_promiscuous_modes);

	if (set)
		flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST;

	cmd->promiscuous_flags = cpu_to_le16(flags);

	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_MULTICAST);

	cmd->seid = cpu_to_le16(seid);
	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	return status;
}

/**
 * i40e_aq_set_vsi_mc_promisc_on_vlan
 * @hw: pointer to the hw struct
 * @seid: vsi number
 * @enable: set MAC L2 layer multicast promiscuous enable/disable for a given VLAN
 * @vid: The VLAN tag filter - capture any multicast packet with this VLAN tag
 * @cmd_details: pointer to command details structure or NULL
 **/
enum i40e_status_code i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
							  u16 seid, bool enable,
							  u16 vid,
							  struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
		(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
	enum i40e_status_code status;
	u16 flags = 0;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_set_vsi_promiscuous_modes);

	if (enable)
		flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST;

	cmd->promiscuous_flags = cpu_to_le16(flags);
	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_MULTICAST);
	cmd->seid = cpu_to_le16(seid);
	cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID);

	status = i40e_asq_send_command_atomic(hw, &desc, NULL, 0,
					      cmd_details, true);

	return status;
}

/**
 * i40e_aq_set_vsi_uc_promisc_on_vlan
 * @hw: pointer to the hw struct
 * @seid: vsi number
 * @enable: set MAC L2 layer unicast promiscuous enable/disable for a given VLAN
 * @vid: The VLAN tag filter - capture any unicast packet with this VLAN tag
 * @cmd_details: pointer to command details structure or NULL
 **/
enum i40e_status_code
i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw, 2120 u16 seid, bool enable, 2121 u16 vid, 2122 struct i40e_asq_cmd_details *cmd_details) 2123 { 2124 struct i40e_aq_desc desc; 2125 struct i40e_aqc_set_vsi_promiscuous_modes *cmd = 2126 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; 2127 enum i40e_status_code status; 2128 u16 flags = 0; 2129 2130 i40e_fill_default_direct_cmd_desc(&desc, 2131 i40e_aqc_opc_set_vsi_promiscuous_modes); 2132 2133 if (enable) { 2134 flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST; 2135 if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5)) 2136 flags |= I40E_AQC_SET_VSI_PROMISC_RX_ONLY; 2137 } 2138 2139 cmd->promiscuous_flags = cpu_to_le16(flags); 2140 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST); 2141 if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5)) 2142 cmd->valid_flags |= 2143 cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_RX_ONLY); 2144 cmd->seid = cpu_to_le16(seid); 2145 cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID); 2146 2147 status = i40e_asq_send_command_atomic(hw, &desc, NULL, 0, 2148 cmd_details, true); 2149 2150 return status; 2151 } 2152 2153 /** 2154 * i40e_aq_set_vsi_bc_promisc_on_vlan 2155 * @hw: pointer to the hw struct 2156 * @seid: vsi number 2157 * @enable: set broadcast promiscuous enable/disable for a given VLAN 2158 * @vid: The VLAN tag filter - capture any broadcast packet with this VLAN tag 2159 * @cmd_details: pointer to command details structure or NULL 2160 **/ 2161 i40e_status i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw, 2162 u16 seid, bool enable, u16 vid, 2163 struct i40e_asq_cmd_details *cmd_details) 2164 { 2165 struct i40e_aq_desc desc; 2166 struct i40e_aqc_set_vsi_promiscuous_modes *cmd = 2167 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; 2168 i40e_status status; 2169 u16 flags = 0; 2170 2171 i40e_fill_default_direct_cmd_desc(&desc, 2172 i40e_aqc_opc_set_vsi_promiscuous_modes); 2173 2174 if (enable) 2175 flags |= I40E_AQC_SET_VSI_PROMISC_BROADCAST; 2176 2177 cmd->promiscuous_flags = cpu_to_le16(flags); 2178 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST); 2179 cmd->seid = cpu_to_le16(seid); 2180 cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID); 2181 2182 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2183 2184 return status; 2185 } 2186 2187 /** 2188 * i40e_aq_set_vsi_broadcast 2189 * @hw: pointer to the hw struct 2190 * @seid: vsi number 2191 * @set_filter: true to set filter, false to clear filter 2192 * @cmd_details: pointer to command details structure or NULL 2193 * 2194 * Set or clear the broadcast promiscuous flag (filter) for a given VSI. 
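 *
 * Sketch (the seid value is a placeholder for the target VSI's SEID):
 *
 *	status = i40e_aq_set_vsi_broadcast(hw, seid, true, NULL);
 *
 * Unlike i40e_aq_set_vsi_bc_promisc_on_vlan() above, this variant applies
 * to all broadcast traffic on the VSI rather than to a single VLAN.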
2195 **/ 2196 i40e_status i40e_aq_set_vsi_broadcast(struct i40e_hw *hw, 2197 u16 seid, bool set_filter, 2198 struct i40e_asq_cmd_details *cmd_details) 2199 { 2200 struct i40e_aq_desc desc; 2201 struct i40e_aqc_set_vsi_promiscuous_modes *cmd = 2202 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; 2203 i40e_status status; 2204 2205 i40e_fill_default_direct_cmd_desc(&desc, 2206 i40e_aqc_opc_set_vsi_promiscuous_modes); 2207 2208 if (set_filter) 2209 cmd->promiscuous_flags 2210 |= cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST); 2211 else 2212 cmd->promiscuous_flags 2213 &= cpu_to_le16(~I40E_AQC_SET_VSI_PROMISC_BROADCAST); 2214 2215 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST); 2216 cmd->seid = cpu_to_le16(seid); 2217 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2218 2219 return status; 2220 } 2221 2222 /** 2223 * i40e_aq_set_vsi_vlan_promisc - control the VLAN promiscuous setting 2224 * @hw: pointer to the hw struct 2225 * @seid: vsi number 2226 * @enable: set MAC L2 layer unicast promiscuous enable/disable for a given VLAN 2227 * @cmd_details: pointer to command details structure or NULL 2228 **/ 2229 i40e_status i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw, 2230 u16 seid, bool enable, 2231 struct i40e_asq_cmd_details *cmd_details) 2232 { 2233 struct i40e_aq_desc desc; 2234 struct i40e_aqc_set_vsi_promiscuous_modes *cmd = 2235 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; 2236 i40e_status status; 2237 u16 flags = 0; 2238 2239 i40e_fill_default_direct_cmd_desc(&desc, 2240 i40e_aqc_opc_set_vsi_promiscuous_modes); 2241 if (enable) 2242 flags |= I40E_AQC_SET_VSI_PROMISC_VLAN; 2243 2244 cmd->promiscuous_flags = cpu_to_le16(flags); 2245 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_VLAN); 2246 cmd->seid = cpu_to_le16(seid); 2247 2248 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2249 2250 return status; 2251 } 2252 2253 /** 2254 * i40e_aq_get_vsi_params - get VSI configuration info 2255 * @hw: pointer to the hw struct 2256 * @vsi_ctx: pointer to a vsi context struct 2257 * @cmd_details: pointer to command details structure or NULL 2258 **/ 2259 i40e_status i40e_aq_get_vsi_params(struct i40e_hw *hw, 2260 struct i40e_vsi_context *vsi_ctx, 2261 struct i40e_asq_cmd_details *cmd_details) 2262 { 2263 struct i40e_aq_desc desc; 2264 struct i40e_aqc_add_get_update_vsi *cmd = 2265 (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw; 2266 struct i40e_aqc_add_get_update_vsi_completion *resp = 2267 (struct i40e_aqc_add_get_update_vsi_completion *) 2268 &desc.params.raw; 2269 i40e_status status; 2270 2271 i40e_fill_default_direct_cmd_desc(&desc, 2272 i40e_aqc_opc_get_vsi_parameters); 2273 2274 cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid); 2275 2276 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 2277 2278 status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info, 2279 sizeof(vsi_ctx->info), NULL); 2280 2281 if (status) 2282 goto aq_get_vsi_params_exit; 2283 2284 vsi_ctx->seid = le16_to_cpu(resp->seid); 2285 vsi_ctx->vsi_number = le16_to_cpu(resp->vsi_number); 2286 vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used); 2287 vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free); 2288 2289 aq_get_vsi_params_exit: 2290 return status; 2291 } 2292 2293 /** 2294 * i40e_aq_update_vsi_params 2295 * @hw: pointer to the hw struct 2296 * @vsi_ctx: pointer to a vsi context struct 2297 * @cmd_details: pointer to command details structure or NULL 2298 * 2299 * Update a VSI context. 
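 *
 * The usual pattern is read-modify-write of the VSI context (sketch only;
 * the property section shown is illustrative):
 *
 *	status = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
 *	if (!status) {
 *		ctxt.info.valid_sections |=
 *			cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
 *		status = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
 *	}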
 **/
i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw,
				struct i40e_vsi_context *vsi_ctx,
				struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_add_get_update_vsi *cmd =
		(struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
	struct i40e_aqc_add_get_update_vsi_completion *resp =
		(struct i40e_aqc_add_get_update_vsi_completion *)
		&desc.params.raw;
	i40e_status status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_update_vsi_parameters);
	cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid);

	desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));

	status = i40e_asq_send_command_atomic(hw, &desc, &vsi_ctx->info,
					      sizeof(vsi_ctx->info),
					      cmd_details, true);

	vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used);
	vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);

	return status;
}

/**
 * i40e_aq_get_switch_config
 * @hw: pointer to the hardware structure
 * @buf: pointer to the result buffer
 * @buf_size: length of input buffer
 * @start_seid: seid to start for the report, 0 == beginning
 * @cmd_details: pointer to command details structure or NULL
 *
 * Fill the buf with switch configuration returned from AdminQ command
 **/
i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw,
				struct i40e_aqc_get_switch_config_resp *buf,
				u16 buf_size, u16 *start_seid,
				struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_switch_seid *scfg =
		(struct i40e_aqc_switch_seid *)&desc.params.raw;
	i40e_status status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_get_switch_config);
	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
	if (buf_size > I40E_AQ_LARGE_BUF)
		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
	scfg->seid = cpu_to_le16(*start_seid);

	status = i40e_asq_send_command(hw, &desc, buf, buf_size, cmd_details);
	*start_seid = le16_to_cpu(scfg->seid);

	return status;
}

/**
 * i40e_aq_set_switch_config
 * @hw: pointer to the hardware structure
 * @flags: bit flag values to set
 * @mode: cloud filter mode
 * @valid_flags: which bit flags to set
 * @cmd_details: pointer to command details structure or NULL
 *
 * Set switch configuration bits
 **/
enum i40e_status_code i40e_aq_set_switch_config(struct i40e_hw *hw,
						u16 flags,
						u16 valid_flags, u8 mode,
						struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_set_switch_config *scfg =
		(struct i40e_aqc_set_switch_config *)&desc.params.raw;
	enum i40e_status_code status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_set_switch_config);
	scfg->flags = cpu_to_le16(flags);
	scfg->valid_flags = cpu_to_le16(valid_flags);
	scfg->mode = mode;
	if (hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) {
		scfg->switch_tag = cpu_to_le16(hw->switch_tag);
		scfg->first_tag = cpu_to_le16(hw->first_tag);
		scfg->second_tag = cpu_to_le16(hw->second_tag);
	}
	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	return status;
}

/**
 * i40e_aq_get_firmware_version
 * @hw: pointer to the hw struct
 * @fw_major_version: firmware major version
 *
@fw_minor_version: firmware minor version 2403 * @fw_build: firmware build number 2404 * @api_major_version: major queue version 2405 * @api_minor_version: minor queue version 2406 * @cmd_details: pointer to command details structure or NULL 2407 * 2408 * Get the firmware version from the admin queue commands 2409 **/ 2410 i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw, 2411 u16 *fw_major_version, u16 *fw_minor_version, 2412 u32 *fw_build, 2413 u16 *api_major_version, u16 *api_minor_version, 2414 struct i40e_asq_cmd_details *cmd_details) 2415 { 2416 struct i40e_aq_desc desc; 2417 struct i40e_aqc_get_version *resp = 2418 (struct i40e_aqc_get_version *)&desc.params.raw; 2419 i40e_status status; 2420 2421 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_version); 2422 2423 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2424 2425 if (!status) { 2426 if (fw_major_version) 2427 *fw_major_version = le16_to_cpu(resp->fw_major); 2428 if (fw_minor_version) 2429 *fw_minor_version = le16_to_cpu(resp->fw_minor); 2430 if (fw_build) 2431 *fw_build = le32_to_cpu(resp->fw_build); 2432 if (api_major_version) 2433 *api_major_version = le16_to_cpu(resp->api_major); 2434 if (api_minor_version) 2435 *api_minor_version = le16_to_cpu(resp->api_minor); 2436 } 2437 2438 return status; 2439 } 2440 2441 /** 2442 * i40e_aq_send_driver_version 2443 * @hw: pointer to the hw struct 2444 * @dv: driver's major, minor version 2445 * @cmd_details: pointer to command details structure or NULL 2446 * 2447 * Send the driver version to the firmware 2448 **/ 2449 i40e_status i40e_aq_send_driver_version(struct i40e_hw *hw, 2450 struct i40e_driver_version *dv, 2451 struct i40e_asq_cmd_details *cmd_details) 2452 { 2453 struct i40e_aq_desc desc; 2454 struct i40e_aqc_driver_version *cmd = 2455 (struct i40e_aqc_driver_version *)&desc.params.raw; 2456 i40e_status status; 2457 u16 len; 2458 2459 if (dv == NULL) 2460 return I40E_ERR_PARAM; 2461 2462 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_driver_version); 2463 2464 desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD); 2465 cmd->driver_major_ver = dv->major_version; 2466 cmd->driver_minor_ver = dv->minor_version; 2467 cmd->driver_build_ver = dv->build_version; 2468 cmd->driver_subbuild_ver = dv->subbuild_version; 2469 2470 len = 0; 2471 while (len < sizeof(dv->driver_string) && 2472 (dv->driver_string[len] < 0x80) && 2473 dv->driver_string[len]) 2474 len++; 2475 status = i40e_asq_send_command(hw, &desc, dv->driver_string, 2476 len, cmd_details); 2477 2478 return status; 2479 } 2480 2481 /** 2482 * i40e_get_link_status - get status of the HW network link 2483 * @hw: pointer to the hw struct 2484 * @link_up: pointer to bool (true/false = linkup/linkdown) 2485 * 2486 * Variable link_up true if link is up, false if link is down. 
2487 * The variable link_up is invalid if returned value of status != 0 2488 * 2489 * Side effect: LinkStatusEvent reporting becomes enabled 2490 **/ 2491 i40e_status i40e_get_link_status(struct i40e_hw *hw, bool *link_up) 2492 { 2493 i40e_status status = 0; 2494 2495 if (hw->phy.get_link_info) { 2496 status = i40e_update_link_info(hw); 2497 2498 if (status) 2499 i40e_debug(hw, I40E_DEBUG_LINK, "get link failed: status %d\n", 2500 status); 2501 } 2502 2503 *link_up = hw->phy.link_info.link_info & I40E_AQ_LINK_UP; 2504 2505 return status; 2506 } 2507 2508 /** 2509 * i40e_update_link_info - update status of the HW network link 2510 * @hw: pointer to the hw struct 2511 **/ 2512 noinline_for_stack i40e_status i40e_update_link_info(struct i40e_hw *hw) 2513 { 2514 struct i40e_aq_get_phy_abilities_resp abilities; 2515 i40e_status status = 0; 2516 2517 status = i40e_aq_get_link_info(hw, true, NULL, NULL); 2518 if (status) 2519 return status; 2520 2521 /* extra checking needed to ensure link info to user is timely */ 2522 if ((hw->phy.link_info.link_info & I40E_AQ_MEDIA_AVAILABLE) && 2523 ((hw->phy.link_info.link_info & I40E_AQ_LINK_UP) || 2524 !(hw->phy.link_info_old.link_info & I40E_AQ_LINK_UP))) { 2525 status = i40e_aq_get_phy_capabilities(hw, false, false, 2526 &abilities, NULL); 2527 if (status) 2528 return status; 2529 2530 if (abilities.fec_cfg_curr_mod_ext_info & 2531 I40E_AQ_ENABLE_FEC_AUTO) 2532 hw->phy.link_info.req_fec_info = 2533 (I40E_AQ_REQUEST_FEC_KR | 2534 I40E_AQ_REQUEST_FEC_RS); 2535 else 2536 hw->phy.link_info.req_fec_info = 2537 abilities.fec_cfg_curr_mod_ext_info & 2538 (I40E_AQ_REQUEST_FEC_KR | 2539 I40E_AQ_REQUEST_FEC_RS); 2540 2541 memcpy(hw->phy.link_info.module_type, &abilities.module_type, 2542 sizeof(hw->phy.link_info.module_type)); 2543 } 2544 2545 return status; 2546 } 2547 2548 /** 2549 * i40e_aq_add_veb - Insert a VEB between the VSI and the MAC 2550 * @hw: pointer to the hw struct 2551 * @uplink_seid: the MAC or other gizmo SEID 2552 * @downlink_seid: the VSI SEID 2553 * @enabled_tc: bitmap of TCs to be enabled 2554 * @default_port: true for default port VSI, false for control port 2555 * @veb_seid: pointer to where to put the resulting VEB SEID 2556 * @enable_stats: true to turn on VEB stats 2557 * @cmd_details: pointer to command details structure or NULL 2558 * 2559 * This asks the FW to add a VEB between the uplink and downlink 2560 * elements. If the uplink SEID is 0, this will be a floating VEB. 
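 *
 * Sketch (uplink_seid, vsi_seid and enabled_tc are placeholders; pass 0 for
 * both the uplink and downlink SEIDs to create a floating VEB):
 *
 *	u16 veb_seid = 0;
 *
 *	status = i40e_aq_add_veb(hw, uplink_seid, vsi_seid, enabled_tc,
 *				 true, &veb_seid, false, NULL);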
2561 **/ 2562 i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid, 2563 u16 downlink_seid, u8 enabled_tc, 2564 bool default_port, u16 *veb_seid, 2565 bool enable_stats, 2566 struct i40e_asq_cmd_details *cmd_details) 2567 { 2568 struct i40e_aq_desc desc; 2569 struct i40e_aqc_add_veb *cmd = 2570 (struct i40e_aqc_add_veb *)&desc.params.raw; 2571 struct i40e_aqc_add_veb_completion *resp = 2572 (struct i40e_aqc_add_veb_completion *)&desc.params.raw; 2573 i40e_status status; 2574 u16 veb_flags = 0; 2575 2576 /* SEIDs need to either both be set or both be 0 for floating VEB */ 2577 if (!!uplink_seid != !!downlink_seid) 2578 return I40E_ERR_PARAM; 2579 2580 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_veb); 2581 2582 cmd->uplink_seid = cpu_to_le16(uplink_seid); 2583 cmd->downlink_seid = cpu_to_le16(downlink_seid); 2584 cmd->enable_tcs = enabled_tc; 2585 if (!uplink_seid) 2586 veb_flags |= I40E_AQC_ADD_VEB_FLOATING; 2587 if (default_port) 2588 veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT; 2589 else 2590 veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DATA; 2591 2592 /* reverse logic here: set the bitflag to disable the stats */ 2593 if (!enable_stats) 2594 veb_flags |= I40E_AQC_ADD_VEB_ENABLE_DISABLE_STATS; 2595 2596 cmd->veb_flags = cpu_to_le16(veb_flags); 2597 2598 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2599 2600 if (!status && veb_seid) 2601 *veb_seid = le16_to_cpu(resp->veb_seid); 2602 2603 return status; 2604 } 2605 2606 /** 2607 * i40e_aq_get_veb_parameters - Retrieve VEB parameters 2608 * @hw: pointer to the hw struct 2609 * @veb_seid: the SEID of the VEB to query 2610 * @switch_id: the uplink switch id 2611 * @floating: set to true if the VEB is floating 2612 * @statistic_index: index of the stats counter block for this VEB 2613 * @vebs_used: number of VEB's used by function 2614 * @vebs_free: total VEB's not reserved by any function 2615 * @cmd_details: pointer to command details structure or NULL 2616 * 2617 * This retrieves the parameters for a particular VEB, specified by 2618 * uplink_seid, and returns them to the caller. 
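 *
 * Any of the output pointers may be NULL when that value is not needed
 * (sketch; veb_seid is a placeholder):
 *
 *	u16 switch_id, stats_idx, used, free;
 *	bool floating;
 *
 *	status = i40e_aq_get_veb_parameters(hw, veb_seid, &switch_id,
 *					    &floating, &stats_idx,
 *					    &used, &free, NULL);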
2619 **/ 2620 i40e_status i40e_aq_get_veb_parameters(struct i40e_hw *hw, 2621 u16 veb_seid, u16 *switch_id, 2622 bool *floating, u16 *statistic_index, 2623 u16 *vebs_used, u16 *vebs_free, 2624 struct i40e_asq_cmd_details *cmd_details) 2625 { 2626 struct i40e_aq_desc desc; 2627 struct i40e_aqc_get_veb_parameters_completion *cmd_resp = 2628 (struct i40e_aqc_get_veb_parameters_completion *) 2629 &desc.params.raw; 2630 i40e_status status; 2631 2632 if (veb_seid == 0) 2633 return I40E_ERR_PARAM; 2634 2635 i40e_fill_default_direct_cmd_desc(&desc, 2636 i40e_aqc_opc_get_veb_parameters); 2637 cmd_resp->seid = cpu_to_le16(veb_seid); 2638 2639 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2640 if (status) 2641 goto get_veb_exit; 2642 2643 if (switch_id) 2644 *switch_id = le16_to_cpu(cmd_resp->switch_id); 2645 if (statistic_index) 2646 *statistic_index = le16_to_cpu(cmd_resp->statistic_index); 2647 if (vebs_used) 2648 *vebs_used = le16_to_cpu(cmd_resp->vebs_used); 2649 if (vebs_free) 2650 *vebs_free = le16_to_cpu(cmd_resp->vebs_free); 2651 if (floating) { 2652 u16 flags = le16_to_cpu(cmd_resp->veb_flags); 2653 2654 if (flags & I40E_AQC_ADD_VEB_FLOATING) 2655 *floating = true; 2656 else 2657 *floating = false; 2658 } 2659 2660 get_veb_exit: 2661 return status; 2662 } 2663 2664 /** 2665 * i40e_prepare_add_macvlan 2666 * @mv_list: list of macvlans to be added 2667 * @desc: pointer to AQ descriptor structure 2668 * @count: length of the list 2669 * @seid: VSI for the mac address 2670 * 2671 * Internal helper function that prepares the add macvlan request 2672 * and returns the buffer size. 2673 **/ 2674 static u16 2675 i40e_prepare_add_macvlan(struct i40e_aqc_add_macvlan_element_data *mv_list, 2676 struct i40e_aq_desc *desc, u16 count, u16 seid) 2677 { 2678 struct i40e_aqc_macvlan *cmd = 2679 (struct i40e_aqc_macvlan *)&desc->params.raw; 2680 u16 buf_size; 2681 int i; 2682 2683 buf_size = count * sizeof(*mv_list); 2684 2685 /* prep the rest of the request */ 2686 i40e_fill_default_direct_cmd_desc(desc, i40e_aqc_opc_add_macvlan); 2687 cmd->num_addresses = cpu_to_le16(count); 2688 cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid); 2689 cmd->seid[1] = 0; 2690 cmd->seid[2] = 0; 2691 2692 for (i = 0; i < count; i++) 2693 if (is_multicast_ether_addr(mv_list[i].mac_addr)) 2694 mv_list[i].flags |= 2695 cpu_to_le16(I40E_AQC_MACVLAN_ADD_USE_SHARED_MAC); 2696 2697 desc->flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); 2698 if (buf_size > I40E_AQ_LARGE_BUF) 2699 desc->flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 2700 2701 return buf_size; 2702 } 2703 2704 /** 2705 * i40e_aq_add_macvlan 2706 * @hw: pointer to the hw struct 2707 * @seid: VSI for the mac address 2708 * @mv_list: list of macvlans to be added 2709 * @count: length of the list 2710 * @cmd_details: pointer to command details structure or NULL 2711 * 2712 * Add MAC/VLAN addresses to the HW filtering 2713 **/ 2714 i40e_status 2715 i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid, 2716 struct i40e_aqc_add_macvlan_element_data *mv_list, 2717 u16 count, struct i40e_asq_cmd_details *cmd_details) 2718 { 2719 struct i40e_aq_desc desc; 2720 u16 buf_size; 2721 2722 if (count == 0 || !mv_list || !hw) 2723 return I40E_ERR_PARAM; 2724 2725 buf_size = i40e_prepare_add_macvlan(mv_list, &desc, count, seid); 2726 2727 return i40e_asq_send_command_atomic(hw, &desc, mv_list, buf_size, 2728 cmd_details, true); 2729 } 2730 2731 /** 2732 * i40e_aq_add_macvlan_v2 2733 * @hw: pointer to the hw struct 2734 * @seid: VSI for the mac 
address 2735 * @mv_list: list of macvlans to be added 2736 * @count: length of the list 2737 * @cmd_details: pointer to command details structure or NULL 2738 * @aq_status: pointer to Admin Queue status return value 2739 * 2740 * Add MAC/VLAN addresses to the HW filtering. 2741 * The _v2 version returns the last Admin Queue status in aq_status 2742 * to avoid race conditions in access to hw->aq.asq_last_status. 2743 * It also calls _v2 versions of asq_send_command functions to 2744 * get the aq_status on the stack. 2745 **/ 2746 i40e_status 2747 i40e_aq_add_macvlan_v2(struct i40e_hw *hw, u16 seid, 2748 struct i40e_aqc_add_macvlan_element_data *mv_list, 2749 u16 count, struct i40e_asq_cmd_details *cmd_details, 2750 enum i40e_admin_queue_err *aq_status) 2751 { 2752 struct i40e_aq_desc desc; 2753 u16 buf_size; 2754 2755 if (count == 0 || !mv_list || !hw) 2756 return I40E_ERR_PARAM; 2757 2758 buf_size = i40e_prepare_add_macvlan(mv_list, &desc, count, seid); 2759 2760 return i40e_asq_send_command_atomic_v2(hw, &desc, mv_list, buf_size, 2761 cmd_details, true, aq_status); 2762 } 2763 2764 /** 2765 * i40e_aq_remove_macvlan 2766 * @hw: pointer to the hw struct 2767 * @seid: VSI for the mac address 2768 * @mv_list: list of macvlans to be removed 2769 * @count: length of the list 2770 * @cmd_details: pointer to command details structure or NULL 2771 * 2772 * Remove MAC/VLAN addresses from the HW filtering 2773 **/ 2774 i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid, 2775 struct i40e_aqc_remove_macvlan_element_data *mv_list, 2776 u16 count, struct i40e_asq_cmd_details *cmd_details) 2777 { 2778 struct i40e_aq_desc desc; 2779 struct i40e_aqc_macvlan *cmd = 2780 (struct i40e_aqc_macvlan *)&desc.params.raw; 2781 i40e_status status; 2782 u16 buf_size; 2783 2784 if (count == 0 || !mv_list || !hw) 2785 return I40E_ERR_PARAM; 2786 2787 buf_size = count * sizeof(*mv_list); 2788 2789 /* prep the rest of the request */ 2790 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_macvlan); 2791 cmd->num_addresses = cpu_to_le16(count); 2792 cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid); 2793 cmd->seid[1] = 0; 2794 cmd->seid[2] = 0; 2795 2796 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); 2797 if (buf_size > I40E_AQ_LARGE_BUF) 2798 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 2799 2800 status = i40e_asq_send_command_atomic(hw, &desc, mv_list, buf_size, 2801 cmd_details, true); 2802 2803 return status; 2804 } 2805 2806 /** 2807 * i40e_aq_remove_macvlan_v2 2808 * @hw: pointer to the hw struct 2809 * @seid: VSI for the mac address 2810 * @mv_list: list of macvlans to be removed 2811 * @count: length of the list 2812 * @cmd_details: pointer to command details structure or NULL 2813 * @aq_status: pointer to Admin Queue status return value 2814 * 2815 * Remove MAC/VLAN addresses from the HW filtering. 2816 * The _v2 version returns the last Admin Queue status in aq_status 2817 * to avoid race conditions in access to hw->aq.asq_last_status. 2818 * It also calls _v2 versions of asq_send_command functions to 2819 * get the aq_status on the stack. 
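 *
 * Sketch of a one-element remove list (the MAC/VLAN values and the flag
 * choice are illustrative):
 *
 *	struct i40e_aqc_remove_macvlan_element_data el = {};
 *	enum i40e_admin_queue_err aq_status;
 *
 *	ether_addr_copy(el.mac_addr, mac);
 *	el.vlan_tag = cpu_to_le16(vid);
 *	el.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
 *	status = i40e_aq_remove_macvlan_v2(hw, seid, &el, 1, NULL, &aq_status);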
2820 **/ 2821 i40e_status 2822 i40e_aq_remove_macvlan_v2(struct i40e_hw *hw, u16 seid, 2823 struct i40e_aqc_remove_macvlan_element_data *mv_list, 2824 u16 count, struct i40e_asq_cmd_details *cmd_details, 2825 enum i40e_admin_queue_err *aq_status) 2826 { 2827 struct i40e_aqc_macvlan *cmd; 2828 struct i40e_aq_desc desc; 2829 u16 buf_size; 2830 2831 if (count == 0 || !mv_list || !hw) 2832 return I40E_ERR_PARAM; 2833 2834 buf_size = count * sizeof(*mv_list); 2835 2836 /* prep the rest of the request */ 2837 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_macvlan); 2838 cmd = (struct i40e_aqc_macvlan *)&desc.params.raw; 2839 cmd->num_addresses = cpu_to_le16(count); 2840 cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid); 2841 cmd->seid[1] = 0; 2842 cmd->seid[2] = 0; 2843 2844 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); 2845 if (buf_size > I40E_AQ_LARGE_BUF) 2846 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 2847 2848 return i40e_asq_send_command_atomic_v2(hw, &desc, mv_list, buf_size, 2849 cmd_details, true, aq_status); 2850 } 2851 2852 /** 2853 * i40e_mirrorrule_op - Internal helper function to add/delete mirror rule 2854 * @hw: pointer to the hw struct 2855 * @opcode: AQ opcode for add or delete mirror rule 2856 * @sw_seid: Switch SEID (to which rule refers) 2857 * @rule_type: Rule Type (ingress/egress/VLAN) 2858 * @id: Destination VSI SEID or Rule ID 2859 * @count: length of the list 2860 * @mr_list: list of mirrored VSI SEIDs or VLAN IDs 2861 * @cmd_details: pointer to command details structure or NULL 2862 * @rule_id: Rule ID returned from FW 2863 * @rules_used: Number of rules used in internal switch 2864 * @rules_free: Number of rules free in internal switch 2865 * 2866 * Add/Delete a mirror rule to a specific switch. 
Mirror rules are supported for 2867 * VEBs/VEPA elements only 2868 **/ 2869 static i40e_status i40e_mirrorrule_op(struct i40e_hw *hw, 2870 u16 opcode, u16 sw_seid, u16 rule_type, u16 id, 2871 u16 count, __le16 *mr_list, 2872 struct i40e_asq_cmd_details *cmd_details, 2873 u16 *rule_id, u16 *rules_used, u16 *rules_free) 2874 { 2875 struct i40e_aq_desc desc; 2876 struct i40e_aqc_add_delete_mirror_rule *cmd = 2877 (struct i40e_aqc_add_delete_mirror_rule *)&desc.params.raw; 2878 struct i40e_aqc_add_delete_mirror_rule_completion *resp = 2879 (struct i40e_aqc_add_delete_mirror_rule_completion *)&desc.params.raw; 2880 i40e_status status; 2881 u16 buf_size; 2882 2883 buf_size = count * sizeof(*mr_list); 2884 2885 /* prep the rest of the request */ 2886 i40e_fill_default_direct_cmd_desc(&desc, opcode); 2887 cmd->seid = cpu_to_le16(sw_seid); 2888 cmd->rule_type = cpu_to_le16(rule_type & 2889 I40E_AQC_MIRROR_RULE_TYPE_MASK); 2890 cmd->num_entries = cpu_to_le16(count); 2891 /* Dest VSI for add, rule_id for delete */ 2892 cmd->destination = cpu_to_le16(id); 2893 if (mr_list) { 2894 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | 2895 I40E_AQ_FLAG_RD)); 2896 if (buf_size > I40E_AQ_LARGE_BUF) 2897 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 2898 } 2899 2900 status = i40e_asq_send_command(hw, &desc, mr_list, buf_size, 2901 cmd_details); 2902 if (!status || 2903 hw->aq.asq_last_status == I40E_AQ_RC_ENOSPC) { 2904 if (rule_id) 2905 *rule_id = le16_to_cpu(resp->rule_id); 2906 if (rules_used) 2907 *rules_used = le16_to_cpu(resp->mirror_rules_used); 2908 if (rules_free) 2909 *rules_free = le16_to_cpu(resp->mirror_rules_free); 2910 } 2911 return status; 2912 } 2913 2914 /** 2915 * i40e_aq_add_mirrorrule - add a mirror rule 2916 * @hw: pointer to the hw struct 2917 * @sw_seid: Switch SEID (to which rule refers) 2918 * @rule_type: Rule Type (ingress/egress/VLAN) 2919 * @dest_vsi: SEID of VSI to which packets will be mirrored 2920 * @count: length of the list 2921 * @mr_list: list of mirrored VSI SEIDs or VLAN IDs 2922 * @cmd_details: pointer to command details structure or NULL 2923 * @rule_id: Rule ID returned from FW 2924 * @rules_used: Number of rules used in internal switch 2925 * @rules_free: Number of rules free in internal switch 2926 * 2927 * Add mirror rule. Mirror rules are supported for VEBs or VEPA elements only 2928 **/ 2929 i40e_status i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid, 2930 u16 rule_type, u16 dest_vsi, u16 count, __le16 *mr_list, 2931 struct i40e_asq_cmd_details *cmd_details, 2932 u16 *rule_id, u16 *rules_used, u16 *rules_free) 2933 { 2934 if (!(rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS || 2935 rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS)) { 2936 if (count == 0 || !mr_list) 2937 return I40E_ERR_PARAM; 2938 } 2939 2940 return i40e_mirrorrule_op(hw, i40e_aqc_opc_add_mirror_rule, sw_seid, 2941 rule_type, dest_vsi, count, mr_list, 2942 cmd_details, rule_id, rules_used, rules_free); 2943 } 2944 2945 /** 2946 * i40e_aq_delete_mirrorrule - delete a mirror rule 2947 * @hw: pointer to the hw struct 2948 * @sw_seid: Switch SEID (to which rule refers) 2949 * @rule_type: Rule Type (ingress/egress/VLAN) 2950 * @count: length of the list 2951 * @rule_id: Rule ID that is returned in the receive desc as part of 2952 * add_mirrorrule. 
2953 * @mr_list: list of mirrored VLAN IDs to be removed 2954 * @cmd_details: pointer to command details structure or NULL 2955 * @rules_used: Number of rules used in internal switch 2956 * @rules_free: Number of rules free in internal switch 2957 * 2958 * Delete a mirror rule. Mirror rules are supported for VEBs/VEPA elements only 2959 **/ 2960 i40e_status i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid, 2961 u16 rule_type, u16 rule_id, u16 count, __le16 *mr_list, 2962 struct i40e_asq_cmd_details *cmd_details, 2963 u16 *rules_used, u16 *rules_free) 2964 { 2965 /* Rule ID has to be valid except rule_type: INGRESS VLAN mirroring */ 2966 if (rule_type == I40E_AQC_MIRROR_RULE_TYPE_VLAN) { 2967 /* count and mr_list shall be valid for rule_type INGRESS VLAN 2968 * mirroring. For other rule_type, count and rule_type should 2969 * not matter. 2970 */ 2971 if (count == 0 || !mr_list) 2972 return I40E_ERR_PARAM; 2973 } 2974 2975 return i40e_mirrorrule_op(hw, i40e_aqc_opc_delete_mirror_rule, sw_seid, 2976 rule_type, rule_id, count, mr_list, 2977 cmd_details, NULL, rules_used, rules_free); 2978 } 2979 2980 /** 2981 * i40e_aq_send_msg_to_vf 2982 * @hw: pointer to the hardware structure 2983 * @vfid: VF id to send msg 2984 * @v_opcode: opcodes for VF-PF communication 2985 * @v_retval: return error code 2986 * @msg: pointer to the msg buffer 2987 * @msglen: msg length 2988 * @cmd_details: pointer to command details 2989 * 2990 * send msg to vf 2991 **/ 2992 i40e_status i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid, 2993 u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen, 2994 struct i40e_asq_cmd_details *cmd_details) 2995 { 2996 struct i40e_aq_desc desc; 2997 struct i40e_aqc_pf_vf_message *cmd = 2998 (struct i40e_aqc_pf_vf_message *)&desc.params.raw; 2999 i40e_status status; 3000 3001 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_send_msg_to_vf); 3002 cmd->id = cpu_to_le32(vfid); 3003 desc.cookie_high = cpu_to_le32(v_opcode); 3004 desc.cookie_low = cpu_to_le32(v_retval); 3005 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_SI); 3006 if (msglen) { 3007 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | 3008 I40E_AQ_FLAG_RD)); 3009 if (msglen > I40E_AQ_LARGE_BUF) 3010 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 3011 desc.datalen = cpu_to_le16(msglen); 3012 } 3013 status = i40e_asq_send_command(hw, &desc, msg, msglen, cmd_details); 3014 3015 return status; 3016 } 3017 3018 /** 3019 * i40e_aq_debug_read_register 3020 * @hw: pointer to the hw struct 3021 * @reg_addr: register address 3022 * @reg_val: register value 3023 * @cmd_details: pointer to command details structure or NULL 3024 * 3025 * Read the register using the admin queue commands 3026 **/ 3027 i40e_status i40e_aq_debug_read_register(struct i40e_hw *hw, 3028 u32 reg_addr, u64 *reg_val, 3029 struct i40e_asq_cmd_details *cmd_details) 3030 { 3031 struct i40e_aq_desc desc; 3032 struct i40e_aqc_debug_reg_read_write *cmd_resp = 3033 (struct i40e_aqc_debug_reg_read_write *)&desc.params.raw; 3034 i40e_status status; 3035 3036 if (reg_val == NULL) 3037 return I40E_ERR_PARAM; 3038 3039 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_read_reg); 3040 3041 cmd_resp->address = cpu_to_le32(reg_addr); 3042 3043 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3044 3045 if (!status) { 3046 *reg_val = ((u64)le32_to_cpu(cmd_resp->value_high) << 32) | 3047 (u64)le32_to_cpu(cmd_resp->value_low); 3048 } 3049 3050 return status; 3051 } 3052 3053 /** 3054 * i40e_aq_debug_write_register 3055 * @hw: pointer to the hw struct 
3056 * @reg_addr: register address 3057 * @reg_val: register value 3058 * @cmd_details: pointer to command details structure or NULL 3059 * 3060 * Write to a register using the admin queue commands 3061 **/ 3062 i40e_status i40e_aq_debug_write_register(struct i40e_hw *hw, 3063 u32 reg_addr, u64 reg_val, 3064 struct i40e_asq_cmd_details *cmd_details) 3065 { 3066 struct i40e_aq_desc desc; 3067 struct i40e_aqc_debug_reg_read_write *cmd = 3068 (struct i40e_aqc_debug_reg_read_write *)&desc.params.raw; 3069 i40e_status status; 3070 3071 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_write_reg); 3072 3073 cmd->address = cpu_to_le32(reg_addr); 3074 cmd->value_high = cpu_to_le32((u32)(reg_val >> 32)); 3075 cmd->value_low = cpu_to_le32((u32)(reg_val & 0xFFFFFFFF)); 3076 3077 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3078 3079 return status; 3080 } 3081 3082 /** 3083 * i40e_aq_request_resource 3084 * @hw: pointer to the hw struct 3085 * @resource: resource id 3086 * @access: access type 3087 * @sdp_number: resource number 3088 * @timeout: the maximum time in ms that the driver may hold the resource 3089 * @cmd_details: pointer to command details structure or NULL 3090 * 3091 * requests common resource using the admin queue commands 3092 **/ 3093 i40e_status i40e_aq_request_resource(struct i40e_hw *hw, 3094 enum i40e_aq_resources_ids resource, 3095 enum i40e_aq_resource_access_type access, 3096 u8 sdp_number, u64 *timeout, 3097 struct i40e_asq_cmd_details *cmd_details) 3098 { 3099 struct i40e_aq_desc desc; 3100 struct i40e_aqc_request_resource *cmd_resp = 3101 (struct i40e_aqc_request_resource *)&desc.params.raw; 3102 i40e_status status; 3103 3104 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_request_resource); 3105 3106 cmd_resp->resource_id = cpu_to_le16(resource); 3107 cmd_resp->access_type = cpu_to_le16(access); 3108 cmd_resp->resource_number = cpu_to_le32(sdp_number); 3109 3110 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3111 /* The completion specifies the maximum time in ms that the driver 3112 * may hold the resource in the Timeout field. 3113 * If the resource is held by someone else, the command completes with 3114 * busy return value and the timeout field indicates the maximum time 3115 * the current owner of the resource has to free it. 
3116 */ 3117 if (!status || hw->aq.asq_last_status == I40E_AQ_RC_EBUSY) 3118 *timeout = le32_to_cpu(cmd_resp->timeout); 3119 3120 return status; 3121 } 3122 3123 /** 3124 * i40e_aq_release_resource 3125 * @hw: pointer to the hw struct 3126 * @resource: resource id 3127 * @sdp_number: resource number 3128 * @cmd_details: pointer to command details structure or NULL 3129 * 3130 * release common resource using the admin queue commands 3131 **/ 3132 i40e_status i40e_aq_release_resource(struct i40e_hw *hw, 3133 enum i40e_aq_resources_ids resource, 3134 u8 sdp_number, 3135 struct i40e_asq_cmd_details *cmd_details) 3136 { 3137 struct i40e_aq_desc desc; 3138 struct i40e_aqc_request_resource *cmd = 3139 (struct i40e_aqc_request_resource *)&desc.params.raw; 3140 i40e_status status; 3141 3142 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_release_resource); 3143 3144 cmd->resource_id = cpu_to_le16(resource); 3145 cmd->resource_number = cpu_to_le32(sdp_number); 3146 3147 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3148 3149 return status; 3150 } 3151 3152 /** 3153 * i40e_aq_read_nvm 3154 * @hw: pointer to the hw struct 3155 * @module_pointer: module pointer location in words from the NVM beginning 3156 * @offset: byte offset from the module beginning 3157 * @length: length of the section to be read (in bytes from the offset) 3158 * @data: command buffer (size [bytes] = length) 3159 * @last_command: tells if this is the last command in a series 3160 * @cmd_details: pointer to command details structure or NULL 3161 * 3162 * Read the NVM using the admin queue commands 3163 **/ 3164 i40e_status i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer, 3165 u32 offset, u16 length, void *data, 3166 bool last_command, 3167 struct i40e_asq_cmd_details *cmd_details) 3168 { 3169 struct i40e_aq_desc desc; 3170 struct i40e_aqc_nvm_update *cmd = 3171 (struct i40e_aqc_nvm_update *)&desc.params.raw; 3172 i40e_status status; 3173 3174 /* In offset the highest byte must be zeroed. */ 3175 if (offset & 0xFF000000) { 3176 status = I40E_ERR_PARAM; 3177 goto i40e_aq_read_nvm_exit; 3178 } 3179 3180 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_read); 3181 3182 /* If this is the last command in a series, set the proper flag. 
*/ 3183 if (last_command) 3184 cmd->command_flags |= I40E_AQ_NVM_LAST_CMD; 3185 cmd->module_pointer = module_pointer; 3186 cmd->offset = cpu_to_le32(offset); 3187 cmd->length = cpu_to_le16(length); 3188 3189 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 3190 if (length > I40E_AQ_LARGE_BUF) 3191 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 3192 3193 status = i40e_asq_send_command(hw, &desc, data, length, cmd_details); 3194 3195 i40e_aq_read_nvm_exit: 3196 return status; 3197 } 3198 3199 /** 3200 * i40e_aq_erase_nvm 3201 * @hw: pointer to the hw struct 3202 * @module_pointer: module pointer location in words from the NVM beginning 3203 * @offset: offset in the module (expressed in 4 KB from module's beginning) 3204 * @length: length of the section to be erased (expressed in 4 KB) 3205 * @last_command: tells if this is the last command in a series 3206 * @cmd_details: pointer to command details structure or NULL 3207 * 3208 * Erase the NVM sector using the admin queue commands 3209 **/ 3210 i40e_status i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer, 3211 u32 offset, u16 length, bool last_command, 3212 struct i40e_asq_cmd_details *cmd_details) 3213 { 3214 struct i40e_aq_desc desc; 3215 struct i40e_aqc_nvm_update *cmd = 3216 (struct i40e_aqc_nvm_update *)&desc.params.raw; 3217 i40e_status status; 3218 3219 /* In offset the highest byte must be zeroed. */ 3220 if (offset & 0xFF000000) { 3221 status = I40E_ERR_PARAM; 3222 goto i40e_aq_erase_nvm_exit; 3223 } 3224 3225 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_erase); 3226 3227 /* If this is the last command in a series, set the proper flag. */ 3228 if (last_command) 3229 cmd->command_flags |= I40E_AQ_NVM_LAST_CMD; 3230 cmd->module_pointer = module_pointer; 3231 cmd->offset = cpu_to_le32(offset); 3232 cmd->length = cpu_to_le16(length); 3233 3234 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3235 3236 i40e_aq_erase_nvm_exit: 3237 return status; 3238 } 3239 3240 /** 3241 * i40e_parse_discover_capabilities 3242 * @hw: pointer to the hw struct 3243 * @buff: pointer to a buffer containing device/function capability records 3244 * @cap_count: number of capability records in the list 3245 * @list_type_opc: type of capabilities list to parse 3246 * 3247 * Parse the device/function capabilities list. 
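 *
 * Each record is mapped onto hw->dev_caps or hw->func_caps. For example
 * (values illustrative), a record with id == I40E_AQ_CAP_ID_VF,
 * number == 32 and logical_id == 64 yields p->num_vfs = 32 and
 * p->vf_base_id = 64 in the switch below.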
3248 **/ 3249 static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff, 3250 u32 cap_count, 3251 enum i40e_admin_queue_opc list_type_opc) 3252 { 3253 struct i40e_aqc_list_capabilities_element_resp *cap; 3254 u32 valid_functions, num_functions; 3255 u32 number, logical_id, phys_id; 3256 struct i40e_hw_capabilities *p; 3257 u16 id, ocp_cfg_word0; 3258 i40e_status status; 3259 u8 major_rev; 3260 u32 i = 0; 3261 3262 cap = (struct i40e_aqc_list_capabilities_element_resp *) buff; 3263 3264 if (list_type_opc == i40e_aqc_opc_list_dev_capabilities) 3265 p = &hw->dev_caps; 3266 else if (list_type_opc == i40e_aqc_opc_list_func_capabilities) 3267 p = &hw->func_caps; 3268 else 3269 return; 3270 3271 for (i = 0; i < cap_count; i++, cap++) { 3272 id = le16_to_cpu(cap->id); 3273 number = le32_to_cpu(cap->number); 3274 logical_id = le32_to_cpu(cap->logical_id); 3275 phys_id = le32_to_cpu(cap->phys_id); 3276 major_rev = cap->major_rev; 3277 3278 switch (id) { 3279 case I40E_AQ_CAP_ID_SWITCH_MODE: 3280 p->switch_mode = number; 3281 break; 3282 case I40E_AQ_CAP_ID_MNG_MODE: 3283 p->management_mode = number; 3284 if (major_rev > 1) { 3285 p->mng_protocols_over_mctp = logical_id; 3286 i40e_debug(hw, I40E_DEBUG_INIT, 3287 "HW Capability: Protocols over MCTP = %d\n", 3288 p->mng_protocols_over_mctp); 3289 } else { 3290 p->mng_protocols_over_mctp = 0; 3291 } 3292 break; 3293 case I40E_AQ_CAP_ID_NPAR_ACTIVE: 3294 p->npar_enable = number; 3295 break; 3296 case I40E_AQ_CAP_ID_OS2BMC_CAP: 3297 p->os2bmc = number; 3298 break; 3299 case I40E_AQ_CAP_ID_FUNCTIONS_VALID: 3300 p->valid_functions = number; 3301 break; 3302 case I40E_AQ_CAP_ID_SRIOV: 3303 if (number == 1) 3304 p->sr_iov_1_1 = true; 3305 break; 3306 case I40E_AQ_CAP_ID_VF: 3307 p->num_vfs = number; 3308 p->vf_base_id = logical_id; 3309 break; 3310 case I40E_AQ_CAP_ID_VMDQ: 3311 if (number == 1) 3312 p->vmdq = true; 3313 break; 3314 case I40E_AQ_CAP_ID_8021QBG: 3315 if (number == 1) 3316 p->evb_802_1_qbg = true; 3317 break; 3318 case I40E_AQ_CAP_ID_8021QBR: 3319 if (number == 1) 3320 p->evb_802_1_qbh = true; 3321 break; 3322 case I40E_AQ_CAP_ID_VSI: 3323 p->num_vsis = number; 3324 break; 3325 case I40E_AQ_CAP_ID_DCB: 3326 if (number == 1) { 3327 p->dcb = true; 3328 p->enabled_tcmap = logical_id; 3329 p->maxtc = phys_id; 3330 } 3331 break; 3332 case I40E_AQ_CAP_ID_FCOE: 3333 if (number == 1) 3334 p->fcoe = true; 3335 break; 3336 case I40E_AQ_CAP_ID_ISCSI: 3337 if (number == 1) 3338 p->iscsi = true; 3339 break; 3340 case I40E_AQ_CAP_ID_RSS: 3341 p->rss = true; 3342 p->rss_table_size = number; 3343 p->rss_table_entry_width = logical_id; 3344 break; 3345 case I40E_AQ_CAP_ID_RXQ: 3346 p->num_rx_qp = number; 3347 p->base_queue = phys_id; 3348 break; 3349 case I40E_AQ_CAP_ID_TXQ: 3350 p->num_tx_qp = number; 3351 p->base_queue = phys_id; 3352 break; 3353 case I40E_AQ_CAP_ID_MSIX: 3354 p->num_msix_vectors = number; 3355 i40e_debug(hw, I40E_DEBUG_INIT, 3356 "HW Capability: MSIX vector count = %d\n", 3357 p->num_msix_vectors); 3358 break; 3359 case I40E_AQ_CAP_ID_VF_MSIX: 3360 p->num_msix_vectors_vf = number; 3361 break; 3362 case I40E_AQ_CAP_ID_FLEX10: 3363 if (major_rev == 1) { 3364 if (number == 1) { 3365 p->flex10_enable = true; 3366 p->flex10_capable = true; 3367 } 3368 } else { 3369 /* Capability revision >= 2 */ 3370 if (number & 1) 3371 p->flex10_enable = true; 3372 if (number & 2) 3373 p->flex10_capable = true; 3374 } 3375 p->flex10_mode = logical_id; 3376 p->flex10_status = phys_id; 3377 break; 3378 case I40E_AQ_CAP_ID_CEM: 3379 if (number == 
1) 3380 p->mgmt_cem = true; 3381 break; 3382 case I40E_AQ_CAP_ID_IWARP: 3383 if (number == 1) 3384 p->iwarp = true; 3385 break; 3386 case I40E_AQ_CAP_ID_LED: 3387 if (phys_id < I40E_HW_CAP_MAX_GPIO) 3388 p->led[phys_id] = true; 3389 break; 3390 case I40E_AQ_CAP_ID_SDP: 3391 if (phys_id < I40E_HW_CAP_MAX_GPIO) 3392 p->sdp[phys_id] = true; 3393 break; 3394 case I40E_AQ_CAP_ID_MDIO: 3395 if (number == 1) { 3396 p->mdio_port_num = phys_id; 3397 p->mdio_port_mode = logical_id; 3398 } 3399 break; 3400 case I40E_AQ_CAP_ID_1588: 3401 if (number == 1) 3402 p->ieee_1588 = true; 3403 break; 3404 case I40E_AQ_CAP_ID_FLOW_DIRECTOR: 3405 p->fd = true; 3406 p->fd_filters_guaranteed = number; 3407 p->fd_filters_best_effort = logical_id; 3408 break; 3409 case I40E_AQ_CAP_ID_WSR_PROT: 3410 p->wr_csr_prot = (u64)number; 3411 p->wr_csr_prot |= (u64)logical_id << 32; 3412 break; 3413 case I40E_AQ_CAP_ID_NVM_MGMT: 3414 if (number & I40E_NVM_MGMT_SEC_REV_DISABLED) 3415 p->sec_rev_disabled = true; 3416 if (number & I40E_NVM_MGMT_UPDATE_DISABLED) 3417 p->update_disabled = true; 3418 break; 3419 default: 3420 break; 3421 } 3422 } 3423 3424 if (p->fcoe) 3425 i40e_debug(hw, I40E_DEBUG_ALL, "device is FCoE capable\n"); 3426 3427 /* Software override ensuring FCoE is disabled if npar or mfp 3428 * mode because it is not supported in these modes. 3429 */ 3430 if (p->npar_enable || p->flex10_enable) 3431 p->fcoe = false; 3432 3433 /* count the enabled ports (aka the "not disabled" ports) */ 3434 hw->num_ports = 0; 3435 for (i = 0; i < 4; i++) { 3436 u32 port_cfg_reg = I40E_PRTGEN_CNF + (4 * i); 3437 u64 port_cfg = 0; 3438 3439 /* use AQ read to get the physical register offset instead 3440 * of the port relative offset 3441 */ 3442 i40e_aq_debug_read_register(hw, port_cfg_reg, &port_cfg, NULL); 3443 if (!(port_cfg & I40E_PRTGEN_CNF_PORT_DIS_MASK)) 3444 hw->num_ports++; 3445 } 3446 3447 /* OCP cards case: if a mezz is removed the Ethernet port is at 3448 * disabled state in PRTGEN_CNF register. Additional NVM read is 3449 * needed in order to check if we are dealing with OCP card. 3450 * Those cards have 4 PFs at minimum, so using PRTGEN_CNF for counting 3451 * physical ports results in wrong partition id calculation and thus 3452 * not supporting WoL. 
3453 */ 3454 if (hw->mac.type == I40E_MAC_X722) { 3455 if (!i40e_acquire_nvm(hw, I40E_RESOURCE_READ)) { 3456 status = i40e_aq_read_nvm(hw, I40E_SR_EMP_MODULE_PTR, 3457 2 * I40E_SR_OCP_CFG_WORD0, 3458 sizeof(ocp_cfg_word0), 3459 &ocp_cfg_word0, true, NULL); 3460 if (!status && 3461 (ocp_cfg_word0 & I40E_SR_OCP_ENABLED)) 3462 hw->num_ports = 4; 3463 i40e_release_nvm(hw); 3464 } 3465 } 3466 3467 valid_functions = p->valid_functions; 3468 num_functions = 0; 3469 while (valid_functions) { 3470 if (valid_functions & 1) 3471 num_functions++; 3472 valid_functions >>= 1; 3473 } 3474 3475 /* partition id is 1-based, and functions are evenly spread 3476 * across the ports as partitions 3477 */ 3478 if (hw->num_ports != 0) { 3479 hw->partition_id = (hw->pf_id / hw->num_ports) + 1; 3480 hw->num_partitions = num_functions / hw->num_ports; 3481 } 3482 3483 /* additional HW specific goodies that might 3484 * someday be HW version specific 3485 */ 3486 p->rx_buf_chain_len = I40E_MAX_CHAINED_RX_BUFFERS; 3487 } 3488 3489 /** 3490 * i40e_aq_discover_capabilities 3491 * @hw: pointer to the hw struct 3492 * @buff: a virtual buffer to hold the capabilities 3493 * @buff_size: Size of the virtual buffer 3494 * @data_size: Size of the returned data, or buff size needed if AQ err==ENOMEM 3495 * @list_type_opc: capabilities type to discover - pass in the command opcode 3496 * @cmd_details: pointer to command details structure or NULL 3497 * 3498 * Get the device capabilities descriptions from the firmware 3499 **/ 3500 i40e_status i40e_aq_discover_capabilities(struct i40e_hw *hw, 3501 void *buff, u16 buff_size, u16 *data_size, 3502 enum i40e_admin_queue_opc list_type_opc, 3503 struct i40e_asq_cmd_details *cmd_details) 3504 { 3505 struct i40e_aqc_list_capabilites *cmd; 3506 struct i40e_aq_desc desc; 3507 i40e_status status = 0; 3508 3509 cmd = (struct i40e_aqc_list_capabilites *)&desc.params.raw; 3510 3511 if (list_type_opc != i40e_aqc_opc_list_func_capabilities && 3512 list_type_opc != i40e_aqc_opc_list_dev_capabilities) { 3513 status = I40E_ERR_PARAM; 3514 goto exit; 3515 } 3516 3517 i40e_fill_default_direct_cmd_desc(&desc, list_type_opc); 3518 3519 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 3520 if (buff_size > I40E_AQ_LARGE_BUF) 3521 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 3522 3523 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); 3524 *data_size = le16_to_cpu(desc.datalen); 3525 3526 if (status) 3527 goto exit; 3528 3529 i40e_parse_discover_capabilities(hw, buff, le32_to_cpu(cmd->count), 3530 list_type_opc); 3531 3532 exit: 3533 return status; 3534 } 3535 3536 /** 3537 * i40e_aq_update_nvm 3538 * @hw: pointer to the hw struct 3539 * @module_pointer: module pointer location in words from the NVM beginning 3540 * @offset: byte offset from the module beginning 3541 * @length: length of the section to be written (in bytes from the offset) 3542 * @data: command buffer (size [bytes] = length) 3543 * @last_command: tells if this is the last command in a series 3544 * @preservation_flags: Preservation mode flags 3545 * @cmd_details: pointer to command details structure or NULL 3546 * 3547 * Update the NVM using the admin queue commands 3548 **/ 3549 i40e_status i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer, 3550 u32 offset, u16 length, void *data, 3551 bool last_command, u8 preservation_flags, 3552 struct i40e_asq_cmd_details *cmd_details) 3553 { 3554 struct i40e_aq_desc desc; 3555 struct i40e_aqc_nvm_update *cmd = 3556 (struct i40e_aqc_nvm_update *)&desc.params.raw; 
3557 i40e_status status; 3558 3559 /* In offset the highest byte must be zeroed. */ 3560 if (offset & 0xFF000000) { 3561 status = I40E_ERR_PARAM; 3562 goto i40e_aq_update_nvm_exit; 3563 } 3564 3565 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_update); 3566 3567 /* If this is the last command in a series, set the proper flag. */ 3568 if (last_command) 3569 cmd->command_flags |= I40E_AQ_NVM_LAST_CMD; 3570 if (hw->mac.type == I40E_MAC_X722) { 3571 if (preservation_flags == I40E_NVM_PRESERVATION_FLAGS_SELECTED) 3572 cmd->command_flags |= 3573 (I40E_AQ_NVM_PRESERVATION_FLAGS_SELECTED << 3574 I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT); 3575 else if (preservation_flags == I40E_NVM_PRESERVATION_FLAGS_ALL) 3576 cmd->command_flags |= 3577 (I40E_AQ_NVM_PRESERVATION_FLAGS_ALL << 3578 I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT); 3579 } 3580 cmd->module_pointer = module_pointer; 3581 cmd->offset = cpu_to_le32(offset); 3582 cmd->length = cpu_to_le16(length); 3583 3584 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); 3585 if (length > I40E_AQ_LARGE_BUF) 3586 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 3587 3588 status = i40e_asq_send_command(hw, &desc, data, length, cmd_details); 3589 3590 i40e_aq_update_nvm_exit: 3591 return status; 3592 } 3593 3594 /** 3595 * i40e_aq_rearrange_nvm 3596 * @hw: pointer to the hw struct 3597 * @rearrange_nvm: defines direction of rearrangement 3598 * @cmd_details: pointer to command details structure or NULL 3599 * 3600 * Rearrange NVM structure, available only for transition FW 3601 **/ 3602 i40e_status i40e_aq_rearrange_nvm(struct i40e_hw *hw, 3603 u8 rearrange_nvm, 3604 struct i40e_asq_cmd_details *cmd_details) 3605 { 3606 struct i40e_aqc_nvm_update *cmd; 3607 i40e_status status; 3608 struct i40e_aq_desc desc; 3609 3610 cmd = (struct i40e_aqc_nvm_update *)&desc.params.raw; 3611 3612 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_update); 3613 3614 rearrange_nvm &= (I40E_AQ_NVM_REARRANGE_TO_FLAT | 3615 I40E_AQ_NVM_REARRANGE_TO_STRUCT); 3616 3617 if (!rearrange_nvm) { 3618 status = I40E_ERR_PARAM; 3619 goto i40e_aq_rearrange_nvm_exit; 3620 } 3621 3622 cmd->command_flags |= rearrange_nvm; 3623 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3624 3625 i40e_aq_rearrange_nvm_exit: 3626 return status; 3627 } 3628 3629 /** 3630 * i40e_aq_get_lldp_mib 3631 * @hw: pointer to the hw struct 3632 * @bridge_type: type of bridge requested 3633 * @mib_type: Local, Remote or both Local and Remote MIBs 3634 * @buff: pointer to a user supplied buffer to store the MIB block 3635 * @buff_size: size of the buffer (in bytes) 3636 * @local_len : length of the returned Local LLDP MIB 3637 * @remote_len: length of the returned Remote LLDP MIB 3638 * @cmd_details: pointer to command details structure or NULL 3639 * 3640 * Requests the complete LLDP MIB (entire packet). 
3641 **/ 3642 i40e_status i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type, 3643 u8 mib_type, void *buff, u16 buff_size, 3644 u16 *local_len, u16 *remote_len, 3645 struct i40e_asq_cmd_details *cmd_details) 3646 { 3647 struct i40e_aq_desc desc; 3648 struct i40e_aqc_lldp_get_mib *cmd = 3649 (struct i40e_aqc_lldp_get_mib *)&desc.params.raw; 3650 struct i40e_aqc_lldp_get_mib *resp = 3651 (struct i40e_aqc_lldp_get_mib *)&desc.params.raw; 3652 i40e_status status; 3653 3654 if (buff_size == 0 || !buff) 3655 return I40E_ERR_PARAM; 3656 3657 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_get_mib); 3658 /* Indirect Command */ 3659 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 3660 3661 cmd->type = mib_type & I40E_AQ_LLDP_MIB_TYPE_MASK; 3662 cmd->type |= ((bridge_type << I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) & 3663 I40E_AQ_LLDP_BRIDGE_TYPE_MASK); 3664 3665 desc.datalen = cpu_to_le16(buff_size); 3666 3667 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 3668 if (buff_size > I40E_AQ_LARGE_BUF) 3669 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 3670 3671 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); 3672 if (!status) { 3673 if (local_len != NULL) 3674 *local_len = le16_to_cpu(resp->local_len); 3675 if (remote_len != NULL) 3676 *remote_len = le16_to_cpu(resp->remote_len); 3677 } 3678 3679 return status; 3680 } 3681 3682 /** 3683 * i40e_aq_set_lldp_mib - Set the LLDP MIB 3684 * @hw: pointer to the hw struct 3685 * @mib_type: Local, Remote or both Local and Remote MIBs 3686 * @buff: pointer to a user supplied buffer to store the MIB block 3687 * @buff_size: size of the buffer (in bytes) 3688 * @cmd_details: pointer to command details structure or NULL 3689 * 3690 * Set the LLDP MIB. 3691 **/ 3692 enum i40e_status_code 3693 i40e_aq_set_lldp_mib(struct i40e_hw *hw, 3694 u8 mib_type, void *buff, u16 buff_size, 3695 struct i40e_asq_cmd_details *cmd_details) 3696 { 3697 struct i40e_aqc_lldp_set_local_mib *cmd; 3698 enum i40e_status_code status; 3699 struct i40e_aq_desc desc; 3700 3701 cmd = (struct i40e_aqc_lldp_set_local_mib *)&desc.params.raw; 3702 if (buff_size == 0 || !buff) 3703 return I40E_ERR_PARAM; 3704 3705 i40e_fill_default_direct_cmd_desc(&desc, 3706 i40e_aqc_opc_lldp_set_local_mib); 3707 /* Indirect Command */ 3708 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); 3709 if (buff_size > I40E_AQ_LARGE_BUF) 3710 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 3711 desc.datalen = cpu_to_le16(buff_size); 3712 3713 cmd->type = mib_type; 3714 cmd->length = cpu_to_le16(buff_size); 3715 cmd->address_high = cpu_to_le32(upper_32_bits((uintptr_t)buff)); 3716 cmd->address_low = cpu_to_le32(lower_32_bits((uintptr_t)buff)); 3717 3718 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); 3719 return status; 3720 } 3721 3722 /** 3723 * i40e_aq_cfg_lldp_mib_change_event 3724 * @hw: pointer to the hw struct 3725 * @enable_update: Enable or Disable event posting 3726 * @cmd_details: pointer to command details structure or NULL 3727 * 3728 * Enable or Disable posting of an event on ARQ when LLDP MIB 3729 * associated with the interface changes 3730 **/ 3731 i40e_status i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw, 3732 bool enable_update, 3733 struct i40e_asq_cmd_details *cmd_details) 3734 { 3735 struct i40e_aq_desc desc; 3736 struct i40e_aqc_lldp_update_mib *cmd = 3737 (struct i40e_aqc_lldp_update_mib *)&desc.params.raw; 3738 i40e_status status; 3739 3740 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_update_mib); 
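	/* Sending this opcode with no flags requests that MIB change events
	 * be posted on the ARQ; only the explicit DISABLE flag below turns
	 * the notifications off.
	 */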

	if (!enable_update)
		cmd->command |= I40E_AQ_LLDP_MIB_UPDATE_DISABLE;

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	return status;
}

/**
 * i40e_aq_restore_lldp
 * @hw: pointer to the hw struct
 * @setting: pointer to factory setting variable or NULL
 * @restore: True if factory settings should be restored
 * @cmd_details: pointer to command details structure or NULL
 *
 * Restore LLDP Agent factory settings if @restore is set to True. Otherwise,
 * only the factory setting is returned in the AQ response.
 **/
enum i40e_status_code
i40e_aq_restore_lldp(struct i40e_hw *hw, u8 *setting, bool restore,
		     struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_lldp_restore *cmd =
		(struct i40e_aqc_lldp_restore *)&desc.params.raw;
	i40e_status status;

	if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT)) {
		i40e_debug(hw, I40E_DEBUG_ALL,
			   "Restore LLDP not supported by current FW version.\n");
		return I40E_ERR_DEVICE_NOT_SUPPORTED;
	}

	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_restore);

	if (restore)
		cmd->command |= I40E_AQ_LLDP_AGENT_RESTORE;

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	if (setting)
		*setting = cmd->command & 1;

	return status;
}

/**
 * i40e_aq_stop_lldp
 * @hw: pointer to the hw struct
 * @shutdown_agent: True if the LLDP Agent needs to be shut down
 * @persist: True if stop of LLDP should be persistent across power cycles
 * @cmd_details: pointer to command details structure or NULL
 *
 * Stop or shut down the embedded LLDP Agent
 **/
i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
			      bool persist,
			      struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_lldp_stop *cmd =
		(struct i40e_aqc_lldp_stop *)&desc.params.raw;
	i40e_status status;

	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_stop);

	if (shutdown_agent)
		cmd->command |= I40E_AQ_LLDP_AGENT_SHUTDOWN;

	if (persist) {
		if (hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT)
			cmd->command |= I40E_AQ_LLDP_AGENT_STOP_PERSIST;
		else
			i40e_debug(hw, I40E_DEBUG_ALL,
				   "Persistent Stop LLDP not supported by current FW version.\n");
	}

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	return status;
}

/**
 * i40e_aq_start_lldp
 * @hw: pointer to the hw struct
 * @persist: True if start of LLDP should be persistent across power cycles
 * @cmd_details: pointer to command details structure or NULL
 *
 * Start the embedded LLDP Agent on all ports.
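 * If @persist is requested on firmware that lacks persistent LLDP support,
 * the agent is still started, just without the persist flag (a debug
 * message notes the limitation).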
3831 **/ 3832 i40e_status i40e_aq_start_lldp(struct i40e_hw *hw, bool persist, 3833 struct i40e_asq_cmd_details *cmd_details) 3834 { 3835 struct i40e_aq_desc desc; 3836 struct i40e_aqc_lldp_start *cmd = 3837 (struct i40e_aqc_lldp_start *)&desc.params.raw; 3838 i40e_status status; 3839 3840 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_start); 3841 3842 cmd->command = I40E_AQ_LLDP_AGENT_START; 3843 3844 if (persist) { 3845 if (hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT) 3846 cmd->command |= I40E_AQ_LLDP_AGENT_START_PERSIST; 3847 else 3848 i40e_debug(hw, I40E_DEBUG_ALL, 3849 "Persistent Start LLDP not supported by current FW version.\n"); 3850 } 3851 3852 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3853 3854 return status; 3855 } 3856 3857 /** 3858 * i40e_aq_set_dcb_parameters 3859 * @hw: pointer to the hw struct 3860 * @cmd_details: pointer to command details structure or NULL 3861 * @dcb_enable: True if DCB configuration needs to be applied 3862 * 3863 **/ 3864 enum i40e_status_code 3865 i40e_aq_set_dcb_parameters(struct i40e_hw *hw, bool dcb_enable, 3866 struct i40e_asq_cmd_details *cmd_details) 3867 { 3868 struct i40e_aq_desc desc; 3869 struct i40e_aqc_set_dcb_parameters *cmd = 3870 (struct i40e_aqc_set_dcb_parameters *)&desc.params.raw; 3871 i40e_status status; 3872 3873 if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE)) 3874 return I40E_ERR_DEVICE_NOT_SUPPORTED; 3875 3876 i40e_fill_default_direct_cmd_desc(&desc, 3877 i40e_aqc_opc_set_dcb_parameters); 3878 3879 if (dcb_enable) { 3880 cmd->valid_flags = I40E_DCB_VALID; 3881 cmd->command = I40E_AQ_DCB_SET_AGENT; 3882 } 3883 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3884 3885 return status; 3886 } 3887 3888 /** 3889 * i40e_aq_get_cee_dcb_config 3890 * @hw: pointer to the hw struct 3891 * @buff: response buffer that stores CEE operational configuration 3892 * @buff_size: size of the buffer passed 3893 * @cmd_details: pointer to command details structure or NULL 3894 * 3895 * Get CEE DCBX mode operational configuration from firmware 3896 **/ 3897 i40e_status i40e_aq_get_cee_dcb_config(struct i40e_hw *hw, 3898 void *buff, u16 buff_size, 3899 struct i40e_asq_cmd_details *cmd_details) 3900 { 3901 struct i40e_aq_desc desc; 3902 i40e_status status; 3903 3904 if (buff_size == 0 || !buff) 3905 return I40E_ERR_PARAM; 3906 3907 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_cee_dcb_cfg); 3908 3909 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 3910 status = i40e_asq_send_command(hw, &desc, (void *)buff, buff_size, 3911 cmd_details); 3912 3913 return status; 3914 } 3915 3916 /** 3917 * i40e_aq_add_udp_tunnel 3918 * @hw: pointer to the hw struct 3919 * @udp_port: the UDP port to add in Host byte order 3920 * @protocol_index: protocol index type 3921 * @filter_index: pointer to filter index 3922 * @cmd_details: pointer to command details structure or NULL 3923 * 3924 * Note: Firmware expects the udp_port value to be in Little Endian format, 3925 * and this function will call cpu_to_le16 to convert from Host byte order to 3926 * Little Endian order. 
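 * For example, the IANA-assigned VXLAN port is passed here simply as 4789
 * in host order; the cpu_to_le16() below performs the byte swap for the
 * descriptor.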
3927 **/ 3928 i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw, 3929 u16 udp_port, u8 protocol_index, 3930 u8 *filter_index, 3931 struct i40e_asq_cmd_details *cmd_details) 3932 { 3933 struct i40e_aq_desc desc; 3934 struct i40e_aqc_add_udp_tunnel *cmd = 3935 (struct i40e_aqc_add_udp_tunnel *)&desc.params.raw; 3936 struct i40e_aqc_del_udp_tunnel_completion *resp = 3937 (struct i40e_aqc_del_udp_tunnel_completion *)&desc.params.raw; 3938 i40e_status status; 3939 3940 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_udp_tunnel); 3941 3942 cmd->udp_port = cpu_to_le16(udp_port); 3943 cmd->protocol_type = protocol_index; 3944 3945 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3946 3947 if (!status && filter_index) 3948 *filter_index = resp->index; 3949 3950 return status; 3951 } 3952 3953 /** 3954 * i40e_aq_del_udp_tunnel 3955 * @hw: pointer to the hw struct 3956 * @index: filter index 3957 * @cmd_details: pointer to command details structure or NULL 3958 **/ 3959 i40e_status i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index, 3960 struct i40e_asq_cmd_details *cmd_details) 3961 { 3962 struct i40e_aq_desc desc; 3963 struct i40e_aqc_remove_udp_tunnel *cmd = 3964 (struct i40e_aqc_remove_udp_tunnel *)&desc.params.raw; 3965 i40e_status status; 3966 3967 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_del_udp_tunnel); 3968 3969 cmd->index = index; 3970 3971 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3972 3973 return status; 3974 } 3975 3976 /** 3977 * i40e_aq_delete_element - Delete switch element 3978 * @hw: pointer to the hw struct 3979 * @seid: the SEID to delete from the switch 3980 * @cmd_details: pointer to command details structure or NULL 3981 * 3982 * This deletes a switch element from the switch. 3983 **/ 3984 i40e_status i40e_aq_delete_element(struct i40e_hw *hw, u16 seid, 3985 struct i40e_asq_cmd_details *cmd_details) 3986 { 3987 struct i40e_aq_desc desc; 3988 struct i40e_aqc_switch_seid *cmd = 3989 (struct i40e_aqc_switch_seid *)&desc.params.raw; 3990 i40e_status status; 3991 3992 if (seid == 0) 3993 return I40E_ERR_PARAM; 3994 3995 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_delete_element); 3996 3997 cmd->seid = cpu_to_le16(seid); 3998 3999 status = i40e_asq_send_command_atomic(hw, &desc, NULL, 0, 4000 cmd_details, true); 4001 4002 return status; 4003 } 4004 4005 /** 4006 * i40e_aq_dcb_updated - DCB Updated Command 4007 * @hw: pointer to the hw struct 4008 * @cmd_details: pointer to command details structure or NULL 4009 * 4010 * EMP will return when the shared RPB settings have been 4011 * recomputed and modified. The retval field in the descriptor 4012 * will be set to 0 when RPB is modified. 
4013 **/ 4014 i40e_status i40e_aq_dcb_updated(struct i40e_hw *hw, 4015 struct i40e_asq_cmd_details *cmd_details) 4016 { 4017 struct i40e_aq_desc desc; 4018 i40e_status status; 4019 4020 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_dcb_updated); 4021 4022 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 4023 4024 return status; 4025 } 4026 4027 /** 4028 * i40e_aq_tx_sched_cmd - generic Tx scheduler AQ command handler 4029 * @hw: pointer to the hw struct 4030 * @seid: seid for the physical port/switching component/vsi 4031 * @buff: Indirect buffer to hold data parameters and response 4032 * @buff_size: Indirect buffer size 4033 * @opcode: Tx scheduler AQ command opcode 4034 * @cmd_details: pointer to command details structure or NULL 4035 * 4036 * Generic command handler for Tx scheduler AQ commands 4037 **/ 4038 static i40e_status i40e_aq_tx_sched_cmd(struct i40e_hw *hw, u16 seid, 4039 void *buff, u16 buff_size, 4040 enum i40e_admin_queue_opc opcode, 4041 struct i40e_asq_cmd_details *cmd_details) 4042 { 4043 struct i40e_aq_desc desc; 4044 struct i40e_aqc_tx_sched_ind *cmd = 4045 (struct i40e_aqc_tx_sched_ind *)&desc.params.raw; 4046 i40e_status status; 4047 bool cmd_param_flag = false; 4048 4049 switch (opcode) { 4050 case i40e_aqc_opc_configure_vsi_ets_sla_bw_limit: 4051 case i40e_aqc_opc_configure_vsi_tc_bw: 4052 case i40e_aqc_opc_enable_switching_comp_ets: 4053 case i40e_aqc_opc_modify_switching_comp_ets: 4054 case i40e_aqc_opc_disable_switching_comp_ets: 4055 case i40e_aqc_opc_configure_switching_comp_ets_bw_limit: 4056 case i40e_aqc_opc_configure_switching_comp_bw_config: 4057 cmd_param_flag = true; 4058 break; 4059 case i40e_aqc_opc_query_vsi_bw_config: 4060 case i40e_aqc_opc_query_vsi_ets_sla_config: 4061 case i40e_aqc_opc_query_switching_comp_ets_config: 4062 case i40e_aqc_opc_query_port_ets_config: 4063 case i40e_aqc_opc_query_switching_comp_bw_config: 4064 cmd_param_flag = false; 4065 break; 4066 default: 4067 return I40E_ERR_PARAM; 4068 } 4069 4070 i40e_fill_default_direct_cmd_desc(&desc, opcode); 4071 4072 /* Indirect command */ 4073 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 4074 if (cmd_param_flag) 4075 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD); 4076 if (buff_size > I40E_AQ_LARGE_BUF) 4077 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 4078 4079 desc.datalen = cpu_to_le16(buff_size); 4080 4081 cmd->vsi_seid = cpu_to_le16(seid); 4082 4083 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); 4084 4085 return status; 4086 } 4087 4088 /** 4089 * i40e_aq_config_vsi_bw_limit - Configure VSI BW Limit 4090 * @hw: pointer to the hw struct 4091 * @seid: VSI seid 4092 * @credit: BW limit credits (0 = disabled) 4093 * @max_credit: Max BW limit credits 4094 * @cmd_details: pointer to command details structure or NULL 4095 **/ 4096 i40e_status i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw, 4097 u16 seid, u16 credit, u8 max_credit, 4098 struct i40e_asq_cmd_details *cmd_details) 4099 { 4100 struct i40e_aq_desc desc; 4101 struct i40e_aqc_configure_vsi_bw_limit *cmd = 4102 (struct i40e_aqc_configure_vsi_bw_limit *)&desc.params.raw; 4103 i40e_status status; 4104 4105 i40e_fill_default_direct_cmd_desc(&desc, 4106 i40e_aqc_opc_configure_vsi_bw_limit); 4107 4108 cmd->vsi_seid = cpu_to_le16(seid); 4109 cmd->credit = cpu_to_le16(credit); 4110 cmd->max_credit = max_credit; 4111 4112 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 4113 4114 return status; 4115 } 4116 4117 /** 4118 * i40e_aq_config_vsi_tc_bw - Config VSI BW 
Allocation per TC 4119 * @hw: pointer to the hw struct 4120 * @seid: VSI seid 4121 * @bw_data: Buffer holding enabled TCs, relative TC BW limit/credits 4122 * @cmd_details: pointer to command details structure or NULL 4123 **/ 4124 i40e_status i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw, 4125 u16 seid, 4126 struct i40e_aqc_configure_vsi_tc_bw_data *bw_data, 4127 struct i40e_asq_cmd_details *cmd_details) 4128 { 4129 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), 4130 i40e_aqc_opc_configure_vsi_tc_bw, 4131 cmd_details); 4132 } 4133 4134 /** 4135 * i40e_aq_config_switch_comp_ets - Enable/Disable/Modify ETS on the port 4136 * @hw: pointer to the hw struct 4137 * @seid: seid of the switching component connected to Physical Port 4138 * @ets_data: Buffer holding ETS parameters 4139 * @opcode: Tx scheduler AQ command opcode 4140 * @cmd_details: pointer to command details structure or NULL 4141 **/ 4142 i40e_status i40e_aq_config_switch_comp_ets(struct i40e_hw *hw, 4143 u16 seid, 4144 struct i40e_aqc_configure_switching_comp_ets_data *ets_data, 4145 enum i40e_admin_queue_opc opcode, 4146 struct i40e_asq_cmd_details *cmd_details) 4147 { 4148 return i40e_aq_tx_sched_cmd(hw, seid, (void *)ets_data, 4149 sizeof(*ets_data), opcode, cmd_details); 4150 } 4151 4152 /** 4153 * i40e_aq_config_switch_comp_bw_config - Config Switch comp BW Alloc per TC 4154 * @hw: pointer to the hw struct 4155 * @seid: seid of the switching component 4156 * @bw_data: Buffer holding enabled TCs, relative/absolute TC BW limit/credits 4157 * @cmd_details: pointer to command details structure or NULL 4158 **/ 4159 i40e_status i40e_aq_config_switch_comp_bw_config(struct i40e_hw *hw, 4160 u16 seid, 4161 struct i40e_aqc_configure_switching_comp_bw_config_data *bw_data, 4162 struct i40e_asq_cmd_details *cmd_details) 4163 { 4164 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), 4165 i40e_aqc_opc_configure_switching_comp_bw_config, 4166 cmd_details); 4167 } 4168 4169 /** 4170 * i40e_aq_query_vsi_bw_config - Query VSI BW configuration 4171 * @hw: pointer to the hw struct 4172 * @seid: seid of the VSI 4173 * @bw_data: Buffer to hold VSI BW configuration 4174 * @cmd_details: pointer to command details structure or NULL 4175 **/ 4176 i40e_status i40e_aq_query_vsi_bw_config(struct i40e_hw *hw, 4177 u16 seid, 4178 struct i40e_aqc_query_vsi_bw_config_resp *bw_data, 4179 struct i40e_asq_cmd_details *cmd_details) 4180 { 4181 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), 4182 i40e_aqc_opc_query_vsi_bw_config, 4183 cmd_details); 4184 } 4185 4186 /** 4187 * i40e_aq_query_vsi_ets_sla_config - Query VSI BW configuration per TC 4188 * @hw: pointer to the hw struct 4189 * @seid: seid of the VSI 4190 * @bw_data: Buffer to hold VSI BW configuration per TC 4191 * @cmd_details: pointer to command details structure or NULL 4192 **/ 4193 i40e_status i40e_aq_query_vsi_ets_sla_config(struct i40e_hw *hw, 4194 u16 seid, 4195 struct i40e_aqc_query_vsi_ets_sla_config_resp *bw_data, 4196 struct i40e_asq_cmd_details *cmd_details) 4197 { 4198 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), 4199 i40e_aqc_opc_query_vsi_ets_sla_config, 4200 cmd_details); 4201 } 4202 4203 /** 4204 * i40e_aq_query_switch_comp_ets_config - Query Switch comp BW config per TC 4205 * @hw: pointer to the hw struct 4206 * @seid: seid of the switching component 4207 * @bw_data: Buffer to hold switching component's per TC BW config 4208 * @cmd_details: pointer to command details structure or NULL 
4209 **/ 4210 i40e_status i40e_aq_query_switch_comp_ets_config(struct i40e_hw *hw, 4211 u16 seid, 4212 struct i40e_aqc_query_switching_comp_ets_config_resp *bw_data, 4213 struct i40e_asq_cmd_details *cmd_details) 4214 { 4215 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), 4216 i40e_aqc_opc_query_switching_comp_ets_config, 4217 cmd_details); 4218 } 4219 4220 /** 4221 * i40e_aq_query_port_ets_config - Query Physical Port ETS configuration 4222 * @hw: pointer to the hw struct 4223 * @seid: seid of the VSI or switching component connected to Physical Port 4224 * @bw_data: Buffer to hold current ETS configuration for the Physical Port 4225 * @cmd_details: pointer to command details structure or NULL 4226 **/ 4227 i40e_status i40e_aq_query_port_ets_config(struct i40e_hw *hw, 4228 u16 seid, 4229 struct i40e_aqc_query_port_ets_config_resp *bw_data, 4230 struct i40e_asq_cmd_details *cmd_details) 4231 { 4232 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), 4233 i40e_aqc_opc_query_port_ets_config, 4234 cmd_details); 4235 } 4236 4237 /** 4238 * i40e_aq_query_switch_comp_bw_config - Query Switch comp BW configuration 4239 * @hw: pointer to the hw struct 4240 * @seid: seid of the switching component 4241 * @bw_data: Buffer to hold switching component's BW configuration 4242 * @cmd_details: pointer to command details structure or NULL 4243 **/ 4244 i40e_status i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw, 4245 u16 seid, 4246 struct i40e_aqc_query_switching_comp_bw_config_resp *bw_data, 4247 struct i40e_asq_cmd_details *cmd_details) 4248 { 4249 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), 4250 i40e_aqc_opc_query_switching_comp_bw_config, 4251 cmd_details); 4252 } 4253 4254 /** 4255 * i40e_validate_filter_settings 4256 * @hw: pointer to the hardware structure 4257 * @settings: Filter control settings 4258 * 4259 * Check and validate the filter control settings passed. 4260 * The function checks for the valid filter/context sizes being 4261 * passed for FCoE and PE. 4262 * 4263 * Returns 0 if the values passed are valid and within 4264 * range else returns an error. 
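 *
 * The size enums double as shift counts, e.g. a fcoe_filt_num of
 * I40E_HASH_FILTER_SIZE_4K yields a filter table of
 * I40E_HASH_FILTER_BASE_SIZE << I40E_HASH_FILTER_SIZE_4K entries.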
4265 **/ 4266 static i40e_status i40e_validate_filter_settings(struct i40e_hw *hw, 4267 struct i40e_filter_control_settings *settings) 4268 { 4269 u32 fcoe_cntx_size, fcoe_filt_size; 4270 u32 fcoe_fmax; 4271 u32 val; 4272 4273 /* Validate FCoE settings passed */ 4274 switch (settings->fcoe_filt_num) { 4275 case I40E_HASH_FILTER_SIZE_1K: 4276 case I40E_HASH_FILTER_SIZE_2K: 4277 case I40E_HASH_FILTER_SIZE_4K: 4278 case I40E_HASH_FILTER_SIZE_8K: 4279 case I40E_HASH_FILTER_SIZE_16K: 4280 case I40E_HASH_FILTER_SIZE_32K: 4281 fcoe_filt_size = I40E_HASH_FILTER_BASE_SIZE; 4282 fcoe_filt_size <<= (u32)settings->fcoe_filt_num; 4283 break; 4284 default: 4285 return I40E_ERR_PARAM; 4286 } 4287 4288 switch (settings->fcoe_cntx_num) { 4289 case I40E_DMA_CNTX_SIZE_512: 4290 case I40E_DMA_CNTX_SIZE_1K: 4291 case I40E_DMA_CNTX_SIZE_2K: 4292 case I40E_DMA_CNTX_SIZE_4K: 4293 fcoe_cntx_size = I40E_DMA_CNTX_BASE_SIZE; 4294 fcoe_cntx_size <<= (u32)settings->fcoe_cntx_num; 4295 break; 4296 default: 4297 return I40E_ERR_PARAM; 4298 } 4299 4300 /* Validate PE settings passed */ 4301 switch (settings->pe_filt_num) { 4302 case I40E_HASH_FILTER_SIZE_1K: 4303 case I40E_HASH_FILTER_SIZE_2K: 4304 case I40E_HASH_FILTER_SIZE_4K: 4305 case I40E_HASH_FILTER_SIZE_8K: 4306 case I40E_HASH_FILTER_SIZE_16K: 4307 case I40E_HASH_FILTER_SIZE_32K: 4308 case I40E_HASH_FILTER_SIZE_64K: 4309 case I40E_HASH_FILTER_SIZE_128K: 4310 case I40E_HASH_FILTER_SIZE_256K: 4311 case I40E_HASH_FILTER_SIZE_512K: 4312 case I40E_HASH_FILTER_SIZE_1M: 4313 break; 4314 default: 4315 return I40E_ERR_PARAM; 4316 } 4317 4318 switch (settings->pe_cntx_num) { 4319 case I40E_DMA_CNTX_SIZE_512: 4320 case I40E_DMA_CNTX_SIZE_1K: 4321 case I40E_DMA_CNTX_SIZE_2K: 4322 case I40E_DMA_CNTX_SIZE_4K: 4323 case I40E_DMA_CNTX_SIZE_8K: 4324 case I40E_DMA_CNTX_SIZE_16K: 4325 case I40E_DMA_CNTX_SIZE_32K: 4326 case I40E_DMA_CNTX_SIZE_64K: 4327 case I40E_DMA_CNTX_SIZE_128K: 4328 case I40E_DMA_CNTX_SIZE_256K: 4329 break; 4330 default: 4331 return I40E_ERR_PARAM; 4332 } 4333 4334 /* FCHSIZE + FCDSIZE should not be greater than PMFCOEFMAX */ 4335 val = rd32(hw, I40E_GLHMC_FCOEFMAX); 4336 fcoe_fmax = (val & I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_MASK) 4337 >> I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT; 4338 if (fcoe_filt_size + fcoe_cntx_size > fcoe_fmax) 4339 return I40E_ERR_INVALID_SIZE; 4340 4341 return 0; 4342 } 4343 4344 /** 4345 * i40e_set_filter_control 4346 * @hw: pointer to the hardware structure 4347 * @settings: Filter control settings 4348 * 4349 * Set the Queue Filters for PE/FCoE and enable filters required 4350 * for a single PF. It is expected that these settings are programmed 4351 * at the driver initialization time. 
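 *
 * A minimal caller sketch (the chosen values are illustrative only):
 *
 *	struct i40e_filter_control_settings settings = {};
 *	i40e_status err;
 *
 *	settings.hash_lut_size = I40E_HASH_LUT_SIZE_512;
 *	settings.enable_fdir = true;
 *	settings.enable_ethtype = true;
 *	settings.enable_macvlan = true;
 *	err = i40e_set_filter_control(hw, &settings);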
4352 **/ 4353 i40e_status i40e_set_filter_control(struct i40e_hw *hw, 4354 struct i40e_filter_control_settings *settings) 4355 { 4356 i40e_status ret = 0; 4357 u32 hash_lut_size = 0; 4358 u32 val; 4359 4360 if (!settings) 4361 return I40E_ERR_PARAM; 4362 4363 /* Validate the input settings */ 4364 ret = i40e_validate_filter_settings(hw, settings); 4365 if (ret) 4366 return ret; 4367 4368 /* Read the PF Queue Filter control register */ 4369 val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0); 4370 4371 /* Program required PE hash buckets for the PF */ 4372 val &= ~I40E_PFQF_CTL_0_PEHSIZE_MASK; 4373 val |= ((u32)settings->pe_filt_num << I40E_PFQF_CTL_0_PEHSIZE_SHIFT) & 4374 I40E_PFQF_CTL_0_PEHSIZE_MASK; 4375 /* Program required PE contexts for the PF */ 4376 val &= ~I40E_PFQF_CTL_0_PEDSIZE_MASK; 4377 val |= ((u32)settings->pe_cntx_num << I40E_PFQF_CTL_0_PEDSIZE_SHIFT) & 4378 I40E_PFQF_CTL_0_PEDSIZE_MASK; 4379 4380 /* Program required FCoE hash buckets for the PF */ 4381 val &= ~I40E_PFQF_CTL_0_PFFCHSIZE_MASK; 4382 val |= ((u32)settings->fcoe_filt_num << 4383 I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT) & 4384 I40E_PFQF_CTL_0_PFFCHSIZE_MASK; 4385 /* Program required FCoE DDP contexts for the PF */ 4386 val &= ~I40E_PFQF_CTL_0_PFFCDSIZE_MASK; 4387 val |= ((u32)settings->fcoe_cntx_num << 4388 I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT) & 4389 I40E_PFQF_CTL_0_PFFCDSIZE_MASK; 4390 4391 /* Program Hash LUT size for the PF */ 4392 val &= ~I40E_PFQF_CTL_0_HASHLUTSIZE_MASK; 4393 if (settings->hash_lut_size == I40E_HASH_LUT_SIZE_512) 4394 hash_lut_size = 1; 4395 val |= (hash_lut_size << I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT) & 4396 I40E_PFQF_CTL_0_HASHLUTSIZE_MASK; 4397 4398 /* Enable FDIR, Ethertype and MACVLAN filters for PF and VFs */ 4399 if (settings->enable_fdir) 4400 val |= I40E_PFQF_CTL_0_FD_ENA_MASK; 4401 if (settings->enable_ethtype) 4402 val |= I40E_PFQF_CTL_0_ETYPE_ENA_MASK; 4403 if (settings->enable_macvlan) 4404 val |= I40E_PFQF_CTL_0_MACVLAN_ENA_MASK; 4405 4406 i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, val); 4407 4408 return 0; 4409 } 4410 4411 /** 4412 * i40e_aq_add_rem_control_packet_filter - Add or Remove Control Packet Filter 4413 * @hw: pointer to the hw struct 4414 * @mac_addr: MAC address to use in the filter 4415 * @ethtype: Ethertype to use in the filter 4416 * @flags: Flags that needs to be applied to the filter 4417 * @vsi_seid: seid of the control VSI 4418 * @queue: VSI queue number to send the packet to 4419 * @is_add: Add control packet filter if True else remove 4420 * @stats: Structure to hold information on control filter counts 4421 * @cmd_details: pointer to command details structure or NULL 4422 * 4423 * This command will Add or Remove control packet filter for a control VSI. 4424 * In return it will update the total number of perfect filter count in 4425 * the stats member. 
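 *
 * See i40e_add_filter_to_drop_tx_flow_control_frames() below for an
 * in-driver example that installs a Tx drop filter for flow control frames.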
4426 **/ 4427 i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw, 4428 u8 *mac_addr, u16 ethtype, u16 flags, 4429 u16 vsi_seid, u16 queue, bool is_add, 4430 struct i40e_control_filter_stats *stats, 4431 struct i40e_asq_cmd_details *cmd_details) 4432 { 4433 struct i40e_aq_desc desc; 4434 struct i40e_aqc_add_remove_control_packet_filter *cmd = 4435 (struct i40e_aqc_add_remove_control_packet_filter *) 4436 &desc.params.raw; 4437 struct i40e_aqc_add_remove_control_packet_filter_completion *resp = 4438 (struct i40e_aqc_add_remove_control_packet_filter_completion *) 4439 &desc.params.raw; 4440 i40e_status status; 4441 4442 if (vsi_seid == 0) 4443 return I40E_ERR_PARAM; 4444 4445 if (is_add) { 4446 i40e_fill_default_direct_cmd_desc(&desc, 4447 i40e_aqc_opc_add_control_packet_filter); 4448 cmd->queue = cpu_to_le16(queue); 4449 } else { 4450 i40e_fill_default_direct_cmd_desc(&desc, 4451 i40e_aqc_opc_remove_control_packet_filter); 4452 } 4453 4454 if (mac_addr) 4455 ether_addr_copy(cmd->mac, mac_addr); 4456 4457 cmd->etype = cpu_to_le16(ethtype); 4458 cmd->flags = cpu_to_le16(flags); 4459 cmd->seid = cpu_to_le16(vsi_seid); 4460 4461 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 4462 4463 if (!status && stats) { 4464 stats->mac_etype_used = le16_to_cpu(resp->mac_etype_used); 4465 stats->etype_used = le16_to_cpu(resp->etype_used); 4466 stats->mac_etype_free = le16_to_cpu(resp->mac_etype_free); 4467 stats->etype_free = le16_to_cpu(resp->etype_free); 4468 } 4469 4470 return status; 4471 } 4472 4473 /** 4474 * i40e_add_filter_to_drop_tx_flow_control_frames- filter to drop flow control 4475 * @hw: pointer to the hw struct 4476 * @seid: VSI seid to add ethertype filter from 4477 **/ 4478 void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw, 4479 u16 seid) 4480 { 4481 #define I40E_FLOW_CONTROL_ETHTYPE 0x8808 4482 u16 flag = I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC | 4483 I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP | 4484 I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX; 4485 u16 ethtype = I40E_FLOW_CONTROL_ETHTYPE; 4486 i40e_status status; 4487 4488 status = i40e_aq_add_rem_control_packet_filter(hw, NULL, ethtype, flag, 4489 seid, 0, true, NULL, 4490 NULL); 4491 if (status) 4492 hw_dbg(hw, "Ethtype Filter Add failed: Error pruning Tx flow control frames\n"); 4493 } 4494 4495 /** 4496 * i40e_aq_alternate_read 4497 * @hw: pointer to the hardware structure 4498 * @reg_addr0: address of first dword to be read 4499 * @reg_val0: pointer for data read from 'reg_addr0' 4500 * @reg_addr1: address of second dword to be read 4501 * @reg_val1: pointer for data read from 'reg_addr1' 4502 * 4503 * Read one or two dwords from alternate structure. Fields are indicated 4504 * by 'reg_addr0' and 'reg_addr1' register numbers. If 'reg_val1' pointer 4505 * is not passed then only register at 'reg_addr0' is read. 
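 * i40e_read_bw_from_alt_ram() further down uses this helper to fetch the
 * per-PF min/max bandwidth dwords in a single command.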
4506 * 4507 **/ 4508 static i40e_status i40e_aq_alternate_read(struct i40e_hw *hw, 4509 u32 reg_addr0, u32 *reg_val0, 4510 u32 reg_addr1, u32 *reg_val1) 4511 { 4512 struct i40e_aq_desc desc; 4513 struct i40e_aqc_alternate_write *cmd_resp = 4514 (struct i40e_aqc_alternate_write *)&desc.params.raw; 4515 i40e_status status; 4516 4517 if (!reg_val0) 4518 return I40E_ERR_PARAM; 4519 4520 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_alternate_read); 4521 cmd_resp->address0 = cpu_to_le32(reg_addr0); 4522 cmd_resp->address1 = cpu_to_le32(reg_addr1); 4523 4524 status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL); 4525 4526 if (!status) { 4527 *reg_val0 = le32_to_cpu(cmd_resp->data0); 4528 4529 if (reg_val1) 4530 *reg_val1 = le32_to_cpu(cmd_resp->data1); 4531 } 4532 4533 return status; 4534 } 4535 4536 /** 4537 * i40e_aq_suspend_port_tx 4538 * @hw: pointer to the hardware structure 4539 * @seid: port seid 4540 * @cmd_details: pointer to command details structure or NULL 4541 * 4542 * Suspend port's Tx traffic 4543 **/ 4544 i40e_status i40e_aq_suspend_port_tx(struct i40e_hw *hw, u16 seid, 4545 struct i40e_asq_cmd_details *cmd_details) 4546 { 4547 struct i40e_aqc_tx_sched_ind *cmd; 4548 struct i40e_aq_desc desc; 4549 i40e_status status; 4550 4551 cmd = (struct i40e_aqc_tx_sched_ind *)&desc.params.raw; 4552 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_suspend_port_tx); 4553 cmd->vsi_seid = cpu_to_le16(seid); 4554 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 4555 4556 return status; 4557 } 4558 4559 /** 4560 * i40e_aq_resume_port_tx 4561 * @hw: pointer to the hardware structure 4562 * @cmd_details: pointer to command details structure or NULL 4563 * 4564 * Resume port's Tx traffic 4565 **/ 4566 i40e_status i40e_aq_resume_port_tx(struct i40e_hw *hw, 4567 struct i40e_asq_cmd_details *cmd_details) 4568 { 4569 struct i40e_aq_desc desc; 4570 i40e_status status; 4571 4572 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_resume_port_tx); 4573 4574 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 4575 4576 return status; 4577 } 4578 4579 /** 4580 * i40e_set_pci_config_data - store PCI bus info 4581 * @hw: pointer to hardware structure 4582 * @link_status: the link status word from PCI config space 4583 * 4584 * Stores the PCI bus info (speed, width, type) within the i40e_hw structure 4585 **/ 4586 void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status) 4587 { 4588 hw->bus.type = i40e_bus_type_pci_express; 4589 4590 switch (link_status & PCI_EXP_LNKSTA_NLW) { 4591 case PCI_EXP_LNKSTA_NLW_X1: 4592 hw->bus.width = i40e_bus_width_pcie_x1; 4593 break; 4594 case PCI_EXP_LNKSTA_NLW_X2: 4595 hw->bus.width = i40e_bus_width_pcie_x2; 4596 break; 4597 case PCI_EXP_LNKSTA_NLW_X4: 4598 hw->bus.width = i40e_bus_width_pcie_x4; 4599 break; 4600 case PCI_EXP_LNKSTA_NLW_X8: 4601 hw->bus.width = i40e_bus_width_pcie_x8; 4602 break; 4603 default: 4604 hw->bus.width = i40e_bus_width_unknown; 4605 break; 4606 } 4607 4608 switch (link_status & PCI_EXP_LNKSTA_CLS) { 4609 case PCI_EXP_LNKSTA_CLS_2_5GB: 4610 hw->bus.speed = i40e_bus_speed_2500; 4611 break; 4612 case PCI_EXP_LNKSTA_CLS_5_0GB: 4613 hw->bus.speed = i40e_bus_speed_5000; 4614 break; 4615 case PCI_EXP_LNKSTA_CLS_8_0GB: 4616 hw->bus.speed = i40e_bus_speed_8000; 4617 break; 4618 default: 4619 hw->bus.speed = i40e_bus_speed_unknown; 4620 break; 4621 } 4622 } 4623 4624 /** 4625 * i40e_aq_debug_dump 4626 * @hw: pointer to the hardware structure 4627 * @cluster_id: specific cluster to dump 4628 * @table_id: 
table id within cluster 4629 * @start_index: index of line in the block to read 4630 * @buff_size: dump buffer size 4631 * @buff: dump buffer 4632 * @ret_buff_size: actual buffer size returned 4633 * @ret_next_table: next block to read 4634 * @ret_next_index: next index to read 4635 * @cmd_details: pointer to command details structure or NULL 4636 * 4637 * Dump internal FW/HW data for debug purposes. 4638 * 4639 **/ 4640 i40e_status i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id, 4641 u8 table_id, u32 start_index, u16 buff_size, 4642 void *buff, u16 *ret_buff_size, 4643 u8 *ret_next_table, u32 *ret_next_index, 4644 struct i40e_asq_cmd_details *cmd_details) 4645 { 4646 struct i40e_aq_desc desc; 4647 struct i40e_aqc_debug_dump_internals *cmd = 4648 (struct i40e_aqc_debug_dump_internals *)&desc.params.raw; 4649 struct i40e_aqc_debug_dump_internals *resp = 4650 (struct i40e_aqc_debug_dump_internals *)&desc.params.raw; 4651 i40e_status status; 4652 4653 if (buff_size == 0 || !buff) 4654 return I40E_ERR_PARAM; 4655 4656 i40e_fill_default_direct_cmd_desc(&desc, 4657 i40e_aqc_opc_debug_dump_internals); 4658 /* Indirect Command */ 4659 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 4660 if (buff_size > I40E_AQ_LARGE_BUF) 4661 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 4662 4663 cmd->cluster_id = cluster_id; 4664 cmd->table_id = table_id; 4665 cmd->idx = cpu_to_le32(start_index); 4666 4667 desc.datalen = cpu_to_le16(buff_size); 4668 4669 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); 4670 if (!status) { 4671 if (ret_buff_size) 4672 *ret_buff_size = le16_to_cpu(desc.datalen); 4673 if (ret_next_table) 4674 *ret_next_table = resp->table_id; 4675 if (ret_next_index) 4676 *ret_next_index = le32_to_cpu(resp->idx); 4677 } 4678 4679 return status; 4680 } 4681 4682 /** 4683 * i40e_read_bw_from_alt_ram 4684 * @hw: pointer to the hardware structure 4685 * @max_bw: pointer for max_bw read 4686 * @min_bw: pointer for min_bw read 4687 * @min_valid: pointer for bool that is true if min_bw is a valid value 4688 * @max_valid: pointer for bool that is true if max_bw is a valid value 4689 * 4690 * Read bw from the alternate ram for the given pf 4691 **/ 4692 i40e_status i40e_read_bw_from_alt_ram(struct i40e_hw *hw, 4693 u32 *max_bw, u32 *min_bw, 4694 bool *min_valid, bool *max_valid) 4695 { 4696 i40e_status status; 4697 u32 max_bw_addr, min_bw_addr; 4698 4699 /* Calculate the address of the min/max bw registers */ 4700 max_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET + 4701 I40E_ALT_STRUCT_MAX_BW_OFFSET + 4702 (I40E_ALT_STRUCT_DWORDS_PER_PF * hw->pf_id); 4703 min_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET + 4704 I40E_ALT_STRUCT_MIN_BW_OFFSET + 4705 (I40E_ALT_STRUCT_DWORDS_PER_PF * hw->pf_id); 4706 4707 /* Read the bandwidths from alt ram */ 4708 status = i40e_aq_alternate_read(hw, max_bw_addr, max_bw, 4709 min_bw_addr, min_bw); 4710 4711 if (*min_bw & I40E_ALT_BW_VALID_MASK) 4712 *min_valid = true; 4713 else 4714 *min_valid = false; 4715 4716 if (*max_bw & I40E_ALT_BW_VALID_MASK) 4717 *max_valid = true; 4718 else 4719 *max_valid = false; 4720 4721 return status; 4722 } 4723 4724 /** 4725 * i40e_aq_configure_partition_bw 4726 * @hw: pointer to the hardware structure 4727 * @bw_data: Buffer holding valid pfs and bw limits 4728 * @cmd_details: pointer to command details 4729 * 4730 * Configure partitions guaranteed/max bw 4731 **/ 4732 i40e_status i40e_aq_configure_partition_bw(struct i40e_hw *hw, 4733 struct i40e_aqc_configure_partition_bw_data *bw_data, 4734 struct 
i40e_asq_cmd_details *cmd_details) 4735 { 4736 i40e_status status; 4737 struct i40e_aq_desc desc; 4738 u16 bwd_size = sizeof(*bw_data); 4739 4740 i40e_fill_default_direct_cmd_desc(&desc, 4741 i40e_aqc_opc_configure_partition_bw); 4742 4743 /* Indirect command */ 4744 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 4745 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD); 4746 4747 if (bwd_size > I40E_AQ_LARGE_BUF) 4748 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 4749 4750 desc.datalen = cpu_to_le16(bwd_size); 4751 4752 status = i40e_asq_send_command(hw, &desc, bw_data, bwd_size, 4753 cmd_details); 4754 4755 return status; 4756 } 4757 4758 /** 4759 * i40e_read_phy_register_clause22 4760 * @hw: pointer to the HW structure 4761 * @reg: register address in the page 4762 * @phy_addr: PHY address on MDIO interface 4763 * @value: PHY register value 4764 * 4765 * Reads specified PHY register value 4766 **/ 4767 i40e_status i40e_read_phy_register_clause22(struct i40e_hw *hw, 4768 u16 reg, u8 phy_addr, u16 *value) 4769 { 4770 i40e_status status = I40E_ERR_TIMEOUT; 4771 u8 port_num = (u8)hw->func_caps.mdio_port_num; 4772 u32 command = 0; 4773 u16 retry = 1000; 4774 4775 command = (reg << I40E_GLGEN_MSCA_DEVADD_SHIFT) | 4776 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | 4777 (I40E_MDIO_CLAUSE22_OPCODE_READ_MASK) | 4778 (I40E_MDIO_CLAUSE22_STCODE_MASK) | 4779 (I40E_GLGEN_MSCA_MDICMD_MASK); 4780 wr32(hw, I40E_GLGEN_MSCA(port_num), command); 4781 do { 4782 command = rd32(hw, I40E_GLGEN_MSCA(port_num)); 4783 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { 4784 status = 0; 4785 break; 4786 } 4787 udelay(10); 4788 retry--; 4789 } while (retry); 4790 4791 if (status) { 4792 i40e_debug(hw, I40E_DEBUG_PHY, 4793 "PHY: Can't write command to external PHY.\n"); 4794 } else { 4795 command = rd32(hw, I40E_GLGEN_MSRWD(port_num)); 4796 *value = (command & I40E_GLGEN_MSRWD_MDIRDDATA_MASK) >> 4797 I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT; 4798 } 4799 4800 return status; 4801 } 4802 4803 /** 4804 * i40e_write_phy_register_clause22 4805 * @hw: pointer to the HW structure 4806 * @reg: register address in the page 4807 * @phy_addr: PHY address on MDIO interface 4808 * @value: PHY register value 4809 * 4810 * Writes specified PHY register value 4811 **/ 4812 i40e_status i40e_write_phy_register_clause22(struct i40e_hw *hw, 4813 u16 reg, u8 phy_addr, u16 value) 4814 { 4815 i40e_status status = I40E_ERR_TIMEOUT; 4816 u8 port_num = (u8)hw->func_caps.mdio_port_num; 4817 u32 command = 0; 4818 u16 retry = 1000; 4819 4820 command = value << I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT; 4821 wr32(hw, I40E_GLGEN_MSRWD(port_num), command); 4822 4823 command = (reg << I40E_GLGEN_MSCA_DEVADD_SHIFT) | 4824 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | 4825 (I40E_MDIO_CLAUSE22_OPCODE_WRITE_MASK) | 4826 (I40E_MDIO_CLAUSE22_STCODE_MASK) | 4827 (I40E_GLGEN_MSCA_MDICMD_MASK); 4828 4829 wr32(hw, I40E_GLGEN_MSCA(port_num), command); 4830 do { 4831 command = rd32(hw, I40E_GLGEN_MSCA(port_num)); 4832 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { 4833 status = 0; 4834 break; 4835 } 4836 udelay(10); 4837 retry--; 4838 } while (retry); 4839 4840 return status; 4841 } 4842 4843 /** 4844 * i40e_read_phy_register_clause45 4845 * @hw: pointer to the HW structure 4846 * @page: registers page number 4847 * @reg: register address in the page 4848 * @phy_addr: PHY address on MDIO interface 4849 * @value: PHY register value 4850 * 4851 * Reads specified PHY register value 4852 **/ 4853 i40e_status i40e_read_phy_register_clause45(struct i40e_hw *hw, 4854 u8 page, u16 reg, u8 
phy_addr, u16 *value) 4855 { 4856 i40e_status status = I40E_ERR_TIMEOUT; 4857 u32 command = 0; 4858 u16 retry = 1000; 4859 u8 port_num = hw->func_caps.mdio_port_num; 4860 4861 command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) | 4862 (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) | 4863 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | 4864 (I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK) | 4865 (I40E_MDIO_CLAUSE45_STCODE_MASK) | 4866 (I40E_GLGEN_MSCA_MDICMD_MASK) | 4867 (I40E_GLGEN_MSCA_MDIINPROGEN_MASK); 4868 wr32(hw, I40E_GLGEN_MSCA(port_num), command); 4869 do { 4870 command = rd32(hw, I40E_GLGEN_MSCA(port_num)); 4871 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { 4872 status = 0; 4873 break; 4874 } 4875 usleep_range(10, 20); 4876 retry--; 4877 } while (retry); 4878 4879 if (status) { 4880 i40e_debug(hw, I40E_DEBUG_PHY, 4881 "PHY: Can't write command to external PHY.\n"); 4882 goto phy_read_end; 4883 } 4884 4885 command = (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) | 4886 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | 4887 (I40E_MDIO_CLAUSE45_OPCODE_READ_MASK) | 4888 (I40E_MDIO_CLAUSE45_STCODE_MASK) | 4889 (I40E_GLGEN_MSCA_MDICMD_MASK) | 4890 (I40E_GLGEN_MSCA_MDIINPROGEN_MASK); 4891 status = I40E_ERR_TIMEOUT; 4892 retry = 1000; 4893 wr32(hw, I40E_GLGEN_MSCA(port_num), command); 4894 do { 4895 command = rd32(hw, I40E_GLGEN_MSCA(port_num)); 4896 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { 4897 status = 0; 4898 break; 4899 } 4900 usleep_range(10, 20); 4901 retry--; 4902 } while (retry); 4903 4904 if (!status) { 4905 command = rd32(hw, I40E_GLGEN_MSRWD(port_num)); 4906 *value = (command & I40E_GLGEN_MSRWD_MDIRDDATA_MASK) >> 4907 I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT; 4908 } else { 4909 i40e_debug(hw, I40E_DEBUG_PHY, 4910 "PHY: Can't read register value from external PHY.\n"); 4911 } 4912 4913 phy_read_end: 4914 return status; 4915 } 4916 4917 /** 4918 * i40e_write_phy_register_clause45 4919 * @hw: pointer to the HW structure 4920 * @page: registers page number 4921 * @reg: register address in the page 4922 * @phy_addr: PHY address on MDIO interface 4923 * @value: PHY register value 4924 * 4925 * Writes value to specified PHY register 4926 **/ 4927 i40e_status i40e_write_phy_register_clause45(struct i40e_hw *hw, 4928 u8 page, u16 reg, u8 phy_addr, u16 value) 4929 { 4930 i40e_status status = I40E_ERR_TIMEOUT; 4931 u32 command = 0; 4932 u16 retry = 1000; 4933 u8 port_num = hw->func_caps.mdio_port_num; 4934 4935 command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) | 4936 (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) | 4937 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | 4938 (I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK) | 4939 (I40E_MDIO_CLAUSE45_STCODE_MASK) | 4940 (I40E_GLGEN_MSCA_MDICMD_MASK) | 4941 (I40E_GLGEN_MSCA_MDIINPROGEN_MASK); 4942 wr32(hw, I40E_GLGEN_MSCA(port_num), command); 4943 do { 4944 command = rd32(hw, I40E_GLGEN_MSCA(port_num)); 4945 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { 4946 status = 0; 4947 break; 4948 } 4949 usleep_range(10, 20); 4950 retry--; 4951 } while (retry); 4952 if (status) { 4953 i40e_debug(hw, I40E_DEBUG_PHY, 4954 "PHY: Can't write command to external PHY.\n"); 4955 goto phy_write_end; 4956 } 4957 4958 command = value << I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT; 4959 wr32(hw, I40E_GLGEN_MSRWD(port_num), command); 4960 4961 command = (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) | 4962 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | 4963 (I40E_MDIO_CLAUSE45_OPCODE_WRITE_MASK) | 4964 (I40E_MDIO_CLAUSE45_STCODE_MASK) | 4965 (I40E_GLGEN_MSCA_MDICMD_MASK) | 4966 (I40E_GLGEN_MSCA_MDIINPROGEN_MASK); 4967 status = 
I40E_ERR_TIMEOUT;
	retry = 1000;
	wr32(hw, I40E_GLGEN_MSCA(port_num), command);
	do {
		command = rd32(hw, I40E_GLGEN_MSCA(port_num));
		if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
			status = 0;
			break;
		}
		usleep_range(10, 20);
		retry--;
	} while (retry);

phy_write_end:
	return status;
}

/**
 * i40e_write_phy_register
 * @hw: pointer to the HW structure
 * @page: registers page number
 * @reg: register address in the page
 * @phy_addr: PHY address on MDIO interface
 * @value: PHY register value
 *
 * Writes value to specified PHY register
 **/
i40e_status i40e_write_phy_register(struct i40e_hw *hw,
				    u8 page, u16 reg, u8 phy_addr, u16 value)
{
	i40e_status status;

	switch (hw->device_id) {
	case I40E_DEV_ID_1G_BASE_T_X722:
		status = i40e_write_phy_register_clause22(hw, reg, phy_addr,
							  value);
		break;
	case I40E_DEV_ID_1G_BASE_T_BC:
	case I40E_DEV_ID_5G_BASE_T_BC:
	case I40E_DEV_ID_10G_BASE_T:
	case I40E_DEV_ID_10G_BASE_T4:
	case I40E_DEV_ID_10G_BASE_T_BC:
	case I40E_DEV_ID_10G_BASE_T_X722:
	case I40E_DEV_ID_25G_B:
	case I40E_DEV_ID_25G_SFP28:
		status = i40e_write_phy_register_clause45(hw, page, reg,
							  phy_addr, value);
		break;
	default:
		status = I40E_ERR_UNKNOWN_PHY;
		break;
	}

	return status;
}

/**
 * i40e_read_phy_register
 * @hw: pointer to the HW structure
 * @page: registers page number
 * @reg: register address in the page
 * @phy_addr: PHY address on MDIO interface
 * @value: PHY register value
 *
 * Reads specified PHY register value
 **/
i40e_status i40e_read_phy_register(struct i40e_hw *hw,
				   u8 page, u16 reg, u8 phy_addr, u16 *value)
{
	i40e_status status;

	switch (hw->device_id) {
	case I40E_DEV_ID_1G_BASE_T_X722:
		status = i40e_read_phy_register_clause22(hw, reg, phy_addr,
							 value);
		break;
	case I40E_DEV_ID_1G_BASE_T_BC:
	case I40E_DEV_ID_5G_BASE_T_BC:
	case I40E_DEV_ID_10G_BASE_T:
	case I40E_DEV_ID_10G_BASE_T4:
	case I40E_DEV_ID_10G_BASE_T_BC:
	case I40E_DEV_ID_10G_BASE_T_X722:
	case I40E_DEV_ID_25G_B:
	case I40E_DEV_ID_25G_SFP28:
		status = i40e_read_phy_register_clause45(hw, page, reg,
							 phy_addr, value);
		break;
	default:
		status = I40E_ERR_UNKNOWN_PHY;
		break;
	}

	return status;
}

/**
 * i40e_get_phy_address
 * @hw: pointer to the HW structure
 * @dev_num: PHY port number whose address we want
 *
 * Gets PHY address for current port
 **/
u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num)
{
	u8 port_num = hw->func_caps.mdio_port_num;
	u32 reg_val = rd32(hw, I40E_GLGEN_MDIO_I2C_SEL(port_num));

	return (u8)(reg_val >> ((dev_num + 1) * 5)) & 0x1f;
}

/**
 * i40e_blink_phy_link_led
 * @hw: pointer to the HW structure
 * @time: how long the LED will blink, in seconds
 * @interval: gap between LED on and off, in msecs
 *
 * Blinks PHY link LED
 **/
i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw,
				    u32 time, u32 interval)
{
	i40e_status status = 0;
	u32 i;
	u16 led_ctl;
	u16 gpio_led_port;
	u16 led_reg;
	u16 led_addr = I40E_PHY_LED_PROV_REG_1;
	u8 phy_addr = 0;
	u8 port_num;

	i = rd32(hw, I40E_PFGEN_PORTNUM);
	port_num = (u8)(i &
I40E_PFGEN_PORTNUM_PORT_NUM_MASK); 5099 phy_addr = i40e_get_phy_address(hw, port_num); 5100 5101 for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++, 5102 led_addr++) { 5103 status = i40e_read_phy_register_clause45(hw, 5104 I40E_PHY_COM_REG_PAGE, 5105 led_addr, phy_addr, 5106 &led_reg); 5107 if (status) 5108 goto phy_blinking_end; 5109 led_ctl = led_reg; 5110 if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) { 5111 led_reg = 0; 5112 status = i40e_write_phy_register_clause45(hw, 5113 I40E_PHY_COM_REG_PAGE, 5114 led_addr, phy_addr, 5115 led_reg); 5116 if (status) 5117 goto phy_blinking_end; 5118 break; 5119 } 5120 } 5121 5122 if (time > 0 && interval > 0) { 5123 for (i = 0; i < time * 1000; i += interval) { 5124 status = i40e_read_phy_register_clause45(hw, 5125 I40E_PHY_COM_REG_PAGE, 5126 led_addr, phy_addr, &led_reg); 5127 if (status) 5128 goto restore_config; 5129 if (led_reg & I40E_PHY_LED_MANUAL_ON) 5130 led_reg = 0; 5131 else 5132 led_reg = I40E_PHY_LED_MANUAL_ON; 5133 status = i40e_write_phy_register_clause45(hw, 5134 I40E_PHY_COM_REG_PAGE, 5135 led_addr, phy_addr, led_reg); 5136 if (status) 5137 goto restore_config; 5138 msleep(interval); 5139 } 5140 } 5141 5142 restore_config: 5143 status = i40e_write_phy_register_clause45(hw, 5144 I40E_PHY_COM_REG_PAGE, 5145 led_addr, phy_addr, led_ctl); 5146 5147 phy_blinking_end: 5148 return status; 5149 } 5150 5151 /** 5152 * i40e_led_get_reg - read LED register 5153 * @hw: pointer to the HW structure 5154 * @led_addr: LED register address 5155 * @reg_val: read register value 5156 **/ 5157 static enum i40e_status_code i40e_led_get_reg(struct i40e_hw *hw, u16 led_addr, 5158 u32 *reg_val) 5159 { 5160 enum i40e_status_code status; 5161 u8 phy_addr = 0; 5162 u8 port_num; 5163 u32 i; 5164 5165 *reg_val = 0; 5166 if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) { 5167 status = 5168 i40e_aq_get_phy_register(hw, 5169 I40E_AQ_PHY_REG_ACCESS_EXTERNAL, 5170 I40E_PHY_COM_REG_PAGE, true, 5171 I40E_PHY_LED_PROV_REG_1, 5172 reg_val, NULL); 5173 } else { 5174 i = rd32(hw, I40E_PFGEN_PORTNUM); 5175 port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK); 5176 phy_addr = i40e_get_phy_address(hw, port_num); 5177 status = i40e_read_phy_register_clause45(hw, 5178 I40E_PHY_COM_REG_PAGE, 5179 led_addr, phy_addr, 5180 (u16 *)reg_val); 5181 } 5182 return status; 5183 } 5184 5185 /** 5186 * i40e_led_set_reg - write LED register 5187 * @hw: pointer to the HW structure 5188 * @led_addr: LED register address 5189 * @reg_val: register value to write 5190 **/ 5191 static enum i40e_status_code i40e_led_set_reg(struct i40e_hw *hw, u16 led_addr, 5192 u32 reg_val) 5193 { 5194 enum i40e_status_code status; 5195 u8 phy_addr = 0; 5196 u8 port_num; 5197 u32 i; 5198 5199 if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) { 5200 status = 5201 i40e_aq_set_phy_register(hw, 5202 I40E_AQ_PHY_REG_ACCESS_EXTERNAL, 5203 I40E_PHY_COM_REG_PAGE, true, 5204 I40E_PHY_LED_PROV_REG_1, 5205 reg_val, NULL); 5206 } else { 5207 i = rd32(hw, I40E_PFGEN_PORTNUM); 5208 port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK); 5209 phy_addr = i40e_get_phy_address(hw, port_num); 5210 status = i40e_write_phy_register_clause45(hw, 5211 I40E_PHY_COM_REG_PAGE, 5212 led_addr, phy_addr, 5213 (u16)reg_val); 5214 } 5215 5216 return status; 5217 } 5218 5219 /** 5220 * i40e_led_get_phy - return current on/off mode 5221 * @hw: pointer to the hw struct 5222 * @led_addr: address of led register to use 5223 * @val: original value of register to use 5224 * 5225 **/ 5226 i40e_status i40e_led_get_phy(struct i40e_hw *hw, u16 
/**
 * i40e_led_get_phy - return current on/off mode
 * @hw: pointer to the hw struct
 * @led_addr: address of the LED register to use
 * @val: original value of the register to use
 *
 **/
i40e_status i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
			     u16 *val)
{
	i40e_status status = 0;
	u16 gpio_led_port;
	u8 phy_addr = 0;
	u16 reg_val;
	u16 temp_addr;
	u8 port_num;
	u32 i;
	u32 reg_val_aq;

	if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
		status =
		      i40e_aq_get_phy_register(hw,
					       I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
					       I40E_PHY_COM_REG_PAGE, true,
					       I40E_PHY_LED_PROV_REG_1,
					       &reg_val_aq, NULL);
		if (status == I40E_SUCCESS)
			*val = (u16)reg_val_aq;
		return status;
	}
	temp_addr = I40E_PHY_LED_PROV_REG_1;
	i = rd32(hw, I40E_PFGEN_PORTNUM);
	port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
	phy_addr = i40e_get_phy_address(hw, port_num);

	for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++,
	     temp_addr++) {
		status = i40e_read_phy_register_clause45(hw,
							 I40E_PHY_COM_REG_PAGE,
							 temp_addr, phy_addr,
							 &reg_val);
		if (status)
			return status;
		*val = reg_val;
		if (reg_val & I40E_PHY_LED_LINK_MODE_MASK) {
			*led_addr = temp_addr;
			break;
		}
	}
	return status;
}

/**
 * i40e_led_set_phy
 * @hw: pointer to the HW structure
 * @on: true or false
 * @led_addr: address of the LED register to use
 * @mode: original value plus a bit indicating set or ignore
 *
 * Set LEDs on or off when controlled by the PHY
 *
 **/
i40e_status i40e_led_set_phy(struct i40e_hw *hw, bool on,
			     u16 led_addr, u32 mode)
{
	i40e_status status = 0;
	u32 led_ctl = 0;
	u32 led_reg = 0;

	status = i40e_led_get_reg(hw, led_addr, &led_reg);
	if (status)
		return status;
	led_ctl = led_reg;
	if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) {
		led_reg = 0;
		status = i40e_led_set_reg(hw, led_addr, led_reg);
		if (status)
			return status;
	}
	status = i40e_led_get_reg(hw, led_addr, &led_reg);
	if (status)
		goto restore_config;
	if (on)
		led_reg = I40E_PHY_LED_MANUAL_ON;
	else
		led_reg = 0;

	status = i40e_led_set_reg(hw, led_addr, led_reg);
	if (status)
		goto restore_config;
	if (mode & I40E_PHY_LED_MODE_ORIG) {
		led_ctl = (mode & I40E_PHY_LED_MODE_MASK);
		status = i40e_led_set_reg(hw, led_addr, led_ctl);
	}
	return status;

restore_config:
	status = i40e_led_set_reg(hw, led_addr, led_ctl);
	return status;
}

/**
 * i40e_aq_rx_ctl_read_register - use FW to read from an Rx control register
 * @hw: pointer to the hw struct
 * @reg_addr: register address
 * @reg_val: ptr to register value
 * @cmd_details: pointer to command details structure or NULL
 *
 * Use the firmware to read the Rx control register,
 * especially useful if the Rx unit is under heavy pressure
 **/
i40e_status i40e_aq_rx_ctl_read_register(struct i40e_hw *hw,
					 u32 reg_addr, u32 *reg_val,
					 struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_rx_ctl_reg_read_write *cmd_resp =
		(struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw;
	i40e_status status;

	if (!reg_val)
		return I40E_ERR_PARAM;

	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_rx_ctl_reg_read);

	cmd_resp->address = cpu_to_le32(reg_addr);

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	if (status == 0)
		*reg_val = le32_to_cpu(cmd_resp->value);

	return status;
}
/**
 * i40e_read_rx_ctl - read from an Rx control register
 * @hw: pointer to the hw struct
 * @reg_addr: register address
 **/
u32 i40e_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr)
{
	i40e_status status = 0;
	bool use_register;
	int retry = 5;
	u32 val = 0;

	use_register = (((hw->aq.api_maj_ver == 1) &&
			 (hw->aq.api_min_ver < 5)) ||
			(hw->mac.type == I40E_MAC_X722));
	if (!use_register) {
do_retry:
		status = i40e_aq_rx_ctl_read_register(hw, reg_addr, &val, NULL);
		if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) {
			usleep_range(1000, 2000);
			retry--;
			goto do_retry;
		}
	}

	/* if the AQ access failed, try the old-fashioned way */
	if (status || use_register)
		val = rd32(hw, reg_addr);

	return val;
}

/**
 * i40e_aq_rx_ctl_write_register
 * @hw: pointer to the hw struct
 * @reg_addr: register address
 * @reg_val: register value
 * @cmd_details: pointer to command details structure or NULL
 *
 * Use the firmware to write to an Rx control register,
 * especially useful if the Rx unit is under heavy pressure
 **/
i40e_status i40e_aq_rx_ctl_write_register(struct i40e_hw *hw,
					  u32 reg_addr, u32 reg_val,
					  struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_rx_ctl_reg_read_write *cmd =
		(struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw;
	i40e_status status;

	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_rx_ctl_reg_write);

	cmd->address = cpu_to_le32(reg_addr);
	cmd->value = cpu_to_le32(reg_val);

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	return status;
}

/**
 * i40e_write_rx_ctl - write to an Rx control register
 * @hw: pointer to the hw struct
 * @reg_addr: register address
 * @reg_val: register value
 **/
void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val)
{
	i40e_status status = 0;
	bool use_register;
	int retry = 5;

	use_register = (((hw->aq.api_maj_ver == 1) &&
			 (hw->aq.api_min_ver < 5)) ||
			(hw->mac.type == I40E_MAC_X722));
	if (!use_register) {
do_retry:
		status = i40e_aq_rx_ctl_write_register(hw, reg_addr,
						       reg_val, NULL);
		if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) {
			usleep_range(1000, 2000);
			retry--;
			goto do_retry;
		}
	}

	/* if the AQ access failed, try the old-fashioned way */
	if (status || use_register)
		wr32(hw, reg_addr, reg_val);
}
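/* Note on the Rx control helpers above: both i40e_read_rx_ctl() and
 * i40e_write_rx_ctl() prefer the firmware-mediated AdminQ access and retry a
 * few times on I40E_AQ_RC_EAGAIN; they fall back to a plain rd32()/wr32()
 * when the firmware API is older than 1.5, on X722, or when the AQ request
 * fails.
 */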
/**
 * i40e_mdio_if_number_selection - MDIO I/F number selection
 * @hw: pointer to the hw struct
 * @set_mdio: use MDIO I/F number specified by mdio_num
 * @mdio_num: MDIO I/F number
 * @cmd: pointer to PHY Register command structure
 **/
static void i40e_mdio_if_number_selection(struct i40e_hw *hw, bool set_mdio,
					  u8 mdio_num,
					  struct i40e_aqc_phy_register_access *cmd)
{
	if (set_mdio && cmd->phy_interface == I40E_AQ_PHY_REG_ACCESS_EXTERNAL) {
		if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_EXTENDED)
			cmd->cmd_flags |=
				I40E_AQ_PHY_REG_ACCESS_SET_MDIO_IF_NUMBER |
				((mdio_num <<
				I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_SHIFT) &
				I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_MASK);
		else
			i40e_debug(hw, I40E_DEBUG_PHY,
				   "MDIO I/F number selection not supported by current FW version.\n");
	}
}

/**
 * i40e_aq_set_phy_register_ext
 * @hw: pointer to the hw struct
 * @phy_select: select which phy should be accessed
 * @dev_addr: PHY device address
 * @page_change: flag to indicate if phy page should be updated
 * @set_mdio: use MDIO I/F number specified by mdio_num
 * @mdio_num: MDIO I/F number
 * @reg_addr: PHY register address
 * @reg_val: new register value
 * @cmd_details: pointer to command details structure or NULL
 *
 * Write the external PHY register.
 * NOTE: In most cases the MDIO I/F number should not be changed; that is why
 * you may use the simple wrapper i40e_aq_set_phy_register.
 **/
enum i40e_status_code i40e_aq_set_phy_register_ext(struct i40e_hw *hw,
			     u8 phy_select, u8 dev_addr, bool page_change,
			     bool set_mdio, u8 mdio_num,
			     u32 reg_addr, u32 reg_val,
			     struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_phy_register_access *cmd =
		(struct i40e_aqc_phy_register_access *)&desc.params.raw;
	i40e_status status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_set_phy_register);

	cmd->phy_interface = phy_select;
	cmd->dev_address = dev_addr;
	cmd->reg_address = cpu_to_le32(reg_addr);
	cmd->reg_value = cpu_to_le32(reg_val);

	i40e_mdio_if_number_selection(hw, set_mdio, mdio_num, cmd);

	if (!page_change)
		cmd->cmd_flags = I40E_AQ_PHY_REG_ACCESS_DONT_CHANGE_QSFP_PAGE;

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	return status;
}
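/* Callers that do not need to override the MDIO I/F number normally go
 * through the i40e_aq_set_phy_register() wrapper instead, as the LED helpers
 * above do, e.g. (illustrative):
 *
 *	status = i40e_aq_set_phy_register(hw,
 *					  I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
 *					  I40E_PHY_COM_REG_PAGE, true,
 *					  I40E_PHY_LED_PROV_REG_1,
 *					  reg_val, NULL);
 */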
/**
 * i40e_aq_get_phy_register_ext
 * @hw: pointer to the hw struct
 * @phy_select: select which phy should be accessed
 * @dev_addr: PHY device address
 * @page_change: flag to indicate if phy page should be updated
 * @set_mdio: use MDIO I/F number specified by mdio_num
 * @mdio_num: MDIO I/F number
 * @reg_addr: PHY register address
 * @reg_val: read register value
 * @cmd_details: pointer to command details structure or NULL
 *
 * Read the external PHY register.
 * NOTE: In most cases the MDIO I/F number should not be changed; that is why
 * you may use the simple wrapper i40e_aq_get_phy_register.
 **/
enum i40e_status_code i40e_aq_get_phy_register_ext(struct i40e_hw *hw,
			     u8 phy_select, u8 dev_addr, bool page_change,
			     bool set_mdio, u8 mdio_num,
			     u32 reg_addr, u32 *reg_val,
			     struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_phy_register_access *cmd =
		(struct i40e_aqc_phy_register_access *)&desc.params.raw;
	i40e_status status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_get_phy_register);

	cmd->phy_interface = phy_select;
	cmd->dev_address = dev_addr;
	cmd->reg_address = cpu_to_le32(reg_addr);

	i40e_mdio_if_number_selection(hw, set_mdio, mdio_num, cmd);

	if (!page_change)
		cmd->cmd_flags = I40E_AQ_PHY_REG_ACCESS_DONT_CHANGE_QSFP_PAGE;

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
	if (!status)
		*reg_val = le32_to_cpu(cmd->reg_value);

	return status;
}

/**
 * i40e_aq_write_ddp - Write dynamic device personalization (ddp)
 * @hw: pointer to the hw struct
 * @buff: command buffer (size in bytes = buff_size)
 * @buff_size: buffer size in bytes
 * @track_id: package tracking id
 * @error_offset: returns error offset
 * @error_info: returns error information
 * @cmd_details: pointer to command details structure or NULL
 **/
enum
i40e_status_code i40e_aq_write_ddp(struct i40e_hw *hw, void *buff,
				   u16 buff_size, u32 track_id,
				   u32 *error_offset, u32 *error_info,
				   struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_write_personalization_profile *cmd =
		(struct i40e_aqc_write_personalization_profile *)
		&desc.params.raw;
	struct i40e_aqc_write_ddp_resp *resp;
	i40e_status status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_write_personalization_profile);

	desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD);
	if (buff_size > I40E_AQ_LARGE_BUF)
		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);

	desc.datalen = cpu_to_le16(buff_size);

	cmd->profile_track_id = cpu_to_le32(track_id);

	status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
	if (!status) {
		resp = (struct i40e_aqc_write_ddp_resp *)&desc.params.raw;
		if (error_offset)
			*error_offset = le32_to_cpu(resp->error_offset);
		if (error_info)
			*error_info = le32_to_cpu(resp->error_info);
	}

	return status;
}
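/* AdminQ buffer flag convention used by the DDP commands above and below:
 * I40E_AQ_FLAG_BUF marks an indirect command with an external buffer,
 * I40E_AQ_FLAG_RD marks that buffer as carrying data to the device, and
 * I40E_AQ_FLAG_LB is required once the buffer exceeds I40E_AQ_LARGE_BUF.
 */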
/**
 * i40e_aq_get_ddp_list - Read dynamic device personalization (ddp)
 * @hw: pointer to the hw struct
 * @buff: command buffer (size in bytes = buff_size)
 * @buff_size: buffer size in bytes
 * @flags: AdminQ command flags
 * @cmd_details: pointer to command details structure or NULL
 **/
enum
i40e_status_code i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
				      u16 buff_size, u8 flags,
				      struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_get_applied_profiles *cmd =
		(struct i40e_aqc_get_applied_profiles *)&desc.params.raw;
	i40e_status status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_get_personalization_profile_list);

	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
	if (buff_size > I40E_AQ_LARGE_BUF)
		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
	desc.datalen = cpu_to_le16(buff_size);

	cmd->flags = flags;

	status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);

	return status;
}

/**
 * i40e_find_segment_in_package
 * @segment_type: the segment type to search for (e.g., SEGMENT_TYPE_I40E)
 * @pkg_hdr: pointer to the package header to be searched
 *
 * This function searches a package file for a particular segment type. On
 * success it returns a pointer to the segment header, otherwise it will
 * return NULL.
 **/
struct i40e_generic_seg_header *
i40e_find_segment_in_package(u32 segment_type,
			     struct i40e_package_header *pkg_hdr)
{
	struct i40e_generic_seg_header *segment;
	u32 i;

	/* Search all package segments for the requested segment type */
	for (i = 0; i < pkg_hdr->segment_count; i++) {
		segment =
			(struct i40e_generic_seg_header *)((u8 *)pkg_hdr +
			 pkg_hdr->segment_offset[i]);

		if (segment->type == segment_type)
			return segment;
	}

	return NULL;
}

/* Get section table in profile */
#define I40E_SECTION_TABLE(profile, sec_tbl)				\
	do {								\
		struct i40e_profile_segment *p = (profile);		\
		u32 count;						\
		u32 *nvm;						\
		count = p->device_table_count;				\
		nvm = (u32 *)&p->device_table[count];			\
		sec_tbl = (struct i40e_section_table *)&nvm[nvm[0] + 1]; \
	} while (0)

/* Get section header in profile */
#define I40E_SECTION_HEADER(profile, offset)				\
	(struct i40e_profile_section_header *)((u8 *)(profile) + (offset))
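/* Layout assumed by I40E_SECTION_TABLE above (as produced by the DDP package
 * format): the profile segment starts with its header and device table; the
 * device table is followed by a u32 word count plus that many NVM words, and
 * the section table begins immediately after them.
 */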
/**
 * i40e_find_section_in_profile
 * @section_type: the section type to search for (e.g., SECTION_TYPE_NOTE)
 * @profile: pointer to the i40e segment header to be searched
 *
 * This function searches the i40e segment for a particular section type. On
 * success it returns a pointer to the section header, otherwise it will
 * return NULL.
 **/
struct i40e_profile_section_header *
i40e_find_section_in_profile(u32 section_type,
			     struct i40e_profile_segment *profile)
{
	struct i40e_profile_section_header *sec;
	struct i40e_section_table *sec_tbl;
	u32 sec_off;
	u32 i;

	if (profile->header.type != SEGMENT_TYPE_I40E)
		return NULL;

	I40E_SECTION_TABLE(profile, sec_tbl);

	for (i = 0; i < sec_tbl->section_count; i++) {
		sec_off = sec_tbl->section_offset[i];
		sec = I40E_SECTION_HEADER(profile, sec_off);
		if (sec->section.type == section_type)
			return sec;
	}

	return NULL;
}

/**
 * i40e_ddp_exec_aq_section - Execute generic AQ for DDP
 * @hw: pointer to the hw struct
 * @aq: command buffer containing all data to execute AQ
 **/
static enum
i40e_status_code i40e_ddp_exec_aq_section(struct i40e_hw *hw,
					  struct i40e_profile_aq_section *aq)
{
	i40e_status status;
	struct i40e_aq_desc desc;
	u8 *msg = NULL;
	u16 msglen;

	i40e_fill_default_direct_cmd_desc(&desc, aq->opcode);
	desc.flags |= cpu_to_le16(aq->flags);
	memcpy(desc.params.raw, aq->param, sizeof(desc.params.raw));

	msglen = aq->datalen;
	if (msglen) {
		desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF |
						I40E_AQ_FLAG_RD));
		if (msglen > I40E_AQ_LARGE_BUF)
			desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
		desc.datalen = cpu_to_le16(msglen);
		msg = &aq->data[0];
	}

	status = i40e_asq_send_command(hw, &desc, msg, msglen, NULL);

	if (status) {
		i40e_debug(hw, I40E_DEBUG_PACKAGE,
			   "unable to exec DDP AQ opcode %u, error %d\n",
			   aq->opcode, status);
		return status;
	}

	/* copy returned desc to aq_buf */
	memcpy(aq->param, desc.params.raw, sizeof(desc.params.raw));

	return 0;
}
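/* A SECTION_TYPE_AQ section body is an i40e_profile_aq_section: the opcode
 * and descriptor flags, the 16 bytes of direct parameters, and optionally
 * datalen bytes of indirect buffer. After the command completes, the returned
 * direct parameters are copied back into the section, as done above.
 */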
/**
 * i40e_validate_profile
 * @hw: pointer to the hardware structure
 * @profile: pointer to the profile segment of the package to be validated
 * @track_id: package tracking id
 * @rollback: flag if the profile is for rollback.
 *
 * Validates supported devices and profile's sections.
 */
static enum i40e_status_code
i40e_validate_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
		      u32 track_id, bool rollback)
{
	struct i40e_profile_section_header *sec = NULL;
	i40e_status status = 0;
	struct i40e_section_table *sec_tbl;
	u32 vendor_dev_id;
	u32 dev_cnt;
	u32 sec_off;
	u32 i;

	if (track_id == I40E_DDP_TRACKID_INVALID) {
		i40e_debug(hw, I40E_DEBUG_PACKAGE, "Invalid track_id\n");
		return I40E_NOT_SUPPORTED;
	}

	dev_cnt = profile->device_table_count;
	for (i = 0; i < dev_cnt; i++) {
		vendor_dev_id = profile->device_table[i].vendor_dev_id;
		if ((vendor_dev_id >> 16) == PCI_VENDOR_ID_INTEL &&
		    hw->device_id == (vendor_dev_id & 0xFFFF))
			break;
	}
	if (dev_cnt && i == dev_cnt) {
		i40e_debug(hw, I40E_DEBUG_PACKAGE,
			   "Device doesn't support DDP\n");
		return I40E_ERR_DEVICE_NOT_SUPPORTED;
	}

	I40E_SECTION_TABLE(profile, sec_tbl);

	/* Validate section types */
	for (i = 0; i < sec_tbl->section_count; i++) {
		sec_off = sec_tbl->section_offset[i];
		sec = I40E_SECTION_HEADER(profile, sec_off);
		if (rollback) {
			if (sec->section.type == SECTION_TYPE_MMIO ||
			    sec->section.type == SECTION_TYPE_AQ ||
			    sec->section.type == SECTION_TYPE_RB_AQ) {
				i40e_debug(hw, I40E_DEBUG_PACKAGE,
					   "Not a roll-back package\n");
				return I40E_NOT_SUPPORTED;
			}
		} else {
			if (sec->section.type == SECTION_TYPE_RB_AQ ||
			    sec->section.type == SECTION_TYPE_RB_MMIO) {
				i40e_debug(hw, I40E_DEBUG_PACKAGE,
					   "Not an original package\n");
				return I40E_NOT_SUPPORTED;
			}
		}
	}

	return status;
}
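/* Validation rules enforced above: a profile is only accepted if the device
 * table is empty or lists this device; an original (download) package must
 * not contain roll-back (SECTION_TYPE_RB_*) sections, while a roll-back
 * package is rejected if it contains MMIO, AQ or RB_AQ sections.
 */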
/**
 * i40e_write_profile
 * @hw: pointer to the hardware structure
 * @profile: pointer to the profile segment of the package to be downloaded
 * @track_id: package tracking id
 *
 * Handles the download of a complete package.
 */
enum i40e_status_code
i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
		   u32 track_id)
{
	i40e_status status = 0;
	struct i40e_section_table *sec_tbl;
	struct i40e_profile_section_header *sec = NULL;
	struct i40e_profile_aq_section *ddp_aq;
	u32 section_size = 0;
	u32 offset = 0, info = 0;
	u32 sec_off;
	u32 i;

	status = i40e_validate_profile(hw, profile, track_id, false);
	if (status)
		return status;

	I40E_SECTION_TABLE(profile, sec_tbl);

	for (i = 0; i < sec_tbl->section_count; i++) {
		sec_off = sec_tbl->section_offset[i];
		sec = I40E_SECTION_HEADER(profile, sec_off);
		/* Process generic admin command */
		if (sec->section.type == SECTION_TYPE_AQ) {
			ddp_aq = (struct i40e_profile_aq_section *)&sec[1];
			status = i40e_ddp_exec_aq_section(hw, ddp_aq);
			if (status) {
				i40e_debug(hw, I40E_DEBUG_PACKAGE,
					   "Failed to execute aq: section %d, opcode %u\n",
					   i, ddp_aq->opcode);
				break;
			}
			sec->section.type = SECTION_TYPE_RB_AQ;
		}

		/* Skip any non-mmio sections */
		if (sec->section.type != SECTION_TYPE_MMIO)
			continue;

		section_size = sec->section.size +
			sizeof(struct i40e_profile_section_header);

		/* Write MMIO section */
		status = i40e_aq_write_ddp(hw, (void *)sec, (u16)section_size,
					   track_id, &offset, &info, NULL);
		if (status) {
			i40e_debug(hw, I40E_DEBUG_PACKAGE,
				   "Failed to write profile: section %d, offset %d, info %d\n",
				   i, offset, info);
			break;
		}
	}
	return status;
}

/**
 * i40e_rollback_profile
 * @hw: pointer to the hardware structure
 * @profile: pointer to the profile segment of the package to be removed
 * @track_id: package tracking id
 *
 * Rolls back previously loaded package.
 */
enum i40e_status_code
i40e_rollback_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
		      u32 track_id)
{
	struct i40e_profile_section_header *sec = NULL;
	i40e_status status = 0;
	struct i40e_section_table *sec_tbl;
	u32 offset = 0, info = 0;
	u32 section_size = 0;
	u32 sec_off;
	int i;

	status = i40e_validate_profile(hw, profile, track_id, true);
	if (status)
		return status;

	I40E_SECTION_TABLE(profile, sec_tbl);

	/* For rollback write sections in reverse */
	for (i = sec_tbl->section_count - 1; i >= 0; i--) {
		sec_off = sec_tbl->section_offset[i];
		sec = I40E_SECTION_HEADER(profile, sec_off);

		/* Skip any non-rollback sections */
		if (sec->section.type != SECTION_TYPE_RB_MMIO)
			continue;

		section_size = sec->section.size +
			sizeof(struct i40e_profile_section_header);

		/* Write roll-back MMIO section */
		status = i40e_aq_write_ddp(hw, (void *)sec, (u16)section_size,
					   track_id, &offset, &info, NULL);
		if (status) {
			i40e_debug(hw, I40E_DEBUG_PACKAGE,
				   "Failed to write profile: section %d, offset %d, info %d\n",
				   i, offset, info);
			break;
		}
	}
	return status;
}
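/* Download and roll-back are intentionally asymmetric: i40e_write_profile()
 * executes AQ sections (marking them SECTION_TYPE_RB_AQ) and writes MMIO
 * sections in order, while i40e_rollback_profile() walks the section table in
 * reverse and only writes SECTION_TYPE_RB_MMIO sections.
 */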
/**
 * i40e_add_pinfo_to_list
 * @hw: pointer to the hardware structure
 * @profile: pointer to the profile segment of the package
 * @profile_info_sec: buffer for information section
 * @track_id: package tracking id
 *
 * Register a profile to the list of loaded profiles.
 */
enum i40e_status_code
i40e_add_pinfo_to_list(struct i40e_hw *hw,
		       struct i40e_profile_segment *profile,
		       u8 *profile_info_sec, u32 track_id)
{
	i40e_status status = 0;
	struct i40e_profile_section_header *sec = NULL;
	struct i40e_profile_info *pinfo;
	u32 offset = 0, info = 0;

	sec = (struct i40e_profile_section_header *)profile_info_sec;
	sec->tbl_size = 1;
	sec->data_end = sizeof(struct i40e_profile_section_header) +
			sizeof(struct i40e_profile_info);
	sec->section.type = SECTION_TYPE_INFO;
	sec->section.offset = sizeof(struct i40e_profile_section_header);
	sec->section.size = sizeof(struct i40e_profile_info);
	pinfo = (struct i40e_profile_info *)(profile_info_sec +
					     sec->section.offset);
	pinfo->track_id = track_id;
	pinfo->version = profile->version;
	pinfo->op = I40E_DDP_ADD_TRACKID;
	memcpy(pinfo->name, profile->name, I40E_DDP_NAME_SIZE);

	status = i40e_aq_write_ddp(hw, (void *)sec, sec->data_end,
				   track_id, &offset, &info, NULL);

	return status;
}

/**
 * i40e_aq_add_cloud_filters
 * @hw: pointer to the hardware structure
 * @seid: VSI seid to add cloud filters to
 * @filters: Buffer which contains the filters to be added
 * @filter_count: number of filters contained in the buffer
 *
 * Set the cloud filters for a given VSI. The contents of the
 * i40e_aqc_cloud_filters_element_data are filled in by the caller
 * of the function.
 *
 **/
enum i40e_status_code
i40e_aq_add_cloud_filters(struct i40e_hw *hw, u16 seid,
			  struct i40e_aqc_cloud_filters_element_data *filters,
			  u8 filter_count)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_add_remove_cloud_filters *cmd =
		(struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
	enum i40e_status_code status;
	u16 buff_len;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_add_cloud_filters);

	buff_len = filter_count * sizeof(*filters);
	desc.datalen = cpu_to_le16(buff_len);
	desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
	cmd->num_filters = filter_count;
	cmd->seid = cpu_to_le16(seid);

	status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);

	return status;
}
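/* Minimal usage sketch (illustrative only; vsi_seid and vni are placeholder
 * names, and only fields referenced in this file are shown):
 *
 *	struct i40e_aqc_cloud_filters_element_data filter = {};
 *
 *	filter.flags = cpu_to_le16(I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE <<
 *				   I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT);
 *	filter.tenant_id = cpu_to_le32(vni);
 *	status = i40e_aq_add_cloud_filters(hw, vsi_seid, &filter, 1);
 */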
/**
 * i40e_aq_add_cloud_filters_bb
 * @hw: pointer to the hardware structure
 * @seid: VSI seid to add cloud filters to
 * @filters: Buffer which contains the filters in big buffer to be added
 * @filter_count: number of filters contained in the buffer
 *
 * Set the big buffer cloud filters for a given VSI. The contents of the
 * i40e_aqc_cloud_filters_element_bb are filled in by the caller of the
 * function.
 *
 **/
enum i40e_status_code
i40e_aq_add_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
			     struct i40e_aqc_cloud_filters_element_bb *filters,
			     u8 filter_count)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_add_remove_cloud_filters *cmd =
		(struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
	i40e_status status;
	u16 buff_len;
	int i;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_add_cloud_filters);

	buff_len = filter_count * sizeof(*filters);
	desc.datalen = cpu_to_le16(buff_len);
	desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
	cmd->num_filters = filter_count;
	cmd->seid = cpu_to_le16(seid);
	cmd->big_buffer_flag = I40E_AQC_ADD_CLOUD_CMD_BB;

	for (i = 0; i < filter_count; i++) {
		u16 tnl_type;
		u32 ti;

		tnl_type = (le16_to_cpu(filters[i].element.flags) &
			    I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >>
			   I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT;

		/* Due to hardware eccentricities, the VNI for Geneve is shifted
		 * one more byte further than normally used for Tenant ID in
		 * other tunnel types.
		 */
		if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) {
			ti = le32_to_cpu(filters[i].element.tenant_id);
			filters[i].element.tenant_id = cpu_to_le32(ti << 8);
		}
	}

	status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);

	return status;
}

/**
 * i40e_aq_rem_cloud_filters
 * @hw: pointer to the hardware structure
 * @seid: VSI seid to remove cloud filters from
 * @filters: Buffer which contains the filters to be removed
 * @filter_count: number of filters contained in the buffer
 *
 * Remove the cloud filters for a given VSI. The contents of the
 * i40e_aqc_cloud_filters_element_data are filled in by the caller
 * of the function.
 *
 **/
enum i40e_status_code
i40e_aq_rem_cloud_filters(struct i40e_hw *hw, u16 seid,
			  struct i40e_aqc_cloud_filters_element_data *filters,
			  u8 filter_count)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_add_remove_cloud_filters *cmd =
		(struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
	enum i40e_status_code status;
	u16 buff_len;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_remove_cloud_filters);

	buff_len = filter_count * sizeof(*filters);
	desc.datalen = cpu_to_le16(buff_len);
	desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
	cmd->num_filters = filter_count;
	cmd->seid = cpu_to_le16(seid);

	status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);

	return status;
}
/**
 * i40e_aq_rem_cloud_filters_bb
 * @hw: pointer to the hardware structure
 * @seid: VSI seid to remove cloud filters from
 * @filters: Buffer which contains the filters in big buffer to be removed
 * @filter_count: number of filters contained in the buffer
 *
 * Remove the big buffer cloud filters for a given VSI. The contents of the
 * i40e_aqc_cloud_filters_element_bb are filled in by the caller of the
 * function.
 *
 **/
enum i40e_status_code
i40e_aq_rem_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
			     struct i40e_aqc_cloud_filters_element_bb *filters,
			     u8 filter_count)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_add_remove_cloud_filters *cmd =
		(struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
	i40e_status status;
	u16 buff_len;
	int i;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_remove_cloud_filters);

	buff_len = filter_count * sizeof(*filters);
	desc.datalen = cpu_to_le16(buff_len);
	desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
	cmd->num_filters = filter_count;
	cmd->seid = cpu_to_le16(seid);
	cmd->big_buffer_flag = I40E_AQC_ADD_CLOUD_CMD_BB;

	for (i = 0; i < filter_count; i++) {
		u16 tnl_type;
		u32 ti;

		tnl_type = (le16_to_cpu(filters[i].element.flags) &
			    I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >>
			   I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT;

		/* Due to hardware eccentricities, the VNI for Geneve is shifted
		 * one more byte further than normally used for Tenant ID in
		 * other tunnel types.
		 */
		if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) {
			ti = le32_to_cpu(filters[i].element.tenant_id);
			filters[i].element.tenant_id = cpu_to_le32(ti << 8);
		}
	}

	status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);

	return status;
}