1 // SPDX-License-Identifier: GPL-2.0 2 /* Copyright(c) 2013 - 2021 Intel Corporation. */ 3 4 #include "i40e.h" 5 #include "i40e_type.h" 6 #include "i40e_adminq.h" 7 #include "i40e_prototype.h" 8 #include <linux/avf/virtchnl.h> 9 10 /** 11 * i40e_set_mac_type - Sets MAC type 12 * @hw: pointer to the HW structure 13 * 14 * This function sets the mac type of the adapter based on the 15 * vendor ID and device ID stored in the hw structure. 16 **/ 17 i40e_status i40e_set_mac_type(struct i40e_hw *hw) 18 { 19 i40e_status status = 0; 20 21 if (hw->vendor_id == PCI_VENDOR_ID_INTEL) { 22 switch (hw->device_id) { 23 case I40E_DEV_ID_SFP_XL710: 24 case I40E_DEV_ID_QEMU: 25 case I40E_DEV_ID_KX_B: 26 case I40E_DEV_ID_KX_C: 27 case I40E_DEV_ID_QSFP_A: 28 case I40E_DEV_ID_QSFP_B: 29 case I40E_DEV_ID_QSFP_C: 30 case I40E_DEV_ID_5G_BASE_T_BC: 31 case I40E_DEV_ID_10G_BASE_T: 32 case I40E_DEV_ID_10G_BASE_T4: 33 case I40E_DEV_ID_10G_BASE_T_BC: 34 case I40E_DEV_ID_10G_B: 35 case I40E_DEV_ID_10G_SFP: 36 case I40E_DEV_ID_20G_KR2: 37 case I40E_DEV_ID_20G_KR2_A: 38 case I40E_DEV_ID_25G_B: 39 case I40E_DEV_ID_25G_SFP28: 40 case I40E_DEV_ID_X710_N3000: 41 case I40E_DEV_ID_XXV710_N3000: 42 hw->mac.type = I40E_MAC_XL710; 43 break; 44 case I40E_DEV_ID_KX_X722: 45 case I40E_DEV_ID_QSFP_X722: 46 case I40E_DEV_ID_SFP_X722: 47 case I40E_DEV_ID_1G_BASE_T_X722: 48 case I40E_DEV_ID_10G_BASE_T_X722: 49 case I40E_DEV_ID_SFP_I_X722: 50 hw->mac.type = I40E_MAC_X722; 51 break; 52 default: 53 hw->mac.type = I40E_MAC_GENERIC; 54 break; 55 } 56 } else { 57 status = I40E_ERR_DEVICE_NOT_SUPPORTED; 58 } 59 60 hw_dbg(hw, "i40e_set_mac_type found mac: %d, returns: %d\n", 61 hw->mac.type, status); 62 return status; 63 } 64 65 /** 66 * i40e_aq_str - convert AQ err code to a string 67 * @hw: pointer to the HW structure 68 * @aq_err: the AQ error code to convert 69 **/ 70 const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err) 71 { 72 switch (aq_err) { 73 case I40E_AQ_RC_OK: 74 return "OK"; 75 case I40E_AQ_RC_EPERM: 76 return "I40E_AQ_RC_EPERM"; 77 case I40E_AQ_RC_ENOENT: 78 return "I40E_AQ_RC_ENOENT"; 79 case I40E_AQ_RC_ESRCH: 80 return "I40E_AQ_RC_ESRCH"; 81 case I40E_AQ_RC_EINTR: 82 return "I40E_AQ_RC_EINTR"; 83 case I40E_AQ_RC_EIO: 84 return "I40E_AQ_RC_EIO"; 85 case I40E_AQ_RC_ENXIO: 86 return "I40E_AQ_RC_ENXIO"; 87 case I40E_AQ_RC_E2BIG: 88 return "I40E_AQ_RC_E2BIG"; 89 case I40E_AQ_RC_EAGAIN: 90 return "I40E_AQ_RC_EAGAIN"; 91 case I40E_AQ_RC_ENOMEM: 92 return "I40E_AQ_RC_ENOMEM"; 93 case I40E_AQ_RC_EACCES: 94 return "I40E_AQ_RC_EACCES"; 95 case I40E_AQ_RC_EFAULT: 96 return "I40E_AQ_RC_EFAULT"; 97 case I40E_AQ_RC_EBUSY: 98 return "I40E_AQ_RC_EBUSY"; 99 case I40E_AQ_RC_EEXIST: 100 return "I40E_AQ_RC_EEXIST"; 101 case I40E_AQ_RC_EINVAL: 102 return "I40E_AQ_RC_EINVAL"; 103 case I40E_AQ_RC_ENOTTY: 104 return "I40E_AQ_RC_ENOTTY"; 105 case I40E_AQ_RC_ENOSPC: 106 return "I40E_AQ_RC_ENOSPC"; 107 case I40E_AQ_RC_ENOSYS: 108 return "I40E_AQ_RC_ENOSYS"; 109 case I40E_AQ_RC_ERANGE: 110 return "I40E_AQ_RC_ERANGE"; 111 case I40E_AQ_RC_EFLUSHED: 112 return "I40E_AQ_RC_EFLUSHED"; 113 case I40E_AQ_RC_BAD_ADDR: 114 return "I40E_AQ_RC_BAD_ADDR"; 115 case I40E_AQ_RC_EMODE: 116 return "I40E_AQ_RC_EMODE"; 117 case I40E_AQ_RC_EFBIG: 118 return "I40E_AQ_RC_EFBIG"; 119 } 120 121 snprintf(hw->err_str, sizeof(hw->err_str), "%d", aq_err); 122 return hw->err_str; 123 } 124 125 /** 126 * i40e_stat_str - convert status err code to a string 127 * @hw: pointer to the HW structure 128 * @stat_err: the status error code to convert 129 **/ 130 const 
char *i40e_stat_str(struct i40e_hw *hw, i40e_status stat_err) 131 { 132 switch (stat_err) { 133 case 0: 134 return "OK"; 135 case I40E_ERR_NVM: 136 return "I40E_ERR_NVM"; 137 case I40E_ERR_NVM_CHECKSUM: 138 return "I40E_ERR_NVM_CHECKSUM"; 139 case I40E_ERR_PHY: 140 return "I40E_ERR_PHY"; 141 case I40E_ERR_CONFIG: 142 return "I40E_ERR_CONFIG"; 143 case I40E_ERR_PARAM: 144 return "I40E_ERR_PARAM"; 145 case I40E_ERR_MAC_TYPE: 146 return "I40E_ERR_MAC_TYPE"; 147 case I40E_ERR_UNKNOWN_PHY: 148 return "I40E_ERR_UNKNOWN_PHY"; 149 case I40E_ERR_LINK_SETUP: 150 return "I40E_ERR_LINK_SETUP"; 151 case I40E_ERR_ADAPTER_STOPPED: 152 return "I40E_ERR_ADAPTER_STOPPED"; 153 case I40E_ERR_INVALID_MAC_ADDR: 154 return "I40E_ERR_INVALID_MAC_ADDR"; 155 case I40E_ERR_DEVICE_NOT_SUPPORTED: 156 return "I40E_ERR_DEVICE_NOT_SUPPORTED"; 157 case I40E_ERR_MASTER_REQUESTS_PENDING: 158 return "I40E_ERR_MASTER_REQUESTS_PENDING"; 159 case I40E_ERR_INVALID_LINK_SETTINGS: 160 return "I40E_ERR_INVALID_LINK_SETTINGS"; 161 case I40E_ERR_AUTONEG_NOT_COMPLETE: 162 return "I40E_ERR_AUTONEG_NOT_COMPLETE"; 163 case I40E_ERR_RESET_FAILED: 164 return "I40E_ERR_RESET_FAILED"; 165 case I40E_ERR_SWFW_SYNC: 166 return "I40E_ERR_SWFW_SYNC"; 167 case I40E_ERR_NO_AVAILABLE_VSI: 168 return "I40E_ERR_NO_AVAILABLE_VSI"; 169 case I40E_ERR_NO_MEMORY: 170 return "I40E_ERR_NO_MEMORY"; 171 case I40E_ERR_BAD_PTR: 172 return "I40E_ERR_BAD_PTR"; 173 case I40E_ERR_RING_FULL: 174 return "I40E_ERR_RING_FULL"; 175 case I40E_ERR_INVALID_PD_ID: 176 return "I40E_ERR_INVALID_PD_ID"; 177 case I40E_ERR_INVALID_QP_ID: 178 return "I40E_ERR_INVALID_QP_ID"; 179 case I40E_ERR_INVALID_CQ_ID: 180 return "I40E_ERR_INVALID_CQ_ID"; 181 case I40E_ERR_INVALID_CEQ_ID: 182 return "I40E_ERR_INVALID_CEQ_ID"; 183 case I40E_ERR_INVALID_AEQ_ID: 184 return "I40E_ERR_INVALID_AEQ_ID"; 185 case I40E_ERR_INVALID_SIZE: 186 return "I40E_ERR_INVALID_SIZE"; 187 case I40E_ERR_INVALID_ARP_INDEX: 188 return "I40E_ERR_INVALID_ARP_INDEX"; 189 case I40E_ERR_INVALID_FPM_FUNC_ID: 190 return "I40E_ERR_INVALID_FPM_FUNC_ID"; 191 case I40E_ERR_QP_INVALID_MSG_SIZE: 192 return "I40E_ERR_QP_INVALID_MSG_SIZE"; 193 case I40E_ERR_QP_TOOMANY_WRS_POSTED: 194 return "I40E_ERR_QP_TOOMANY_WRS_POSTED"; 195 case I40E_ERR_INVALID_FRAG_COUNT: 196 return "I40E_ERR_INVALID_FRAG_COUNT"; 197 case I40E_ERR_QUEUE_EMPTY: 198 return "I40E_ERR_QUEUE_EMPTY"; 199 case I40E_ERR_INVALID_ALIGNMENT: 200 return "I40E_ERR_INVALID_ALIGNMENT"; 201 case I40E_ERR_FLUSHED_QUEUE: 202 return "I40E_ERR_FLUSHED_QUEUE"; 203 case I40E_ERR_INVALID_PUSH_PAGE_INDEX: 204 return "I40E_ERR_INVALID_PUSH_PAGE_INDEX"; 205 case I40E_ERR_INVALID_IMM_DATA_SIZE: 206 return "I40E_ERR_INVALID_IMM_DATA_SIZE"; 207 case I40E_ERR_TIMEOUT: 208 return "I40E_ERR_TIMEOUT"; 209 case I40E_ERR_OPCODE_MISMATCH: 210 return "I40E_ERR_OPCODE_MISMATCH"; 211 case I40E_ERR_CQP_COMPL_ERROR: 212 return "I40E_ERR_CQP_COMPL_ERROR"; 213 case I40E_ERR_INVALID_VF_ID: 214 return "I40E_ERR_INVALID_VF_ID"; 215 case I40E_ERR_INVALID_HMCFN_ID: 216 return "I40E_ERR_INVALID_HMCFN_ID"; 217 case I40E_ERR_BACKING_PAGE_ERROR: 218 return "I40E_ERR_BACKING_PAGE_ERROR"; 219 case I40E_ERR_NO_PBLCHUNKS_AVAILABLE: 220 return "I40E_ERR_NO_PBLCHUNKS_AVAILABLE"; 221 case I40E_ERR_INVALID_PBLE_INDEX: 222 return "I40E_ERR_INVALID_PBLE_INDEX"; 223 case I40E_ERR_INVALID_SD_INDEX: 224 return "I40E_ERR_INVALID_SD_INDEX"; 225 case I40E_ERR_INVALID_PAGE_DESC_INDEX: 226 return "I40E_ERR_INVALID_PAGE_DESC_INDEX"; 227 case I40E_ERR_INVALID_SD_TYPE: 228 return "I40E_ERR_INVALID_SD_TYPE"; 229 case 
I40E_ERR_MEMCPY_FAILED:
		return "I40E_ERR_MEMCPY_FAILED";
	case I40E_ERR_INVALID_HMC_OBJ_INDEX:
		return "I40E_ERR_INVALID_HMC_OBJ_INDEX";
	case I40E_ERR_INVALID_HMC_OBJ_COUNT:
		return "I40E_ERR_INVALID_HMC_OBJ_COUNT";
	case I40E_ERR_INVALID_SRQ_ARM_LIMIT:
		return "I40E_ERR_INVALID_SRQ_ARM_LIMIT";
	case I40E_ERR_SRQ_ENABLED:
		return "I40E_ERR_SRQ_ENABLED";
	case I40E_ERR_ADMIN_QUEUE_ERROR:
		return "I40E_ERR_ADMIN_QUEUE_ERROR";
	case I40E_ERR_ADMIN_QUEUE_TIMEOUT:
		return "I40E_ERR_ADMIN_QUEUE_TIMEOUT";
	case I40E_ERR_BUF_TOO_SHORT:
		return "I40E_ERR_BUF_TOO_SHORT";
	case I40E_ERR_ADMIN_QUEUE_FULL:
		return "I40E_ERR_ADMIN_QUEUE_FULL";
	case I40E_ERR_ADMIN_QUEUE_NO_WORK:
		return "I40E_ERR_ADMIN_QUEUE_NO_WORK";
	case I40E_ERR_BAD_IWARP_CQE:
		return "I40E_ERR_BAD_IWARP_CQE";
	case I40E_ERR_NVM_BLANK_MODE:
		return "I40E_ERR_NVM_BLANK_MODE";
	case I40E_ERR_NOT_IMPLEMENTED:
		return "I40E_ERR_NOT_IMPLEMENTED";
	case I40E_ERR_PE_DOORBELL_NOT_ENABLED:
		return "I40E_ERR_PE_DOORBELL_NOT_ENABLED";
	case I40E_ERR_DIAG_TEST_FAILED:
		return "I40E_ERR_DIAG_TEST_FAILED";
	case I40E_ERR_NOT_READY:
		return "I40E_ERR_NOT_READY";
	case I40E_NOT_SUPPORTED:
		return "I40E_NOT_SUPPORTED";
	case I40E_ERR_FIRMWARE_API_VERSION:
		return "I40E_ERR_FIRMWARE_API_VERSION";
	case I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR:
		return "I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR";
	}

	snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err);
	return hw->err_str;
}

/**
 * i40e_debug_aq
 * @hw: pointer to the hw struct
 * @mask: debug mask
 * @desc: pointer to admin queue descriptor
 * @buffer: pointer to command buffer
 * @buf_len: max length of buffer
 *
 * Dumps debug log about adminq command with descriptor contents.
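 *
 * Note: every dump here is additionally gated on hw->debug_mask, so a
 * caller that wants both the descriptor and the buffer printed needs the
 * corresponding bits set there as well as in @mask, e.g. (illustrative):
 *
 *	hw->debug_mask |= I40E_DEBUG_AQ_DESCRIPTOR | I40E_DEBUG_AQ_DESC_BUFFER;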
 **/
void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
		   void *buffer, u16 buf_len)
{
	struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc;
	u32 effective_mask = hw->debug_mask & mask;
	char prefix[27];
	u16 len;
	u8 *buf = (u8 *)buffer;

	if (!effective_mask || !desc)
		return;

	len = le16_to_cpu(aq_desc->datalen);

	i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
		   "AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
		   le16_to_cpu(aq_desc->opcode),
		   le16_to_cpu(aq_desc->flags),
		   le16_to_cpu(aq_desc->datalen),
		   le16_to_cpu(aq_desc->retval));
	i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
		   "\tcookie (h,l) 0x%08X 0x%08X\n",
		   le32_to_cpu(aq_desc->cookie_high),
		   le32_to_cpu(aq_desc->cookie_low));
	i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
		   "\tparam (0,1) 0x%08X 0x%08X\n",
		   le32_to_cpu(aq_desc->params.internal.param0),
		   le32_to_cpu(aq_desc->params.internal.param1));
	i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
		   "\taddr (h,l) 0x%08X 0x%08X\n",
		   le32_to_cpu(aq_desc->params.external.addr_high),
		   le32_to_cpu(aq_desc->params.external.addr_low));

	if (buffer && buf_len != 0 && len != 0 &&
	    (effective_mask & I40E_DEBUG_AQ_DESC_BUFFER)) {
		i40e_debug(hw, mask, "AQ CMD Buffer:\n");
		if (buf_len < len)
			len = buf_len;

		snprintf(prefix, sizeof(prefix),
			 "i40e %02x:%02x.%x: \t0x",
			 hw->bus.bus_id,
			 hw->bus.device,
			 hw->bus.func);

		print_hex_dump(KERN_INFO, prefix, DUMP_PREFIX_OFFSET,
			       16, 1, buf, len, false);
	}
}

/**
 * i40e_check_asq_alive
 * @hw: pointer to the hw struct
 *
 * Returns true if Queue is enabled else false.
 **/
bool i40e_check_asq_alive(struct i40e_hw *hw)
{
	if (hw->aq.asq.len)
		return !!(rd32(hw, hw->aq.asq.len) &
			  I40E_PF_ATQLEN_ATQENABLE_MASK);
	else
		return false;
}

/**
 * i40e_aq_queue_shutdown
 * @hw: pointer to the hw struct
 * @unloading: is the driver unloading itself
 *
 * Tell the Firmware that we're shutting down the AdminQ and whether
 * or not the driver is unloading as well.
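 *
 * Illustrative call from a teardown path (the actual call site is in the
 * admin queue shutdown code, outside this file):
 *
 *	i40e_aq_queue_shutdown(hw, true);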
355 **/ 356 i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw, 357 bool unloading) 358 { 359 struct i40e_aq_desc desc; 360 struct i40e_aqc_queue_shutdown *cmd = 361 (struct i40e_aqc_queue_shutdown *)&desc.params.raw; 362 i40e_status status; 363 364 i40e_fill_default_direct_cmd_desc(&desc, 365 i40e_aqc_opc_queue_shutdown); 366 367 if (unloading) 368 cmd->driver_unloading = cpu_to_le32(I40E_AQ_DRIVER_UNLOADING); 369 status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL); 370 371 return status; 372 } 373 374 /** 375 * i40e_aq_get_set_rss_lut 376 * @hw: pointer to the hardware structure 377 * @vsi_id: vsi fw index 378 * @pf_lut: for PF table set true, for VSI table set false 379 * @lut: pointer to the lut buffer provided by the caller 380 * @lut_size: size of the lut buffer 381 * @set: set true to set the table, false to get the table 382 * 383 * Internal function to get or set RSS look up table 384 **/ 385 static i40e_status i40e_aq_get_set_rss_lut(struct i40e_hw *hw, 386 u16 vsi_id, bool pf_lut, 387 u8 *lut, u16 lut_size, 388 bool set) 389 { 390 i40e_status status; 391 struct i40e_aq_desc desc; 392 struct i40e_aqc_get_set_rss_lut *cmd_resp = 393 (struct i40e_aqc_get_set_rss_lut *)&desc.params.raw; 394 395 if (set) 396 i40e_fill_default_direct_cmd_desc(&desc, 397 i40e_aqc_opc_set_rss_lut); 398 else 399 i40e_fill_default_direct_cmd_desc(&desc, 400 i40e_aqc_opc_get_rss_lut); 401 402 /* Indirect command */ 403 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 404 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD); 405 406 cmd_resp->vsi_id = 407 cpu_to_le16((u16)((vsi_id << 408 I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT) & 409 I40E_AQC_SET_RSS_LUT_VSI_ID_MASK)); 410 cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_LUT_VSI_VALID); 411 412 if (pf_lut) 413 cmd_resp->flags |= cpu_to_le16((u16) 414 ((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF << 415 I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) & 416 I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK)); 417 else 418 cmd_resp->flags |= cpu_to_le16((u16) 419 ((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI << 420 I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) & 421 I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK)); 422 423 status = i40e_asq_send_command(hw, &desc, lut, lut_size, NULL); 424 425 return status; 426 } 427 428 /** 429 * i40e_aq_get_rss_lut 430 * @hw: pointer to the hardware structure 431 * @vsi_id: vsi fw index 432 * @pf_lut: for PF table set true, for VSI table set false 433 * @lut: pointer to the lut buffer provided by the caller 434 * @lut_size: size of the lut buffer 435 * 436 * get the RSS lookup table, PF or VSI type 437 **/ 438 i40e_status i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 vsi_id, 439 bool pf_lut, u8 *lut, u16 lut_size) 440 { 441 return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, 442 false); 443 } 444 445 /** 446 * i40e_aq_set_rss_lut 447 * @hw: pointer to the hardware structure 448 * @vsi_id: vsi fw index 449 * @pf_lut: for PF table set true, for VSI table set false 450 * @lut: pointer to the lut buffer provided by the caller 451 * @lut_size: size of the lut buffer 452 * 453 * set the RSS lookup table, PF or VSI type 454 **/ 455 i40e_status i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 vsi_id, 456 bool pf_lut, u8 *lut, u16 lut_size) 457 { 458 return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, true); 459 } 460 461 /** 462 * i40e_aq_get_set_rss_key 463 * @hw: pointer to the hw struct 464 * @vsi_id: vsi fw index 465 * @key: pointer to key info struct 466 * @set: set true to set the key, false to get the key 467 * 468 * get the RSS key per VSI 469 **/ 470 
static i40e_status i40e_aq_get_set_rss_key(struct i40e_hw *hw, 471 u16 vsi_id, 472 struct i40e_aqc_get_set_rss_key_data *key, 473 bool set) 474 { 475 i40e_status status; 476 struct i40e_aq_desc desc; 477 struct i40e_aqc_get_set_rss_key *cmd_resp = 478 (struct i40e_aqc_get_set_rss_key *)&desc.params.raw; 479 u16 key_size = sizeof(struct i40e_aqc_get_set_rss_key_data); 480 481 if (set) 482 i40e_fill_default_direct_cmd_desc(&desc, 483 i40e_aqc_opc_set_rss_key); 484 else 485 i40e_fill_default_direct_cmd_desc(&desc, 486 i40e_aqc_opc_get_rss_key); 487 488 /* Indirect command */ 489 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 490 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD); 491 492 cmd_resp->vsi_id = 493 cpu_to_le16((u16)((vsi_id << 494 I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) & 495 I40E_AQC_SET_RSS_KEY_VSI_ID_MASK)); 496 cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_KEY_VSI_VALID); 497 498 status = i40e_asq_send_command(hw, &desc, key, key_size, NULL); 499 500 return status; 501 } 502 503 /** 504 * i40e_aq_get_rss_key 505 * @hw: pointer to the hw struct 506 * @vsi_id: vsi fw index 507 * @key: pointer to key info struct 508 * 509 **/ 510 i40e_status i40e_aq_get_rss_key(struct i40e_hw *hw, 511 u16 vsi_id, 512 struct i40e_aqc_get_set_rss_key_data *key) 513 { 514 return i40e_aq_get_set_rss_key(hw, vsi_id, key, false); 515 } 516 517 /** 518 * i40e_aq_set_rss_key 519 * @hw: pointer to the hw struct 520 * @vsi_id: vsi fw index 521 * @key: pointer to key info struct 522 * 523 * set the RSS key per VSI 524 **/ 525 i40e_status i40e_aq_set_rss_key(struct i40e_hw *hw, 526 u16 vsi_id, 527 struct i40e_aqc_get_set_rss_key_data *key) 528 { 529 return i40e_aq_get_set_rss_key(hw, vsi_id, key, true); 530 } 531 532 /* The i40e_ptype_lookup table is used to convert from the 8-bit ptype in the 533 * hardware to a bit-field that can be used by SW to more easily determine the 534 * packet type. 535 * 536 * Macros are used to shorten the table lines and make this table human 537 * readable. 538 * 539 * We store the PTYPE in the top byte of the bit field - this is just so that 540 * we can check that the table doesn't have a row missing, as the index into 541 * the table should be the PTYPE. 
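 *
 * For example (illustrative, taken from the table below): ptype 26 is a
 * non-tunneled IPv4 TCP frame, so its decoded entry has
 * outer_ip == I40E_RX_PTYPE_OUTER_IP, outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4
 * and inner_prot == I40E_RX_PTYPE_INNER_PROT_TCP.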
542 * 543 * Typical work flow: 544 * 545 * IF NOT i40e_ptype_lookup[ptype].known 546 * THEN 547 * Packet is unknown 548 * ELSE IF i40e_ptype_lookup[ptype].outer_ip == I40E_RX_PTYPE_OUTER_IP 549 * Use the rest of the fields to look at the tunnels, inner protocols, etc 550 * ELSE 551 * Use the enum i40e_rx_l2_ptype to decode the packet type 552 * ENDIF 553 */ 554 555 /* macro to make the table lines short */ 556 #define I40E_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\ 557 { PTYPE, \ 558 1, \ 559 I40E_RX_PTYPE_OUTER_##OUTER_IP, \ 560 I40E_RX_PTYPE_OUTER_##OUTER_IP_VER, \ 561 I40E_RX_PTYPE_##OUTER_FRAG, \ 562 I40E_RX_PTYPE_TUNNEL_##T, \ 563 I40E_RX_PTYPE_TUNNEL_END_##TE, \ 564 I40E_RX_PTYPE_##TEF, \ 565 I40E_RX_PTYPE_INNER_PROT_##I, \ 566 I40E_RX_PTYPE_PAYLOAD_LAYER_##PL } 567 568 #define I40E_PTT_UNUSED_ENTRY(PTYPE) \ 569 { PTYPE, 0, 0, 0, 0, 0, 0, 0, 0, 0 } 570 571 /* shorter macros makes the table fit but are terse */ 572 #define I40E_RX_PTYPE_NOF I40E_RX_PTYPE_NOT_FRAG 573 #define I40E_RX_PTYPE_FRG I40E_RX_PTYPE_FRAG 574 #define I40E_RX_PTYPE_INNER_PROT_TS I40E_RX_PTYPE_INNER_PROT_TIMESYNC 575 576 /* Lookup table mapping the HW PTYPE to the bit field for decoding */ 577 struct i40e_rx_ptype_decoded i40e_ptype_lookup[] = { 578 /* L2 Packet types */ 579 I40E_PTT_UNUSED_ENTRY(0), 580 I40E_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), 581 I40E_PTT(2, L2, NONE, NOF, NONE, NONE, NOF, TS, PAY2), 582 I40E_PTT(3, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), 583 I40E_PTT_UNUSED_ENTRY(4), 584 I40E_PTT_UNUSED_ENTRY(5), 585 I40E_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), 586 I40E_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), 587 I40E_PTT_UNUSED_ENTRY(8), 588 I40E_PTT_UNUSED_ENTRY(9), 589 I40E_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), 590 I40E_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), 591 I40E_PTT(12, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), 592 I40E_PTT(13, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), 593 I40E_PTT(14, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), 594 I40E_PTT(15, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), 595 I40E_PTT(16, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), 596 I40E_PTT(17, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), 597 I40E_PTT(18, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), 598 I40E_PTT(19, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), 599 I40E_PTT(20, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), 600 I40E_PTT(21, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), 601 602 /* Non Tunneled IPv4 */ 603 I40E_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3), 604 I40E_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3), 605 I40E_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4), 606 I40E_PTT_UNUSED_ENTRY(25), 607 I40E_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4), 608 I40E_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4), 609 I40E_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4), 610 611 /* IPv4 --> IPv4 */ 612 I40E_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3), 613 I40E_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3), 614 I40E_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4), 615 I40E_PTT_UNUSED_ENTRY(32), 616 I40E_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4), 617 I40E_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4), 618 I40E_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4), 619 620 /* IPv4 --> IPv6 */ 621 I40E_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3), 622 I40E_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3), 623 I40E_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, 
NOF, UDP, PAY4), 624 I40E_PTT_UNUSED_ENTRY(39), 625 I40E_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4), 626 I40E_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4), 627 I40E_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4), 628 629 /* IPv4 --> GRE/NAT */ 630 I40E_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3), 631 632 /* IPv4 --> GRE/NAT --> IPv4 */ 633 I40E_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3), 634 I40E_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3), 635 I40E_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4), 636 I40E_PTT_UNUSED_ENTRY(47), 637 I40E_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4), 638 I40E_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4), 639 I40E_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4), 640 641 /* IPv4 --> GRE/NAT --> IPv6 */ 642 I40E_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3), 643 I40E_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3), 644 I40E_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4), 645 I40E_PTT_UNUSED_ENTRY(54), 646 I40E_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4), 647 I40E_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4), 648 I40E_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4), 649 650 /* IPv4 --> GRE/NAT --> MAC */ 651 I40E_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3), 652 653 /* IPv4 --> GRE/NAT --> MAC --> IPv4 */ 654 I40E_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3), 655 I40E_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3), 656 I40E_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4), 657 I40E_PTT_UNUSED_ENTRY(62), 658 I40E_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4), 659 I40E_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4), 660 I40E_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4), 661 662 /* IPv4 --> GRE/NAT -> MAC --> IPv6 */ 663 I40E_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3), 664 I40E_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3), 665 I40E_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4), 666 I40E_PTT_UNUSED_ENTRY(69), 667 I40E_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4), 668 I40E_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4), 669 I40E_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4), 670 671 /* IPv4 --> GRE/NAT --> MAC/VLAN */ 672 I40E_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3), 673 674 /* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */ 675 I40E_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3), 676 I40E_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3), 677 I40E_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4), 678 I40E_PTT_UNUSED_ENTRY(77), 679 I40E_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4), 680 I40E_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4), 681 I40E_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4), 682 683 /* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */ 684 I40E_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3), 685 I40E_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3), 686 I40E_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4), 687 I40E_PTT_UNUSED_ENTRY(84), 688 I40E_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4), 689 I40E_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4), 690 I40E_PTT(87, IP, IPV4, NOF, 
IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4), 691 692 /* Non Tunneled IPv6 */ 693 I40E_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3), 694 I40E_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3), 695 I40E_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4), 696 I40E_PTT_UNUSED_ENTRY(91), 697 I40E_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4), 698 I40E_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4), 699 I40E_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4), 700 701 /* IPv6 --> IPv4 */ 702 I40E_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3), 703 I40E_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3), 704 I40E_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4), 705 I40E_PTT_UNUSED_ENTRY(98), 706 I40E_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4), 707 I40E_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4), 708 I40E_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4), 709 710 /* IPv6 --> IPv6 */ 711 I40E_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3), 712 I40E_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3), 713 I40E_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4), 714 I40E_PTT_UNUSED_ENTRY(105), 715 I40E_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4), 716 I40E_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4), 717 I40E_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4), 718 719 /* IPv6 --> GRE/NAT */ 720 I40E_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3), 721 722 /* IPv6 --> GRE/NAT -> IPv4 */ 723 I40E_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3), 724 I40E_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3), 725 I40E_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4), 726 I40E_PTT_UNUSED_ENTRY(113), 727 I40E_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4), 728 I40E_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4), 729 I40E_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4), 730 731 /* IPv6 --> GRE/NAT -> IPv6 */ 732 I40E_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3), 733 I40E_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3), 734 I40E_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4), 735 I40E_PTT_UNUSED_ENTRY(120), 736 I40E_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4), 737 I40E_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4), 738 I40E_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4), 739 740 /* IPv6 --> GRE/NAT -> MAC */ 741 I40E_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3), 742 743 /* IPv6 --> GRE/NAT -> MAC -> IPv4 */ 744 I40E_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3), 745 I40E_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3), 746 I40E_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4), 747 I40E_PTT_UNUSED_ENTRY(128), 748 I40E_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4), 749 I40E_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4), 750 I40E_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4), 751 752 /* IPv6 --> GRE/NAT -> MAC -> IPv6 */ 753 I40E_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3), 754 I40E_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3), 755 I40E_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4), 756 I40E_PTT_UNUSED_ENTRY(135), 757 I40E_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4), 758 I40E_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4), 759 I40E_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4), 760 761 /* IPv6 --> 
GRE/NAT -> MAC/VLAN */ 762 I40E_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3), 763 764 /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */ 765 I40E_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3), 766 I40E_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3), 767 I40E_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4), 768 I40E_PTT_UNUSED_ENTRY(143), 769 I40E_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4), 770 I40E_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4), 771 I40E_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4), 772 773 /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */ 774 I40E_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3), 775 I40E_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3), 776 I40E_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4), 777 I40E_PTT_UNUSED_ENTRY(150), 778 I40E_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4), 779 I40E_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4), 780 I40E_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4), 781 782 /* unused entries */ 783 I40E_PTT_UNUSED_ENTRY(154), 784 I40E_PTT_UNUSED_ENTRY(155), 785 I40E_PTT_UNUSED_ENTRY(156), 786 I40E_PTT_UNUSED_ENTRY(157), 787 I40E_PTT_UNUSED_ENTRY(158), 788 I40E_PTT_UNUSED_ENTRY(159), 789 790 I40E_PTT_UNUSED_ENTRY(160), 791 I40E_PTT_UNUSED_ENTRY(161), 792 I40E_PTT_UNUSED_ENTRY(162), 793 I40E_PTT_UNUSED_ENTRY(163), 794 I40E_PTT_UNUSED_ENTRY(164), 795 I40E_PTT_UNUSED_ENTRY(165), 796 I40E_PTT_UNUSED_ENTRY(166), 797 I40E_PTT_UNUSED_ENTRY(167), 798 I40E_PTT_UNUSED_ENTRY(168), 799 I40E_PTT_UNUSED_ENTRY(169), 800 801 I40E_PTT_UNUSED_ENTRY(170), 802 I40E_PTT_UNUSED_ENTRY(171), 803 I40E_PTT_UNUSED_ENTRY(172), 804 I40E_PTT_UNUSED_ENTRY(173), 805 I40E_PTT_UNUSED_ENTRY(174), 806 I40E_PTT_UNUSED_ENTRY(175), 807 I40E_PTT_UNUSED_ENTRY(176), 808 I40E_PTT_UNUSED_ENTRY(177), 809 I40E_PTT_UNUSED_ENTRY(178), 810 I40E_PTT_UNUSED_ENTRY(179), 811 812 I40E_PTT_UNUSED_ENTRY(180), 813 I40E_PTT_UNUSED_ENTRY(181), 814 I40E_PTT_UNUSED_ENTRY(182), 815 I40E_PTT_UNUSED_ENTRY(183), 816 I40E_PTT_UNUSED_ENTRY(184), 817 I40E_PTT_UNUSED_ENTRY(185), 818 I40E_PTT_UNUSED_ENTRY(186), 819 I40E_PTT_UNUSED_ENTRY(187), 820 I40E_PTT_UNUSED_ENTRY(188), 821 I40E_PTT_UNUSED_ENTRY(189), 822 823 I40E_PTT_UNUSED_ENTRY(190), 824 I40E_PTT_UNUSED_ENTRY(191), 825 I40E_PTT_UNUSED_ENTRY(192), 826 I40E_PTT_UNUSED_ENTRY(193), 827 I40E_PTT_UNUSED_ENTRY(194), 828 I40E_PTT_UNUSED_ENTRY(195), 829 I40E_PTT_UNUSED_ENTRY(196), 830 I40E_PTT_UNUSED_ENTRY(197), 831 I40E_PTT_UNUSED_ENTRY(198), 832 I40E_PTT_UNUSED_ENTRY(199), 833 834 I40E_PTT_UNUSED_ENTRY(200), 835 I40E_PTT_UNUSED_ENTRY(201), 836 I40E_PTT_UNUSED_ENTRY(202), 837 I40E_PTT_UNUSED_ENTRY(203), 838 I40E_PTT_UNUSED_ENTRY(204), 839 I40E_PTT_UNUSED_ENTRY(205), 840 I40E_PTT_UNUSED_ENTRY(206), 841 I40E_PTT_UNUSED_ENTRY(207), 842 I40E_PTT_UNUSED_ENTRY(208), 843 I40E_PTT_UNUSED_ENTRY(209), 844 845 I40E_PTT_UNUSED_ENTRY(210), 846 I40E_PTT_UNUSED_ENTRY(211), 847 I40E_PTT_UNUSED_ENTRY(212), 848 I40E_PTT_UNUSED_ENTRY(213), 849 I40E_PTT_UNUSED_ENTRY(214), 850 I40E_PTT_UNUSED_ENTRY(215), 851 I40E_PTT_UNUSED_ENTRY(216), 852 I40E_PTT_UNUSED_ENTRY(217), 853 I40E_PTT_UNUSED_ENTRY(218), 854 I40E_PTT_UNUSED_ENTRY(219), 855 856 I40E_PTT_UNUSED_ENTRY(220), 857 I40E_PTT_UNUSED_ENTRY(221), 858 I40E_PTT_UNUSED_ENTRY(222), 859 I40E_PTT_UNUSED_ENTRY(223), 860 I40E_PTT_UNUSED_ENTRY(224), 861 I40E_PTT_UNUSED_ENTRY(225), 862 
I40E_PTT_UNUSED_ENTRY(226), 863 I40E_PTT_UNUSED_ENTRY(227), 864 I40E_PTT_UNUSED_ENTRY(228), 865 I40E_PTT_UNUSED_ENTRY(229), 866 867 I40E_PTT_UNUSED_ENTRY(230), 868 I40E_PTT_UNUSED_ENTRY(231), 869 I40E_PTT_UNUSED_ENTRY(232), 870 I40E_PTT_UNUSED_ENTRY(233), 871 I40E_PTT_UNUSED_ENTRY(234), 872 I40E_PTT_UNUSED_ENTRY(235), 873 I40E_PTT_UNUSED_ENTRY(236), 874 I40E_PTT_UNUSED_ENTRY(237), 875 I40E_PTT_UNUSED_ENTRY(238), 876 I40E_PTT_UNUSED_ENTRY(239), 877 878 I40E_PTT_UNUSED_ENTRY(240), 879 I40E_PTT_UNUSED_ENTRY(241), 880 I40E_PTT_UNUSED_ENTRY(242), 881 I40E_PTT_UNUSED_ENTRY(243), 882 I40E_PTT_UNUSED_ENTRY(244), 883 I40E_PTT_UNUSED_ENTRY(245), 884 I40E_PTT_UNUSED_ENTRY(246), 885 I40E_PTT_UNUSED_ENTRY(247), 886 I40E_PTT_UNUSED_ENTRY(248), 887 I40E_PTT_UNUSED_ENTRY(249), 888 889 I40E_PTT_UNUSED_ENTRY(250), 890 I40E_PTT_UNUSED_ENTRY(251), 891 I40E_PTT_UNUSED_ENTRY(252), 892 I40E_PTT_UNUSED_ENTRY(253), 893 I40E_PTT_UNUSED_ENTRY(254), 894 I40E_PTT_UNUSED_ENTRY(255) 895 }; 896 897 /** 898 * i40e_init_shared_code - Initialize the shared code 899 * @hw: pointer to hardware structure 900 * 901 * This assigns the MAC type and PHY code and inits the NVM. 902 * Does not touch the hardware. This function must be called prior to any 903 * other function in the shared code. The i40e_hw structure should be 904 * memset to 0 prior to calling this function. The following fields in 905 * hw structure should be filled in prior to calling this function: 906 * hw_addr, back, device_id, vendor_id, subsystem_device_id, 907 * subsystem_vendor_id, and revision_id 908 **/ 909 i40e_status i40e_init_shared_code(struct i40e_hw *hw) 910 { 911 i40e_status status = 0; 912 u32 port, ari, func_rid; 913 914 i40e_set_mac_type(hw); 915 916 switch (hw->mac.type) { 917 case I40E_MAC_XL710: 918 case I40E_MAC_X722: 919 break; 920 default: 921 return I40E_ERR_DEVICE_NOT_SUPPORTED; 922 } 923 924 hw->phy.get_link_info = true; 925 926 /* Determine port number and PF number*/ 927 port = (rd32(hw, I40E_PFGEN_PORTNUM) & I40E_PFGEN_PORTNUM_PORT_NUM_MASK) 928 >> I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT; 929 hw->port = (u8)port; 930 ari = (rd32(hw, I40E_GLPCI_CAPSUP) & I40E_GLPCI_CAPSUP_ARI_EN_MASK) >> 931 I40E_GLPCI_CAPSUP_ARI_EN_SHIFT; 932 func_rid = rd32(hw, I40E_PF_FUNC_RID); 933 if (ari) 934 hw->pf_id = (u8)(func_rid & 0xff); 935 else 936 hw->pf_id = (u8)(func_rid & 0x7); 937 938 status = i40e_init_nvm(hw); 939 return status; 940 } 941 942 /** 943 * i40e_aq_mac_address_read - Retrieve the MAC addresses 944 * @hw: pointer to the hw struct 945 * @flags: a return indicator of what addresses were added to the addr store 946 * @addrs: the requestor's mac addr store 947 * @cmd_details: pointer to command details structure or NULL 948 **/ 949 static i40e_status i40e_aq_mac_address_read(struct i40e_hw *hw, 950 u16 *flags, 951 struct i40e_aqc_mac_address_read_data *addrs, 952 struct i40e_asq_cmd_details *cmd_details) 953 { 954 struct i40e_aq_desc desc; 955 struct i40e_aqc_mac_address_read *cmd_data = 956 (struct i40e_aqc_mac_address_read *)&desc.params.raw; 957 i40e_status status; 958 959 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_mac_address_read); 960 desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF); 961 962 status = i40e_asq_send_command(hw, &desc, addrs, 963 sizeof(*addrs), cmd_details); 964 *flags = le16_to_cpu(cmd_data->command_flags); 965 966 return status; 967 } 968 969 /** 970 * i40e_aq_mac_address_write - Change the MAC addresses 971 * @hw: pointer to the hw struct 972 * @flags: indicates which MAC to be written 973 * @mac_addr: address to 
write 974 * @cmd_details: pointer to command details structure or NULL 975 **/ 976 i40e_status i40e_aq_mac_address_write(struct i40e_hw *hw, 977 u16 flags, u8 *mac_addr, 978 struct i40e_asq_cmd_details *cmd_details) 979 { 980 struct i40e_aq_desc desc; 981 struct i40e_aqc_mac_address_write *cmd_data = 982 (struct i40e_aqc_mac_address_write *)&desc.params.raw; 983 i40e_status status; 984 985 i40e_fill_default_direct_cmd_desc(&desc, 986 i40e_aqc_opc_mac_address_write); 987 cmd_data->command_flags = cpu_to_le16(flags); 988 cmd_data->mac_sah = cpu_to_le16((u16)mac_addr[0] << 8 | mac_addr[1]); 989 cmd_data->mac_sal = cpu_to_le32(((u32)mac_addr[2] << 24) | 990 ((u32)mac_addr[3] << 16) | 991 ((u32)mac_addr[4] << 8) | 992 mac_addr[5]); 993 994 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 995 996 return status; 997 } 998 999 /** 1000 * i40e_get_mac_addr - get MAC address 1001 * @hw: pointer to the HW structure 1002 * @mac_addr: pointer to MAC address 1003 * 1004 * Reads the adapter's MAC address from register 1005 **/ 1006 i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr) 1007 { 1008 struct i40e_aqc_mac_address_read_data addrs; 1009 i40e_status status; 1010 u16 flags = 0; 1011 1012 status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL); 1013 1014 if (flags & I40E_AQC_LAN_ADDR_VALID) 1015 ether_addr_copy(mac_addr, addrs.pf_lan_mac); 1016 1017 return status; 1018 } 1019 1020 /** 1021 * i40e_get_port_mac_addr - get Port MAC address 1022 * @hw: pointer to the HW structure 1023 * @mac_addr: pointer to Port MAC address 1024 * 1025 * Reads the adapter's Port MAC address 1026 **/ 1027 i40e_status i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr) 1028 { 1029 struct i40e_aqc_mac_address_read_data addrs; 1030 i40e_status status; 1031 u16 flags = 0; 1032 1033 status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL); 1034 if (status) 1035 return status; 1036 1037 if (flags & I40E_AQC_PORT_ADDR_VALID) 1038 ether_addr_copy(mac_addr, addrs.port_mac); 1039 else 1040 status = I40E_ERR_INVALID_MAC_ADDR; 1041 1042 return status; 1043 } 1044 1045 /** 1046 * i40e_pre_tx_queue_cfg - pre tx queue configure 1047 * @hw: pointer to the HW structure 1048 * @queue: target PF queue index 1049 * @enable: state change request 1050 * 1051 * Handles hw requirement to indicate intention to enable 1052 * or disable target queue. 1053 **/ 1054 void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable) 1055 { 1056 u32 abs_queue_idx = hw->func_caps.base_queue + queue; 1057 u32 reg_block = 0; 1058 u32 reg_val; 1059 1060 if (abs_queue_idx >= 128) { 1061 reg_block = abs_queue_idx / 128; 1062 abs_queue_idx %= 128; 1063 } 1064 1065 reg_val = rd32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block)); 1066 reg_val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK; 1067 reg_val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT); 1068 1069 if (enable) 1070 reg_val |= I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK; 1071 else 1072 reg_val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK; 1073 1074 wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), reg_val); 1075 } 1076 1077 /** 1078 * i40e_read_pba_string - Reads part number string from EEPROM 1079 * @hw: pointer to hardware structure 1080 * @pba_num: stores the part number string from the EEPROM 1081 * @pba_num_size: part number string buffer length 1082 * 1083 * Reads the part number string from the EEPROM. 
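 *
 * Caller sketch (the buffer length below is only an assumption; the
 * routine itself returns I40E_ERR_PARAM when the buffer cannot hold the
 * PBA block found in the NVM):
 *
 *	u8 pba[64];
 *
 *	if (!i40e_read_pba_string(hw, pba, sizeof(pba)))
 *		hw_dbg(hw, "PBA: %s\n", pba);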
1084 **/ 1085 i40e_status i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num, 1086 u32 pba_num_size) 1087 { 1088 i40e_status status = 0; 1089 u16 pba_word = 0; 1090 u16 pba_size = 0; 1091 u16 pba_ptr = 0; 1092 u16 i = 0; 1093 1094 status = i40e_read_nvm_word(hw, I40E_SR_PBA_FLAGS, &pba_word); 1095 if (status || (pba_word != 0xFAFA)) { 1096 hw_dbg(hw, "Failed to read PBA flags or flag is invalid.\n"); 1097 return status; 1098 } 1099 1100 status = i40e_read_nvm_word(hw, I40E_SR_PBA_BLOCK_PTR, &pba_ptr); 1101 if (status) { 1102 hw_dbg(hw, "Failed to read PBA Block pointer.\n"); 1103 return status; 1104 } 1105 1106 status = i40e_read_nvm_word(hw, pba_ptr, &pba_size); 1107 if (status) { 1108 hw_dbg(hw, "Failed to read PBA Block size.\n"); 1109 return status; 1110 } 1111 1112 /* Subtract one to get PBA word count (PBA Size word is included in 1113 * total size) 1114 */ 1115 pba_size--; 1116 if (pba_num_size < (((u32)pba_size * 2) + 1)) { 1117 hw_dbg(hw, "Buffer too small for PBA data.\n"); 1118 return I40E_ERR_PARAM; 1119 } 1120 1121 for (i = 0; i < pba_size; i++) { 1122 status = i40e_read_nvm_word(hw, (pba_ptr + 1) + i, &pba_word); 1123 if (status) { 1124 hw_dbg(hw, "Failed to read PBA Block word %d.\n", i); 1125 return status; 1126 } 1127 1128 pba_num[(i * 2)] = (pba_word >> 8) & 0xFF; 1129 pba_num[(i * 2) + 1] = pba_word & 0xFF; 1130 } 1131 pba_num[(pba_size * 2)] = '\0'; 1132 1133 return status; 1134 } 1135 1136 /** 1137 * i40e_get_media_type - Gets media type 1138 * @hw: pointer to the hardware structure 1139 **/ 1140 static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw) 1141 { 1142 enum i40e_media_type media; 1143 1144 switch (hw->phy.link_info.phy_type) { 1145 case I40E_PHY_TYPE_10GBASE_SR: 1146 case I40E_PHY_TYPE_10GBASE_LR: 1147 case I40E_PHY_TYPE_1000BASE_SX: 1148 case I40E_PHY_TYPE_1000BASE_LX: 1149 case I40E_PHY_TYPE_40GBASE_SR4: 1150 case I40E_PHY_TYPE_40GBASE_LR4: 1151 case I40E_PHY_TYPE_25GBASE_LR: 1152 case I40E_PHY_TYPE_25GBASE_SR: 1153 media = I40E_MEDIA_TYPE_FIBER; 1154 break; 1155 case I40E_PHY_TYPE_100BASE_TX: 1156 case I40E_PHY_TYPE_1000BASE_T: 1157 case I40E_PHY_TYPE_2_5GBASE_T: 1158 case I40E_PHY_TYPE_5GBASE_T: 1159 case I40E_PHY_TYPE_10GBASE_T: 1160 media = I40E_MEDIA_TYPE_BASET; 1161 break; 1162 case I40E_PHY_TYPE_10GBASE_CR1_CU: 1163 case I40E_PHY_TYPE_40GBASE_CR4_CU: 1164 case I40E_PHY_TYPE_10GBASE_CR1: 1165 case I40E_PHY_TYPE_40GBASE_CR4: 1166 case I40E_PHY_TYPE_10GBASE_SFPP_CU: 1167 case I40E_PHY_TYPE_40GBASE_AOC: 1168 case I40E_PHY_TYPE_10GBASE_AOC: 1169 case I40E_PHY_TYPE_25GBASE_CR: 1170 case I40E_PHY_TYPE_25GBASE_AOC: 1171 case I40E_PHY_TYPE_25GBASE_ACC: 1172 media = I40E_MEDIA_TYPE_DA; 1173 break; 1174 case I40E_PHY_TYPE_1000BASE_KX: 1175 case I40E_PHY_TYPE_10GBASE_KX4: 1176 case I40E_PHY_TYPE_10GBASE_KR: 1177 case I40E_PHY_TYPE_40GBASE_KR4: 1178 case I40E_PHY_TYPE_20GBASE_KR2: 1179 case I40E_PHY_TYPE_25GBASE_KR: 1180 media = I40E_MEDIA_TYPE_BACKPLANE; 1181 break; 1182 case I40E_PHY_TYPE_SGMII: 1183 case I40E_PHY_TYPE_XAUI: 1184 case I40E_PHY_TYPE_XFI: 1185 case I40E_PHY_TYPE_XLAUI: 1186 case I40E_PHY_TYPE_XLPPI: 1187 default: 1188 media = I40E_MEDIA_TYPE_UNKNOWN; 1189 break; 1190 } 1191 1192 return media; 1193 } 1194 1195 /** 1196 * i40e_poll_globr - Poll for Global Reset completion 1197 * @hw: pointer to the hardware structure 1198 * @retry_limit: how many times to retry before failure 1199 **/ 1200 static i40e_status i40e_poll_globr(struct i40e_hw *hw, 1201 u32 retry_limit) 1202 { 1203 u32 cnt, reg = 0; 1204 1205 for (cnt = 0; cnt < 
retry_limit; cnt++) {
		reg = rd32(hw, I40E_GLGEN_RSTAT);
		if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
			return 0;
		msleep(100);
	}

	hw_dbg(hw, "Global reset failed.\n");
	hw_dbg(hw, "I40E_GLGEN_RSTAT = 0x%x\n", reg);

	return I40E_ERR_RESET_FAILED;
}

#define I40E_PF_RESET_WAIT_COUNT_A0	200
#define I40E_PF_RESET_WAIT_COUNT	200
/**
 * i40e_pf_reset - Reset the PF
 * @hw: pointer to the hardware structure
 *
 * Assuming someone else has triggered a global reset,
 * assure the global reset is complete and then reset the PF
 **/
i40e_status i40e_pf_reset(struct i40e_hw *hw)
{
	u32 cnt = 0;
	u32 cnt1 = 0;
	u32 reg = 0;
	u32 grst_del;

	/* Poll for Global Reset steady state in case of recent GRST.
	 * The grst delay value is in 100ms units, and we'll wait a
	 * couple counts longer to be sure we don't just miss the end.
	 */
	grst_del = (rd32(hw, I40E_GLGEN_RSTCTL) &
		    I40E_GLGEN_RSTCTL_GRSTDEL_MASK) >>
		    I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;

	/* It can take up to 15 secs for GRST steady state.
	 * Bump it to 16 secs max to be safe.
	 */
	grst_del = grst_del * 20;

	for (cnt = 0; cnt < grst_del; cnt++) {
		reg = rd32(hw, I40E_GLGEN_RSTAT);
		if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
			break;
		msleep(100);
	}
	if (reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
		hw_dbg(hw, "Global reset polling failed to complete.\n");
		return I40E_ERR_RESET_FAILED;
	}

	/* Now wait for the FW to be ready */
	for (cnt1 = 0; cnt1 < I40E_PF_RESET_WAIT_COUNT; cnt1++) {
		reg = rd32(hw, I40E_GLNVM_ULD);
		reg &= (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
			I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK);
		if (reg == (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
			    I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK)) {
			hw_dbg(hw, "Core and Global modules ready %d\n", cnt1);
			break;
		}
		usleep_range(10000, 20000);
	}
	if (!(reg & (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
		     I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))) {
		hw_dbg(hw, "wait for FW Reset complete timed out\n");
		hw_dbg(hw, "I40E_GLNVM_ULD = 0x%x\n", reg);
		return I40E_ERR_RESET_FAILED;
	}

	/* If there was a Global Reset in progress when we got here,
	 * we don't need to do the PF Reset
	 */
	if (!cnt) {
		u32 reg2 = 0;

		if (hw->revision_id == 0)
			cnt = I40E_PF_RESET_WAIT_COUNT_A0;
		else
			cnt = I40E_PF_RESET_WAIT_COUNT;
		reg = rd32(hw, I40E_PFGEN_CTRL);
		wr32(hw, I40E_PFGEN_CTRL,
		     (reg | I40E_PFGEN_CTRL_PFSWR_MASK));
		for (; cnt; cnt--) {
			reg = rd32(hw, I40E_PFGEN_CTRL);
			if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK))
				break;
			reg2 = rd32(hw, I40E_GLGEN_RSTAT);
			if (reg2 & I40E_GLGEN_RSTAT_DEVSTATE_MASK)
				break;
			usleep_range(1000, 2000);
		}
		if (reg2 & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
			if (i40e_poll_globr(hw, grst_del))
				return I40E_ERR_RESET_FAILED;
		} else if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) {
			hw_dbg(hw, "PF reset polling failed to complete.\n");
			return I40E_ERR_RESET_FAILED;
		}
	}

	i40e_clear_pxe_mode(hw);

	return 0;
}

/**
 * i40e_clear_hw - clear out any left over hw state
 * @hw: pointer to the hw struct
 *
 * Clear queues and interrupts, typically called at init time,
 * but after the capabilities have been found so we know how many
 * queues and msix vectors have been
allocated. 1319 **/ 1320 void i40e_clear_hw(struct i40e_hw *hw) 1321 { 1322 u32 num_queues, base_queue; 1323 u32 num_pf_int; 1324 u32 num_vf_int; 1325 u32 num_vfs; 1326 u32 i, j; 1327 u32 val; 1328 u32 eol = 0x7ff; 1329 1330 /* get number of interrupts, queues, and VFs */ 1331 val = rd32(hw, I40E_GLPCI_CNF2); 1332 num_pf_int = (val & I40E_GLPCI_CNF2_MSI_X_PF_N_MASK) >> 1333 I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT; 1334 num_vf_int = (val & I40E_GLPCI_CNF2_MSI_X_VF_N_MASK) >> 1335 I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT; 1336 1337 val = rd32(hw, I40E_PFLAN_QALLOC); 1338 base_queue = (val & I40E_PFLAN_QALLOC_FIRSTQ_MASK) >> 1339 I40E_PFLAN_QALLOC_FIRSTQ_SHIFT; 1340 j = (val & I40E_PFLAN_QALLOC_LASTQ_MASK) >> 1341 I40E_PFLAN_QALLOC_LASTQ_SHIFT; 1342 if (val & I40E_PFLAN_QALLOC_VALID_MASK) 1343 num_queues = (j - base_queue) + 1; 1344 else 1345 num_queues = 0; 1346 1347 val = rd32(hw, I40E_PF_VT_PFALLOC); 1348 i = (val & I40E_PF_VT_PFALLOC_FIRSTVF_MASK) >> 1349 I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT; 1350 j = (val & I40E_PF_VT_PFALLOC_LASTVF_MASK) >> 1351 I40E_PF_VT_PFALLOC_LASTVF_SHIFT; 1352 if (val & I40E_PF_VT_PFALLOC_VALID_MASK) 1353 num_vfs = (j - i) + 1; 1354 else 1355 num_vfs = 0; 1356 1357 /* stop all the interrupts */ 1358 wr32(hw, I40E_PFINT_ICR0_ENA, 0); 1359 val = 0x3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT; 1360 for (i = 0; i < num_pf_int - 2; i++) 1361 wr32(hw, I40E_PFINT_DYN_CTLN(i), val); 1362 1363 /* Set the FIRSTQ_INDX field to 0x7FF in PFINT_LNKLSTx */ 1364 val = eol << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT; 1365 wr32(hw, I40E_PFINT_LNKLST0, val); 1366 for (i = 0; i < num_pf_int - 2; i++) 1367 wr32(hw, I40E_PFINT_LNKLSTN(i), val); 1368 val = eol << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT; 1369 for (i = 0; i < num_vfs; i++) 1370 wr32(hw, I40E_VPINT_LNKLST0(i), val); 1371 for (i = 0; i < num_vf_int - 2; i++) 1372 wr32(hw, I40E_VPINT_LNKLSTN(i), val); 1373 1374 /* warn the HW of the coming Tx disables */ 1375 for (i = 0; i < num_queues; i++) { 1376 u32 abs_queue_idx = base_queue + i; 1377 u32 reg_block = 0; 1378 1379 if (abs_queue_idx >= 128) { 1380 reg_block = abs_queue_idx / 128; 1381 abs_queue_idx %= 128; 1382 } 1383 1384 val = rd32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block)); 1385 val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK; 1386 val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT); 1387 val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK; 1388 1389 wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), val); 1390 } 1391 udelay(400); 1392 1393 /* stop all the queues */ 1394 for (i = 0; i < num_queues; i++) { 1395 wr32(hw, I40E_QINT_TQCTL(i), 0); 1396 wr32(hw, I40E_QTX_ENA(i), 0); 1397 wr32(hw, I40E_QINT_RQCTL(i), 0); 1398 wr32(hw, I40E_QRX_ENA(i), 0); 1399 } 1400 1401 /* short wait for all queue disables to settle */ 1402 udelay(50); 1403 } 1404 1405 /** 1406 * i40e_clear_pxe_mode - clear pxe operations mode 1407 * @hw: pointer to the hw struct 1408 * 1409 * Make sure all PXE mode settings are cleared, including things 1410 * like descriptor fetch/write-back mode. 
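 *
 * Note that i40e_pf_reset() earlier in this file already calls this
 * helper once the PF reset has completed.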
1411 **/ 1412 void i40e_clear_pxe_mode(struct i40e_hw *hw) 1413 { 1414 u32 reg; 1415 1416 if (i40e_check_asq_alive(hw)) 1417 i40e_aq_clear_pxe_mode(hw, NULL); 1418 1419 /* Clear single descriptor fetch/write-back mode */ 1420 reg = rd32(hw, I40E_GLLAN_RCTL_0); 1421 1422 if (hw->revision_id == 0) { 1423 /* As a work around clear PXE_MODE instead of setting it */ 1424 wr32(hw, I40E_GLLAN_RCTL_0, (reg & (~I40E_GLLAN_RCTL_0_PXE_MODE_MASK))); 1425 } else { 1426 wr32(hw, I40E_GLLAN_RCTL_0, (reg | I40E_GLLAN_RCTL_0_PXE_MODE_MASK)); 1427 } 1428 } 1429 1430 /** 1431 * i40e_led_is_mine - helper to find matching led 1432 * @hw: pointer to the hw struct 1433 * @idx: index into GPIO registers 1434 * 1435 * returns: 0 if no match, otherwise the value of the GPIO_CTL register 1436 */ 1437 static u32 i40e_led_is_mine(struct i40e_hw *hw, int idx) 1438 { 1439 u32 gpio_val = 0; 1440 u32 port; 1441 1442 if (!I40E_IS_X710TL_DEVICE(hw->device_id) && 1443 !hw->func_caps.led[idx]) 1444 return 0; 1445 gpio_val = rd32(hw, I40E_GLGEN_GPIO_CTL(idx)); 1446 port = (gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK) >> 1447 I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT; 1448 1449 /* if PRT_NUM_NA is 1 then this LED is not port specific, OR 1450 * if it is not our port then ignore 1451 */ 1452 if ((gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_MASK) || 1453 (port != hw->port)) 1454 return 0; 1455 1456 return gpio_val; 1457 } 1458 1459 #define I40E_FW_LED BIT(4) 1460 #define I40E_LED_MODE_VALID (I40E_GLGEN_GPIO_CTL_LED_MODE_MASK >> \ 1461 I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) 1462 1463 #define I40E_LED0 22 1464 1465 #define I40E_PIN_FUNC_SDP 0x0 1466 #define I40E_PIN_FUNC_LED 0x1 1467 1468 /** 1469 * i40e_led_get - return current on/off mode 1470 * @hw: pointer to the hw struct 1471 * 1472 * The value returned is the 'mode' field as defined in the 1473 * GPIO register definitions: 0x0 = off, 0xf = on, and other 1474 * values are variations of possible behaviors relating to 1475 * blink, link, and wire. 1476 **/ 1477 u32 i40e_led_get(struct i40e_hw *hw) 1478 { 1479 u32 mode = 0; 1480 int i; 1481 1482 /* as per the documentation GPIO 22-29 are the LED 1483 * GPIO pins named LED0..LED7 1484 */ 1485 for (i = I40E_LED0; i <= I40E_GLGEN_GPIO_CTL_MAX_INDEX; i++) { 1486 u32 gpio_val = i40e_led_is_mine(hw, i); 1487 1488 if (!gpio_val) 1489 continue; 1490 1491 mode = (gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK) >> 1492 I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT; 1493 break; 1494 } 1495 1496 return mode; 1497 } 1498 1499 /** 1500 * i40e_led_set - set new on/off mode 1501 * @hw: pointer to the hw struct 1502 * @mode: 0=off, 0xf=on (else see manual for mode details) 1503 * @blink: true if the LED should blink when on, false if steady 1504 * 1505 * if this function is used to turn on the blink it should 1506 * be used to disable the blink when restoring the original state. 
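 *
 * A typical identify sequence, as a sketch (save the mode, blink, then
 * restore; 0xf is "on" per i40e_led_get() above):
 *
 *	u32 orig = i40e_led_get(hw);
 *
 *	i40e_led_set(hw, 0xf, true);
 *	...identification runs with the LED blinking...
 *	i40e_led_set(hw, orig, false);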
1507 **/ 1508 void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink) 1509 { 1510 int i; 1511 1512 if (mode & ~I40E_LED_MODE_VALID) { 1513 hw_dbg(hw, "invalid mode passed in %X\n", mode); 1514 return; 1515 } 1516 1517 /* as per the documentation GPIO 22-29 are the LED 1518 * GPIO pins named LED0..LED7 1519 */ 1520 for (i = I40E_LED0; i <= I40E_GLGEN_GPIO_CTL_MAX_INDEX; i++) { 1521 u32 gpio_val = i40e_led_is_mine(hw, i); 1522 1523 if (!gpio_val) 1524 continue; 1525 1526 if (I40E_IS_X710TL_DEVICE(hw->device_id)) { 1527 u32 pin_func = 0; 1528 1529 if (mode & I40E_FW_LED) 1530 pin_func = I40E_PIN_FUNC_SDP; 1531 else 1532 pin_func = I40E_PIN_FUNC_LED; 1533 1534 gpio_val &= ~I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK; 1535 gpio_val |= ((pin_func << 1536 I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT) & 1537 I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK); 1538 } 1539 gpio_val &= ~I40E_GLGEN_GPIO_CTL_LED_MODE_MASK; 1540 /* this & is a bit of paranoia, but serves as a range check */ 1541 gpio_val |= ((mode << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) & 1542 I40E_GLGEN_GPIO_CTL_LED_MODE_MASK); 1543 1544 if (blink) 1545 gpio_val |= BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT); 1546 else 1547 gpio_val &= ~BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT); 1548 1549 wr32(hw, I40E_GLGEN_GPIO_CTL(i), gpio_val); 1550 break; 1551 } 1552 } 1553 1554 /* Admin command wrappers */ 1555 1556 /** 1557 * i40e_aq_get_phy_capabilities 1558 * @hw: pointer to the hw struct 1559 * @abilities: structure for PHY capabilities to be filled 1560 * @qualified_modules: report Qualified Modules 1561 * @report_init: report init capabilities (active are default) 1562 * @cmd_details: pointer to command details structure or NULL 1563 * 1564 * Returns the various PHY abilities supported on the Port. 1565 **/ 1566 i40e_status i40e_aq_get_phy_capabilities(struct i40e_hw *hw, 1567 bool qualified_modules, bool report_init, 1568 struct i40e_aq_get_phy_abilities_resp *abilities, 1569 struct i40e_asq_cmd_details *cmd_details) 1570 { 1571 struct i40e_aq_desc desc; 1572 i40e_status status; 1573 u16 abilities_size = sizeof(struct i40e_aq_get_phy_abilities_resp); 1574 u16 max_delay = I40E_MAX_PHY_TIMEOUT, total_delay = 0; 1575 1576 if (!abilities) 1577 return I40E_ERR_PARAM; 1578 1579 do { 1580 i40e_fill_default_direct_cmd_desc(&desc, 1581 i40e_aqc_opc_get_phy_abilities); 1582 1583 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 1584 if (abilities_size > I40E_AQ_LARGE_BUF) 1585 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 1586 1587 if (qualified_modules) 1588 desc.params.external.param0 |= 1589 cpu_to_le32(I40E_AQ_PHY_REPORT_QUALIFIED_MODULES); 1590 1591 if (report_init) 1592 desc.params.external.param0 |= 1593 cpu_to_le32(I40E_AQ_PHY_REPORT_INITIAL_VALUES); 1594 1595 status = i40e_asq_send_command(hw, &desc, abilities, 1596 abilities_size, cmd_details); 1597 1598 switch (hw->aq.asq_last_status) { 1599 case I40E_AQ_RC_EIO: 1600 status = I40E_ERR_UNKNOWN_PHY; 1601 break; 1602 case I40E_AQ_RC_EAGAIN: 1603 usleep_range(1000, 2000); 1604 total_delay++; 1605 status = I40E_ERR_TIMEOUT; 1606 break; 1607 /* also covers I40E_AQ_RC_OK */ 1608 default: 1609 break; 1610 } 1611 1612 } while ((hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN) && 1613 (total_delay < max_delay)); 1614 1615 if (status) 1616 return status; 1617 1618 if (report_init) { 1619 if (hw->mac.type == I40E_MAC_XL710 && 1620 hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR && 1621 hw->aq.api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710) { 1622 status = i40e_aq_get_link_info(hw, true, NULL, NULL); 1623 } else { 1624 
			hw->phy.phy_types = le32_to_cpu(abilities->phy_type);
			hw->phy.phy_types |=
					((u64)abilities->phy_type_ext << 32);
		}
	}

	return status;
}

/**
 * i40e_aq_set_phy_config
 * @hw: pointer to the hw struct
 * @config: structure with PHY configuration to be set
 * @cmd_details: pointer to command details structure or NULL
 *
 * Set the various PHY configuration parameters
 * supported on the Port. One or more of the Set PHY config parameters may be
 * ignored in MFP mode as the PF may not have the privilege to set some
 * of the PHY Config parameters. This status will be indicated by the
 * command response.
 **/
enum i40e_status_code i40e_aq_set_phy_config(struct i40e_hw *hw,
				struct i40e_aq_set_phy_config *config,
				struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aq_set_phy_config *cmd =
		(struct i40e_aq_set_phy_config *)&desc.params.raw;
	enum i40e_status_code status;

	if (!config)
		return I40E_ERR_PARAM;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_set_phy_config);

	*cmd = *config;

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	return status;
}

static noinline_for_stack enum i40e_status_code
i40e_set_fc_status(struct i40e_hw *hw,
		   struct i40e_aq_get_phy_abilities_resp *abilities,
		   bool atomic_restart)
{
	struct i40e_aq_set_phy_config config;
	enum i40e_fc_mode fc_mode = hw->fc.requested_mode;
	u8 pause_mask = 0x0;

	switch (fc_mode) {
	case I40E_FC_FULL:
		pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_TX;
		pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_RX;
		break;
	case I40E_FC_RX_PAUSE:
		pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_RX;
		break;
	case I40E_FC_TX_PAUSE:
		pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_TX;
		break;
	default:
		break;
	}

	memset(&config, 0, sizeof(struct i40e_aq_set_phy_config));
	/* clear the old pause settings */
	config.abilities = abilities->abilities & ~(I40E_AQ_PHY_FLAG_PAUSE_TX) &
			   ~(I40E_AQ_PHY_FLAG_PAUSE_RX);
	/* set the new abilities */
	config.abilities |= pause_mask;
	/* If the abilities have not changed there is nothing to do */
	if (config.abilities == abilities->abilities)
		return 0;

	/* Auto restart link so settings take effect */
	if (atomic_restart)
		config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
	/* Copy over all the old settings */
	config.phy_type = abilities->phy_type;
	config.phy_type_ext = abilities->phy_type_ext;
	config.link_speed = abilities->link_speed;
	config.eee_capability = abilities->eee_capability;
	config.eeer = abilities->eeer_val;
	config.low_power_ctrl = abilities->d3_lpan;
	config.fec_config = abilities->fec_cfg_curr_mod_ext_info &
			    I40E_AQ_PHY_FEC_CONFIG_MASK;

	return i40e_aq_set_phy_config(hw, &config, NULL);
}

/**
 * i40e_set_fc
 * @hw: pointer to the hw struct
 * @aq_failures: buffer to return AdminQ failure information
 * @atomic_restart: whether to enable atomic link restart
 *
 * Set the requested flow control mode using set_phy_config.
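 *
 * Minimal caller sketch (a real caller would also inspect @aq_failures
 * to see which of the GET/SET/UPDATE steps failed):
 *
 *	u8 aq_failures = 0;
 *
 *	hw->fc.requested_mode = I40E_FC_FULL;
 *	status = i40e_set_fc(hw, &aq_failures, true);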
1724 **/ 1725 enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures, 1726 bool atomic_restart) 1727 { 1728 struct i40e_aq_get_phy_abilities_resp abilities; 1729 enum i40e_status_code status; 1730 1731 *aq_failures = 0x0; 1732 1733 /* Get the current phy config */ 1734 status = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, 1735 NULL); 1736 if (status) { 1737 *aq_failures |= I40E_SET_FC_AQ_FAIL_GET; 1738 return status; 1739 } 1740 1741 status = i40e_set_fc_status(hw, &abilities, atomic_restart); 1742 if (status) 1743 *aq_failures |= I40E_SET_FC_AQ_FAIL_SET; 1744 1745 /* Update the link info */ 1746 status = i40e_update_link_info(hw); 1747 if (status) { 1748 /* Wait a little bit (on 40G cards it sometimes takes a really 1749 * long time for link to come back from the atomic reset) 1750 * and try once more 1751 */ 1752 msleep(1000); 1753 status = i40e_update_link_info(hw); 1754 } 1755 if (status) 1756 *aq_failures |= I40E_SET_FC_AQ_FAIL_UPDATE; 1757 1758 return status; 1759 } 1760 1761 /** 1762 * i40e_aq_clear_pxe_mode 1763 * @hw: pointer to the hw struct 1764 * @cmd_details: pointer to command details structure or NULL 1765 * 1766 * Tell the firmware that the driver is taking over from PXE 1767 **/ 1768 i40e_status i40e_aq_clear_pxe_mode(struct i40e_hw *hw, 1769 struct i40e_asq_cmd_details *cmd_details) 1770 { 1771 i40e_status status; 1772 struct i40e_aq_desc desc; 1773 struct i40e_aqc_clear_pxe *cmd = 1774 (struct i40e_aqc_clear_pxe *)&desc.params.raw; 1775 1776 i40e_fill_default_direct_cmd_desc(&desc, 1777 i40e_aqc_opc_clear_pxe_mode); 1778 1779 cmd->rx_cnt = 0x2; 1780 1781 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 1782 1783 wr32(hw, I40E_GLLAN_RCTL_0, 0x1); 1784 1785 return status; 1786 } 1787 1788 /** 1789 * i40e_aq_set_link_restart_an 1790 * @hw: pointer to the hw struct 1791 * @enable_link: if true: enable link, if false: disable link 1792 * @cmd_details: pointer to command details structure or NULL 1793 * 1794 * Sets up the link and restarts the Auto-Negotiation over the link. 1795 **/ 1796 i40e_status i40e_aq_set_link_restart_an(struct i40e_hw *hw, 1797 bool enable_link, 1798 struct i40e_asq_cmd_details *cmd_details) 1799 { 1800 struct i40e_aq_desc desc; 1801 struct i40e_aqc_set_link_restart_an *cmd = 1802 (struct i40e_aqc_set_link_restart_an *)&desc.params.raw; 1803 i40e_status status; 1804 1805 i40e_fill_default_direct_cmd_desc(&desc, 1806 i40e_aqc_opc_set_link_restart_an); 1807 1808 cmd->command = I40E_AQ_PHY_RESTART_AN; 1809 if (enable_link) 1810 cmd->command |= I40E_AQ_PHY_LINK_ENABLE; 1811 else 1812 cmd->command &= ~I40E_AQ_PHY_LINK_ENABLE; 1813 1814 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 1815 1816 return status; 1817 } 1818 1819 /** 1820 * i40e_aq_get_link_info 1821 * @hw: pointer to the hw struct 1822 * @enable_lse: enable/disable LinkStatusEvent reporting 1823 * @link: pointer to link status structure - optional 1824 * @cmd_details: pointer to command details structure or NULL 1825 * 1826 * Returns the link status of the adapter. 
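 *
 * If @link is NULL the result is still cached in hw->phy.link_info.
 * Illustrative call (sketch only):
 *
 *	struct i40e_link_status link;
 *
 *	if (!i40e_aq_get_link_info(hw, true, &link, NULL) &&
 *	    (link.link_info & I40E_AQ_LINK_UP))
 *		hw_dbg(hw, "link up, an_info 0x%02x\n", link.an_info);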
1827 **/ 1828 i40e_status i40e_aq_get_link_info(struct i40e_hw *hw, 1829 bool enable_lse, struct i40e_link_status *link, 1830 struct i40e_asq_cmd_details *cmd_details) 1831 { 1832 struct i40e_aq_desc desc; 1833 struct i40e_aqc_get_link_status *resp = 1834 (struct i40e_aqc_get_link_status *)&desc.params.raw; 1835 struct i40e_link_status *hw_link_info = &hw->phy.link_info; 1836 i40e_status status; 1837 bool tx_pause, rx_pause; 1838 u16 command_flags; 1839 1840 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status); 1841 1842 if (enable_lse) 1843 command_flags = I40E_AQ_LSE_ENABLE; 1844 else 1845 command_flags = I40E_AQ_LSE_DISABLE; 1846 resp->command_flags = cpu_to_le16(command_flags); 1847 1848 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 1849 1850 if (status) 1851 goto aq_get_link_info_exit; 1852 1853 /* save off old link status information */ 1854 hw->phy.link_info_old = *hw_link_info; 1855 1856 /* update link status */ 1857 hw_link_info->phy_type = (enum i40e_aq_phy_type)resp->phy_type; 1858 hw->phy.media_type = i40e_get_media_type(hw); 1859 hw_link_info->link_speed = (enum i40e_aq_link_speed)resp->link_speed; 1860 hw_link_info->link_info = resp->link_info; 1861 hw_link_info->an_info = resp->an_info; 1862 hw_link_info->fec_info = resp->config & (I40E_AQ_CONFIG_FEC_KR_ENA | 1863 I40E_AQ_CONFIG_FEC_RS_ENA); 1864 hw_link_info->ext_info = resp->ext_info; 1865 hw_link_info->loopback = resp->loopback & I40E_AQ_LOOPBACK_MASK; 1866 hw_link_info->max_frame_size = le16_to_cpu(resp->max_frame_size); 1867 hw_link_info->pacing = resp->config & I40E_AQ_CONFIG_PACING_MASK; 1868 1869 /* update fc info */ 1870 tx_pause = !!(resp->an_info & I40E_AQ_LINK_PAUSE_TX); 1871 rx_pause = !!(resp->an_info & I40E_AQ_LINK_PAUSE_RX); 1872 if (tx_pause & rx_pause) 1873 hw->fc.current_mode = I40E_FC_FULL; 1874 else if (tx_pause) 1875 hw->fc.current_mode = I40E_FC_TX_PAUSE; 1876 else if (rx_pause) 1877 hw->fc.current_mode = I40E_FC_RX_PAUSE; 1878 else 1879 hw->fc.current_mode = I40E_FC_NONE; 1880 1881 if (resp->config & I40E_AQ_CONFIG_CRC_ENA) 1882 hw_link_info->crc_enable = true; 1883 else 1884 hw_link_info->crc_enable = false; 1885 1886 if (resp->command_flags & cpu_to_le16(I40E_AQ_LSE_IS_ENABLED)) 1887 hw_link_info->lse_enable = true; 1888 else 1889 hw_link_info->lse_enable = false; 1890 1891 if ((hw->mac.type == I40E_MAC_XL710) && 1892 (hw->aq.fw_maj_ver < 4 || (hw->aq.fw_maj_ver == 4 && 1893 hw->aq.fw_min_ver < 40)) && hw_link_info->phy_type == 0xE) 1894 hw_link_info->phy_type = I40E_PHY_TYPE_10GBASE_SFPP_CU; 1895 1896 if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE && 1897 hw->mac.type != I40E_MAC_X722) { 1898 __le32 tmp; 1899 1900 memcpy(&tmp, resp->link_type, sizeof(tmp)); 1901 hw->phy.phy_types = le32_to_cpu(tmp); 1902 hw->phy.phy_types |= ((u64)resp->link_type_ext << 32); 1903 } 1904 1905 /* save link status information */ 1906 if (link) 1907 *link = *hw_link_info; 1908 1909 /* flag cleared so helper functions don't call AQ again */ 1910 hw->phy.get_link_info = false; 1911 1912 aq_get_link_info_exit: 1913 return status; 1914 } 1915 1916 /** 1917 * i40e_aq_set_phy_int_mask 1918 * @hw: pointer to the hw struct 1919 * @mask: interrupt mask to be set 1920 * @cmd_details: pointer to command details structure or NULL 1921 * 1922 * Set link interrupt mask. 
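 *
 * Bits set in @mask are masked (not reported) over the ARQ; the PF
 * driver typically passes the complement of the events it cares about.
 * Sketch (event bit names are defined in i40e_adminq_cmd.h):
 *
 *	i40e_aq_set_phy_int_mask(hw,
 *				 ~(I40E_AQ_EVENT_LINK_UPDOWN |
 *				   I40E_AQ_EVENT_MEDIA_NA |
 *				   I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);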
1923 **/ 1924 i40e_status i40e_aq_set_phy_int_mask(struct i40e_hw *hw, 1925 u16 mask, 1926 struct i40e_asq_cmd_details *cmd_details) 1927 { 1928 struct i40e_aq_desc desc; 1929 struct i40e_aqc_set_phy_int_mask *cmd = 1930 (struct i40e_aqc_set_phy_int_mask *)&desc.params.raw; 1931 i40e_status status; 1932 1933 i40e_fill_default_direct_cmd_desc(&desc, 1934 i40e_aqc_opc_set_phy_int_mask); 1935 1936 cmd->event_mask = cpu_to_le16(mask); 1937 1938 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 1939 1940 return status; 1941 } 1942 1943 /** 1944 * i40e_aq_set_phy_debug 1945 * @hw: pointer to the hw struct 1946 * @cmd_flags: debug command flags 1947 * @cmd_details: pointer to command details structure or NULL 1948 * 1949 * Reset the external PHY. 1950 **/ 1951 i40e_status i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags, 1952 struct i40e_asq_cmd_details *cmd_details) 1953 { 1954 struct i40e_aq_desc desc; 1955 struct i40e_aqc_set_phy_debug *cmd = 1956 (struct i40e_aqc_set_phy_debug *)&desc.params.raw; 1957 i40e_status status; 1958 1959 i40e_fill_default_direct_cmd_desc(&desc, 1960 i40e_aqc_opc_set_phy_debug); 1961 1962 cmd->command_flags = cmd_flags; 1963 1964 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 1965 1966 return status; 1967 } 1968 1969 /** 1970 * i40e_is_aq_api_ver_ge 1971 * @aq: pointer to AdminQ info containing HW API version to compare 1972 * @maj: API major value 1973 * @min: API minor value 1974 * 1975 * Assert whether current HW API version is greater/equal than provided. 1976 **/ 1977 static bool i40e_is_aq_api_ver_ge(struct i40e_adminq_info *aq, u16 maj, 1978 u16 min) 1979 { 1980 return (aq->api_maj_ver > maj || 1981 (aq->api_maj_ver == maj && aq->api_min_ver >= min)); 1982 } 1983 1984 /** 1985 * i40e_aq_add_vsi 1986 * @hw: pointer to the hw struct 1987 * @vsi_ctx: pointer to a vsi context struct 1988 * @cmd_details: pointer to command details structure or NULL 1989 * 1990 * Add a VSI context to the hardware. 
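 *
 * The caller fills the context (uplink_seid, connection_type, flags and
 * the "info" section); on success the allocated SEID and VSI number are
 * written back into @vsi_ctx. Minimal sketch (illustrative only; the
 * uplink SEID is assumed to come from the caller, and 0x1 stands for a
 * regular data connection):
 *
 *	struct i40e_vsi_context ctx = {};
 *
 *	ctx.uplink_seid = uplink_seid;
 *	ctx.connection_type = 0x1;
 *	ctx.flags = I40E_AQ_VSI_TYPE_PF;
 *	if (!i40e_aq_add_vsi(hw, &ctx, NULL))
 *		hw_dbg(hw, "new VSI seid %d\n", ctx.seid);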
1991 **/ 1992 i40e_status i40e_aq_add_vsi(struct i40e_hw *hw, 1993 struct i40e_vsi_context *vsi_ctx, 1994 struct i40e_asq_cmd_details *cmd_details) 1995 { 1996 struct i40e_aq_desc desc; 1997 struct i40e_aqc_add_get_update_vsi *cmd = 1998 (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw; 1999 struct i40e_aqc_add_get_update_vsi_completion *resp = 2000 (struct i40e_aqc_add_get_update_vsi_completion *) 2001 &desc.params.raw; 2002 i40e_status status; 2003 2004 i40e_fill_default_direct_cmd_desc(&desc, 2005 i40e_aqc_opc_add_vsi); 2006 2007 cmd->uplink_seid = cpu_to_le16(vsi_ctx->uplink_seid); 2008 cmd->connection_type = vsi_ctx->connection_type; 2009 cmd->vf_id = vsi_ctx->vf_num; 2010 cmd->vsi_flags = cpu_to_le16(vsi_ctx->flags); 2011 2012 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); 2013 2014 status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info, 2015 sizeof(vsi_ctx->info), cmd_details); 2016 2017 if (status) 2018 goto aq_add_vsi_exit; 2019 2020 vsi_ctx->seid = le16_to_cpu(resp->seid); 2021 vsi_ctx->vsi_number = le16_to_cpu(resp->vsi_number); 2022 vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used); 2023 vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free); 2024 2025 aq_add_vsi_exit: 2026 return status; 2027 } 2028 2029 /** 2030 * i40e_aq_set_default_vsi 2031 * @hw: pointer to the hw struct 2032 * @seid: vsi number 2033 * @cmd_details: pointer to command details structure or NULL 2034 **/ 2035 i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw, 2036 u16 seid, 2037 struct i40e_asq_cmd_details *cmd_details) 2038 { 2039 struct i40e_aq_desc desc; 2040 struct i40e_aqc_set_vsi_promiscuous_modes *cmd = 2041 (struct i40e_aqc_set_vsi_promiscuous_modes *) 2042 &desc.params.raw; 2043 i40e_status status; 2044 2045 i40e_fill_default_direct_cmd_desc(&desc, 2046 i40e_aqc_opc_set_vsi_promiscuous_modes); 2047 2048 cmd->promiscuous_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT); 2049 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT); 2050 cmd->seid = cpu_to_le16(seid); 2051 2052 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2053 2054 return status; 2055 } 2056 2057 /** 2058 * i40e_aq_clear_default_vsi 2059 * @hw: pointer to the hw struct 2060 * @seid: vsi number 2061 * @cmd_details: pointer to command details structure or NULL 2062 **/ 2063 i40e_status i40e_aq_clear_default_vsi(struct i40e_hw *hw, 2064 u16 seid, 2065 struct i40e_asq_cmd_details *cmd_details) 2066 { 2067 struct i40e_aq_desc desc; 2068 struct i40e_aqc_set_vsi_promiscuous_modes *cmd = 2069 (struct i40e_aqc_set_vsi_promiscuous_modes *) 2070 &desc.params.raw; 2071 i40e_status status; 2072 2073 i40e_fill_default_direct_cmd_desc(&desc, 2074 i40e_aqc_opc_set_vsi_promiscuous_modes); 2075 2076 cmd->promiscuous_flags = cpu_to_le16(0); 2077 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT); 2078 cmd->seid = cpu_to_le16(seid); 2079 2080 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2081 2082 return status; 2083 } 2084 2085 /** 2086 * i40e_aq_set_vsi_unicast_promiscuous 2087 * @hw: pointer to the hw struct 2088 * @seid: vsi number 2089 * @set: set unicast promiscuous enable/disable 2090 * @cmd_details: pointer to command details structure or NULL 2091 * @rx_only_promisc: flag to decide if egress traffic gets mirrored in promisc 2092 **/ 2093 i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw, 2094 u16 seid, bool set, 2095 struct i40e_asq_cmd_details *cmd_details, 2096 bool rx_only_promisc) 2097 { 2098 struct i40e_aq_desc desc; 2099 struct 
i40e_aqc_set_vsi_promiscuous_modes *cmd = 2100 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; 2101 i40e_status status; 2102 u16 flags = 0; 2103 2104 i40e_fill_default_direct_cmd_desc(&desc, 2105 i40e_aqc_opc_set_vsi_promiscuous_modes); 2106 2107 if (set) { 2108 flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST; 2109 if (rx_only_promisc && i40e_is_aq_api_ver_ge(&hw->aq, 1, 5)) 2110 flags |= I40E_AQC_SET_VSI_PROMISC_RX_ONLY; 2111 } 2112 2113 cmd->promiscuous_flags = cpu_to_le16(flags); 2114 2115 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST); 2116 if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5)) 2117 cmd->valid_flags |= 2118 cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_RX_ONLY); 2119 2120 cmd->seid = cpu_to_le16(seid); 2121 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2122 2123 return status; 2124 } 2125 2126 /** 2127 * i40e_aq_set_vsi_multicast_promiscuous 2128 * @hw: pointer to the hw struct 2129 * @seid: vsi number 2130 * @set: set multicast promiscuous enable/disable 2131 * @cmd_details: pointer to command details structure or NULL 2132 **/ 2133 i40e_status i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw, 2134 u16 seid, bool set, struct i40e_asq_cmd_details *cmd_details) 2135 { 2136 struct i40e_aq_desc desc; 2137 struct i40e_aqc_set_vsi_promiscuous_modes *cmd = 2138 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; 2139 i40e_status status; 2140 u16 flags = 0; 2141 2142 i40e_fill_default_direct_cmd_desc(&desc, 2143 i40e_aqc_opc_set_vsi_promiscuous_modes); 2144 2145 if (set) 2146 flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST; 2147 2148 cmd->promiscuous_flags = cpu_to_le16(flags); 2149 2150 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_MULTICAST); 2151 2152 cmd->seid = cpu_to_le16(seid); 2153 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2154 2155 return status; 2156 } 2157 2158 /** 2159 * i40e_aq_set_vsi_mc_promisc_on_vlan 2160 * @hw: pointer to the hw struct 2161 * @seid: vsi number 2162 * @enable: set MAC L2 layer unicast promiscuous enable/disable for a given VLAN 2163 * @vid: The VLAN tag filter - capture any multicast packet with this VLAN tag 2164 * @cmd_details: pointer to command details structure or NULL 2165 **/ 2166 enum i40e_status_code i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw, 2167 u16 seid, bool enable, 2168 u16 vid, 2169 struct i40e_asq_cmd_details *cmd_details) 2170 { 2171 struct i40e_aq_desc desc; 2172 struct i40e_aqc_set_vsi_promiscuous_modes *cmd = 2173 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; 2174 enum i40e_status_code status; 2175 u16 flags = 0; 2176 2177 i40e_fill_default_direct_cmd_desc(&desc, 2178 i40e_aqc_opc_set_vsi_promiscuous_modes); 2179 2180 if (enable) 2181 flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST; 2182 2183 cmd->promiscuous_flags = cpu_to_le16(flags); 2184 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_MULTICAST); 2185 cmd->seid = cpu_to_le16(seid); 2186 cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID); 2187 2188 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2189 2190 return status; 2191 } 2192 2193 /** 2194 * i40e_aq_set_vsi_uc_promisc_on_vlan 2195 * @hw: pointer to the hw struct 2196 * @seid: vsi number 2197 * @enable: set MAC L2 layer unicast promiscuous enable/disable for a given VLAN 2198 * @vid: The VLAN tag filter - capture any unicast packet with this VLAN tag 2199 * @cmd_details: pointer to command details structure or NULL 2200 **/ 2201 enum i40e_status_code 
i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw, 2202 u16 seid, bool enable, 2203 u16 vid, 2204 struct i40e_asq_cmd_details *cmd_details) 2205 { 2206 struct i40e_aq_desc desc; 2207 struct i40e_aqc_set_vsi_promiscuous_modes *cmd = 2208 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; 2209 enum i40e_status_code status; 2210 u16 flags = 0; 2211 2212 i40e_fill_default_direct_cmd_desc(&desc, 2213 i40e_aqc_opc_set_vsi_promiscuous_modes); 2214 2215 if (enable) { 2216 flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST; 2217 if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5)) 2218 flags |= I40E_AQC_SET_VSI_PROMISC_RX_ONLY; 2219 } 2220 2221 cmd->promiscuous_flags = cpu_to_le16(flags); 2222 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST); 2223 if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5)) 2224 cmd->valid_flags |= 2225 cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_RX_ONLY); 2226 cmd->seid = cpu_to_le16(seid); 2227 cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID); 2228 2229 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2230 2231 return status; 2232 } 2233 2234 /** 2235 * i40e_aq_set_vsi_bc_promisc_on_vlan 2236 * @hw: pointer to the hw struct 2237 * @seid: vsi number 2238 * @enable: set broadcast promiscuous enable/disable for a given VLAN 2239 * @vid: The VLAN tag filter - capture any broadcast packet with this VLAN tag 2240 * @cmd_details: pointer to command details structure or NULL 2241 **/ 2242 i40e_status i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw, 2243 u16 seid, bool enable, u16 vid, 2244 struct i40e_asq_cmd_details *cmd_details) 2245 { 2246 struct i40e_aq_desc desc; 2247 struct i40e_aqc_set_vsi_promiscuous_modes *cmd = 2248 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; 2249 i40e_status status; 2250 u16 flags = 0; 2251 2252 i40e_fill_default_direct_cmd_desc(&desc, 2253 i40e_aqc_opc_set_vsi_promiscuous_modes); 2254 2255 if (enable) 2256 flags |= I40E_AQC_SET_VSI_PROMISC_BROADCAST; 2257 2258 cmd->promiscuous_flags = cpu_to_le16(flags); 2259 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST); 2260 cmd->seid = cpu_to_le16(seid); 2261 cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID); 2262 2263 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2264 2265 return status; 2266 } 2267 2268 /** 2269 * i40e_aq_set_vsi_broadcast 2270 * @hw: pointer to the hw struct 2271 * @seid: vsi number 2272 * @set_filter: true to set filter, false to clear filter 2273 * @cmd_details: pointer to command details structure or NULL 2274 * 2275 * Set or clear the broadcast promiscuous flag (filter) for a given VSI. 
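 *
 * Illustrative call (sketch): allow broadcast frames on the VSI
 * identified by "seid":
 *
 *	status = i40e_aq_set_vsi_broadcast(hw, seid, true, NULL);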
2276 **/
2277 i40e_status i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
2278 u16 seid, bool set_filter,
2279 struct i40e_asq_cmd_details *cmd_details)
2280 {
2281 struct i40e_aq_desc desc;
2282 struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
2283 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
2284 i40e_status status;
2285
2286 i40e_fill_default_direct_cmd_desc(&desc,
2287 i40e_aqc_opc_set_vsi_promiscuous_modes);
2288
2289 if (set_filter)
2290 cmd->promiscuous_flags
2291 |= cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
2292 else
2293 cmd->promiscuous_flags
2294 &= cpu_to_le16(~I40E_AQC_SET_VSI_PROMISC_BROADCAST);
2295
2296 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
2297 cmd->seid = cpu_to_le16(seid);
2298 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2299
2300 return status;
2301 }
2302
2303 /**
2304 * i40e_aq_set_vsi_vlan_promisc - control the VLAN promiscuous setting
2305 * @hw: pointer to the hw struct
2306 * @seid: vsi number
2307 * @enable: set VLAN promiscuous enable/disable for the VSI
2308 * @cmd_details: pointer to command details structure or NULL
2309 **/
2310 i40e_status i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw,
2311 u16 seid, bool enable,
2312 struct i40e_asq_cmd_details *cmd_details)
2313 {
2314 struct i40e_aq_desc desc;
2315 struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
2316 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
2317 i40e_status status;
2318 u16 flags = 0;
2319
2320 i40e_fill_default_direct_cmd_desc(&desc,
2321 i40e_aqc_opc_set_vsi_promiscuous_modes);
2322 if (enable)
2323 flags |= I40E_AQC_SET_VSI_PROMISC_VLAN;
2324
2325 cmd->promiscuous_flags = cpu_to_le16(flags);
2326 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_VLAN);
2327 cmd->seid = cpu_to_le16(seid);
2328
2329 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2330
2331 return status;
2332 }
2333
2334 /**
2335 * i40e_aq_get_vsi_params - get VSI configuration info
2336 * @hw: pointer to the hw struct
2337 * @vsi_ctx: pointer to a vsi context struct
2338 * @cmd_details: pointer to command details structure or NULL
2339 **/
2340 i40e_status i40e_aq_get_vsi_params(struct i40e_hw *hw,
2341 struct i40e_vsi_context *vsi_ctx,
2342 struct i40e_asq_cmd_details *cmd_details)
2343 {
2344 struct i40e_aq_desc desc;
2345 struct i40e_aqc_add_get_update_vsi *cmd =
2346 (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
2347 struct i40e_aqc_add_get_update_vsi_completion *resp =
2348 (struct i40e_aqc_add_get_update_vsi_completion *)
2349 &desc.params.raw;
2350 i40e_status status;
2351
2352 i40e_fill_default_direct_cmd_desc(&desc,
2353 i40e_aqc_opc_get_vsi_parameters);
2354
2355 cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid);
2356
2357 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
2358
2359 status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
2360 sizeof(vsi_ctx->info), NULL);
2361
2362 if (status)
2363 goto aq_get_vsi_params_exit;
2364
2365 vsi_ctx->seid = le16_to_cpu(resp->seid);
2366 vsi_ctx->vsi_number = le16_to_cpu(resp->vsi_number);
2367 vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used);
2368 vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
2369
2370 aq_get_vsi_params_exit:
2371 return status;
2372 }
2373
2374 /**
2375 * i40e_aq_update_vsi_params
2376 * @hw: pointer to the hw struct
2377 * @vsi_ctx: pointer to a vsi context struct
2378 * @cmd_details: pointer to command details structure or NULL
2379 *
2380 * Update a VSI context.
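 *
 * The usual pattern is read-modify-write: fetch the current context via
 * i40e_aq_get_vsi_params(), update the relevant "info" fields together
 * with the matching valid_sections bit, then send it back. Sketch
 * (illustrative; "ctx" is a previously populated i40e_vsi_context):
 *
 *	ctx.info.valid_sections |=
 *		cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
 *	status = i40e_aq_update_vsi_params(hw, &ctx, NULL);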
2381 **/
2382 i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw,
2383 struct i40e_vsi_context *vsi_ctx,
2384 struct i40e_asq_cmd_details *cmd_details)
2385 {
2386 struct i40e_aq_desc desc;
2387 struct i40e_aqc_add_get_update_vsi *cmd =
2388 (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
2389 struct i40e_aqc_add_get_update_vsi_completion *resp =
2390 (struct i40e_aqc_add_get_update_vsi_completion *)
2391 &desc.params.raw;
2392 i40e_status status;
2393
2394 i40e_fill_default_direct_cmd_desc(&desc,
2395 i40e_aqc_opc_update_vsi_parameters);
2396 cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid);
2397
2398 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
2399
2400 status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
2401 sizeof(vsi_ctx->info), cmd_details);
2402
2403 vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used);
2404 vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
2405
2406 return status;
2407 }
2408
2409 /**
2410 * i40e_aq_get_switch_config
2411 * @hw: pointer to the hardware structure
2412 * @buf: pointer to the result buffer
2413 * @buf_size: length of input buffer
2414 * @start_seid: seid to start for the report, 0 == beginning
2415 * @cmd_details: pointer to command details structure or NULL
2416 *
2417 * Fill the buf with switch configuration returned from AdminQ command
2418 **/
2419 i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw,
2420 struct i40e_aqc_get_switch_config_resp *buf,
2421 u16 buf_size, u16 *start_seid,
2422 struct i40e_asq_cmd_details *cmd_details)
2423 {
2424 struct i40e_aq_desc desc;
2425 struct i40e_aqc_switch_seid *scfg =
2426 (struct i40e_aqc_switch_seid *)&desc.params.raw;
2427 i40e_status status;
2428
2429 i40e_fill_default_direct_cmd_desc(&desc,
2430 i40e_aqc_opc_get_switch_config);
2431 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
2432 if (buf_size > I40E_AQ_LARGE_BUF)
2433 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
2434 scfg->seid = cpu_to_le16(*start_seid);
2435
2436 status = i40e_asq_send_command(hw, &desc, buf, buf_size, cmd_details);
2437 *start_seid = le16_to_cpu(scfg->seid);
2438
2439 return status;
2440 }
2441
2442 /**
2443 * i40e_aq_set_switch_config
2444 * @hw: pointer to the hardware structure
2445 * @flags: bit flag values to set
2446 * @mode: cloud filter mode
2447 * @valid_flags: which bit flags to set
2449 * @cmd_details: pointer to command details structure or NULL
2450 *
2451 * Set switch configuration bits
2452 **/
2453 enum i40e_status_code i40e_aq_set_switch_config(struct i40e_hw *hw,
2454 u16 flags,
2455 u16 valid_flags, u8 mode,
2456 struct i40e_asq_cmd_details *cmd_details)
2457 {
2458 struct i40e_aq_desc desc;
2459 struct i40e_aqc_set_switch_config *scfg =
2460 (struct i40e_aqc_set_switch_config *)&desc.params.raw;
2461 enum i40e_status_code status;
2462
2463 i40e_fill_default_direct_cmd_desc(&desc,
2464 i40e_aqc_opc_set_switch_config);
2465 scfg->flags = cpu_to_le16(flags);
2466 scfg->valid_flags = cpu_to_le16(valid_flags);
2467 scfg->mode = mode;
2468 if (hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) {
2469 scfg->switch_tag = cpu_to_le16(hw->switch_tag);
2470 scfg->first_tag = cpu_to_le16(hw->first_tag);
2471 scfg->second_tag = cpu_to_le16(hw->second_tag);
2472 }
2473 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2474
2475 return status;
2476 }
2477
2478 /**
2479 * i40e_aq_get_firmware_version
2480 * @hw: pointer to the hw struct
2481 * @fw_major_version: firmware major version
2482 * @fw_minor_version: firmware minor
version 2483 * @fw_build: firmware build number 2484 * @api_major_version: major queue version 2485 * @api_minor_version: minor queue version 2486 * @cmd_details: pointer to command details structure or NULL 2487 * 2488 * Get the firmware version from the admin queue commands 2489 **/ 2490 i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw, 2491 u16 *fw_major_version, u16 *fw_minor_version, 2492 u32 *fw_build, 2493 u16 *api_major_version, u16 *api_minor_version, 2494 struct i40e_asq_cmd_details *cmd_details) 2495 { 2496 struct i40e_aq_desc desc; 2497 struct i40e_aqc_get_version *resp = 2498 (struct i40e_aqc_get_version *)&desc.params.raw; 2499 i40e_status status; 2500 2501 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_version); 2502 2503 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2504 2505 if (!status) { 2506 if (fw_major_version) 2507 *fw_major_version = le16_to_cpu(resp->fw_major); 2508 if (fw_minor_version) 2509 *fw_minor_version = le16_to_cpu(resp->fw_minor); 2510 if (fw_build) 2511 *fw_build = le32_to_cpu(resp->fw_build); 2512 if (api_major_version) 2513 *api_major_version = le16_to_cpu(resp->api_major); 2514 if (api_minor_version) 2515 *api_minor_version = le16_to_cpu(resp->api_minor); 2516 } 2517 2518 return status; 2519 } 2520 2521 /** 2522 * i40e_aq_send_driver_version 2523 * @hw: pointer to the hw struct 2524 * @dv: driver's major, minor version 2525 * @cmd_details: pointer to command details structure or NULL 2526 * 2527 * Send the driver version to the firmware 2528 **/ 2529 i40e_status i40e_aq_send_driver_version(struct i40e_hw *hw, 2530 struct i40e_driver_version *dv, 2531 struct i40e_asq_cmd_details *cmd_details) 2532 { 2533 struct i40e_aq_desc desc; 2534 struct i40e_aqc_driver_version *cmd = 2535 (struct i40e_aqc_driver_version *)&desc.params.raw; 2536 i40e_status status; 2537 u16 len; 2538 2539 if (dv == NULL) 2540 return I40E_ERR_PARAM; 2541 2542 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_driver_version); 2543 2544 desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD); 2545 cmd->driver_major_ver = dv->major_version; 2546 cmd->driver_minor_ver = dv->minor_version; 2547 cmd->driver_build_ver = dv->build_version; 2548 cmd->driver_subbuild_ver = dv->subbuild_version; 2549 2550 len = 0; 2551 while (len < sizeof(dv->driver_string) && 2552 (dv->driver_string[len] < 0x80) && 2553 dv->driver_string[len]) 2554 len++; 2555 status = i40e_asq_send_command(hw, &desc, dv->driver_string, 2556 len, cmd_details); 2557 2558 return status; 2559 } 2560 2561 /** 2562 * i40e_get_link_status - get status of the HW network link 2563 * @hw: pointer to the hw struct 2564 * @link_up: pointer to bool (true/false = linkup/linkdown) 2565 * 2566 * Variable link_up true if link is up, false if link is down. 
2567 * The variable link_up is invalid if the returned status is non-zero.
2568 *
2569 * Side effect: LinkStatusEvent reporting becomes enabled
2570 **/
2571 i40e_status i40e_get_link_status(struct i40e_hw *hw, bool *link_up)
2572 {
2573 i40e_status status = 0;
2574
2575 if (hw->phy.get_link_info) {
2576 status = i40e_update_link_info(hw);
2577
2578 if (status)
2579 i40e_debug(hw, I40E_DEBUG_LINK, "get link failed: status %d\n",
2580 status);
2581 }
2582
2583 *link_up = hw->phy.link_info.link_info & I40E_AQ_LINK_UP;
2584
2585 return status;
2586 }
2587
2588 /**
2589 * i40e_update_link_info - update status of the HW network link
2590 * @hw: pointer to the hw struct
2591 **/
2592 noinline_for_stack i40e_status i40e_update_link_info(struct i40e_hw *hw)
2593 {
2594 struct i40e_aq_get_phy_abilities_resp abilities;
2595 i40e_status status = 0;
2596
2597 status = i40e_aq_get_link_info(hw, true, NULL, NULL);
2598 if (status)
2599 return status;
2600
2601 /* extra checking needed to ensure link info to user is timely */
2602 if ((hw->phy.link_info.link_info & I40E_AQ_MEDIA_AVAILABLE) &&
2603 ((hw->phy.link_info.link_info & I40E_AQ_LINK_UP) ||
2604 !(hw->phy.link_info_old.link_info & I40E_AQ_LINK_UP))) {
2605 status = i40e_aq_get_phy_capabilities(hw, false, false,
2606 &abilities, NULL);
2607 if (status)
2608 return status;
2609
2610 if (abilities.fec_cfg_curr_mod_ext_info &
2611 I40E_AQ_ENABLE_FEC_AUTO)
2612 hw->phy.link_info.req_fec_info =
2613 (I40E_AQ_REQUEST_FEC_KR |
2614 I40E_AQ_REQUEST_FEC_RS);
2615 else
2616 hw->phy.link_info.req_fec_info =
2617 abilities.fec_cfg_curr_mod_ext_info &
2618 (I40E_AQ_REQUEST_FEC_KR |
2619 I40E_AQ_REQUEST_FEC_RS);
2620
2621 memcpy(hw->phy.link_info.module_type, &abilities.module_type,
2622 sizeof(hw->phy.link_info.module_type));
2623 }
2624
2625 return status;
2626 }
2627
2628 /**
2629 * i40e_aq_add_veb - Insert a VEB between the VSI and the MAC
2630 * @hw: pointer to the hw struct
2631 * @uplink_seid: the MAC or other gizmo SEID
2632 * @downlink_seid: the VSI SEID
2633 * @enabled_tc: bitmap of TCs to be enabled
2634 * @default_port: true for default port VSI, false for control port
2635 * @veb_seid: pointer to where to put the resulting VEB SEID
2636 * @enable_stats: true to turn on VEB stats
2637 * @cmd_details: pointer to command details structure or NULL
2638 *
2639 * This asks the FW to add a VEB between the uplink and downlink
2640 * elements. If the uplink SEID is 0, this will be a floating VEB.
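 *
 * Illustrative call (sketch; uplink_seid, vsi_seid and enabled_tc are
 * assumed to be provided by the caller):
 *
 *	u16 veb_seid = 0;
 *
 *	status = i40e_aq_add_veb(hw, uplink_seid, vsi_seid, enabled_tc,
 *				 true, &veb_seid, true, NULL);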
2641 **/
2642 i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
2643 u16 downlink_seid, u8 enabled_tc,
2644 bool default_port, u16 *veb_seid,
2645 bool enable_stats,
2646 struct i40e_asq_cmd_details *cmd_details)
2647 {
2648 struct i40e_aq_desc desc;
2649 struct i40e_aqc_add_veb *cmd =
2650 (struct i40e_aqc_add_veb *)&desc.params.raw;
2651 struct i40e_aqc_add_veb_completion *resp =
2652 (struct i40e_aqc_add_veb_completion *)&desc.params.raw;
2653 i40e_status status;
2654 u16 veb_flags = 0;
2655
2656 /* SEIDs need to either both be set or both be 0 for floating VEB */
2657 if (!!uplink_seid != !!downlink_seid)
2658 return I40E_ERR_PARAM;
2659
2660 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_veb);
2661
2662 cmd->uplink_seid = cpu_to_le16(uplink_seid);
2663 cmd->downlink_seid = cpu_to_le16(downlink_seid);
2664 cmd->enable_tcs = enabled_tc;
2665 if (!uplink_seid)
2666 veb_flags |= I40E_AQC_ADD_VEB_FLOATING;
2667 if (default_port)
2668 veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT;
2669 else
2670 veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DATA;
2671
2672 /* reverse logic here: set the bitflag to disable the stats */
2673 if (!enable_stats)
2674 veb_flags |= I40E_AQC_ADD_VEB_ENABLE_DISABLE_STATS;
2675
2676 cmd->veb_flags = cpu_to_le16(veb_flags);
2677
2678 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2679
2680 if (!status && veb_seid)
2681 *veb_seid = le16_to_cpu(resp->veb_seid);
2682
2683 return status;
2684 }
2685
2686 /**
2687 * i40e_aq_get_veb_parameters - Retrieve VEB parameters
2688 * @hw: pointer to the hw struct
2689 * @veb_seid: the SEID of the VEB to query
2690 * @switch_id: the uplink switch id
2691 * @floating: set to true if the VEB is floating
2692 * @statistic_index: index of the stats counter block for this VEB
2693 * @vebs_used: number of VEBs used by the function
2694 * @vebs_free: total VEBs not reserved by any function
2695 * @cmd_details: pointer to command details structure or NULL
2696 *
2697 * This retrieves the parameters for a particular VEB, specified by
2698 * veb_seid, and returns them to the caller.
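 *
 * Any of the output pointers may be NULL if the caller is not
 * interested in that value. Sketch (illustrative):
 *
 *	u16 switch_id, stats_idx;
 *	bool floating;
 *
 *	status = i40e_aq_get_veb_parameters(hw, veb_seid, &switch_id,
 *					    &floating, &stats_idx,
 *					    NULL, NULL, NULL);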
2699 **/ 2700 i40e_status i40e_aq_get_veb_parameters(struct i40e_hw *hw, 2701 u16 veb_seid, u16 *switch_id, 2702 bool *floating, u16 *statistic_index, 2703 u16 *vebs_used, u16 *vebs_free, 2704 struct i40e_asq_cmd_details *cmd_details) 2705 { 2706 struct i40e_aq_desc desc; 2707 struct i40e_aqc_get_veb_parameters_completion *cmd_resp = 2708 (struct i40e_aqc_get_veb_parameters_completion *) 2709 &desc.params.raw; 2710 i40e_status status; 2711 2712 if (veb_seid == 0) 2713 return I40E_ERR_PARAM; 2714 2715 i40e_fill_default_direct_cmd_desc(&desc, 2716 i40e_aqc_opc_get_veb_parameters); 2717 cmd_resp->seid = cpu_to_le16(veb_seid); 2718 2719 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2720 if (status) 2721 goto get_veb_exit; 2722 2723 if (switch_id) 2724 *switch_id = le16_to_cpu(cmd_resp->switch_id); 2725 if (statistic_index) 2726 *statistic_index = le16_to_cpu(cmd_resp->statistic_index); 2727 if (vebs_used) 2728 *vebs_used = le16_to_cpu(cmd_resp->vebs_used); 2729 if (vebs_free) 2730 *vebs_free = le16_to_cpu(cmd_resp->vebs_free); 2731 if (floating) { 2732 u16 flags = le16_to_cpu(cmd_resp->veb_flags); 2733 2734 if (flags & I40E_AQC_ADD_VEB_FLOATING) 2735 *floating = true; 2736 else 2737 *floating = false; 2738 } 2739 2740 get_veb_exit: 2741 return status; 2742 } 2743 2744 /** 2745 * i40e_aq_add_macvlan 2746 * @hw: pointer to the hw struct 2747 * @seid: VSI for the mac address 2748 * @mv_list: list of macvlans to be added 2749 * @count: length of the list 2750 * @cmd_details: pointer to command details structure or NULL 2751 * 2752 * Add MAC/VLAN addresses to the HW filtering 2753 **/ 2754 i40e_status i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid, 2755 struct i40e_aqc_add_macvlan_element_data *mv_list, 2756 u16 count, struct i40e_asq_cmd_details *cmd_details) 2757 { 2758 struct i40e_aq_desc desc; 2759 struct i40e_aqc_macvlan *cmd = 2760 (struct i40e_aqc_macvlan *)&desc.params.raw; 2761 i40e_status status; 2762 u16 buf_size; 2763 int i; 2764 2765 if (count == 0 || !mv_list || !hw) 2766 return I40E_ERR_PARAM; 2767 2768 buf_size = count * sizeof(*mv_list); 2769 2770 /* prep the rest of the request */ 2771 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_macvlan); 2772 cmd->num_addresses = cpu_to_le16(count); 2773 cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid); 2774 cmd->seid[1] = 0; 2775 cmd->seid[2] = 0; 2776 2777 for (i = 0; i < count; i++) 2778 if (is_multicast_ether_addr(mv_list[i].mac_addr)) 2779 mv_list[i].flags |= 2780 cpu_to_le16(I40E_AQC_MACVLAN_ADD_USE_SHARED_MAC); 2781 2782 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); 2783 if (buf_size > I40E_AQ_LARGE_BUF) 2784 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 2785 2786 status = i40e_asq_send_command(hw, &desc, mv_list, buf_size, 2787 cmd_details); 2788 2789 return status; 2790 } 2791 2792 /** 2793 * i40e_aq_remove_macvlan 2794 * @hw: pointer to the hw struct 2795 * @seid: VSI for the mac address 2796 * @mv_list: list of macvlans to be removed 2797 * @count: length of the list 2798 * @cmd_details: pointer to command details structure or NULL 2799 * 2800 * Remove MAC/VLAN addresses from the HW filtering 2801 **/ 2802 i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid, 2803 struct i40e_aqc_remove_macvlan_element_data *mv_list, 2804 u16 count, struct i40e_asq_cmd_details *cmd_details) 2805 { 2806 struct i40e_aq_desc desc; 2807 struct i40e_aqc_macvlan *cmd = 2808 (struct i40e_aqc_macvlan *)&desc.params.raw; 2809 i40e_status status; 2810 u16 buf_size; 2811 
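	/* reject obviously malformed requests before building the
	 * descriptor (same sanity checks as i40e_aq_add_macvlan)
	 */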
2812 if (count == 0 || !mv_list || !hw) 2813 return I40E_ERR_PARAM; 2814 2815 buf_size = count * sizeof(*mv_list); 2816 2817 /* prep the rest of the request */ 2818 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_macvlan); 2819 cmd->num_addresses = cpu_to_le16(count); 2820 cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid); 2821 cmd->seid[1] = 0; 2822 cmd->seid[2] = 0; 2823 2824 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); 2825 if (buf_size > I40E_AQ_LARGE_BUF) 2826 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 2827 2828 status = i40e_asq_send_command(hw, &desc, mv_list, buf_size, 2829 cmd_details); 2830 2831 return status; 2832 } 2833 2834 /** 2835 * i40e_mirrorrule_op - Internal helper function to add/delete mirror rule 2836 * @hw: pointer to the hw struct 2837 * @opcode: AQ opcode for add or delete mirror rule 2838 * @sw_seid: Switch SEID (to which rule refers) 2839 * @rule_type: Rule Type (ingress/egress/VLAN) 2840 * @id: Destination VSI SEID or Rule ID 2841 * @count: length of the list 2842 * @mr_list: list of mirrored VSI SEIDs or VLAN IDs 2843 * @cmd_details: pointer to command details structure or NULL 2844 * @rule_id: Rule ID returned from FW 2845 * @rules_used: Number of rules used in internal switch 2846 * @rules_free: Number of rules free in internal switch 2847 * 2848 * Add/Delete a mirror rule to a specific switch. Mirror rules are supported for 2849 * VEBs/VEPA elements only 2850 **/ 2851 static i40e_status i40e_mirrorrule_op(struct i40e_hw *hw, 2852 u16 opcode, u16 sw_seid, u16 rule_type, u16 id, 2853 u16 count, __le16 *mr_list, 2854 struct i40e_asq_cmd_details *cmd_details, 2855 u16 *rule_id, u16 *rules_used, u16 *rules_free) 2856 { 2857 struct i40e_aq_desc desc; 2858 struct i40e_aqc_add_delete_mirror_rule *cmd = 2859 (struct i40e_aqc_add_delete_mirror_rule *)&desc.params.raw; 2860 struct i40e_aqc_add_delete_mirror_rule_completion *resp = 2861 (struct i40e_aqc_add_delete_mirror_rule_completion *)&desc.params.raw; 2862 i40e_status status; 2863 u16 buf_size; 2864 2865 buf_size = count * sizeof(*mr_list); 2866 2867 /* prep the rest of the request */ 2868 i40e_fill_default_direct_cmd_desc(&desc, opcode); 2869 cmd->seid = cpu_to_le16(sw_seid); 2870 cmd->rule_type = cpu_to_le16(rule_type & 2871 I40E_AQC_MIRROR_RULE_TYPE_MASK); 2872 cmd->num_entries = cpu_to_le16(count); 2873 /* Dest VSI for add, rule_id for delete */ 2874 cmd->destination = cpu_to_le16(id); 2875 if (mr_list) { 2876 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | 2877 I40E_AQ_FLAG_RD)); 2878 if (buf_size > I40E_AQ_LARGE_BUF) 2879 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 2880 } 2881 2882 status = i40e_asq_send_command(hw, &desc, mr_list, buf_size, 2883 cmd_details); 2884 if (!status || 2885 hw->aq.asq_last_status == I40E_AQ_RC_ENOSPC) { 2886 if (rule_id) 2887 *rule_id = le16_to_cpu(resp->rule_id); 2888 if (rules_used) 2889 *rules_used = le16_to_cpu(resp->mirror_rules_used); 2890 if (rules_free) 2891 *rules_free = le16_to_cpu(resp->mirror_rules_free); 2892 } 2893 return status; 2894 } 2895 2896 /** 2897 * i40e_aq_add_mirrorrule - add a mirror rule 2898 * @hw: pointer to the hw struct 2899 * @sw_seid: Switch SEID (to which rule refers) 2900 * @rule_type: Rule Type (ingress/egress/VLAN) 2901 * @dest_vsi: SEID of VSI to which packets will be mirrored 2902 * @count: length of the list 2903 * @mr_list: list of mirrored VSI SEIDs or VLAN IDs 2904 * @cmd_details: pointer to command details structure or NULL 2905 * @rule_id: Rule ID returned from FW 2906 * 
@rules_used: Number of rules used in internal switch 2907 * @rules_free: Number of rules free in internal switch 2908 * 2909 * Add mirror rule. Mirror rules are supported for VEBs or VEPA elements only 2910 **/ 2911 i40e_status i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid, 2912 u16 rule_type, u16 dest_vsi, u16 count, __le16 *mr_list, 2913 struct i40e_asq_cmd_details *cmd_details, 2914 u16 *rule_id, u16 *rules_used, u16 *rules_free) 2915 { 2916 if (!(rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS || 2917 rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS)) { 2918 if (count == 0 || !mr_list) 2919 return I40E_ERR_PARAM; 2920 } 2921 2922 return i40e_mirrorrule_op(hw, i40e_aqc_opc_add_mirror_rule, sw_seid, 2923 rule_type, dest_vsi, count, mr_list, 2924 cmd_details, rule_id, rules_used, rules_free); 2925 } 2926 2927 /** 2928 * i40e_aq_delete_mirrorrule - delete a mirror rule 2929 * @hw: pointer to the hw struct 2930 * @sw_seid: Switch SEID (to which rule refers) 2931 * @rule_type: Rule Type (ingress/egress/VLAN) 2932 * @count: length of the list 2933 * @rule_id: Rule ID that is returned in the receive desc as part of 2934 * add_mirrorrule. 2935 * @mr_list: list of mirrored VLAN IDs to be removed 2936 * @cmd_details: pointer to command details structure or NULL 2937 * @rules_used: Number of rules used in internal switch 2938 * @rules_free: Number of rules free in internal switch 2939 * 2940 * Delete a mirror rule. Mirror rules are supported for VEBs/VEPA elements only 2941 **/ 2942 i40e_status i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid, 2943 u16 rule_type, u16 rule_id, u16 count, __le16 *mr_list, 2944 struct i40e_asq_cmd_details *cmd_details, 2945 u16 *rules_used, u16 *rules_free) 2946 { 2947 /* Rule ID has to be valid except rule_type: INGRESS VLAN mirroring */ 2948 if (rule_type == I40E_AQC_MIRROR_RULE_TYPE_VLAN) { 2949 /* count and mr_list shall be valid for rule_type INGRESS VLAN 2950 * mirroring. For other rule_type, count and rule_type should 2951 * not matter. 
2952 */ 2953 if (count == 0 || !mr_list) 2954 return I40E_ERR_PARAM; 2955 } 2956 2957 return i40e_mirrorrule_op(hw, i40e_aqc_opc_delete_mirror_rule, sw_seid, 2958 rule_type, rule_id, count, mr_list, 2959 cmd_details, NULL, rules_used, rules_free); 2960 } 2961 2962 /** 2963 * i40e_aq_send_msg_to_vf 2964 * @hw: pointer to the hardware structure 2965 * @vfid: VF id to send msg 2966 * @v_opcode: opcodes for VF-PF communication 2967 * @v_retval: return error code 2968 * @msg: pointer to the msg buffer 2969 * @msglen: msg length 2970 * @cmd_details: pointer to command details 2971 * 2972 * send msg to vf 2973 **/ 2974 i40e_status i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid, 2975 u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen, 2976 struct i40e_asq_cmd_details *cmd_details) 2977 { 2978 struct i40e_aq_desc desc; 2979 struct i40e_aqc_pf_vf_message *cmd = 2980 (struct i40e_aqc_pf_vf_message *)&desc.params.raw; 2981 i40e_status status; 2982 2983 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_send_msg_to_vf); 2984 cmd->id = cpu_to_le32(vfid); 2985 desc.cookie_high = cpu_to_le32(v_opcode); 2986 desc.cookie_low = cpu_to_le32(v_retval); 2987 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_SI); 2988 if (msglen) { 2989 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | 2990 I40E_AQ_FLAG_RD)); 2991 if (msglen > I40E_AQ_LARGE_BUF) 2992 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 2993 desc.datalen = cpu_to_le16(msglen); 2994 } 2995 status = i40e_asq_send_command(hw, &desc, msg, msglen, cmd_details); 2996 2997 return status; 2998 } 2999 3000 /** 3001 * i40e_aq_debug_read_register 3002 * @hw: pointer to the hw struct 3003 * @reg_addr: register address 3004 * @reg_val: register value 3005 * @cmd_details: pointer to command details structure or NULL 3006 * 3007 * Read the register using the admin queue commands 3008 **/ 3009 i40e_status i40e_aq_debug_read_register(struct i40e_hw *hw, 3010 u32 reg_addr, u64 *reg_val, 3011 struct i40e_asq_cmd_details *cmd_details) 3012 { 3013 struct i40e_aq_desc desc; 3014 struct i40e_aqc_debug_reg_read_write *cmd_resp = 3015 (struct i40e_aqc_debug_reg_read_write *)&desc.params.raw; 3016 i40e_status status; 3017 3018 if (reg_val == NULL) 3019 return I40E_ERR_PARAM; 3020 3021 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_read_reg); 3022 3023 cmd_resp->address = cpu_to_le32(reg_addr); 3024 3025 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3026 3027 if (!status) { 3028 *reg_val = ((u64)le32_to_cpu(cmd_resp->value_high) << 32) | 3029 (u64)le32_to_cpu(cmd_resp->value_low); 3030 } 3031 3032 return status; 3033 } 3034 3035 /** 3036 * i40e_aq_debug_write_register 3037 * @hw: pointer to the hw struct 3038 * @reg_addr: register address 3039 * @reg_val: register value 3040 * @cmd_details: pointer to command details structure or NULL 3041 * 3042 * Write to a register using the admin queue commands 3043 **/ 3044 i40e_status i40e_aq_debug_write_register(struct i40e_hw *hw, 3045 u32 reg_addr, u64 reg_val, 3046 struct i40e_asq_cmd_details *cmd_details) 3047 { 3048 struct i40e_aq_desc desc; 3049 struct i40e_aqc_debug_reg_read_write *cmd = 3050 (struct i40e_aqc_debug_reg_read_write *)&desc.params.raw; 3051 i40e_status status; 3052 3053 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_write_reg); 3054 3055 cmd->address = cpu_to_le32(reg_addr); 3056 cmd->value_high = cpu_to_le32((u32)(reg_val >> 32)); 3057 cmd->value_low = cpu_to_le32((u32)(reg_val & 0xFFFFFFFF)); 3058 3059 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 
3060 3061 return status; 3062 } 3063 3064 /** 3065 * i40e_aq_request_resource 3066 * @hw: pointer to the hw struct 3067 * @resource: resource id 3068 * @access: access type 3069 * @sdp_number: resource number 3070 * @timeout: the maximum time in ms that the driver may hold the resource 3071 * @cmd_details: pointer to command details structure or NULL 3072 * 3073 * requests common resource using the admin queue commands 3074 **/ 3075 i40e_status i40e_aq_request_resource(struct i40e_hw *hw, 3076 enum i40e_aq_resources_ids resource, 3077 enum i40e_aq_resource_access_type access, 3078 u8 sdp_number, u64 *timeout, 3079 struct i40e_asq_cmd_details *cmd_details) 3080 { 3081 struct i40e_aq_desc desc; 3082 struct i40e_aqc_request_resource *cmd_resp = 3083 (struct i40e_aqc_request_resource *)&desc.params.raw; 3084 i40e_status status; 3085 3086 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_request_resource); 3087 3088 cmd_resp->resource_id = cpu_to_le16(resource); 3089 cmd_resp->access_type = cpu_to_le16(access); 3090 cmd_resp->resource_number = cpu_to_le32(sdp_number); 3091 3092 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3093 /* The completion specifies the maximum time in ms that the driver 3094 * may hold the resource in the Timeout field. 3095 * If the resource is held by someone else, the command completes with 3096 * busy return value and the timeout field indicates the maximum time 3097 * the current owner of the resource has to free it. 3098 */ 3099 if (!status || hw->aq.asq_last_status == I40E_AQ_RC_EBUSY) 3100 *timeout = le32_to_cpu(cmd_resp->timeout); 3101 3102 return status; 3103 } 3104 3105 /** 3106 * i40e_aq_release_resource 3107 * @hw: pointer to the hw struct 3108 * @resource: resource id 3109 * @sdp_number: resource number 3110 * @cmd_details: pointer to command details structure or NULL 3111 * 3112 * release common resource using the admin queue commands 3113 **/ 3114 i40e_status i40e_aq_release_resource(struct i40e_hw *hw, 3115 enum i40e_aq_resources_ids resource, 3116 u8 sdp_number, 3117 struct i40e_asq_cmd_details *cmd_details) 3118 { 3119 struct i40e_aq_desc desc; 3120 struct i40e_aqc_request_resource *cmd = 3121 (struct i40e_aqc_request_resource *)&desc.params.raw; 3122 i40e_status status; 3123 3124 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_release_resource); 3125 3126 cmd->resource_id = cpu_to_le16(resource); 3127 cmd->resource_number = cpu_to_le32(sdp_number); 3128 3129 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3130 3131 return status; 3132 } 3133 3134 /** 3135 * i40e_aq_read_nvm 3136 * @hw: pointer to the hw struct 3137 * @module_pointer: module pointer location in words from the NVM beginning 3138 * @offset: byte offset from the module beginning 3139 * @length: length of the section to be read (in bytes from the offset) 3140 * @data: command buffer (size [bytes] = length) 3141 * @last_command: tells if this is the last command in a series 3142 * @cmd_details: pointer to command details structure or NULL 3143 * 3144 * Read the NVM using the admin queue commands 3145 **/ 3146 i40e_status i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer, 3147 u32 offset, u16 length, void *data, 3148 bool last_command, 3149 struct i40e_asq_cmd_details *cmd_details) 3150 { 3151 struct i40e_aq_desc desc; 3152 struct i40e_aqc_nvm_update *cmd = 3153 (struct i40e_aqc_nvm_update *)&desc.params.raw; 3154 i40e_status status; 3155 3156 /* In offset the highest byte must be zeroed. 
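	 * (the NVM AdminQ commands use only the low 24 bits of the offset)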
*/ 3157 if (offset & 0xFF000000) { 3158 status = I40E_ERR_PARAM; 3159 goto i40e_aq_read_nvm_exit; 3160 } 3161 3162 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_read); 3163 3164 /* If this is the last command in a series, set the proper flag. */ 3165 if (last_command) 3166 cmd->command_flags |= I40E_AQ_NVM_LAST_CMD; 3167 cmd->module_pointer = module_pointer; 3168 cmd->offset = cpu_to_le32(offset); 3169 cmd->length = cpu_to_le16(length); 3170 3171 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 3172 if (length > I40E_AQ_LARGE_BUF) 3173 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 3174 3175 status = i40e_asq_send_command(hw, &desc, data, length, cmd_details); 3176 3177 i40e_aq_read_nvm_exit: 3178 return status; 3179 } 3180 3181 /** 3182 * i40e_aq_erase_nvm 3183 * @hw: pointer to the hw struct 3184 * @module_pointer: module pointer location in words from the NVM beginning 3185 * @offset: offset in the module (expressed in 4 KB from module's beginning) 3186 * @length: length of the section to be erased (expressed in 4 KB) 3187 * @last_command: tells if this is the last command in a series 3188 * @cmd_details: pointer to command details structure or NULL 3189 * 3190 * Erase the NVM sector using the admin queue commands 3191 **/ 3192 i40e_status i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer, 3193 u32 offset, u16 length, bool last_command, 3194 struct i40e_asq_cmd_details *cmd_details) 3195 { 3196 struct i40e_aq_desc desc; 3197 struct i40e_aqc_nvm_update *cmd = 3198 (struct i40e_aqc_nvm_update *)&desc.params.raw; 3199 i40e_status status; 3200 3201 /* In offset the highest byte must be zeroed. */ 3202 if (offset & 0xFF000000) { 3203 status = I40E_ERR_PARAM; 3204 goto i40e_aq_erase_nvm_exit; 3205 } 3206 3207 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_erase); 3208 3209 /* If this is the last command in a series, set the proper flag. */ 3210 if (last_command) 3211 cmd->command_flags |= I40E_AQ_NVM_LAST_CMD; 3212 cmd->module_pointer = module_pointer; 3213 cmd->offset = cpu_to_le32(offset); 3214 cmd->length = cpu_to_le16(length); 3215 3216 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3217 3218 i40e_aq_erase_nvm_exit: 3219 return status; 3220 } 3221 3222 /** 3223 * i40e_parse_discover_capabilities 3224 * @hw: pointer to the hw struct 3225 * @buff: pointer to a buffer containing device/function capability records 3226 * @cap_count: number of capability records in the list 3227 * @list_type_opc: type of capabilities list to parse 3228 * 3229 * Parse the device/function capabilities list. 
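 *
 * Each record is matched on its 16-bit capability ID and the decoded
 * values are stored in hw->dev_caps or hw->func_caps, depending on
 * @list_type_opc; unknown IDs are silently skipped.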
3230 **/ 3231 static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff, 3232 u32 cap_count, 3233 enum i40e_admin_queue_opc list_type_opc) 3234 { 3235 struct i40e_aqc_list_capabilities_element_resp *cap; 3236 u32 valid_functions, num_functions; 3237 u32 number, logical_id, phys_id; 3238 struct i40e_hw_capabilities *p; 3239 u16 id, ocp_cfg_word0; 3240 i40e_status status; 3241 u8 major_rev; 3242 u32 i = 0; 3243 3244 cap = (struct i40e_aqc_list_capabilities_element_resp *) buff; 3245 3246 if (list_type_opc == i40e_aqc_opc_list_dev_capabilities) 3247 p = &hw->dev_caps; 3248 else if (list_type_opc == i40e_aqc_opc_list_func_capabilities) 3249 p = &hw->func_caps; 3250 else 3251 return; 3252 3253 for (i = 0; i < cap_count; i++, cap++) { 3254 id = le16_to_cpu(cap->id); 3255 number = le32_to_cpu(cap->number); 3256 logical_id = le32_to_cpu(cap->logical_id); 3257 phys_id = le32_to_cpu(cap->phys_id); 3258 major_rev = cap->major_rev; 3259 3260 switch (id) { 3261 case I40E_AQ_CAP_ID_SWITCH_MODE: 3262 p->switch_mode = number; 3263 break; 3264 case I40E_AQ_CAP_ID_MNG_MODE: 3265 p->management_mode = number; 3266 if (major_rev > 1) { 3267 p->mng_protocols_over_mctp = logical_id; 3268 i40e_debug(hw, I40E_DEBUG_INIT, 3269 "HW Capability: Protocols over MCTP = %d\n", 3270 p->mng_protocols_over_mctp); 3271 } else { 3272 p->mng_protocols_over_mctp = 0; 3273 } 3274 break; 3275 case I40E_AQ_CAP_ID_NPAR_ACTIVE: 3276 p->npar_enable = number; 3277 break; 3278 case I40E_AQ_CAP_ID_OS2BMC_CAP: 3279 p->os2bmc = number; 3280 break; 3281 case I40E_AQ_CAP_ID_FUNCTIONS_VALID: 3282 p->valid_functions = number; 3283 break; 3284 case I40E_AQ_CAP_ID_SRIOV: 3285 if (number == 1) 3286 p->sr_iov_1_1 = true; 3287 break; 3288 case I40E_AQ_CAP_ID_VF: 3289 p->num_vfs = number; 3290 p->vf_base_id = logical_id; 3291 break; 3292 case I40E_AQ_CAP_ID_VMDQ: 3293 if (number == 1) 3294 p->vmdq = true; 3295 break; 3296 case I40E_AQ_CAP_ID_8021QBG: 3297 if (number == 1) 3298 p->evb_802_1_qbg = true; 3299 break; 3300 case I40E_AQ_CAP_ID_8021QBR: 3301 if (number == 1) 3302 p->evb_802_1_qbh = true; 3303 break; 3304 case I40E_AQ_CAP_ID_VSI: 3305 p->num_vsis = number; 3306 break; 3307 case I40E_AQ_CAP_ID_DCB: 3308 if (number == 1) { 3309 p->dcb = true; 3310 p->enabled_tcmap = logical_id; 3311 p->maxtc = phys_id; 3312 } 3313 break; 3314 case I40E_AQ_CAP_ID_FCOE: 3315 if (number == 1) 3316 p->fcoe = true; 3317 break; 3318 case I40E_AQ_CAP_ID_ISCSI: 3319 if (number == 1) 3320 p->iscsi = true; 3321 break; 3322 case I40E_AQ_CAP_ID_RSS: 3323 p->rss = true; 3324 p->rss_table_size = number; 3325 p->rss_table_entry_width = logical_id; 3326 break; 3327 case I40E_AQ_CAP_ID_RXQ: 3328 p->num_rx_qp = number; 3329 p->base_queue = phys_id; 3330 break; 3331 case I40E_AQ_CAP_ID_TXQ: 3332 p->num_tx_qp = number; 3333 p->base_queue = phys_id; 3334 break; 3335 case I40E_AQ_CAP_ID_MSIX: 3336 p->num_msix_vectors = number; 3337 i40e_debug(hw, I40E_DEBUG_INIT, 3338 "HW Capability: MSIX vector count = %d\n", 3339 p->num_msix_vectors); 3340 break; 3341 case I40E_AQ_CAP_ID_VF_MSIX: 3342 p->num_msix_vectors_vf = number; 3343 break; 3344 case I40E_AQ_CAP_ID_FLEX10: 3345 if (major_rev == 1) { 3346 if (number == 1) { 3347 p->flex10_enable = true; 3348 p->flex10_capable = true; 3349 } 3350 } else { 3351 /* Capability revision >= 2 */ 3352 if (number & 1) 3353 p->flex10_enable = true; 3354 if (number & 2) 3355 p->flex10_capable = true; 3356 } 3357 p->flex10_mode = logical_id; 3358 p->flex10_status = phys_id; 3359 break; 3360 case I40E_AQ_CAP_ID_CEM: 3361 if (number == 
1) 3362 p->mgmt_cem = true; 3363 break; 3364 case I40E_AQ_CAP_ID_IWARP: 3365 if (number == 1) 3366 p->iwarp = true; 3367 break; 3368 case I40E_AQ_CAP_ID_LED: 3369 if (phys_id < I40E_HW_CAP_MAX_GPIO) 3370 p->led[phys_id] = true; 3371 break; 3372 case I40E_AQ_CAP_ID_SDP: 3373 if (phys_id < I40E_HW_CAP_MAX_GPIO) 3374 p->sdp[phys_id] = true; 3375 break; 3376 case I40E_AQ_CAP_ID_MDIO: 3377 if (number == 1) { 3378 p->mdio_port_num = phys_id; 3379 p->mdio_port_mode = logical_id; 3380 } 3381 break; 3382 case I40E_AQ_CAP_ID_1588: 3383 if (number == 1) 3384 p->ieee_1588 = true; 3385 break; 3386 case I40E_AQ_CAP_ID_FLOW_DIRECTOR: 3387 p->fd = true; 3388 p->fd_filters_guaranteed = number; 3389 p->fd_filters_best_effort = logical_id; 3390 break; 3391 case I40E_AQ_CAP_ID_WSR_PROT: 3392 p->wr_csr_prot = (u64)number; 3393 p->wr_csr_prot |= (u64)logical_id << 32; 3394 break; 3395 case I40E_AQ_CAP_ID_NVM_MGMT: 3396 if (number & I40E_NVM_MGMT_SEC_REV_DISABLED) 3397 p->sec_rev_disabled = true; 3398 if (number & I40E_NVM_MGMT_UPDATE_DISABLED) 3399 p->update_disabled = true; 3400 break; 3401 default: 3402 break; 3403 } 3404 } 3405 3406 if (p->fcoe) 3407 i40e_debug(hw, I40E_DEBUG_ALL, "device is FCoE capable\n"); 3408 3409 /* Software override ensuring FCoE is disabled if npar or mfp 3410 * mode because it is not supported in these modes. 3411 */ 3412 if (p->npar_enable || p->flex10_enable) 3413 p->fcoe = false; 3414 3415 /* count the enabled ports (aka the "not disabled" ports) */ 3416 hw->num_ports = 0; 3417 for (i = 0; i < 4; i++) { 3418 u32 port_cfg_reg = I40E_PRTGEN_CNF + (4 * i); 3419 u64 port_cfg = 0; 3420 3421 /* use AQ read to get the physical register offset instead 3422 * of the port relative offset 3423 */ 3424 i40e_aq_debug_read_register(hw, port_cfg_reg, &port_cfg, NULL); 3425 if (!(port_cfg & I40E_PRTGEN_CNF_PORT_DIS_MASK)) 3426 hw->num_ports++; 3427 } 3428 3429 /* OCP cards case: if a mezz is removed the Ethernet port is at 3430 * disabled state in PRTGEN_CNF register. Additional NVM read is 3431 * needed in order to check if we are dealing with OCP card. 3432 * Those cards have 4 PFs at minimum, so using PRTGEN_CNF for counting 3433 * physical ports results in wrong partition id calculation and thus 3434 * not supporting WoL. 
3435 */ 3436 if (hw->mac.type == I40E_MAC_X722) { 3437 if (!i40e_acquire_nvm(hw, I40E_RESOURCE_READ)) { 3438 status = i40e_aq_read_nvm(hw, I40E_SR_EMP_MODULE_PTR, 3439 2 * I40E_SR_OCP_CFG_WORD0, 3440 sizeof(ocp_cfg_word0), 3441 &ocp_cfg_word0, true, NULL); 3442 if (!status && 3443 (ocp_cfg_word0 & I40E_SR_OCP_ENABLED)) 3444 hw->num_ports = 4; 3445 i40e_release_nvm(hw); 3446 } 3447 } 3448 3449 valid_functions = p->valid_functions; 3450 num_functions = 0; 3451 while (valid_functions) { 3452 if (valid_functions & 1) 3453 num_functions++; 3454 valid_functions >>= 1; 3455 } 3456 3457 /* partition id is 1-based, and functions are evenly spread 3458 * across the ports as partitions 3459 */ 3460 if (hw->num_ports != 0) { 3461 hw->partition_id = (hw->pf_id / hw->num_ports) + 1; 3462 hw->num_partitions = num_functions / hw->num_ports; 3463 } 3464 3465 /* additional HW specific goodies that might 3466 * someday be HW version specific 3467 */ 3468 p->rx_buf_chain_len = I40E_MAX_CHAINED_RX_BUFFERS; 3469 } 3470 3471 /** 3472 * i40e_aq_discover_capabilities 3473 * @hw: pointer to the hw struct 3474 * @buff: a virtual buffer to hold the capabilities 3475 * @buff_size: Size of the virtual buffer 3476 * @data_size: Size of the returned data, or buff size needed if AQ err==ENOMEM 3477 * @list_type_opc: capabilities type to discover - pass in the command opcode 3478 * @cmd_details: pointer to command details structure or NULL 3479 * 3480 * Get the device capabilities descriptions from the firmware 3481 **/ 3482 i40e_status i40e_aq_discover_capabilities(struct i40e_hw *hw, 3483 void *buff, u16 buff_size, u16 *data_size, 3484 enum i40e_admin_queue_opc list_type_opc, 3485 struct i40e_asq_cmd_details *cmd_details) 3486 { 3487 struct i40e_aqc_list_capabilites *cmd; 3488 struct i40e_aq_desc desc; 3489 i40e_status status = 0; 3490 3491 cmd = (struct i40e_aqc_list_capabilites *)&desc.params.raw; 3492 3493 if (list_type_opc != i40e_aqc_opc_list_func_capabilities && 3494 list_type_opc != i40e_aqc_opc_list_dev_capabilities) { 3495 status = I40E_ERR_PARAM; 3496 goto exit; 3497 } 3498 3499 i40e_fill_default_direct_cmd_desc(&desc, list_type_opc); 3500 3501 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 3502 if (buff_size > I40E_AQ_LARGE_BUF) 3503 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 3504 3505 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); 3506 *data_size = le16_to_cpu(desc.datalen); 3507 3508 if (status) 3509 goto exit; 3510 3511 i40e_parse_discover_capabilities(hw, buff, le32_to_cpu(cmd->count), 3512 list_type_opc); 3513 3514 exit: 3515 return status; 3516 } 3517 3518 /** 3519 * i40e_aq_update_nvm 3520 * @hw: pointer to the hw struct 3521 * @module_pointer: module pointer location in words from the NVM beginning 3522 * @offset: byte offset from the module beginning 3523 * @length: length of the section to be written (in bytes from the offset) 3524 * @data: command buffer (size [bytes] = length) 3525 * @last_command: tells if this is the last command in a series 3526 * @preservation_flags: Preservation mode flags 3527 * @cmd_details: pointer to command details structure or NULL 3528 * 3529 * Update the NVM using the admin queue commands 3530 **/ 3531 i40e_status i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer, 3532 u32 offset, u16 length, void *data, 3533 bool last_command, u8 preservation_flags, 3534 struct i40e_asq_cmd_details *cmd_details) 3535 { 3536 struct i40e_aq_desc desc; 3537 struct i40e_aqc_nvm_update *cmd = 3538 (struct i40e_aqc_nvm_update *)&desc.params.raw; 
3539 i40e_status status; 3540 3541 /* In offset the highest byte must be zeroed. */ 3542 if (offset & 0xFF000000) { 3543 status = I40E_ERR_PARAM; 3544 goto i40e_aq_update_nvm_exit; 3545 } 3546 3547 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_update); 3548 3549 /* If this is the last command in a series, set the proper flag. */ 3550 if (last_command) 3551 cmd->command_flags |= I40E_AQ_NVM_LAST_CMD; 3552 if (hw->mac.type == I40E_MAC_X722) { 3553 if (preservation_flags == I40E_NVM_PRESERVATION_FLAGS_SELECTED) 3554 cmd->command_flags |= 3555 (I40E_AQ_NVM_PRESERVATION_FLAGS_SELECTED << 3556 I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT); 3557 else if (preservation_flags == I40E_NVM_PRESERVATION_FLAGS_ALL) 3558 cmd->command_flags |= 3559 (I40E_AQ_NVM_PRESERVATION_FLAGS_ALL << 3560 I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT); 3561 } 3562 cmd->module_pointer = module_pointer; 3563 cmd->offset = cpu_to_le32(offset); 3564 cmd->length = cpu_to_le16(length); 3565 3566 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); 3567 if (length > I40E_AQ_LARGE_BUF) 3568 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 3569 3570 status = i40e_asq_send_command(hw, &desc, data, length, cmd_details); 3571 3572 i40e_aq_update_nvm_exit: 3573 return status; 3574 } 3575 3576 /** 3577 * i40e_aq_rearrange_nvm 3578 * @hw: pointer to the hw struct 3579 * @rearrange_nvm: defines direction of rearrangement 3580 * @cmd_details: pointer to command details structure or NULL 3581 * 3582 * Rearrange NVM structure, available only for transition FW 3583 **/ 3584 i40e_status i40e_aq_rearrange_nvm(struct i40e_hw *hw, 3585 u8 rearrange_nvm, 3586 struct i40e_asq_cmd_details *cmd_details) 3587 { 3588 struct i40e_aqc_nvm_update *cmd; 3589 i40e_status status; 3590 struct i40e_aq_desc desc; 3591 3592 cmd = (struct i40e_aqc_nvm_update *)&desc.params.raw; 3593 3594 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_update); 3595 3596 rearrange_nvm &= (I40E_AQ_NVM_REARRANGE_TO_FLAT | 3597 I40E_AQ_NVM_REARRANGE_TO_STRUCT); 3598 3599 if (!rearrange_nvm) { 3600 status = I40E_ERR_PARAM; 3601 goto i40e_aq_rearrange_nvm_exit; 3602 } 3603 3604 cmd->command_flags |= rearrange_nvm; 3605 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3606 3607 i40e_aq_rearrange_nvm_exit: 3608 return status; 3609 } 3610 3611 /** 3612 * i40e_aq_get_lldp_mib 3613 * @hw: pointer to the hw struct 3614 * @bridge_type: type of bridge requested 3615 * @mib_type: Local, Remote or both Local and Remote MIBs 3616 * @buff: pointer to a user supplied buffer to store the MIB block 3617 * @buff_size: size of the buffer (in bytes) 3618 * @local_len : length of the returned Local LLDP MIB 3619 * @remote_len: length of the returned Remote LLDP MIB 3620 * @cmd_details: pointer to command details structure or NULL 3621 * 3622 * Requests the complete LLDP MIB (entire packet). 
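 *
 * A minimal usage sketch (illustrative only, not taken from this file; the
 * buffer name and bridge type 0 are arbitrary, and I40E_AQ_LLDP_MIB_LOCAL is
 * assumed from i40e_adminq_cmd.h):
 *
 *	u8 mib_buf[I40E_AQ_LARGE_BUF];
 *	u16 local_len = 0, remote_len = 0;
 *	i40e_status ret;
 *
 *	ret = i40e_aq_get_lldp_mib(hw, 0, I40E_AQ_LLDP_MIB_LOCAL,
 *				   mib_buf, sizeof(mib_buf),
 *				   &local_len, &remote_len, NULL);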
3623 **/ 3624 i40e_status i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type, 3625 u8 mib_type, void *buff, u16 buff_size, 3626 u16 *local_len, u16 *remote_len, 3627 struct i40e_asq_cmd_details *cmd_details) 3628 { 3629 struct i40e_aq_desc desc; 3630 struct i40e_aqc_lldp_get_mib *cmd = 3631 (struct i40e_aqc_lldp_get_mib *)&desc.params.raw; 3632 struct i40e_aqc_lldp_get_mib *resp = 3633 (struct i40e_aqc_lldp_get_mib *)&desc.params.raw; 3634 i40e_status status; 3635 3636 if (buff_size == 0 || !buff) 3637 return I40E_ERR_PARAM; 3638 3639 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_get_mib); 3640 /* Indirect Command */ 3641 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 3642 3643 cmd->type = mib_type & I40E_AQ_LLDP_MIB_TYPE_MASK; 3644 cmd->type |= ((bridge_type << I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) & 3645 I40E_AQ_LLDP_BRIDGE_TYPE_MASK); 3646 3647 desc.datalen = cpu_to_le16(buff_size); 3648 3649 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 3650 if (buff_size > I40E_AQ_LARGE_BUF) 3651 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 3652 3653 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); 3654 if (!status) { 3655 if (local_len != NULL) 3656 *local_len = le16_to_cpu(resp->local_len); 3657 if (remote_len != NULL) 3658 *remote_len = le16_to_cpu(resp->remote_len); 3659 } 3660 3661 return status; 3662 } 3663 3664 /** 3665 * i40e_aq_set_lldp_mib - Set the LLDP MIB 3666 * @hw: pointer to the hw struct 3667 * @mib_type: Local, Remote or both Local and Remote MIBs 3668 * @buff: pointer to a user supplied buffer to store the MIB block 3669 * @buff_size: size of the buffer (in bytes) 3670 * @cmd_details: pointer to command details structure or NULL 3671 * 3672 * Set the LLDP MIB. 3673 **/ 3674 enum i40e_status_code 3675 i40e_aq_set_lldp_mib(struct i40e_hw *hw, 3676 u8 mib_type, void *buff, u16 buff_size, 3677 struct i40e_asq_cmd_details *cmd_details) 3678 { 3679 struct i40e_aqc_lldp_set_local_mib *cmd; 3680 enum i40e_status_code status; 3681 struct i40e_aq_desc desc; 3682 3683 cmd = (struct i40e_aqc_lldp_set_local_mib *)&desc.params.raw; 3684 if (buff_size == 0 || !buff) 3685 return I40E_ERR_PARAM; 3686 3687 i40e_fill_default_direct_cmd_desc(&desc, 3688 i40e_aqc_opc_lldp_set_local_mib); 3689 /* Indirect Command */ 3690 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); 3691 if (buff_size > I40E_AQ_LARGE_BUF) 3692 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 3693 desc.datalen = cpu_to_le16(buff_size); 3694 3695 cmd->type = mib_type; 3696 cmd->length = cpu_to_le16(buff_size); 3697 cmd->address_high = cpu_to_le32(upper_32_bits((uintptr_t)buff)); 3698 cmd->address_low = cpu_to_le32(lower_32_bits((uintptr_t)buff)); 3699 3700 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); 3701 return status; 3702 } 3703 3704 /** 3705 * i40e_aq_cfg_lldp_mib_change_event 3706 * @hw: pointer to the hw struct 3707 * @enable_update: Enable or Disable event posting 3708 * @cmd_details: pointer to command details structure or NULL 3709 * 3710 * Enable or Disable posting of an event on ARQ when LLDP MIB 3711 * associated with the interface changes 3712 **/ 3713 i40e_status i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw, 3714 bool enable_update, 3715 struct i40e_asq_cmd_details *cmd_details) 3716 { 3717 struct i40e_aq_desc desc; 3718 struct i40e_aqc_lldp_update_mib *cmd = 3719 (struct i40e_aqc_lldp_update_mib *)&desc.params.raw; 3720 i40e_status status; 3721 3722 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_update_mib); 
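	/* MIB change events are posted to the ARQ unless the DISABLE flag is
	 * set below; when the caller passes enable_update == false, set that
	 * flag so the firmware stops posting them for this interface.
	 */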
3723 3724 if (!enable_update) 3725 cmd->command |= I40E_AQ_LLDP_MIB_UPDATE_DISABLE; 3726 3727 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3728 3729 return status; 3730 } 3731 3732 /** 3733 * i40e_aq_restore_lldp 3734 * @hw: pointer to the hw struct 3735 * @setting: pointer to factory setting variable or NULL 3736 * @restore: True if factory settings should be restored 3737 * @cmd_details: pointer to command details structure or NULL 3738 * 3739 * Restore LLDP Agent factory settings if @restore set to True. In other case 3740 * only returns factory setting in AQ response. 3741 **/ 3742 enum i40e_status_code 3743 i40e_aq_restore_lldp(struct i40e_hw *hw, u8 *setting, bool restore, 3744 struct i40e_asq_cmd_details *cmd_details) 3745 { 3746 struct i40e_aq_desc desc; 3747 struct i40e_aqc_lldp_restore *cmd = 3748 (struct i40e_aqc_lldp_restore *)&desc.params.raw; 3749 i40e_status status; 3750 3751 if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT)) { 3752 i40e_debug(hw, I40E_DEBUG_ALL, 3753 "Restore LLDP not supported by current FW version.\n"); 3754 return I40E_ERR_DEVICE_NOT_SUPPORTED; 3755 } 3756 3757 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_restore); 3758 3759 if (restore) 3760 cmd->command |= I40E_AQ_LLDP_AGENT_RESTORE; 3761 3762 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3763 3764 if (setting) 3765 *setting = cmd->command & 1; 3766 3767 return status; 3768 } 3769 3770 /** 3771 * i40e_aq_stop_lldp 3772 * @hw: pointer to the hw struct 3773 * @shutdown_agent: True if LLDP Agent needs to be Shutdown 3774 * @persist: True if stop of LLDP should be persistent across power cycles 3775 * @cmd_details: pointer to command details structure or NULL 3776 * 3777 * Stop or Shutdown the embedded LLDP Agent 3778 **/ 3779 i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent, 3780 bool persist, 3781 struct i40e_asq_cmd_details *cmd_details) 3782 { 3783 struct i40e_aq_desc desc; 3784 struct i40e_aqc_lldp_stop *cmd = 3785 (struct i40e_aqc_lldp_stop *)&desc.params.raw; 3786 i40e_status status; 3787 3788 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_stop); 3789 3790 if (shutdown_agent) 3791 cmd->command |= I40E_AQ_LLDP_AGENT_SHUTDOWN; 3792 3793 if (persist) { 3794 if (hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT) 3795 cmd->command |= I40E_AQ_LLDP_AGENT_STOP_PERSIST; 3796 else 3797 i40e_debug(hw, I40E_DEBUG_ALL, 3798 "Persistent Stop LLDP not supported by current FW version.\n"); 3799 } 3800 3801 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3802 3803 return status; 3804 } 3805 3806 /** 3807 * i40e_aq_start_lldp 3808 * @hw: pointer to the hw struct 3809 * @persist: True if start of LLDP should be persistent across power cycles 3810 * @cmd_details: pointer to command details structure or NULL 3811 * 3812 * Start the embedded LLDP Agent on all ports. 
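 *
 * A minimal usage sketch (illustrative; error handling omitted). A plain,
 * non-persistent start is a bare call, while @persist is honored only on
 * firmware that advertises I40E_HW_FLAG_FW_LLDP_PERSISTENT:
 *
 *	status = i40e_aq_start_lldp(hw, false, NULL);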
3813 **/ 3814 i40e_status i40e_aq_start_lldp(struct i40e_hw *hw, bool persist, 3815 struct i40e_asq_cmd_details *cmd_details) 3816 { 3817 struct i40e_aq_desc desc; 3818 struct i40e_aqc_lldp_start *cmd = 3819 (struct i40e_aqc_lldp_start *)&desc.params.raw; 3820 i40e_status status; 3821 3822 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_start); 3823 3824 cmd->command = I40E_AQ_LLDP_AGENT_START; 3825 3826 if (persist) { 3827 if (hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT) 3828 cmd->command |= I40E_AQ_LLDP_AGENT_START_PERSIST; 3829 else 3830 i40e_debug(hw, I40E_DEBUG_ALL, 3831 "Persistent Start LLDP not supported by current FW version.\n"); 3832 } 3833 3834 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3835 3836 return status; 3837 } 3838 3839 /** 3840 * i40e_aq_set_dcb_parameters 3841 * @hw: pointer to the hw struct 3842 * @cmd_details: pointer to command details structure or NULL 3843 * @dcb_enable: True if DCB configuration needs to be applied 3844 * 3845 **/ 3846 enum i40e_status_code 3847 i40e_aq_set_dcb_parameters(struct i40e_hw *hw, bool dcb_enable, 3848 struct i40e_asq_cmd_details *cmd_details) 3849 { 3850 struct i40e_aq_desc desc; 3851 struct i40e_aqc_set_dcb_parameters *cmd = 3852 (struct i40e_aqc_set_dcb_parameters *)&desc.params.raw; 3853 i40e_status status; 3854 3855 if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE)) 3856 return I40E_ERR_DEVICE_NOT_SUPPORTED; 3857 3858 i40e_fill_default_direct_cmd_desc(&desc, 3859 i40e_aqc_opc_set_dcb_parameters); 3860 3861 if (dcb_enable) { 3862 cmd->valid_flags = I40E_DCB_VALID; 3863 cmd->command = I40E_AQ_DCB_SET_AGENT; 3864 } 3865 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3866 3867 return status; 3868 } 3869 3870 /** 3871 * i40e_aq_get_cee_dcb_config 3872 * @hw: pointer to the hw struct 3873 * @buff: response buffer that stores CEE operational configuration 3874 * @buff_size: size of the buffer passed 3875 * @cmd_details: pointer to command details structure or NULL 3876 * 3877 * Get CEE DCBX mode operational configuration from firmware 3878 **/ 3879 i40e_status i40e_aq_get_cee_dcb_config(struct i40e_hw *hw, 3880 void *buff, u16 buff_size, 3881 struct i40e_asq_cmd_details *cmd_details) 3882 { 3883 struct i40e_aq_desc desc; 3884 i40e_status status; 3885 3886 if (buff_size == 0 || !buff) 3887 return I40E_ERR_PARAM; 3888 3889 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_cee_dcb_cfg); 3890 3891 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 3892 status = i40e_asq_send_command(hw, &desc, (void *)buff, buff_size, 3893 cmd_details); 3894 3895 return status; 3896 } 3897 3898 /** 3899 * i40e_aq_add_udp_tunnel 3900 * @hw: pointer to the hw struct 3901 * @udp_port: the UDP port to add in Host byte order 3902 * @protocol_index: protocol index type 3903 * @filter_index: pointer to filter index 3904 * @cmd_details: pointer to command details structure or NULL 3905 * 3906 * Note: Firmware expects the udp_port value to be in Little Endian format, 3907 * and this function will call cpu_to_le16 to convert from Host byte order to 3908 * Little Endian order. 
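 *
 * A minimal usage sketch (illustrative; 4789 is the IANA VXLAN port and the
 * protocol index define is assumed from i40e_adminq_cmd.h):
 *
 *	u8 filter_idx;
 *
 *	status = i40e_aq_add_udp_tunnel(hw, 4789, I40E_AQC_TUNNEL_TYPE_VXLAN,
 *					&filter_idx, NULL);
 *	...
 *	status = i40e_aq_del_udp_tunnel(hw, filter_idx, NULL);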
3909 **/ 3910 i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw, 3911 u16 udp_port, u8 protocol_index, 3912 u8 *filter_index, 3913 struct i40e_asq_cmd_details *cmd_details) 3914 { 3915 struct i40e_aq_desc desc; 3916 struct i40e_aqc_add_udp_tunnel *cmd = 3917 (struct i40e_aqc_add_udp_tunnel *)&desc.params.raw; 3918 struct i40e_aqc_del_udp_tunnel_completion *resp = 3919 (struct i40e_aqc_del_udp_tunnel_completion *)&desc.params.raw; 3920 i40e_status status; 3921 3922 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_udp_tunnel); 3923 3924 cmd->udp_port = cpu_to_le16(udp_port); 3925 cmd->protocol_type = protocol_index; 3926 3927 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3928 3929 if (!status && filter_index) 3930 *filter_index = resp->index; 3931 3932 return status; 3933 } 3934 3935 /** 3936 * i40e_aq_del_udp_tunnel 3937 * @hw: pointer to the hw struct 3938 * @index: filter index 3939 * @cmd_details: pointer to command details structure or NULL 3940 **/ 3941 i40e_status i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index, 3942 struct i40e_asq_cmd_details *cmd_details) 3943 { 3944 struct i40e_aq_desc desc; 3945 struct i40e_aqc_remove_udp_tunnel *cmd = 3946 (struct i40e_aqc_remove_udp_tunnel *)&desc.params.raw; 3947 i40e_status status; 3948 3949 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_del_udp_tunnel); 3950 3951 cmd->index = index; 3952 3953 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3954 3955 return status; 3956 } 3957 3958 /** 3959 * i40e_aq_delete_element - Delete switch element 3960 * @hw: pointer to the hw struct 3961 * @seid: the SEID to delete from the switch 3962 * @cmd_details: pointer to command details structure or NULL 3963 * 3964 * This deletes a switch element from the switch. 3965 **/ 3966 i40e_status i40e_aq_delete_element(struct i40e_hw *hw, u16 seid, 3967 struct i40e_asq_cmd_details *cmd_details) 3968 { 3969 struct i40e_aq_desc desc; 3970 struct i40e_aqc_switch_seid *cmd = 3971 (struct i40e_aqc_switch_seid *)&desc.params.raw; 3972 i40e_status status; 3973 3974 if (seid == 0) 3975 return I40E_ERR_PARAM; 3976 3977 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_delete_element); 3978 3979 cmd->seid = cpu_to_le16(seid); 3980 3981 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3982 3983 return status; 3984 } 3985 3986 /** 3987 * i40e_aq_dcb_updated - DCB Updated Command 3988 * @hw: pointer to the hw struct 3989 * @cmd_details: pointer to command details structure or NULL 3990 * 3991 * EMP will return when the shared RPB settings have been 3992 * recomputed and modified. The retval field in the descriptor 3993 * will be set to 0 when RPB is modified. 
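 *
 * A minimal usage sketch (illustrative): the command is typically sent after
 * the driver has changed its DCB configuration, as a bare notification:
 *
 *	status = i40e_aq_dcb_updated(hw, NULL);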
3994 **/ 3995 i40e_status i40e_aq_dcb_updated(struct i40e_hw *hw, 3996 struct i40e_asq_cmd_details *cmd_details) 3997 { 3998 struct i40e_aq_desc desc; 3999 i40e_status status; 4000 4001 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_dcb_updated); 4002 4003 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 4004 4005 return status; 4006 } 4007 4008 /** 4009 * i40e_aq_tx_sched_cmd - generic Tx scheduler AQ command handler 4010 * @hw: pointer to the hw struct 4011 * @seid: seid for the physical port/switching component/vsi 4012 * @buff: Indirect buffer to hold data parameters and response 4013 * @buff_size: Indirect buffer size 4014 * @opcode: Tx scheduler AQ command opcode 4015 * @cmd_details: pointer to command details structure or NULL 4016 * 4017 * Generic command handler for Tx scheduler AQ commands 4018 **/ 4019 static i40e_status i40e_aq_tx_sched_cmd(struct i40e_hw *hw, u16 seid, 4020 void *buff, u16 buff_size, 4021 enum i40e_admin_queue_opc opcode, 4022 struct i40e_asq_cmd_details *cmd_details) 4023 { 4024 struct i40e_aq_desc desc; 4025 struct i40e_aqc_tx_sched_ind *cmd = 4026 (struct i40e_aqc_tx_sched_ind *)&desc.params.raw; 4027 i40e_status status; 4028 bool cmd_param_flag = false; 4029 4030 switch (opcode) { 4031 case i40e_aqc_opc_configure_vsi_ets_sla_bw_limit: 4032 case i40e_aqc_opc_configure_vsi_tc_bw: 4033 case i40e_aqc_opc_enable_switching_comp_ets: 4034 case i40e_aqc_opc_modify_switching_comp_ets: 4035 case i40e_aqc_opc_disable_switching_comp_ets: 4036 case i40e_aqc_opc_configure_switching_comp_ets_bw_limit: 4037 case i40e_aqc_opc_configure_switching_comp_bw_config: 4038 cmd_param_flag = true; 4039 break; 4040 case i40e_aqc_opc_query_vsi_bw_config: 4041 case i40e_aqc_opc_query_vsi_ets_sla_config: 4042 case i40e_aqc_opc_query_switching_comp_ets_config: 4043 case i40e_aqc_opc_query_port_ets_config: 4044 case i40e_aqc_opc_query_switching_comp_bw_config: 4045 cmd_param_flag = false; 4046 break; 4047 default: 4048 return I40E_ERR_PARAM; 4049 } 4050 4051 i40e_fill_default_direct_cmd_desc(&desc, opcode); 4052 4053 /* Indirect command */ 4054 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 4055 if (cmd_param_flag) 4056 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD); 4057 if (buff_size > I40E_AQ_LARGE_BUF) 4058 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 4059 4060 desc.datalen = cpu_to_le16(buff_size); 4061 4062 cmd->vsi_seid = cpu_to_le16(seid); 4063 4064 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); 4065 4066 return status; 4067 } 4068 4069 /** 4070 * i40e_aq_config_vsi_bw_limit - Configure VSI BW Limit 4071 * @hw: pointer to the hw struct 4072 * @seid: VSI seid 4073 * @credit: BW limit credits (0 = disabled) 4074 * @max_credit: Max BW limit credits 4075 * @cmd_details: pointer to command details structure or NULL 4076 **/ 4077 i40e_status i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw, 4078 u16 seid, u16 credit, u8 max_credit, 4079 struct i40e_asq_cmd_details *cmd_details) 4080 { 4081 struct i40e_aq_desc desc; 4082 struct i40e_aqc_configure_vsi_bw_limit *cmd = 4083 (struct i40e_aqc_configure_vsi_bw_limit *)&desc.params.raw; 4084 i40e_status status; 4085 4086 i40e_fill_default_direct_cmd_desc(&desc, 4087 i40e_aqc_opc_configure_vsi_bw_limit); 4088 4089 cmd->vsi_seid = cpu_to_le16(seid); 4090 cmd->credit = cpu_to_le16(credit); 4091 cmd->max_credit = max_credit; 4092 4093 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 4094 4095 return status; 4096 } 4097 4098 /** 4099 * i40e_aq_config_vsi_tc_bw - Config VSI BW 
Allocation per TC 4100 * @hw: pointer to the hw struct 4101 * @seid: VSI seid 4102 * @bw_data: Buffer holding enabled TCs, relative TC BW limit/credits 4103 * @cmd_details: pointer to command details structure or NULL 4104 **/ 4105 i40e_status i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw, 4106 u16 seid, 4107 struct i40e_aqc_configure_vsi_tc_bw_data *bw_data, 4108 struct i40e_asq_cmd_details *cmd_details) 4109 { 4110 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), 4111 i40e_aqc_opc_configure_vsi_tc_bw, 4112 cmd_details); 4113 } 4114 4115 /** 4116 * i40e_aq_config_switch_comp_ets - Enable/Disable/Modify ETS on the port 4117 * @hw: pointer to the hw struct 4118 * @seid: seid of the switching component connected to Physical Port 4119 * @ets_data: Buffer holding ETS parameters 4120 * @opcode: Tx scheduler AQ command opcode 4121 * @cmd_details: pointer to command details structure or NULL 4122 **/ 4123 i40e_status i40e_aq_config_switch_comp_ets(struct i40e_hw *hw, 4124 u16 seid, 4125 struct i40e_aqc_configure_switching_comp_ets_data *ets_data, 4126 enum i40e_admin_queue_opc opcode, 4127 struct i40e_asq_cmd_details *cmd_details) 4128 { 4129 return i40e_aq_tx_sched_cmd(hw, seid, (void *)ets_data, 4130 sizeof(*ets_data), opcode, cmd_details); 4131 } 4132 4133 /** 4134 * i40e_aq_config_switch_comp_bw_config - Config Switch comp BW Alloc per TC 4135 * @hw: pointer to the hw struct 4136 * @seid: seid of the switching component 4137 * @bw_data: Buffer holding enabled TCs, relative/absolute TC BW limit/credits 4138 * @cmd_details: pointer to command details structure or NULL 4139 **/ 4140 i40e_status i40e_aq_config_switch_comp_bw_config(struct i40e_hw *hw, 4141 u16 seid, 4142 struct i40e_aqc_configure_switching_comp_bw_config_data *bw_data, 4143 struct i40e_asq_cmd_details *cmd_details) 4144 { 4145 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), 4146 i40e_aqc_opc_configure_switching_comp_bw_config, 4147 cmd_details); 4148 } 4149 4150 /** 4151 * i40e_aq_query_vsi_bw_config - Query VSI BW configuration 4152 * @hw: pointer to the hw struct 4153 * @seid: seid of the VSI 4154 * @bw_data: Buffer to hold VSI BW configuration 4155 * @cmd_details: pointer to command details structure or NULL 4156 **/ 4157 i40e_status i40e_aq_query_vsi_bw_config(struct i40e_hw *hw, 4158 u16 seid, 4159 struct i40e_aqc_query_vsi_bw_config_resp *bw_data, 4160 struct i40e_asq_cmd_details *cmd_details) 4161 { 4162 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), 4163 i40e_aqc_opc_query_vsi_bw_config, 4164 cmd_details); 4165 } 4166 4167 /** 4168 * i40e_aq_query_vsi_ets_sla_config - Query VSI BW configuration per TC 4169 * @hw: pointer to the hw struct 4170 * @seid: seid of the VSI 4171 * @bw_data: Buffer to hold VSI BW configuration per TC 4172 * @cmd_details: pointer to command details structure or NULL 4173 **/ 4174 i40e_status i40e_aq_query_vsi_ets_sla_config(struct i40e_hw *hw, 4175 u16 seid, 4176 struct i40e_aqc_query_vsi_ets_sla_config_resp *bw_data, 4177 struct i40e_asq_cmd_details *cmd_details) 4178 { 4179 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), 4180 i40e_aqc_opc_query_vsi_ets_sla_config, 4181 cmd_details); 4182 } 4183 4184 /** 4185 * i40e_aq_query_switch_comp_ets_config - Query Switch comp BW config per TC 4186 * @hw: pointer to the hw struct 4187 * @seid: seid of the switching component 4188 * @bw_data: Buffer to hold switching component's per TC BW config 4189 * @cmd_details: pointer to command details structure or NULL 
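 *
 * A minimal usage sketch (illustrative; @seid comes from the caller and the
 * response lands in a caller-supplied struct):
 *
 *	struct i40e_aqc_query_switching_comp_ets_config_resp ets_cfg = {};
 *
 *	status = i40e_aq_query_switch_comp_ets_config(hw, seid, &ets_cfg, NULL);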
4190 **/ 4191 i40e_status i40e_aq_query_switch_comp_ets_config(struct i40e_hw *hw, 4192 u16 seid, 4193 struct i40e_aqc_query_switching_comp_ets_config_resp *bw_data, 4194 struct i40e_asq_cmd_details *cmd_details) 4195 { 4196 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), 4197 i40e_aqc_opc_query_switching_comp_ets_config, 4198 cmd_details); 4199 } 4200 4201 /** 4202 * i40e_aq_query_port_ets_config - Query Physical Port ETS configuration 4203 * @hw: pointer to the hw struct 4204 * @seid: seid of the VSI or switching component connected to Physical Port 4205 * @bw_data: Buffer to hold current ETS configuration for the Physical Port 4206 * @cmd_details: pointer to command details structure or NULL 4207 **/ 4208 i40e_status i40e_aq_query_port_ets_config(struct i40e_hw *hw, 4209 u16 seid, 4210 struct i40e_aqc_query_port_ets_config_resp *bw_data, 4211 struct i40e_asq_cmd_details *cmd_details) 4212 { 4213 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), 4214 i40e_aqc_opc_query_port_ets_config, 4215 cmd_details); 4216 } 4217 4218 /** 4219 * i40e_aq_query_switch_comp_bw_config - Query Switch comp BW configuration 4220 * @hw: pointer to the hw struct 4221 * @seid: seid of the switching component 4222 * @bw_data: Buffer to hold switching component's BW configuration 4223 * @cmd_details: pointer to command details structure or NULL 4224 **/ 4225 i40e_status i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw, 4226 u16 seid, 4227 struct i40e_aqc_query_switching_comp_bw_config_resp *bw_data, 4228 struct i40e_asq_cmd_details *cmd_details) 4229 { 4230 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), 4231 i40e_aqc_opc_query_switching_comp_bw_config, 4232 cmd_details); 4233 } 4234 4235 /** 4236 * i40e_validate_filter_settings 4237 * @hw: pointer to the hardware structure 4238 * @settings: Filter control settings 4239 * 4240 * Check and validate the filter control settings passed. 4241 * The function checks for the valid filter/context sizes being 4242 * passed for FCoE and PE. 4243 * 4244 * Returns 0 if the values passed are valid and within 4245 * range else returns an error. 
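 *
 * Each size enum value is used as a left-shift count applied to the
 * corresponding base size, so successive enum values double the filter or
 * context table size, e.g.:
 *
 *	fcoe_filt_size = I40E_HASH_FILTER_BASE_SIZE << settings->fcoe_filt_num;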
4246 **/ 4247 static i40e_status i40e_validate_filter_settings(struct i40e_hw *hw, 4248 struct i40e_filter_control_settings *settings) 4249 { 4250 u32 fcoe_cntx_size, fcoe_filt_size; 4251 u32 pe_cntx_size, pe_filt_size; 4252 u32 fcoe_fmax; 4253 u32 val; 4254 4255 /* Validate FCoE settings passed */ 4256 switch (settings->fcoe_filt_num) { 4257 case I40E_HASH_FILTER_SIZE_1K: 4258 case I40E_HASH_FILTER_SIZE_2K: 4259 case I40E_HASH_FILTER_SIZE_4K: 4260 case I40E_HASH_FILTER_SIZE_8K: 4261 case I40E_HASH_FILTER_SIZE_16K: 4262 case I40E_HASH_FILTER_SIZE_32K: 4263 fcoe_filt_size = I40E_HASH_FILTER_BASE_SIZE; 4264 fcoe_filt_size <<= (u32)settings->fcoe_filt_num; 4265 break; 4266 default: 4267 return I40E_ERR_PARAM; 4268 } 4269 4270 switch (settings->fcoe_cntx_num) { 4271 case I40E_DMA_CNTX_SIZE_512: 4272 case I40E_DMA_CNTX_SIZE_1K: 4273 case I40E_DMA_CNTX_SIZE_2K: 4274 case I40E_DMA_CNTX_SIZE_4K: 4275 fcoe_cntx_size = I40E_DMA_CNTX_BASE_SIZE; 4276 fcoe_cntx_size <<= (u32)settings->fcoe_cntx_num; 4277 break; 4278 default: 4279 return I40E_ERR_PARAM; 4280 } 4281 4282 /* Validate PE settings passed */ 4283 switch (settings->pe_filt_num) { 4284 case I40E_HASH_FILTER_SIZE_1K: 4285 case I40E_HASH_FILTER_SIZE_2K: 4286 case I40E_HASH_FILTER_SIZE_4K: 4287 case I40E_HASH_FILTER_SIZE_8K: 4288 case I40E_HASH_FILTER_SIZE_16K: 4289 case I40E_HASH_FILTER_SIZE_32K: 4290 case I40E_HASH_FILTER_SIZE_64K: 4291 case I40E_HASH_FILTER_SIZE_128K: 4292 case I40E_HASH_FILTER_SIZE_256K: 4293 case I40E_HASH_FILTER_SIZE_512K: 4294 case I40E_HASH_FILTER_SIZE_1M: 4295 pe_filt_size = I40E_HASH_FILTER_BASE_SIZE; 4296 pe_filt_size <<= (u32)settings->pe_filt_num; 4297 break; 4298 default: 4299 return I40E_ERR_PARAM; 4300 } 4301 4302 switch (settings->pe_cntx_num) { 4303 case I40E_DMA_CNTX_SIZE_512: 4304 case I40E_DMA_CNTX_SIZE_1K: 4305 case I40E_DMA_CNTX_SIZE_2K: 4306 case I40E_DMA_CNTX_SIZE_4K: 4307 case I40E_DMA_CNTX_SIZE_8K: 4308 case I40E_DMA_CNTX_SIZE_16K: 4309 case I40E_DMA_CNTX_SIZE_32K: 4310 case I40E_DMA_CNTX_SIZE_64K: 4311 case I40E_DMA_CNTX_SIZE_128K: 4312 case I40E_DMA_CNTX_SIZE_256K: 4313 pe_cntx_size = I40E_DMA_CNTX_BASE_SIZE; 4314 pe_cntx_size <<= (u32)settings->pe_cntx_num; 4315 break; 4316 default: 4317 return I40E_ERR_PARAM; 4318 } 4319 4320 /* FCHSIZE + FCDSIZE should not be greater than PMFCOEFMAX */ 4321 val = rd32(hw, I40E_GLHMC_FCOEFMAX); 4322 fcoe_fmax = (val & I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_MASK) 4323 >> I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT; 4324 if (fcoe_filt_size + fcoe_cntx_size > fcoe_fmax) 4325 return I40E_ERR_INVALID_SIZE; 4326 4327 return 0; 4328 } 4329 4330 /** 4331 * i40e_set_filter_control 4332 * @hw: pointer to the hardware structure 4333 * @settings: Filter control settings 4334 * 4335 * Set the Queue Filters for PE/FCoE and enable filters required 4336 * for a single PF. It is expected that these settings are programmed 4337 * at the driver initialization time. 
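 *
 * A minimal usage sketch (illustrative; the sizes below are examples picked
 * from the values accepted by i40e_validate_filter_settings() and must still
 * fit the device's FCoE maximum):
 *
 *	struct i40e_filter_control_settings settings = {0};
 *	i40e_status ret;
 *
 *	settings.fcoe_filt_num = I40E_HASH_FILTER_SIZE_1K;
 *	settings.fcoe_cntx_num = I40E_DMA_CNTX_SIZE_512;
 *	settings.pe_filt_num = I40E_HASH_FILTER_SIZE_1K;
 *	settings.pe_cntx_num = I40E_DMA_CNTX_SIZE_512;
 *	settings.hash_lut_size = I40E_HASH_LUT_SIZE_512;
 *	settings.enable_fdir = true;
 *	settings.enable_ethtype = true;
 *	settings.enable_macvlan = true;
 *	ret = i40e_set_filter_control(hw, &settings);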
4338 **/ 4339 i40e_status i40e_set_filter_control(struct i40e_hw *hw, 4340 struct i40e_filter_control_settings *settings) 4341 { 4342 i40e_status ret = 0; 4343 u32 hash_lut_size = 0; 4344 u32 val; 4345 4346 if (!settings) 4347 return I40E_ERR_PARAM; 4348 4349 /* Validate the input settings */ 4350 ret = i40e_validate_filter_settings(hw, settings); 4351 if (ret) 4352 return ret; 4353 4354 /* Read the PF Queue Filter control register */ 4355 val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0); 4356 4357 /* Program required PE hash buckets for the PF */ 4358 val &= ~I40E_PFQF_CTL_0_PEHSIZE_MASK; 4359 val |= ((u32)settings->pe_filt_num << I40E_PFQF_CTL_0_PEHSIZE_SHIFT) & 4360 I40E_PFQF_CTL_0_PEHSIZE_MASK; 4361 /* Program required PE contexts for the PF */ 4362 val &= ~I40E_PFQF_CTL_0_PEDSIZE_MASK; 4363 val |= ((u32)settings->pe_cntx_num << I40E_PFQF_CTL_0_PEDSIZE_SHIFT) & 4364 I40E_PFQF_CTL_0_PEDSIZE_MASK; 4365 4366 /* Program required FCoE hash buckets for the PF */ 4367 val &= ~I40E_PFQF_CTL_0_PFFCHSIZE_MASK; 4368 val |= ((u32)settings->fcoe_filt_num << 4369 I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT) & 4370 I40E_PFQF_CTL_0_PFFCHSIZE_MASK; 4371 /* Program required FCoE DDP contexts for the PF */ 4372 val &= ~I40E_PFQF_CTL_0_PFFCDSIZE_MASK; 4373 val |= ((u32)settings->fcoe_cntx_num << 4374 I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT) & 4375 I40E_PFQF_CTL_0_PFFCDSIZE_MASK; 4376 4377 /* Program Hash LUT size for the PF */ 4378 val &= ~I40E_PFQF_CTL_0_HASHLUTSIZE_MASK; 4379 if (settings->hash_lut_size == I40E_HASH_LUT_SIZE_512) 4380 hash_lut_size = 1; 4381 val |= (hash_lut_size << I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT) & 4382 I40E_PFQF_CTL_0_HASHLUTSIZE_MASK; 4383 4384 /* Enable FDIR, Ethertype and MACVLAN filters for PF and VFs */ 4385 if (settings->enable_fdir) 4386 val |= I40E_PFQF_CTL_0_FD_ENA_MASK; 4387 if (settings->enable_ethtype) 4388 val |= I40E_PFQF_CTL_0_ETYPE_ENA_MASK; 4389 if (settings->enable_macvlan) 4390 val |= I40E_PFQF_CTL_0_MACVLAN_ENA_MASK; 4391 4392 i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, val); 4393 4394 return 0; 4395 } 4396 4397 /** 4398 * i40e_aq_add_rem_control_packet_filter - Add or Remove Control Packet Filter 4399 * @hw: pointer to the hw struct 4400 * @mac_addr: MAC address to use in the filter 4401 * @ethtype: Ethertype to use in the filter 4402 * @flags: Flags that needs to be applied to the filter 4403 * @vsi_seid: seid of the control VSI 4404 * @queue: VSI queue number to send the packet to 4405 * @is_add: Add control packet filter if True else remove 4406 * @stats: Structure to hold information on control filter counts 4407 * @cmd_details: pointer to command details structure or NULL 4408 * 4409 * This command will Add or Remove control packet filter for a control VSI. 4410 * In return it will update the total number of perfect filter count in 4411 * the stats member. 
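 *
 * For a typical caller see i40e_add_filter_to_drop_tx_flow_control_frames()
 * below, which installs a Tx drop filter for the flow control Ethertype.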
4412 **/ 4413 i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw, 4414 u8 *mac_addr, u16 ethtype, u16 flags, 4415 u16 vsi_seid, u16 queue, bool is_add, 4416 struct i40e_control_filter_stats *stats, 4417 struct i40e_asq_cmd_details *cmd_details) 4418 { 4419 struct i40e_aq_desc desc; 4420 struct i40e_aqc_add_remove_control_packet_filter *cmd = 4421 (struct i40e_aqc_add_remove_control_packet_filter *) 4422 &desc.params.raw; 4423 struct i40e_aqc_add_remove_control_packet_filter_completion *resp = 4424 (struct i40e_aqc_add_remove_control_packet_filter_completion *) 4425 &desc.params.raw; 4426 i40e_status status; 4427 4428 if (vsi_seid == 0) 4429 return I40E_ERR_PARAM; 4430 4431 if (is_add) { 4432 i40e_fill_default_direct_cmd_desc(&desc, 4433 i40e_aqc_opc_add_control_packet_filter); 4434 cmd->queue = cpu_to_le16(queue); 4435 } else { 4436 i40e_fill_default_direct_cmd_desc(&desc, 4437 i40e_aqc_opc_remove_control_packet_filter); 4438 } 4439 4440 if (mac_addr) 4441 ether_addr_copy(cmd->mac, mac_addr); 4442 4443 cmd->etype = cpu_to_le16(ethtype); 4444 cmd->flags = cpu_to_le16(flags); 4445 cmd->seid = cpu_to_le16(vsi_seid); 4446 4447 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 4448 4449 if (!status && stats) { 4450 stats->mac_etype_used = le16_to_cpu(resp->mac_etype_used); 4451 stats->etype_used = le16_to_cpu(resp->etype_used); 4452 stats->mac_etype_free = le16_to_cpu(resp->mac_etype_free); 4453 stats->etype_free = le16_to_cpu(resp->etype_free); 4454 } 4455 4456 return status; 4457 } 4458 4459 /** 4460 * i40e_add_filter_to_drop_tx_flow_control_frames- filter to drop flow control 4461 * @hw: pointer to the hw struct 4462 * @seid: VSI seid to add ethertype filter from 4463 **/ 4464 void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw, 4465 u16 seid) 4466 { 4467 #define I40E_FLOW_CONTROL_ETHTYPE 0x8808 4468 u16 flag = I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC | 4469 I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP | 4470 I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX; 4471 u16 ethtype = I40E_FLOW_CONTROL_ETHTYPE; 4472 i40e_status status; 4473 4474 status = i40e_aq_add_rem_control_packet_filter(hw, NULL, ethtype, flag, 4475 seid, 0, true, NULL, 4476 NULL); 4477 if (status) 4478 hw_dbg(hw, "Ethtype Filter Add failed: Error pruning Tx flow control frames\n"); 4479 } 4480 4481 /** 4482 * i40e_aq_alternate_read 4483 * @hw: pointer to the hardware structure 4484 * @reg_addr0: address of first dword to be read 4485 * @reg_val0: pointer for data read from 'reg_addr0' 4486 * @reg_addr1: address of second dword to be read 4487 * @reg_val1: pointer for data read from 'reg_addr1' 4488 * 4489 * Read one or two dwords from alternate structure. Fields are indicated 4490 * by 'reg_addr0' and 'reg_addr1' register numbers. If 'reg_val1' pointer 4491 * is not passed then only register at 'reg_addr0' is read. 
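 *
 * i40e_read_bw_from_alt_ram() below is the caller in this file; it fetches
 * the per-PF min/max bandwidth dwords in a single command.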
4492 * 4493 **/ 4494 static i40e_status i40e_aq_alternate_read(struct i40e_hw *hw, 4495 u32 reg_addr0, u32 *reg_val0, 4496 u32 reg_addr1, u32 *reg_val1) 4497 { 4498 struct i40e_aq_desc desc; 4499 struct i40e_aqc_alternate_write *cmd_resp = 4500 (struct i40e_aqc_alternate_write *)&desc.params.raw; 4501 i40e_status status; 4502 4503 if (!reg_val0) 4504 return I40E_ERR_PARAM; 4505 4506 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_alternate_read); 4507 cmd_resp->address0 = cpu_to_le32(reg_addr0); 4508 cmd_resp->address1 = cpu_to_le32(reg_addr1); 4509 4510 status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL); 4511 4512 if (!status) { 4513 *reg_val0 = le32_to_cpu(cmd_resp->data0); 4514 4515 if (reg_val1) 4516 *reg_val1 = le32_to_cpu(cmd_resp->data1); 4517 } 4518 4519 return status; 4520 } 4521 4522 /** 4523 * i40e_aq_suspend_port_tx 4524 * @hw: pointer to the hardware structure 4525 * @seid: port seid 4526 * @cmd_details: pointer to command details structure or NULL 4527 * 4528 * Suspend port's Tx traffic 4529 **/ 4530 i40e_status i40e_aq_suspend_port_tx(struct i40e_hw *hw, u16 seid, 4531 struct i40e_asq_cmd_details *cmd_details) 4532 { 4533 struct i40e_aqc_tx_sched_ind *cmd; 4534 struct i40e_aq_desc desc; 4535 i40e_status status; 4536 4537 cmd = (struct i40e_aqc_tx_sched_ind *)&desc.params.raw; 4538 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_suspend_port_tx); 4539 cmd->vsi_seid = cpu_to_le16(seid); 4540 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 4541 4542 return status; 4543 } 4544 4545 /** 4546 * i40e_aq_resume_port_tx 4547 * @hw: pointer to the hardware structure 4548 * @cmd_details: pointer to command details structure or NULL 4549 * 4550 * Resume port's Tx traffic 4551 **/ 4552 i40e_status i40e_aq_resume_port_tx(struct i40e_hw *hw, 4553 struct i40e_asq_cmd_details *cmd_details) 4554 { 4555 struct i40e_aq_desc desc; 4556 i40e_status status; 4557 4558 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_resume_port_tx); 4559 4560 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 4561 4562 return status; 4563 } 4564 4565 /** 4566 * i40e_set_pci_config_data - store PCI bus info 4567 * @hw: pointer to hardware structure 4568 * @link_status: the link status word from PCI config space 4569 * 4570 * Stores the PCI bus info (speed, width, type) within the i40e_hw structure 4571 **/ 4572 void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status) 4573 { 4574 hw->bus.type = i40e_bus_type_pci_express; 4575 4576 switch (link_status & PCI_EXP_LNKSTA_NLW) { 4577 case PCI_EXP_LNKSTA_NLW_X1: 4578 hw->bus.width = i40e_bus_width_pcie_x1; 4579 break; 4580 case PCI_EXP_LNKSTA_NLW_X2: 4581 hw->bus.width = i40e_bus_width_pcie_x2; 4582 break; 4583 case PCI_EXP_LNKSTA_NLW_X4: 4584 hw->bus.width = i40e_bus_width_pcie_x4; 4585 break; 4586 case PCI_EXP_LNKSTA_NLW_X8: 4587 hw->bus.width = i40e_bus_width_pcie_x8; 4588 break; 4589 default: 4590 hw->bus.width = i40e_bus_width_unknown; 4591 break; 4592 } 4593 4594 switch (link_status & PCI_EXP_LNKSTA_CLS) { 4595 case PCI_EXP_LNKSTA_CLS_2_5GB: 4596 hw->bus.speed = i40e_bus_speed_2500; 4597 break; 4598 case PCI_EXP_LNKSTA_CLS_5_0GB: 4599 hw->bus.speed = i40e_bus_speed_5000; 4600 break; 4601 case PCI_EXP_LNKSTA_CLS_8_0GB: 4602 hw->bus.speed = i40e_bus_speed_8000; 4603 break; 4604 default: 4605 hw->bus.speed = i40e_bus_speed_unknown; 4606 break; 4607 } 4608 } 4609 4610 /** 4611 * i40e_aq_debug_dump 4612 * @hw: pointer to the hardware structure 4613 * @cluster_id: specific cluster to dump 4614 * @table_id: 
table id within cluster 4615 * @start_index: index of line in the block to read 4616 * @buff_size: dump buffer size 4617 * @buff: dump buffer 4618 * @ret_buff_size: actual buffer size returned 4619 * @ret_next_table: next block to read 4620 * @ret_next_index: next index to read 4621 * @cmd_details: pointer to command details structure or NULL 4622 * 4623 * Dump internal FW/HW data for debug purposes. 4624 * 4625 **/ 4626 i40e_status i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id, 4627 u8 table_id, u32 start_index, u16 buff_size, 4628 void *buff, u16 *ret_buff_size, 4629 u8 *ret_next_table, u32 *ret_next_index, 4630 struct i40e_asq_cmd_details *cmd_details) 4631 { 4632 struct i40e_aq_desc desc; 4633 struct i40e_aqc_debug_dump_internals *cmd = 4634 (struct i40e_aqc_debug_dump_internals *)&desc.params.raw; 4635 struct i40e_aqc_debug_dump_internals *resp = 4636 (struct i40e_aqc_debug_dump_internals *)&desc.params.raw; 4637 i40e_status status; 4638 4639 if (buff_size == 0 || !buff) 4640 return I40E_ERR_PARAM; 4641 4642 i40e_fill_default_direct_cmd_desc(&desc, 4643 i40e_aqc_opc_debug_dump_internals); 4644 /* Indirect Command */ 4645 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 4646 if (buff_size > I40E_AQ_LARGE_BUF) 4647 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 4648 4649 cmd->cluster_id = cluster_id; 4650 cmd->table_id = table_id; 4651 cmd->idx = cpu_to_le32(start_index); 4652 4653 desc.datalen = cpu_to_le16(buff_size); 4654 4655 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); 4656 if (!status) { 4657 if (ret_buff_size) 4658 *ret_buff_size = le16_to_cpu(desc.datalen); 4659 if (ret_next_table) 4660 *ret_next_table = resp->table_id; 4661 if (ret_next_index) 4662 *ret_next_index = le32_to_cpu(resp->idx); 4663 } 4664 4665 return status; 4666 } 4667 4668 /** 4669 * i40e_read_bw_from_alt_ram 4670 * @hw: pointer to the hardware structure 4671 * @max_bw: pointer for max_bw read 4672 * @min_bw: pointer for min_bw read 4673 * @min_valid: pointer for bool that is true if min_bw is a valid value 4674 * @max_valid: pointer for bool that is true if max_bw is a valid value 4675 * 4676 * Read bw from the alternate ram for the given pf 4677 **/ 4678 i40e_status i40e_read_bw_from_alt_ram(struct i40e_hw *hw, 4679 u32 *max_bw, u32 *min_bw, 4680 bool *min_valid, bool *max_valid) 4681 { 4682 i40e_status status; 4683 u32 max_bw_addr, min_bw_addr; 4684 4685 /* Calculate the address of the min/max bw registers */ 4686 max_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET + 4687 I40E_ALT_STRUCT_MAX_BW_OFFSET + 4688 (I40E_ALT_STRUCT_DWORDS_PER_PF * hw->pf_id); 4689 min_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET + 4690 I40E_ALT_STRUCT_MIN_BW_OFFSET + 4691 (I40E_ALT_STRUCT_DWORDS_PER_PF * hw->pf_id); 4692 4693 /* Read the bandwidths from alt ram */ 4694 status = i40e_aq_alternate_read(hw, max_bw_addr, max_bw, 4695 min_bw_addr, min_bw); 4696 4697 if (*min_bw & I40E_ALT_BW_VALID_MASK) 4698 *min_valid = true; 4699 else 4700 *min_valid = false; 4701 4702 if (*max_bw & I40E_ALT_BW_VALID_MASK) 4703 *max_valid = true; 4704 else 4705 *max_valid = false; 4706 4707 return status; 4708 } 4709 4710 /** 4711 * i40e_aq_configure_partition_bw 4712 * @hw: pointer to the hardware structure 4713 * @bw_data: Buffer holding valid pfs and bw limits 4714 * @cmd_details: pointer to command details 4715 * 4716 * Configure partitions guaranteed/max bw 4717 **/ 4718 i40e_status i40e_aq_configure_partition_bw(struct i40e_hw *hw, 4719 struct i40e_aqc_configure_partition_bw_data *bw_data, 4720 struct 
i40e_asq_cmd_details *cmd_details) 4721 { 4722 i40e_status status; 4723 struct i40e_aq_desc desc; 4724 u16 bwd_size = sizeof(*bw_data); 4725 4726 i40e_fill_default_direct_cmd_desc(&desc, 4727 i40e_aqc_opc_configure_partition_bw); 4728 4729 /* Indirect command */ 4730 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 4731 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD); 4732 4733 if (bwd_size > I40E_AQ_LARGE_BUF) 4734 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 4735 4736 desc.datalen = cpu_to_le16(bwd_size); 4737 4738 status = i40e_asq_send_command(hw, &desc, bw_data, bwd_size, 4739 cmd_details); 4740 4741 return status; 4742 } 4743 4744 /** 4745 * i40e_read_phy_register_clause22 4746 * @hw: pointer to the HW structure 4747 * @reg: register address in the page 4748 * @phy_addr: PHY address on MDIO interface 4749 * @value: PHY register value 4750 * 4751 * Reads specified PHY register value 4752 **/ 4753 i40e_status i40e_read_phy_register_clause22(struct i40e_hw *hw, 4754 u16 reg, u8 phy_addr, u16 *value) 4755 { 4756 i40e_status status = I40E_ERR_TIMEOUT; 4757 u8 port_num = (u8)hw->func_caps.mdio_port_num; 4758 u32 command = 0; 4759 u16 retry = 1000; 4760 4761 command = (reg << I40E_GLGEN_MSCA_DEVADD_SHIFT) | 4762 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | 4763 (I40E_MDIO_CLAUSE22_OPCODE_READ_MASK) | 4764 (I40E_MDIO_CLAUSE22_STCODE_MASK) | 4765 (I40E_GLGEN_MSCA_MDICMD_MASK); 4766 wr32(hw, I40E_GLGEN_MSCA(port_num), command); 4767 do { 4768 command = rd32(hw, I40E_GLGEN_MSCA(port_num)); 4769 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { 4770 status = 0; 4771 break; 4772 } 4773 udelay(10); 4774 retry--; 4775 } while (retry); 4776 4777 if (status) { 4778 i40e_debug(hw, I40E_DEBUG_PHY, 4779 "PHY: Can't write command to external PHY.\n"); 4780 } else { 4781 command = rd32(hw, I40E_GLGEN_MSRWD(port_num)); 4782 *value = (command & I40E_GLGEN_MSRWD_MDIRDDATA_MASK) >> 4783 I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT; 4784 } 4785 4786 return status; 4787 } 4788 4789 /** 4790 * i40e_write_phy_register_clause22 4791 * @hw: pointer to the HW structure 4792 * @reg: register address in the page 4793 * @phy_addr: PHY address on MDIO interface 4794 * @value: PHY register value 4795 * 4796 * Writes specified PHY register value 4797 **/ 4798 i40e_status i40e_write_phy_register_clause22(struct i40e_hw *hw, 4799 u16 reg, u8 phy_addr, u16 value) 4800 { 4801 i40e_status status = I40E_ERR_TIMEOUT; 4802 u8 port_num = (u8)hw->func_caps.mdio_port_num; 4803 u32 command = 0; 4804 u16 retry = 1000; 4805 4806 command = value << I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT; 4807 wr32(hw, I40E_GLGEN_MSRWD(port_num), command); 4808 4809 command = (reg << I40E_GLGEN_MSCA_DEVADD_SHIFT) | 4810 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | 4811 (I40E_MDIO_CLAUSE22_OPCODE_WRITE_MASK) | 4812 (I40E_MDIO_CLAUSE22_STCODE_MASK) | 4813 (I40E_GLGEN_MSCA_MDICMD_MASK); 4814 4815 wr32(hw, I40E_GLGEN_MSCA(port_num), command); 4816 do { 4817 command = rd32(hw, I40E_GLGEN_MSCA(port_num)); 4818 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { 4819 status = 0; 4820 break; 4821 } 4822 udelay(10); 4823 retry--; 4824 } while (retry); 4825 4826 return status; 4827 } 4828 4829 /** 4830 * i40e_read_phy_register_clause45 4831 * @hw: pointer to the HW structure 4832 * @page: registers page number 4833 * @reg: register address in the page 4834 * @phy_addr: PHY address on MDIO interface 4835 * @value: PHY register value 4836 * 4837 * Reads specified PHY register value 4838 **/ 4839 i40e_status i40e_read_phy_register_clause45(struct i40e_hw *hw, 4840 u8 page, u16 reg, u8 
phy_addr, u16 *value) 4841 { 4842 i40e_status status = I40E_ERR_TIMEOUT; 4843 u32 command = 0; 4844 u16 retry = 1000; 4845 u8 port_num = hw->func_caps.mdio_port_num; 4846 4847 command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) | 4848 (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) | 4849 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | 4850 (I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK) | 4851 (I40E_MDIO_CLAUSE45_STCODE_MASK) | 4852 (I40E_GLGEN_MSCA_MDICMD_MASK) | 4853 (I40E_GLGEN_MSCA_MDIINPROGEN_MASK); 4854 wr32(hw, I40E_GLGEN_MSCA(port_num), command); 4855 do { 4856 command = rd32(hw, I40E_GLGEN_MSCA(port_num)); 4857 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { 4858 status = 0; 4859 break; 4860 } 4861 usleep_range(10, 20); 4862 retry--; 4863 } while (retry); 4864 4865 if (status) { 4866 i40e_debug(hw, I40E_DEBUG_PHY, 4867 "PHY: Can't write command to external PHY.\n"); 4868 goto phy_read_end; 4869 } 4870 4871 command = (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) | 4872 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | 4873 (I40E_MDIO_CLAUSE45_OPCODE_READ_MASK) | 4874 (I40E_MDIO_CLAUSE45_STCODE_MASK) | 4875 (I40E_GLGEN_MSCA_MDICMD_MASK) | 4876 (I40E_GLGEN_MSCA_MDIINPROGEN_MASK); 4877 status = I40E_ERR_TIMEOUT; 4878 retry = 1000; 4879 wr32(hw, I40E_GLGEN_MSCA(port_num), command); 4880 do { 4881 command = rd32(hw, I40E_GLGEN_MSCA(port_num)); 4882 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { 4883 status = 0; 4884 break; 4885 } 4886 usleep_range(10, 20); 4887 retry--; 4888 } while (retry); 4889 4890 if (!status) { 4891 command = rd32(hw, I40E_GLGEN_MSRWD(port_num)); 4892 *value = (command & I40E_GLGEN_MSRWD_MDIRDDATA_MASK) >> 4893 I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT; 4894 } else { 4895 i40e_debug(hw, I40E_DEBUG_PHY, 4896 "PHY: Can't read register value from external PHY.\n"); 4897 } 4898 4899 phy_read_end: 4900 return status; 4901 } 4902 4903 /** 4904 * i40e_write_phy_register_clause45 4905 * @hw: pointer to the HW structure 4906 * @page: registers page number 4907 * @reg: register address in the page 4908 * @phy_addr: PHY address on MDIO interface 4909 * @value: PHY register value 4910 * 4911 * Writes value to specified PHY register 4912 **/ 4913 i40e_status i40e_write_phy_register_clause45(struct i40e_hw *hw, 4914 u8 page, u16 reg, u8 phy_addr, u16 value) 4915 { 4916 i40e_status status = I40E_ERR_TIMEOUT; 4917 u32 command = 0; 4918 u16 retry = 1000; 4919 u8 port_num = hw->func_caps.mdio_port_num; 4920 4921 command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) | 4922 (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) | 4923 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | 4924 (I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK) | 4925 (I40E_MDIO_CLAUSE45_STCODE_MASK) | 4926 (I40E_GLGEN_MSCA_MDICMD_MASK) | 4927 (I40E_GLGEN_MSCA_MDIINPROGEN_MASK); 4928 wr32(hw, I40E_GLGEN_MSCA(port_num), command); 4929 do { 4930 command = rd32(hw, I40E_GLGEN_MSCA(port_num)); 4931 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { 4932 status = 0; 4933 break; 4934 } 4935 usleep_range(10, 20); 4936 retry--; 4937 } while (retry); 4938 if (status) { 4939 i40e_debug(hw, I40E_DEBUG_PHY, 4940 "PHY: Can't write command to external PHY.\n"); 4941 goto phy_write_end; 4942 } 4943 4944 command = value << I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT; 4945 wr32(hw, I40E_GLGEN_MSRWD(port_num), command); 4946 4947 command = (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) | 4948 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | 4949 (I40E_MDIO_CLAUSE45_OPCODE_WRITE_MASK) | 4950 (I40E_MDIO_CLAUSE45_STCODE_MASK) | 4951 (I40E_GLGEN_MSCA_MDICMD_MASK) | 4952 (I40E_GLGEN_MSCA_MDIINPROGEN_MASK); 4953 status = 
I40E_ERR_TIMEOUT; 4954 retry = 1000; 4955 wr32(hw, I40E_GLGEN_MSCA(port_num), command); 4956 do { 4957 command = rd32(hw, I40E_GLGEN_MSCA(port_num)); 4958 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { 4959 status = 0; 4960 break; 4961 } 4962 usleep_range(10, 20); 4963 retry--; 4964 } while (retry); 4965 4966 phy_write_end: 4967 return status; 4968 } 4969 4970 /** 4971 * i40e_write_phy_register 4972 * @hw: pointer to the HW structure 4973 * @page: registers page number 4974 * @reg: register address in the page 4975 * @phy_addr: PHY address on MDIO interface 4976 * @value: PHY register value 4977 * 4978 * Writes value to specified PHY register 4979 **/ 4980 i40e_status i40e_write_phy_register(struct i40e_hw *hw, 4981 u8 page, u16 reg, u8 phy_addr, u16 value) 4982 { 4983 i40e_status status; 4984 4985 switch (hw->device_id) { 4986 case I40E_DEV_ID_1G_BASE_T_X722: 4987 status = i40e_write_phy_register_clause22(hw, reg, phy_addr, 4988 value); 4989 break; 4990 case I40E_DEV_ID_5G_BASE_T_BC: 4991 case I40E_DEV_ID_10G_BASE_T: 4992 case I40E_DEV_ID_10G_BASE_T4: 4993 case I40E_DEV_ID_10G_BASE_T_BC: 4994 case I40E_DEV_ID_10G_BASE_T_X722: 4995 case I40E_DEV_ID_25G_B: 4996 case I40E_DEV_ID_25G_SFP28: 4997 status = i40e_write_phy_register_clause45(hw, page, reg, 4998 phy_addr, value); 4999 break; 5000 default: 5001 status = I40E_ERR_UNKNOWN_PHY; 5002 break; 5003 } 5004 5005 return status; 5006 } 5007 5008 /** 5009 * i40e_read_phy_register 5010 * @hw: pointer to the HW structure 5011 * @page: registers page number 5012 * @reg: register address in the page 5013 * @phy_addr: PHY address on MDIO interface 5014 * @value: PHY register value 5015 * 5016 * Reads specified PHY register value 5017 **/ 5018 i40e_status i40e_read_phy_register(struct i40e_hw *hw, 5019 u8 page, u16 reg, u8 phy_addr, u16 *value) 5020 { 5021 i40e_status status; 5022 5023 switch (hw->device_id) { 5024 case I40E_DEV_ID_1G_BASE_T_X722: 5025 status = i40e_read_phy_register_clause22(hw, reg, phy_addr, 5026 value); 5027 break; 5028 case I40E_DEV_ID_5G_BASE_T_BC: 5029 case I40E_DEV_ID_10G_BASE_T: 5030 case I40E_DEV_ID_10G_BASE_T4: 5031 case I40E_DEV_ID_10G_BASE_T_BC: 5032 case I40E_DEV_ID_10G_BASE_T_X722: 5033 case I40E_DEV_ID_25G_B: 5034 case I40E_DEV_ID_25G_SFP28: 5035 status = i40e_read_phy_register_clause45(hw, page, reg, 5036 phy_addr, value); 5037 break; 5038 default: 5039 status = I40E_ERR_UNKNOWN_PHY; 5040 break; 5041 } 5042 5043 return status; 5044 } 5045 5046 /** 5047 * i40e_get_phy_address 5048 * @hw: pointer to the HW structure 5049 * @dev_num: PHY port number whose address we want 5050 * 5051 * Gets PHY address for current port 5052 **/ 5053 u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num) 5054 { 5055 u8 port_num = hw->func_caps.mdio_port_num; 5056 u32 reg_val = rd32(hw, I40E_GLGEN_MDIO_I2C_SEL(port_num)); 5057 5058 return (u8)(reg_val >> ((dev_num + 1) * 5)) & 0x1f; 5059 } 5060 5061 /** 5062 * i40e_blink_phy_link_led 5063 * @hw: pointer to the HW structure 5064 * @time: how long the LED should blink, in seconds 5065 * @interval: gap between LED on and off in msecs 5066 * 5067 * Blinks PHY link LED 5068 **/ 5069 i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw, 5070 u32 time, u32 interval) 5071 { 5072 i40e_status status = 0; 5073 u32 i; 5074 u16 led_ctl; 5075 u16 gpio_led_port; 5076 u16 led_reg; 5077 u16 led_addr = I40E_PHY_LED_PROV_REG_1; 5078 u8 phy_addr = 0; 5079 u8 port_num; 5080 5081 i = rd32(hw, I40E_PFGEN_PORTNUM); 5082 port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK); 5083 phy_addr = i40e_get_phy_address(hw,
port_num); 5084 5085 for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++, 5086 led_addr++) { 5087 status = i40e_read_phy_register_clause45(hw, 5088 I40E_PHY_COM_REG_PAGE, 5089 led_addr, phy_addr, 5090 &led_reg); 5091 if (status) 5092 goto phy_blinking_end; 5093 led_ctl = led_reg; 5094 if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) { 5095 led_reg = 0; 5096 status = i40e_write_phy_register_clause45(hw, 5097 I40E_PHY_COM_REG_PAGE, 5098 led_addr, phy_addr, 5099 led_reg); 5100 if (status) 5101 goto phy_blinking_end; 5102 break; 5103 } 5104 } 5105 5106 if (time > 0 && interval > 0) { 5107 for (i = 0; i < time * 1000; i += interval) { 5108 status = i40e_read_phy_register_clause45(hw, 5109 I40E_PHY_COM_REG_PAGE, 5110 led_addr, phy_addr, &led_reg); 5111 if (status) 5112 goto restore_config; 5113 if (led_reg & I40E_PHY_LED_MANUAL_ON) 5114 led_reg = 0; 5115 else 5116 led_reg = I40E_PHY_LED_MANUAL_ON; 5117 status = i40e_write_phy_register_clause45(hw, 5118 I40E_PHY_COM_REG_PAGE, 5119 led_addr, phy_addr, led_reg); 5120 if (status) 5121 goto restore_config; 5122 msleep(interval); 5123 } 5124 } 5125 5126 restore_config: 5127 status = i40e_write_phy_register_clause45(hw, 5128 I40E_PHY_COM_REG_PAGE, 5129 led_addr, phy_addr, led_ctl); 5130 5131 phy_blinking_end: 5132 return status; 5133 } 5134 5135 /** 5136 * i40e_led_get_reg - read LED register 5137 * @hw: pointer to the HW structure 5138 * @led_addr: LED register address 5139 * @reg_val: read register value 5140 **/ 5141 static enum i40e_status_code i40e_led_get_reg(struct i40e_hw *hw, u16 led_addr, 5142 u32 *reg_val) 5143 { 5144 enum i40e_status_code status; 5145 u8 phy_addr = 0; 5146 u8 port_num; 5147 u32 i; 5148 5149 *reg_val = 0; 5150 if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) { 5151 status = 5152 i40e_aq_get_phy_register(hw, 5153 I40E_AQ_PHY_REG_ACCESS_EXTERNAL, 5154 I40E_PHY_COM_REG_PAGE, true, 5155 I40E_PHY_LED_PROV_REG_1, 5156 reg_val, NULL); 5157 } else { 5158 i = rd32(hw, I40E_PFGEN_PORTNUM); 5159 port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK); 5160 phy_addr = i40e_get_phy_address(hw, port_num); 5161 status = i40e_read_phy_register_clause45(hw, 5162 I40E_PHY_COM_REG_PAGE, 5163 led_addr, phy_addr, 5164 (u16 *)reg_val); 5165 } 5166 return status; 5167 } 5168 5169 /** 5170 * i40e_led_set_reg - write LED register 5171 * @hw: pointer to the HW structure 5172 * @led_addr: LED register address 5173 * @reg_val: register value to write 5174 **/ 5175 static enum i40e_status_code i40e_led_set_reg(struct i40e_hw *hw, u16 led_addr, 5176 u32 reg_val) 5177 { 5178 enum i40e_status_code status; 5179 u8 phy_addr = 0; 5180 u8 port_num; 5181 u32 i; 5182 5183 if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) { 5184 status = 5185 i40e_aq_set_phy_register(hw, 5186 I40E_AQ_PHY_REG_ACCESS_EXTERNAL, 5187 I40E_PHY_COM_REG_PAGE, true, 5188 I40E_PHY_LED_PROV_REG_1, 5189 reg_val, NULL); 5190 } else { 5191 i = rd32(hw, I40E_PFGEN_PORTNUM); 5192 port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK); 5193 phy_addr = i40e_get_phy_address(hw, port_num); 5194 status = i40e_write_phy_register_clause45(hw, 5195 I40E_PHY_COM_REG_PAGE, 5196 led_addr, phy_addr, 5197 (u16)reg_val); 5198 } 5199 5200 return status; 5201 } 5202 5203 /** 5204 * i40e_led_get_phy - return current on/off mode 5205 * @hw: pointer to the hw struct 5206 * @led_addr: address of led register to use 5207 * @val: original value of register to use 5208 * 5209 **/ 5210 i40e_status i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr, 5211 u16 *val) 5212 { 5213 i40e_status status = 0; 5214 u16 
/**
 * i40e_led_get_phy - return current on/off mode
 * @hw: pointer to the hw struct
 * @led_addr: address of led register to use
 * @val: original value of register to use
 *
 **/
i40e_status i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
			     u16 *val)
{
	i40e_status status = 0;
	u16 gpio_led_port;
	u8 phy_addr = 0;
	u16 reg_val;
	u16 temp_addr;
	u8 port_num;
	u32 i;
	u32 reg_val_aq;

	if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
		status =
		      i40e_aq_get_phy_register(hw,
					       I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
					       I40E_PHY_COM_REG_PAGE, true,
					       I40E_PHY_LED_PROV_REG_1,
					       &reg_val_aq, NULL);
		if (status == I40E_SUCCESS)
			*val = (u16)reg_val_aq;
		return status;
	}
	temp_addr = I40E_PHY_LED_PROV_REG_1;
	i = rd32(hw, I40E_PFGEN_PORTNUM);
	port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
	phy_addr = i40e_get_phy_address(hw, port_num);

	for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++,
	     temp_addr++) {
		status = i40e_read_phy_register_clause45(hw,
							 I40E_PHY_COM_REG_PAGE,
							 temp_addr, phy_addr,
							 &reg_val);
		if (status)
			return status;
		*val = reg_val;
		if (reg_val & I40E_PHY_LED_LINK_MODE_MASK) {
			*led_addr = temp_addr;
			break;
		}
	}
	return status;
}

/**
 * i40e_led_set_phy
 * @hw: pointer to the HW structure
 * @on: true or false
 * @led_addr: address of led register to use
 * @mode: original val plus bit for set or ignore
 *
 * Set LEDs on or off when controlled by the PHY
 *
 **/
i40e_status i40e_led_set_phy(struct i40e_hw *hw, bool on,
			     u16 led_addr, u32 mode)
{
	i40e_status status = 0;
	u32 led_ctl = 0;
	u32 led_reg = 0;

	status = i40e_led_get_reg(hw, led_addr, &led_reg);
	if (status)
		return status;
	led_ctl = led_reg;
	if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) {
		led_reg = 0;
		status = i40e_led_set_reg(hw, led_addr, led_reg);
		if (status)
			return status;
	}
	status = i40e_led_get_reg(hw, led_addr, &led_reg);
	if (status)
		goto restore_config;
	if (on)
		led_reg = I40E_PHY_LED_MANUAL_ON;
	else
		led_reg = 0;

	status = i40e_led_set_reg(hw, led_addr, led_reg);
	if (status)
		goto restore_config;
	if (mode & I40E_PHY_LED_MODE_ORIG) {
		led_ctl = (mode & I40E_PHY_LED_MODE_MASK);
		status = i40e_led_set_reg(hw, led_addr, led_ctl);
	}
	return status;

restore_config:
	status = i40e_led_set_reg(hw, led_addr, led_ctl);
	return status;
}

/**
 * i40e_aq_rx_ctl_read_register - use FW to read from an Rx control register
 * @hw: pointer to the hw struct
 * @reg_addr: register address
 * @reg_val: ptr to register value
 * @cmd_details: pointer to command details structure or NULL
 *
 * Use the firmware to read the Rx control register,
 * especially useful if the Rx unit is under heavy pressure
 **/
i40e_status i40e_aq_rx_ctl_read_register(struct i40e_hw *hw,
					 u32 reg_addr, u32 *reg_val,
					 struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_rx_ctl_reg_read_write *cmd_resp =
		(struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw;
	i40e_status status;

	if (!reg_val)
		return I40E_ERR_PARAM;

	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_rx_ctl_reg_read);

	cmd_resp->address = cpu_to_le32(reg_addr);

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	if (status == 0)
		*reg_val = le32_to_cpu(cmd_resp->value);

	return status;
}
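/*
 * Illustrative usage sketch (editor's addition, not part of the upstream
 * driver): forcing the PHY-controlled port LED on and later restoring the
 * original behaviour, e.g. for an "identify adapter" operation.  Assumes a
 * valid struct i40e_hw; error handling is omitted for brevity.
 *
 *	u16 led_addr, orig_val;
 *
 *	i40e_led_get_phy(hw, &led_addr, &orig_val);	// find active LED reg
 *	i40e_led_set_phy(hw, true, led_addr, 0);	// force LED on
 *	...
 *	// restore the original LED mode bits
 *	i40e_led_set_phy(hw, false, led_addr,
 *			 orig_val | I40E_PHY_LED_MODE_ORIG);
 */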
/**
 * i40e_read_rx_ctl - read from an Rx control register
 * @hw: pointer to the hw struct
 * @reg_addr: register address
 **/
u32 i40e_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr)
{
	i40e_status status = 0;
	bool use_register;
	int retry = 5;
	u32 val = 0;

	use_register = (((hw->aq.api_maj_ver == 1) &&
			 (hw->aq.api_min_ver < 5)) ||
			(hw->mac.type == I40E_MAC_X722));
	if (!use_register) {
do_retry:
		status = i40e_aq_rx_ctl_read_register(hw, reg_addr, &val, NULL);
		if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) {
			usleep_range(1000, 2000);
			retry--;
			goto do_retry;
		}
	}

	/* if the AQ access failed, try the old-fashioned way */
	if (status || use_register)
		val = rd32(hw, reg_addr);

	return val;
}

/**
 * i40e_aq_rx_ctl_write_register
 * @hw: pointer to the hw struct
 * @reg_addr: register address
 * @reg_val: register value
 * @cmd_details: pointer to command details structure or NULL
 *
 * Use the firmware to write to an Rx control register,
 * especially useful if the Rx unit is under heavy pressure
 **/
i40e_status i40e_aq_rx_ctl_write_register(struct i40e_hw *hw,
					  u32 reg_addr, u32 reg_val,
					  struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_rx_ctl_reg_read_write *cmd =
		(struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw;
	i40e_status status;

	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_rx_ctl_reg_write);

	cmd->address = cpu_to_le32(reg_addr);
	cmd->value = cpu_to_le32(reg_val);

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	return status;
}

/**
 * i40e_write_rx_ctl - write to an Rx control register
 * @hw: pointer to the hw struct
 * @reg_addr: register address
 * @reg_val: register value
 **/
void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val)
{
	i40e_status status = 0;
	bool use_register;
	int retry = 5;

	use_register = (((hw->aq.api_maj_ver == 1) &&
			 (hw->aq.api_min_ver < 5)) ||
			(hw->mac.type == I40E_MAC_X722));
	if (!use_register) {
do_retry:
		status = i40e_aq_rx_ctl_write_register(hw, reg_addr,
						       reg_val, NULL);
		if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) {
			usleep_range(1000, 2000);
			retry--;
			goto do_retry;
		}
	}

	/* if the AQ access failed, try the old-fashioned way */
	if (status || use_register)
		wr32(hw, reg_addr, reg_val);
}

/**
 * i40e_mdio_if_number_selection - MDIO I/F number selection
 * @hw: pointer to the hw struct
 * @set_mdio: use MDIO I/F number specified by mdio_num
 * @mdio_num: MDIO I/F number
 * @cmd: pointer to PHY Register command structure
 **/
static void i40e_mdio_if_number_selection(struct i40e_hw *hw, bool set_mdio,
					  u8 mdio_num,
					  struct i40e_aqc_phy_register_access *cmd)
{
	if (set_mdio && cmd->phy_interface == I40E_AQ_PHY_REG_ACCESS_EXTERNAL) {
		if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_EXTENDED)
			cmd->cmd_flags |=
				I40E_AQ_PHY_REG_ACCESS_SET_MDIO_IF_NUMBER |
				((mdio_num <<
				I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_SHIFT) &
				I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_MASK);
		else
			i40e_debug(hw, I40E_DEBUG_PHY,
				   "MDIO I/F number selection not supported by current FW version.\n");
	}
}
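/*
 * Illustrative usage sketch (editor's addition, not part of the upstream
 * driver): a read-modify-write of an Rx control register through the
 * wrappers above, which fall back to direct MMIO access on older firmware
 * or X722.  SOME_RXCTL_REG and SOME_BIT are placeholder names, not real
 * register definitions.
 *
 *	u32 val = i40e_read_rx_ctl(hw, SOME_RXCTL_REG);
 *
 *	val |= SOME_BIT;
 *	i40e_write_rx_ctl(hw, SOME_RXCTL_REG, val);
 */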
/**
 * i40e_aq_set_phy_register_ext
 * @hw: pointer to the hw struct
 * @phy_select: select which phy should be accessed
 * @dev_addr: PHY device address
 * @page_change: flag to indicate if phy page should be updated
 * @set_mdio: use MDIO I/F number specified by mdio_num
 * @mdio_num: MDIO I/F number
 * @reg_addr: PHY register address
 * @reg_val: new register value
 * @cmd_details: pointer to command details structure or NULL
 *
 * Write the external PHY register.
 * NOTE: In common cases the MDIO I/F number should not be changed, which is
 * why you may use the simple wrapper i40e_aq_set_phy_register.
 **/
enum i40e_status_code i40e_aq_set_phy_register_ext(struct i40e_hw *hw,
			     u8 phy_select, u8 dev_addr, bool page_change,
			     bool set_mdio, u8 mdio_num,
			     u32 reg_addr, u32 reg_val,
			     struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_phy_register_access *cmd =
		(struct i40e_aqc_phy_register_access *)&desc.params.raw;
	i40e_status status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_set_phy_register);

	cmd->phy_interface = phy_select;
	cmd->dev_address = dev_addr;
	cmd->reg_address = cpu_to_le32(reg_addr);
	cmd->reg_value = cpu_to_le32(reg_val);

	i40e_mdio_if_number_selection(hw, set_mdio, mdio_num, cmd);

	if (!page_change)
		cmd->cmd_flags = I40E_AQ_PHY_REG_ACCESS_DONT_CHANGE_QSFP_PAGE;

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	return status;
}
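/*
 * Illustrative usage sketch (editor's addition, not part of the upstream
 * driver): the simple i40e_aq_set_phy_register()/i40e_aq_get_phy_register()
 * wrappers used earlier in this file take the same leading arguments and,
 * per the NOTE above, presumably leave the MDIO I/F number untouched; the
 * _ext variant is only needed when a specific MDIO interface must be
 * selected.  Register and value operands below are placeholders.
 *
 *	// common case: no MDIO I/F override needed
 *	i40e_aq_set_phy_register(hw, I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
 *				 I40E_PHY_COM_REG_PAGE, true,
 *				 I40E_PHY_LED_PROV_REG_1, 0, NULL);
 *
 *	// explicit MDIO I/F selection (requires FW with extended access)
 *	i40e_aq_set_phy_register_ext(hw, I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
 *				     I40E_PHY_COM_REG_PAGE, true,
 *				     true, 1, I40E_PHY_LED_PROV_REG_1, 0,
 *				     NULL);
 */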
/**
 * i40e_aq_get_phy_register_ext
 * @hw: pointer to the hw struct
 * @phy_select: select which phy should be accessed
 * @dev_addr: PHY device address
 * @page_change: flag to indicate if phy page should be updated
 * @set_mdio: use MDIO I/F number specified by mdio_num
 * @mdio_num: MDIO I/F number
 * @reg_addr: PHY register address
 * @reg_val: read register value
 * @cmd_details: pointer to command details structure or NULL
 *
 * Read the external PHY register.
 * NOTE: In common cases the MDIO I/F number should not be changed, which is
 * why you may use the simple wrapper i40e_aq_get_phy_register.
 **/
enum i40e_status_code i40e_aq_get_phy_register_ext(struct i40e_hw *hw,
			     u8 phy_select, u8 dev_addr, bool page_change,
			     bool set_mdio, u8 mdio_num,
			     u32 reg_addr, u32 *reg_val,
			     struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_phy_register_access *cmd =
		(struct i40e_aqc_phy_register_access *)&desc.params.raw;
	i40e_status status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_get_phy_register);

	cmd->phy_interface = phy_select;
	cmd->dev_address = dev_addr;
	cmd->reg_address = cpu_to_le32(reg_addr);

	i40e_mdio_if_number_selection(hw, set_mdio, mdio_num, cmd);

	if (!page_change)
		cmd->cmd_flags = I40E_AQ_PHY_REG_ACCESS_DONT_CHANGE_QSFP_PAGE;

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
	if (!status)
		*reg_val = le32_to_cpu(cmd->reg_value);

	return status;
}

/**
 * i40e_aq_write_ddp - Write dynamic device personalization (ddp)
 * @hw: pointer to the hw struct
 * @buff: command buffer (size in bytes = buff_size)
 * @buff_size: buffer size in bytes
 * @track_id: package tracking id
 * @error_offset: returns error offset
 * @error_info: returns error information
 * @cmd_details: pointer to command details structure or NULL
 **/
enum
i40e_status_code i40e_aq_write_ddp(struct i40e_hw *hw, void *buff,
				   u16 buff_size, u32 track_id,
				   u32 *error_offset, u32 *error_info,
				   struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_write_personalization_profile *cmd =
		(struct i40e_aqc_write_personalization_profile *)
		&desc.params.raw;
	struct i40e_aqc_write_ddp_resp *resp;
	i40e_status status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_write_personalization_profile);

	desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD);
	if (buff_size > I40E_AQ_LARGE_BUF)
		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);

	desc.datalen = cpu_to_le16(buff_size);

	cmd->profile_track_id = cpu_to_le32(track_id);

	status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
	if (!status) {
		resp = (struct i40e_aqc_write_ddp_resp *)&desc.params.raw;
		if (error_offset)
			*error_offset = le32_to_cpu(resp->error_offset);
		if (error_info)
			*error_info = le32_to_cpu(resp->error_info);
	}

	return status;
}
/**
 * i40e_aq_get_ddp_list - Read dynamic device personalization (ddp)
 * @hw: pointer to the hw struct
 * @buff: command buffer (size in bytes = buff_size)
 * @buff_size: buffer size in bytes
 * @flags: AdminQ command flags
 * @cmd_details: pointer to command details structure or NULL
 **/
enum
i40e_status_code i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
				      u16 buff_size, u8 flags,
				      struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_get_applied_profiles *cmd =
		(struct i40e_aqc_get_applied_profiles *)&desc.params.raw;
	i40e_status status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_get_personalization_profile_list);

	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
	if (buff_size > I40E_AQ_LARGE_BUF)
		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
	desc.datalen = cpu_to_le16(buff_size);

	cmd->flags = flags;

	status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);

	return status;
}

/**
 * i40e_find_segment_in_package
 * @segment_type: the segment type to search for (e.g., SEGMENT_TYPE_I40E)
 * @pkg_hdr: pointer to the package header to be searched
 *
 * This function searches a package file for a particular segment type. On
 * success it returns a pointer to the segment header, otherwise it will
 * return NULL.
 **/
struct i40e_generic_seg_header *
i40e_find_segment_in_package(u32 segment_type,
			     struct i40e_package_header *pkg_hdr)
{
	struct i40e_generic_seg_header *segment;
	u32 i;

	/* Search all package segments for the requested segment type */
	for (i = 0; i < pkg_hdr->segment_count; i++) {
		segment =
			(struct i40e_generic_seg_header *)((u8 *)pkg_hdr +
			 pkg_hdr->segment_offset[i]);

		if (segment->type == segment_type)
			return segment;
	}

	return NULL;
}

/* Get section table in profile */
#define I40E_SECTION_TABLE(profile, sec_tbl)				\
	do {								\
		struct i40e_profile_segment *p = (profile);		\
		u32 count;						\
		u32 *nvm;						\
		count = p->device_table_count;				\
		nvm = (u32 *)&p->device_table[count];			\
		sec_tbl = (struct i40e_section_table *)&nvm[nvm[0] + 1]; \
	} while (0)

/* Get section header in profile */
#define I40E_SECTION_HEADER(profile, offset)				\
	(struct i40e_profile_section_header *)((u8 *)(profile) + (offset))
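/*
 * Illustrative usage sketch (editor's addition, not part of the upstream
 * driver): locating the i40e profile segment inside a DDP package image and
 * then a specific section within it.  "pkg_hdr" is assumed to point at a
 * package image already read into memory by the caller.
 *
 *	struct i40e_profile_segment *profile;
 *	struct i40e_profile_section_header *note;
 *
 *	profile = (struct i40e_profile_segment *)
 *		  i40e_find_segment_in_package(SEGMENT_TYPE_I40E, pkg_hdr);
 *	if (!profile)
 *		return;		// no i40e segment in this package
 *
 *	note = i40e_find_section_in_profile(SECTION_TYPE_NOTE, profile);
 */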
/**
 * i40e_find_section_in_profile
 * @section_type: the section type to search for (e.g., SECTION_TYPE_NOTE)
 * @profile: pointer to the i40e segment header to be searched
 *
 * This function searches the i40e segment for a particular section type. On
 * success it returns a pointer to the section header, otherwise it will
 * return NULL.
 **/
struct i40e_profile_section_header *
i40e_find_section_in_profile(u32 section_type,
			     struct i40e_profile_segment *profile)
{
	struct i40e_profile_section_header *sec;
	struct i40e_section_table *sec_tbl;
	u32 sec_off;
	u32 i;

	if (profile->header.type != SEGMENT_TYPE_I40E)
		return NULL;

	I40E_SECTION_TABLE(profile, sec_tbl);

	for (i = 0; i < sec_tbl->section_count; i++) {
		sec_off = sec_tbl->section_offset[i];
		sec = I40E_SECTION_HEADER(profile, sec_off);
		if (sec->section.type == section_type)
			return sec;
	}

	return NULL;
}

/**
 * i40e_ddp_exec_aq_section - Execute generic AQ for DDP
 * @hw: pointer to the hw struct
 * @aq: command buffer containing all data to execute AQ
 **/
static enum
i40e_status_code i40e_ddp_exec_aq_section(struct i40e_hw *hw,
					  struct i40e_profile_aq_section *aq)
{
	i40e_status status;
	struct i40e_aq_desc desc;
	u8 *msg = NULL;
	u16 msglen;

	i40e_fill_default_direct_cmd_desc(&desc, aq->opcode);
	desc.flags |= cpu_to_le16(aq->flags);
	memcpy(desc.params.raw, aq->param, sizeof(desc.params.raw));

	msglen = aq->datalen;
	if (msglen) {
		desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF |
						I40E_AQ_FLAG_RD));
		if (msglen > I40E_AQ_LARGE_BUF)
			desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
		desc.datalen = cpu_to_le16(msglen);
		msg = &aq->data[0];
	}

	status = i40e_asq_send_command(hw, &desc, msg, msglen, NULL);

	if (status) {
		i40e_debug(hw, I40E_DEBUG_PACKAGE,
			   "unable to exec DDP AQ opcode %u, error %d\n",
			   aq->opcode, status);
		return status;
	}

	/* copy returned desc to aq_buf */
	memcpy(aq->param, desc.params.raw, sizeof(desc.params.raw));

	return 0;
}
/**
 * i40e_validate_profile
 * @hw: pointer to the hardware structure
 * @profile: pointer to the profile segment of the package to be validated
 * @track_id: package tracking id
 * @rollback: flag if the profile is for rollback.
 *
 * Validates supported devices and profile's sections.
 */
static enum i40e_status_code
i40e_validate_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
		      u32 track_id, bool rollback)
{
	struct i40e_profile_section_header *sec = NULL;
	i40e_status status = 0;
	struct i40e_section_table *sec_tbl;
	u32 vendor_dev_id;
	u32 dev_cnt;
	u32 sec_off;
	u32 i;

	if (track_id == I40E_DDP_TRACKID_INVALID) {
		i40e_debug(hw, I40E_DEBUG_PACKAGE, "Invalid track_id\n");
		return I40E_NOT_SUPPORTED;
	}

	dev_cnt = profile->device_table_count;
	for (i = 0; i < dev_cnt; i++) {
		vendor_dev_id = profile->device_table[i].vendor_dev_id;
		if ((vendor_dev_id >> 16) == PCI_VENDOR_ID_INTEL &&
		    hw->device_id == (vendor_dev_id & 0xFFFF))
			break;
	}
	if (dev_cnt && i == dev_cnt) {
		i40e_debug(hw, I40E_DEBUG_PACKAGE,
			   "Device doesn't support DDP\n");
		return I40E_ERR_DEVICE_NOT_SUPPORTED;
	}

	I40E_SECTION_TABLE(profile, sec_tbl);

	/* Validate section types */
	for (i = 0; i < sec_tbl->section_count; i++) {
		sec_off = sec_tbl->section_offset[i];
		sec = I40E_SECTION_HEADER(profile, sec_off);
		if (rollback) {
			if (sec->section.type == SECTION_TYPE_MMIO ||
			    sec->section.type == SECTION_TYPE_AQ ||
			    sec->section.type == SECTION_TYPE_RB_AQ) {
				i40e_debug(hw, I40E_DEBUG_PACKAGE,
					   "Not a roll-back package\n");
				return I40E_NOT_SUPPORTED;
			}
		} else {
			if (sec->section.type == SECTION_TYPE_RB_AQ ||
			    sec->section.type == SECTION_TYPE_RB_MMIO) {
				i40e_debug(hw, I40E_DEBUG_PACKAGE,
					   "Not an original package\n");
				return I40E_NOT_SUPPORTED;
			}
		}
	}

	return status;
}
/**
 * i40e_write_profile
 * @hw: pointer to the hardware structure
 * @profile: pointer to the profile segment of the package to be downloaded
 * @track_id: package tracking id
 *
 * Handles the download of a complete package.
 */
enum i40e_status_code
i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
		   u32 track_id)
{
	i40e_status status = 0;
	struct i40e_section_table *sec_tbl;
	struct i40e_profile_section_header *sec = NULL;
	struct i40e_profile_aq_section *ddp_aq;
	u32 section_size = 0;
	u32 offset = 0, info = 0;
	u32 sec_off;
	u32 i;

	status = i40e_validate_profile(hw, profile, track_id, false);
	if (status)
		return status;

	I40E_SECTION_TABLE(profile, sec_tbl);

	for (i = 0; i < sec_tbl->section_count; i++) {
		sec_off = sec_tbl->section_offset[i];
		sec = I40E_SECTION_HEADER(profile, sec_off);
		/* Process generic admin command */
		if (sec->section.type == SECTION_TYPE_AQ) {
			ddp_aq = (struct i40e_profile_aq_section *)&sec[1];
			status = i40e_ddp_exec_aq_section(hw, ddp_aq);
			if (status) {
				i40e_debug(hw, I40E_DEBUG_PACKAGE,
					   "Failed to execute aq: section %d, opcode %u\n",
					   i, ddp_aq->opcode);
				break;
			}
			sec->section.type = SECTION_TYPE_RB_AQ;
		}

		/* Skip any non-mmio sections */
		if (sec->section.type != SECTION_TYPE_MMIO)
			continue;

		section_size = sec->section.size +
			sizeof(struct i40e_profile_section_header);

		/* Write MMIO section */
		status = i40e_aq_write_ddp(hw, (void *)sec, (u16)section_size,
					   track_id, &offset, &info, NULL);
		if (status) {
			i40e_debug(hw, I40E_DEBUG_PACKAGE,
				   "Failed to write profile: section %d, offset %d, info %d\n",
				   i, offset, info);
			break;
		}
	}
	return status;
}

/**
 * i40e_rollback_profile
 * @hw: pointer to the hardware structure
 * @profile: pointer to the profile segment of the package to be removed
 * @track_id: package tracking id
 *
 * Rolls back a previously loaded package.
 */
enum i40e_status_code
i40e_rollback_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
		      u32 track_id)
{
	struct i40e_profile_section_header *sec = NULL;
	i40e_status status = 0;
	struct i40e_section_table *sec_tbl;
	u32 offset = 0, info = 0;
	u32 section_size = 0;
	u32 sec_off;
	int i;

	status = i40e_validate_profile(hw, profile, track_id, true);
	if (status)
		return status;

	I40E_SECTION_TABLE(profile, sec_tbl);

	/* For rollback write sections in reverse */
	for (i = sec_tbl->section_count - 1; i >= 0; i--) {
		sec_off = sec_tbl->section_offset[i];
		sec = I40E_SECTION_HEADER(profile, sec_off);

		/* Skip any non-rollback sections */
		if (sec->section.type != SECTION_TYPE_RB_MMIO)
			continue;

		section_size = sec->section.size +
			sizeof(struct i40e_profile_section_header);

		/* Write roll-back MMIO section */
		status = i40e_aq_write_ddp(hw, (void *)sec, (u16)section_size,
					   track_id, &offset, &info, NULL);
		if (status) {
			i40e_debug(hw, I40E_DEBUG_PACKAGE,
				   "Failed to write profile: section %d, offset %d, info %d\n",
				   i, offset, info);
			break;
		}
	}
	return status;
}
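/*
 * Illustrative usage sketch (editor's addition, not part of the upstream
 * driver): the typical flow for applying a DDP package once its image has
 * been read into memory.  "pkg_hdr", "track_id" and "pinfo_sec" are
 * assumptions supplied by the caller; real callers also size pinfo_sec to
 * hold the profile info section built by i40e_add_pinfo_to_list() below.
 *
 *	struct i40e_profile_segment *profile;
 *
 *	profile = (struct i40e_profile_segment *)
 *		  i40e_find_segment_in_package(SEGMENT_TYPE_I40E, pkg_hdr);
 *	if (!profile)
 *		return I40E_ERR_PARAM;
 *
 *	if (!i40e_write_profile(hw, profile, track_id))
 *		i40e_add_pinfo_to_list(hw, profile, pinfo_sec, track_id);
 */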
/**
 * i40e_add_pinfo_to_list
 * @hw: pointer to the hardware structure
 * @profile: pointer to the profile segment of the package
 * @profile_info_sec: buffer for information section
 * @track_id: package tracking id
 *
 * Register a profile to the list of loaded profiles.
 */
enum i40e_status_code
i40e_add_pinfo_to_list(struct i40e_hw *hw,
		       struct i40e_profile_segment *profile,
		       u8 *profile_info_sec, u32 track_id)
{
	i40e_status status = 0;
	struct i40e_profile_section_header *sec = NULL;
	struct i40e_profile_info *pinfo;
	u32 offset = 0, info = 0;

	sec = (struct i40e_profile_section_header *)profile_info_sec;
	sec->tbl_size = 1;
	sec->data_end = sizeof(struct i40e_profile_section_header) +
			sizeof(struct i40e_profile_info);
	sec->section.type = SECTION_TYPE_INFO;
	sec->section.offset = sizeof(struct i40e_profile_section_header);
	sec->section.size = sizeof(struct i40e_profile_info);
	pinfo = (struct i40e_profile_info *)(profile_info_sec +
					     sec->section.offset);
	pinfo->track_id = track_id;
	pinfo->version = profile->version;
	pinfo->op = I40E_DDP_ADD_TRACKID;
	memcpy(pinfo->name, profile->name, I40E_DDP_NAME_SIZE);

	status = i40e_aq_write_ddp(hw, (void *)sec, sec->data_end,
				   track_id, &offset, &info, NULL);

	return status;
}

/**
 * i40e_aq_add_cloud_filters
 * @hw: pointer to the hardware structure
 * @seid: VSI seid to add cloud filters to
 * @filters: Buffer which contains the filters to be added
 * @filter_count: number of filters contained in the buffer
 *
 * Set the cloud filters for a given VSI.  The contents of the
 * i40e_aqc_cloud_filters_element_data are filled in by the caller
 * of the function.
 **/
enum i40e_status_code
i40e_aq_add_cloud_filters(struct i40e_hw *hw, u16 seid,
			  struct i40e_aqc_cloud_filters_element_data *filters,
			  u8 filter_count)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_add_remove_cloud_filters *cmd =
		(struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
	enum i40e_status_code status;
	u16 buff_len;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_add_cloud_filters);

	buff_len = filter_count * sizeof(*filters);
	desc.datalen = cpu_to_le16(buff_len);
	desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
	cmd->num_filters = filter_count;
	cmd->seid = cpu_to_le16(seid);

	status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);

	return status;
}
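/*
 * Illustrative usage sketch (editor's addition, not part of the upstream
 * driver): adding a single cloud filter to a VSI.  The element's match
 * fields and flags come from the adminq command definitions and are filled
 * by the caller; only the call pattern is shown here, with "vsi_seid" as an
 * assumed value.
 *
 *	struct i40e_aqc_cloud_filters_element_data elem = {};
 *
 *	// caller fills elem (MAC/VLAN/tenant id, flags, queue) as needed
 *	status = i40e_aq_add_cloud_filters(hw, vsi_seid, &elem, 1);
 */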
/**
 * i40e_aq_add_cloud_filters_bb
 * @hw: pointer to the hardware structure
 * @seid: VSI seid to add cloud filters to
 * @filters: Buffer which contains the filters in big buffer to be added
 * @filter_count: number of filters contained in the buffer
 *
 * Set the big buffer cloud filters for a given VSI.  The contents of the
 * i40e_aqc_cloud_filters_element_bb are filled in by the caller of the
 * function.
 **/
enum i40e_status_code
i40e_aq_add_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
			     struct i40e_aqc_cloud_filters_element_bb *filters,
			     u8 filter_count)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_add_remove_cloud_filters *cmd =
		(struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
	i40e_status status;
	u16 buff_len;
	int i;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_add_cloud_filters);

	buff_len = filter_count * sizeof(*filters);
	desc.datalen = cpu_to_le16(buff_len);
	desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
	cmd->num_filters = filter_count;
	cmd->seid = cpu_to_le16(seid);
	cmd->big_buffer_flag = I40E_AQC_ADD_CLOUD_CMD_BB;

	for (i = 0; i < filter_count; i++) {
		u16 tnl_type;
		u32 ti;

		tnl_type = (le16_to_cpu(filters[i].element.flags) &
			    I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >>
			   I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT;

		/* Due to hardware eccentricities, the VNI for Geneve is shifted
		 * one more byte further than normally used for Tenant ID in
		 * other tunnel types.
		 */
		if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) {
			ti = le32_to_cpu(filters[i].element.tenant_id);
			filters[i].element.tenant_id = cpu_to_le32(ti << 8);
		}
	}

	status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);

	return status;
}

/**
 * i40e_aq_rem_cloud_filters
 * @hw: pointer to the hardware structure
 * @seid: VSI seid to remove cloud filters from
 * @filters: Buffer which contains the filters to be removed
 * @filter_count: number of filters contained in the buffer
 *
 * Remove the cloud filters for a given VSI.  The contents of the
 * i40e_aqc_cloud_filters_element_data are filled in by the caller
 * of the function.
 **/
enum i40e_status_code
i40e_aq_rem_cloud_filters(struct i40e_hw *hw, u16 seid,
			  struct i40e_aqc_cloud_filters_element_data *filters,
			  u8 filter_count)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_add_remove_cloud_filters *cmd =
		(struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
	enum i40e_status_code status;
	u16 buff_len;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_remove_cloud_filters);

	buff_len = filter_count * sizeof(*filters);
	desc.datalen = cpu_to_le16(buff_len);
	desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
	cmd->num_filters = filter_count;
	cmd->seid = cpu_to_le16(seid);

	status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);

	return status;
}
/**
 * i40e_aq_rem_cloud_filters_bb
 * @hw: pointer to the hardware structure
 * @seid: VSI seid to remove cloud filters from
 * @filters: Buffer which contains the filters in big buffer to be removed
 * @filter_count: number of filters contained in the buffer
 *
 * Remove the big buffer cloud filters for a given VSI.  The contents of the
 * i40e_aqc_cloud_filters_element_bb are filled in by the caller of the
 * function.
 **/
enum i40e_status_code
i40e_aq_rem_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
			     struct i40e_aqc_cloud_filters_element_bb *filters,
			     u8 filter_count)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_add_remove_cloud_filters *cmd =
		(struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
	i40e_status status;
	u16 buff_len;
	int i;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_remove_cloud_filters);

	buff_len = filter_count * sizeof(*filters);
	desc.datalen = cpu_to_le16(buff_len);
	desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
	cmd->num_filters = filter_count;
	cmd->seid = cpu_to_le16(seid);
	cmd->big_buffer_flag = I40E_AQC_ADD_CLOUD_CMD_BB;

	for (i = 0; i < filter_count; i++) {
		u16 tnl_type;
		u32 ti;

		tnl_type = (le16_to_cpu(filters[i].element.flags) &
			    I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >>
			   I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT;

		/* Due to hardware eccentricities, the VNI for Geneve is shifted
		 * one more byte further than normally used for Tenant ID in
		 * other tunnel types.
		 */
		if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) {
			ti = le32_to_cpu(filters[i].element.tenant_id);
			filters[i].element.tenant_id = cpu_to_le32(ti << 8);
		}
	}

	status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);

	return status;
}