// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#include "i40e.h"
#include "i40e_type.h"
#include "i40e_adminq.h"
#include "i40e_prototype.h"
#include <linux/avf/virtchnl.h>

/**
 * i40e_set_mac_type - Sets MAC type
 * @hw: pointer to the HW structure
 *
 * This function sets the mac type of the adapter based on the
 * vendor ID and device ID stored in the hw structure.
 **/
i40e_status i40e_set_mac_type(struct i40e_hw *hw)
{
	i40e_status status = 0;

	if (hw->vendor_id == PCI_VENDOR_ID_INTEL) {
		switch (hw->device_id) {
		case I40E_DEV_ID_SFP_XL710:
		case I40E_DEV_ID_QEMU:
		case I40E_DEV_ID_KX_B:
		case I40E_DEV_ID_KX_C:
		case I40E_DEV_ID_QSFP_A:
		case I40E_DEV_ID_QSFP_B:
		case I40E_DEV_ID_QSFP_C:
		case I40E_DEV_ID_10G_BASE_T:
		case I40E_DEV_ID_10G_BASE_T4:
		case I40E_DEV_ID_10G_BASE_T_BC:
		case I40E_DEV_ID_10G_B:
		case I40E_DEV_ID_10G_SFP:
		case I40E_DEV_ID_20G_KR2:
		case I40E_DEV_ID_20G_KR2_A:
		case I40E_DEV_ID_25G_B:
		case I40E_DEV_ID_25G_SFP28:
		case I40E_DEV_ID_X710_N3000:
		case I40E_DEV_ID_XXV710_N3000:
			hw->mac.type = I40E_MAC_XL710;
			break;
		case I40E_DEV_ID_KX_X722:
		case I40E_DEV_ID_QSFP_X722:
		case I40E_DEV_ID_SFP_X722:
		case I40E_DEV_ID_1G_BASE_T_X722:
		case I40E_DEV_ID_10G_BASE_T_X722:
		case I40E_DEV_ID_SFP_I_X722:
			hw->mac.type = I40E_MAC_X722;
			break;
		default:
			hw->mac.type = I40E_MAC_GENERIC;
			break;
		}
	} else {
		status = I40E_ERR_DEVICE_NOT_SUPPORTED;
	}

	hw_dbg(hw, "i40e_set_mac_type found mac: %d, returns: %d\n",
	       hw->mac.type, status);
	return status;
}

/**
 * i40e_aq_str - convert AQ err code to a string
 * @hw: pointer to the HW structure
 * @aq_err: the AQ error code to convert
 **/
const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err)
{
	switch (aq_err) {
	case I40E_AQ_RC_OK:
		return "OK";
	case I40E_AQ_RC_EPERM:
		return "I40E_AQ_RC_EPERM";
	case I40E_AQ_RC_ENOENT:
		return "I40E_AQ_RC_ENOENT";
	case I40E_AQ_RC_ESRCH:
		return "I40E_AQ_RC_ESRCH";
	case I40E_AQ_RC_EINTR:
		return "I40E_AQ_RC_EINTR";
	case I40E_AQ_RC_EIO:
		return "I40E_AQ_RC_EIO";
	case I40E_AQ_RC_ENXIO:
		return "I40E_AQ_RC_ENXIO";
	case I40E_AQ_RC_E2BIG:
		return "I40E_AQ_RC_E2BIG";
	case I40E_AQ_RC_EAGAIN:
		return "I40E_AQ_RC_EAGAIN";
	case I40E_AQ_RC_ENOMEM:
		return "I40E_AQ_RC_ENOMEM";
	case I40E_AQ_RC_EACCES:
		return "I40E_AQ_RC_EACCES";
	case I40E_AQ_RC_EFAULT:
		return "I40E_AQ_RC_EFAULT";
	case I40E_AQ_RC_EBUSY:
		return "I40E_AQ_RC_EBUSY";
	case I40E_AQ_RC_EEXIST:
		return "I40E_AQ_RC_EEXIST";
	case I40E_AQ_RC_EINVAL:
		return "I40E_AQ_RC_EINVAL";
	case I40E_AQ_RC_ENOTTY:
		return "I40E_AQ_RC_ENOTTY";
	case I40E_AQ_RC_ENOSPC:
		return "I40E_AQ_RC_ENOSPC";
	case I40E_AQ_RC_ENOSYS:
		return "I40E_AQ_RC_ENOSYS";
	case I40E_AQ_RC_ERANGE:
		return "I40E_AQ_RC_ERANGE";
	case I40E_AQ_RC_EFLUSHED:
		return "I40E_AQ_RC_EFLUSHED";
	case I40E_AQ_RC_BAD_ADDR:
		return "I40E_AQ_RC_BAD_ADDR";
	case I40E_AQ_RC_EMODE:
		return "I40E_AQ_RC_EMODE";
	case I40E_AQ_RC_EFBIG:
		return "I40E_AQ_RC_EFBIG";
	}

	snprintf(hw->err_str, sizeof(hw->err_str), "%d", aq_err);
	return hw->err_str;
}

/**
 * i40e_stat_str - convert status err code to a string
 * @hw: pointer to the HW structure
 * @stat_err: the status error code to convert
 **/
const char *i40e_stat_str(struct i40e_hw *hw, i40e_status stat_err)
{
	switch (stat_err) {
	case 0:
		return "OK";
	case I40E_ERR_NVM:
		return "I40E_ERR_NVM";
	case I40E_ERR_NVM_CHECKSUM:
		return "I40E_ERR_NVM_CHECKSUM";
	case I40E_ERR_PHY:
		return "I40E_ERR_PHY";
	case I40E_ERR_CONFIG:
		return "I40E_ERR_CONFIG";
	case I40E_ERR_PARAM:
		return "I40E_ERR_PARAM";
	case I40E_ERR_MAC_TYPE:
		return "I40E_ERR_MAC_TYPE";
	case I40E_ERR_UNKNOWN_PHY:
		return "I40E_ERR_UNKNOWN_PHY";
	case I40E_ERR_LINK_SETUP:
		return "I40E_ERR_LINK_SETUP";
	case I40E_ERR_ADAPTER_STOPPED:
		return "I40E_ERR_ADAPTER_STOPPED";
	case I40E_ERR_INVALID_MAC_ADDR:
		return "I40E_ERR_INVALID_MAC_ADDR";
	case I40E_ERR_DEVICE_NOT_SUPPORTED:
		return "I40E_ERR_DEVICE_NOT_SUPPORTED";
	case I40E_ERR_MASTER_REQUESTS_PENDING:
		return "I40E_ERR_MASTER_REQUESTS_PENDING";
	case I40E_ERR_INVALID_LINK_SETTINGS:
		return "I40E_ERR_INVALID_LINK_SETTINGS";
	case I40E_ERR_AUTONEG_NOT_COMPLETE:
		return "I40E_ERR_AUTONEG_NOT_COMPLETE";
	case I40E_ERR_RESET_FAILED:
		return "I40E_ERR_RESET_FAILED";
	case I40E_ERR_SWFW_SYNC:
		return "I40E_ERR_SWFW_SYNC";
	case I40E_ERR_NO_AVAILABLE_VSI:
		return "I40E_ERR_NO_AVAILABLE_VSI";
	case I40E_ERR_NO_MEMORY:
		return "I40E_ERR_NO_MEMORY";
	case I40E_ERR_BAD_PTR:
		return "I40E_ERR_BAD_PTR";
	case I40E_ERR_RING_FULL:
		return "I40E_ERR_RING_FULL";
	case I40E_ERR_INVALID_PD_ID:
		return "I40E_ERR_INVALID_PD_ID";
	case I40E_ERR_INVALID_QP_ID:
		return "I40E_ERR_INVALID_QP_ID";
	case I40E_ERR_INVALID_CQ_ID:
		return "I40E_ERR_INVALID_CQ_ID";
	case I40E_ERR_INVALID_CEQ_ID:
		return "I40E_ERR_INVALID_CEQ_ID";
	case I40E_ERR_INVALID_AEQ_ID:
		return "I40E_ERR_INVALID_AEQ_ID";
	case I40E_ERR_INVALID_SIZE:
		return "I40E_ERR_INVALID_SIZE";
	case I40E_ERR_INVALID_ARP_INDEX:
		return "I40E_ERR_INVALID_ARP_INDEX";
	case I40E_ERR_INVALID_FPM_FUNC_ID:
		return "I40E_ERR_INVALID_FPM_FUNC_ID";
	case I40E_ERR_QP_INVALID_MSG_SIZE:
		return "I40E_ERR_QP_INVALID_MSG_SIZE";
	case I40E_ERR_QP_TOOMANY_WRS_POSTED:
		return "I40E_ERR_QP_TOOMANY_WRS_POSTED";
	case I40E_ERR_INVALID_FRAG_COUNT:
		return "I40E_ERR_INVALID_FRAG_COUNT";
	case I40E_ERR_QUEUE_EMPTY:
		return "I40E_ERR_QUEUE_EMPTY";
	case I40E_ERR_INVALID_ALIGNMENT:
		return "I40E_ERR_INVALID_ALIGNMENT";
	case I40E_ERR_FLUSHED_QUEUE:
		return "I40E_ERR_FLUSHED_QUEUE";
	case I40E_ERR_INVALID_PUSH_PAGE_INDEX:
		return "I40E_ERR_INVALID_PUSH_PAGE_INDEX";
	case I40E_ERR_INVALID_IMM_DATA_SIZE:
		return "I40E_ERR_INVALID_IMM_DATA_SIZE";
	case I40E_ERR_TIMEOUT:
		return "I40E_ERR_TIMEOUT";
	case I40E_ERR_OPCODE_MISMATCH:
		return "I40E_ERR_OPCODE_MISMATCH";
	case I40E_ERR_CQP_COMPL_ERROR:
		return "I40E_ERR_CQP_COMPL_ERROR";
	case I40E_ERR_INVALID_VF_ID:
		return "I40E_ERR_INVALID_VF_ID";
	case I40E_ERR_INVALID_HMCFN_ID:
		return "I40E_ERR_INVALID_HMCFN_ID";
	case I40E_ERR_BACKING_PAGE_ERROR:
		return "I40E_ERR_BACKING_PAGE_ERROR";
	case I40E_ERR_NO_PBLCHUNKS_AVAILABLE:
		return "I40E_ERR_NO_PBLCHUNKS_AVAILABLE";
	case I40E_ERR_INVALID_PBLE_INDEX:
		return "I40E_ERR_INVALID_PBLE_INDEX";
	case I40E_ERR_INVALID_SD_INDEX:
		return "I40E_ERR_INVALID_SD_INDEX";
	case I40E_ERR_INVALID_PAGE_DESC_INDEX:
		return "I40E_ERR_INVALID_PAGE_DESC_INDEX";
	case I40E_ERR_INVALID_SD_TYPE:
		return "I40E_ERR_INVALID_SD_TYPE";
	case I40E_ERR_MEMCPY_FAILED:
		return "I40E_ERR_MEMCPY_FAILED";
	case I40E_ERR_INVALID_HMC_OBJ_INDEX:
		return "I40E_ERR_INVALID_HMC_OBJ_INDEX";
	case I40E_ERR_INVALID_HMC_OBJ_COUNT:
		return "I40E_ERR_INVALID_HMC_OBJ_COUNT";
	case I40E_ERR_INVALID_SRQ_ARM_LIMIT:
		return "I40E_ERR_INVALID_SRQ_ARM_LIMIT";
	case I40E_ERR_SRQ_ENABLED:
		return "I40E_ERR_SRQ_ENABLED";
	case I40E_ERR_ADMIN_QUEUE_ERROR:
		return "I40E_ERR_ADMIN_QUEUE_ERROR";
	case I40E_ERR_ADMIN_QUEUE_TIMEOUT:
		return "I40E_ERR_ADMIN_QUEUE_TIMEOUT";
	case I40E_ERR_BUF_TOO_SHORT:
		return "I40E_ERR_BUF_TOO_SHORT";
	case I40E_ERR_ADMIN_QUEUE_FULL:
		return "I40E_ERR_ADMIN_QUEUE_FULL";
	case I40E_ERR_ADMIN_QUEUE_NO_WORK:
		return "I40E_ERR_ADMIN_QUEUE_NO_WORK";
	case I40E_ERR_BAD_IWARP_CQE:
		return "I40E_ERR_BAD_IWARP_CQE";
	case I40E_ERR_NVM_BLANK_MODE:
		return "I40E_ERR_NVM_BLANK_MODE";
	case I40E_ERR_NOT_IMPLEMENTED:
		return "I40E_ERR_NOT_IMPLEMENTED";
	case I40E_ERR_PE_DOORBELL_NOT_ENABLED:
		return "I40E_ERR_PE_DOORBELL_NOT_ENABLED";
	case I40E_ERR_DIAG_TEST_FAILED:
		return "I40E_ERR_DIAG_TEST_FAILED";
	case I40E_ERR_NOT_READY:
		return "I40E_ERR_NOT_READY";
	case I40E_NOT_SUPPORTED:
		return "I40E_NOT_SUPPORTED";
	case I40E_ERR_FIRMWARE_API_VERSION:
		return "I40E_ERR_FIRMWARE_API_VERSION";
	case I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR:
		return "I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR";
	}

	snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err);
	return hw->err_str;
}
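
/*
 * Example (illustrative only, not part of the driver): callers elsewhere in
 * the i40e driver typically combine the two helpers above when logging an
 * AdminQ failure, along the lines of the sketch below, where "pf" and "ret"
 * are assumed to come from the caller's context:
 *
 *	if (ret)
 *		dev_info(&pf->pdev->dev, "command failed, err %s aq_err %s\n",
 *			 i40e_stat_str(&pf->hw, ret),
 *			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
 */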

/**
 * i40e_debug_aq
 * @hw: pointer to the hw struct
 * @mask: debug mask
 * @desc: pointer to admin queue descriptor
 * @buffer: pointer to command buffer
 * @buf_len: max length of buffer
 *
 * Dumps debug log about adminq command with descriptor contents.
 **/
void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
		   void *buffer, u16 buf_len)
{
	struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc;
	u32 effective_mask = hw->debug_mask & mask;
	char prefix[27];
	u16 len;
	u8 *buf = (u8 *)buffer;

	if (!effective_mask || !desc)
		return;

	len = le16_to_cpu(aq_desc->datalen);

	i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
		   "AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
		   le16_to_cpu(aq_desc->opcode),
		   le16_to_cpu(aq_desc->flags),
		   le16_to_cpu(aq_desc->datalen),
		   le16_to_cpu(aq_desc->retval));
	i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
		   "\tcookie (h,l) 0x%08X 0x%08X\n",
		   le32_to_cpu(aq_desc->cookie_high),
		   le32_to_cpu(aq_desc->cookie_low));
	i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
		   "\tparam (0,1) 0x%08X 0x%08X\n",
		   le32_to_cpu(aq_desc->params.internal.param0),
		   le32_to_cpu(aq_desc->params.internal.param1));
	i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
		   "\taddr (h,l) 0x%08X 0x%08X\n",
		   le32_to_cpu(aq_desc->params.external.addr_high),
		   le32_to_cpu(aq_desc->params.external.addr_low));

	if (buffer && buf_len != 0 && len != 0 &&
	    (effective_mask & I40E_DEBUG_AQ_DESC_BUFFER)) {
		i40e_debug(hw, mask, "AQ CMD Buffer:\n");
		if (buf_len < len)
			len = buf_len;

		snprintf(prefix, sizeof(prefix),
			 "i40e %02x:%02x.%x: \t0x",
			 hw->bus.bus_id,
			 hw->bus.device,
			 hw->bus.func);

		print_hex_dump(KERN_INFO, prefix, DUMP_PREFIX_OFFSET,
			       16, 1, buf, len, false);
	}
}
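
/*
 * Illustrative note (not part of the driver): i40e_debug_aq() only emits
 * output when the bits passed in @mask are also set in hw->debug_mask, so a
 * sketch of requesting descriptor and buffer dumps for one command (with a
 * caller-provided desc/buff) might look like:
 *
 *	hw->debug_mask |= I40E_DEBUG_AQ_DESCRIPTOR | I40E_DEBUG_AQ_DESC_BUFFER;
 *	i40e_debug_aq(hw, I40E_DEBUG_AQ_DESCRIPTOR | I40E_DEBUG_AQ_DESC_BUFFER,
 *		      &desc, buff, buff_size);
 */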

/**
 * i40e_check_asq_alive
 * @hw: pointer to the hw struct
 *
 * Returns true if Queue is enabled else false.
 **/
bool i40e_check_asq_alive(struct i40e_hw *hw)
{
	if (hw->aq.asq.len)
		return !!(rd32(hw, hw->aq.asq.len) &
			  I40E_PF_ATQLEN_ATQENABLE_MASK);
	else
		return false;
}

/**
 * i40e_aq_queue_shutdown
 * @hw: pointer to the hw struct
 * @unloading: is the driver unloading itself
 *
 * Tell the Firmware that we're shutting down the AdminQ and whether
 * or not the driver is unloading as well.
 **/
i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw,
				   bool unloading)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_queue_shutdown *cmd =
		(struct i40e_aqc_queue_shutdown *)&desc.params.raw;
	i40e_status status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_queue_shutdown);

	if (unloading)
		cmd->driver_unloading = cpu_to_le32(I40E_AQ_DRIVER_UNLOADING);
	status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);

	return status;
}

/**
 * i40e_aq_get_set_rss_lut
 * @hw: pointer to the hardware structure
 * @vsi_id: vsi fw index
 * @pf_lut: for PF table set true, for VSI table set false
 * @lut: pointer to the lut buffer provided by the caller
 * @lut_size: size of the lut buffer
 * @set: set true to set the table, false to get the table
 *
 * Internal function to get or set RSS look up table
 **/
static i40e_status i40e_aq_get_set_rss_lut(struct i40e_hw *hw,
					   u16 vsi_id, bool pf_lut,
					   u8 *lut, u16 lut_size,
					   bool set)
{
	i40e_status status;
	struct i40e_aq_desc desc;
	struct i40e_aqc_get_set_rss_lut *cmd_resp =
		(struct i40e_aqc_get_set_rss_lut *)&desc.params.raw;

	if (set)
		i40e_fill_default_direct_cmd_desc(&desc,
						  i40e_aqc_opc_set_rss_lut);
	else
		i40e_fill_default_direct_cmd_desc(&desc,
						  i40e_aqc_opc_get_rss_lut);

	/* Indirect command */
	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);

	cmd_resp->vsi_id =
		cpu_to_le16((u16)((vsi_id <<
				   I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT) &
				  I40E_AQC_SET_RSS_LUT_VSI_ID_MASK));
	cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_LUT_VSI_VALID);

	if (pf_lut)
		cmd_resp->flags |= cpu_to_le16((u16)
				((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF <<
				  I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
				 I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
	else
		cmd_resp->flags |= cpu_to_le16((u16)
				((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI <<
				  I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
				 I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));

	status = i40e_asq_send_command(hw, &desc, lut, lut_size, NULL);

	return status;
}

/**
 * i40e_aq_get_rss_lut
 * @hw: pointer to the hardware structure
 * @vsi_id: vsi fw index
 * @pf_lut: for PF table set true, for VSI table set false
 * @lut: pointer to the lut buffer provided by the caller
 * @lut_size: size of the lut buffer
 *
 * get the RSS lookup table, PF or VSI type
 **/
i40e_status i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 vsi_id,
				bool pf_lut, u8 *lut, u16 lut_size)
{
	return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size,
				       false);
}

/**
 * i40e_aq_set_rss_lut
 * @hw: pointer to the hardware structure
 * @vsi_id: vsi fw index
 * @pf_lut: for PF table set true, for VSI table set false
 * @lut: pointer to the lut buffer provided by the caller
 * @lut_size: size of the lut buffer
 *
 * set the RSS lookup table, PF or VSI type
 **/
i40e_status i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 vsi_id,
				bool pf_lut, u8 *lut, u16 lut_size)
{
	return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, true);
}

/**
 * i40e_aq_get_set_rss_key
 * @hw: pointer to the hw struct
 * @vsi_id: vsi fw index
 * @key: pointer to key info struct
 * @set: set true to set the key, false to get the key
 *
 * Internal function to get or set the RSS key per VSI
 **/
static i40e_status i40e_aq_get_set_rss_key(struct i40e_hw *hw,
				u16 vsi_id,
				struct i40e_aqc_get_set_rss_key_data *key,
				bool set)
{
	i40e_status status;
	struct i40e_aq_desc desc;
	struct i40e_aqc_get_set_rss_key *cmd_resp =
		(struct i40e_aqc_get_set_rss_key *)&desc.params.raw;
	u16 key_size = sizeof(struct i40e_aqc_get_set_rss_key_data);

	if (set)
		i40e_fill_default_direct_cmd_desc(&desc,
						  i40e_aqc_opc_set_rss_key);
	else
		i40e_fill_default_direct_cmd_desc(&desc,
						  i40e_aqc_opc_get_rss_key);

	/* Indirect command */
	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);

	cmd_resp->vsi_id =
		cpu_to_le16((u16)((vsi_id <<
				   I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) &
				  I40E_AQC_SET_RSS_KEY_VSI_ID_MASK));
	cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_KEY_VSI_VALID);

	status = i40e_asq_send_command(hw, &desc, key, key_size, NULL);

	return status;
}

/**
 * i40e_aq_get_rss_key
 * @hw: pointer to the hw struct
 * @vsi_id: vsi fw index
 * @key: pointer to key info struct
 *
 **/
i40e_status i40e_aq_get_rss_key(struct i40e_hw *hw,
				u16 vsi_id,
				struct i40e_aqc_get_set_rss_key_data *key)
{
	return i40e_aq_get_set_rss_key(hw, vsi_id, key, false);
}

/**
 * i40e_aq_set_rss_key
 * @hw: pointer to the hw struct
 * @vsi_id: vsi fw index
 * @key: pointer to key info struct
 *
 * set the RSS key per VSI
 **/
i40e_status i40e_aq_set_rss_key(struct i40e_hw *hw,
				u16 vsi_id,
				struct i40e_aqc_get_set_rss_key_data *key)
{
	return i40e_aq_get_set_rss_key(hw, vsi_id, key, true);
}
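
/*
 * Illustrative sketch (not part of the driver): a caller that wants to
 * program both the RSS key and the lookup table for a VSI could use the
 * wrappers above roughly as follows; "seed", "lut", "lut_size" and "vsi_id"
 * are assumed to be provided and correctly dimensioned by the caller:
 *
 *	struct i40e_aqc_get_set_rss_key_data *key =
 *		(struct i40e_aqc_get_set_rss_key_data *)seed;
 *	i40e_status ret;
 *
 *	ret = i40e_aq_set_rss_key(hw, vsi_id, key);
 *	if (!ret)
 *		ret = i40e_aq_set_rss_lut(hw, vsi_id, false, lut, lut_size);
 */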

/* The i40e_ptype_lookup table is used to convert from the 8-bit ptype in the
 * hardware to a bit-field that can be used by SW to more easily determine the
 * packet type.
 *
 * Macros are used to shorten the table lines and make this table human
 * readable.
 *
 * We store the PTYPE in the top byte of the bit field - this is just so that
 * we can check that the table doesn't have a row missing, as the index into
 * the table should be the PTYPE.
 *
 * Typical work flow:
 *
 * IF NOT i40e_ptype_lookup[ptype].known
 * THEN
 *      Packet is unknown
 * ELSE IF i40e_ptype_lookup[ptype].outer_ip == I40E_RX_PTYPE_OUTER_IP
 *      Use the rest of the fields to look at the tunnels, inner protocols, etc
 * ELSE
 *      Use the enum i40e_rx_l2_ptype to decode the packet type
 * ENDIF
 */

/* macro to make the table lines short */
#define I40E_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\
	{	PTYPE, \
		1, \
		I40E_RX_PTYPE_OUTER_##OUTER_IP, \
		I40E_RX_PTYPE_OUTER_##OUTER_IP_VER, \
		I40E_RX_PTYPE_##OUTER_FRAG, \
		I40E_RX_PTYPE_TUNNEL_##T, \
		I40E_RX_PTYPE_TUNNEL_END_##TE, \
		I40E_RX_PTYPE_##TEF, \
		I40E_RX_PTYPE_INNER_PROT_##I, \
		I40E_RX_PTYPE_PAYLOAD_LAYER_##PL }

#define I40E_PTT_UNUSED_ENTRY(PTYPE) \
		{ PTYPE, 0, 0, 0, 0, 0, 0, 0, 0, 0 }

/* shorter macros makes the table fit but are terse */
#define I40E_RX_PTYPE_NOF		I40E_RX_PTYPE_NOT_FRAG
#define I40E_RX_PTYPE_FRG		I40E_RX_PTYPE_FRAG
#define I40E_RX_PTYPE_INNER_PROT_TS	I40E_RX_PTYPE_INNER_PROT_TIMESYNC

/* Lookup table mapping the HW PTYPE to the bit field for decoding */
struct i40e_rx_ptype_decoded i40e_ptype_lookup[] = {
	/* L2 Packet types */
	I40E_PTT_UNUSED_ENTRY(0),
	I40E_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
	I40E_PTT(2, L2, NONE, NOF, NONE, NONE, NOF, TS, PAY2),
	I40E_PTT(3, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
	I40E_PTT_UNUSED_ENTRY(4),
	I40E_PTT_UNUSED_ENTRY(5),
	I40E_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
	I40E_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
	I40E_PTT_UNUSED_ENTRY(8),
	I40E_PTT_UNUSED_ENTRY(9),
	I40E_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
	I40E_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
	I40E_PTT(12, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(13, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(14, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(15, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(16, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(17, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(18, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(19, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(20, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(21, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),

	/* Non Tunneled IPv4 */
	I40E_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(25),
	I40E_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4),
	I40E_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4),
	I40E_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4),

	/* IPv4 --> IPv4 */
	I40E_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
	I40E_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
	I40E_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(32),
	I40E_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
	I40E_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
	I40E_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),

	/* IPv4 --> IPv6 */
	I40E_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
	I40E_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
	I40E_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(39),
	I40E_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
	I40E_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
	I40E_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),

	/* IPv4 --> GRE/NAT */
	I40E_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),

	/* IPv4 --> GRE/NAT --> IPv4 */
	I40E_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
	I40E_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
	I40E_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(47),
	I40E_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
	I40E_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
	I40E_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),

	/* IPv4 --> GRE/NAT --> IPv6 */
	I40E_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
	I40E_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
	I40E_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(54),
	I40E_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
	I40E_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
	I40E_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),

	/* IPv4 --> GRE/NAT --> MAC */
	I40E_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),

	/* IPv4 --> GRE/NAT --> MAC --> IPv4 */
	I40E_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
	I40E_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
	I40E_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(62),
	I40E_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
	I40E_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
	I40E_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),

	/* IPv4 --> GRE/NAT -> MAC --> IPv6 */
	I40E_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
	I40E_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
	I40E_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(69),
	I40E_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
	I40E_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
	I40E_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),

	/* IPv4 --> GRE/NAT --> MAC/VLAN */
	I40E_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),

	/* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */
	I40E_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
	I40E_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
	I40E_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(77),
	I40E_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
	I40E_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
	I40E_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),

	/* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */
	I40E_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
	I40E_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
	I40E_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(84),
	I40E_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
	I40E_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
	I40E_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),

	/* Non Tunneled IPv6 */
	I40E_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(91),
	I40E_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4),
	I40E_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4),
	I40E_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4),

	/* IPv6 --> IPv4 */
	I40E_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
	I40E_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
	I40E_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(98),
	I40E_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
	I40E_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
	I40E_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),

	/* IPv6 --> IPv6 */
	I40E_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
	I40E_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
	I40E_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(105),
	I40E_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
	I40E_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
	I40E_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),

	/* IPv6 --> GRE/NAT */
	I40E_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),

	/* IPv6 --> GRE/NAT -> IPv4 */
	I40E_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
	I40E_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
	I40E_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(113),
	I40E_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
	I40E_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
	I40E_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),

	/* IPv6 --> GRE/NAT -> IPv6 */
	I40E_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
	I40E_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
	I40E_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(120),
	I40E_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
	I40E_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
	I40E_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),

	/* IPv6 --> GRE/NAT -> MAC */
	I40E_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),

	/* IPv6 --> GRE/NAT -> MAC -> IPv4 */
	I40E_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
	I40E_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
	I40E_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(128),
	I40E_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
	I40E_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
	I40E_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),

	/* IPv6 --> GRE/NAT -> MAC -> IPv6 */
	I40E_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
	I40E_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
	I40E_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(135),
	I40E_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
	I40E_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
	I40E_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),

	/* IPv6 --> GRE/NAT -> MAC/VLAN */
	I40E_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),

	/* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */
	I40E_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
	I40E_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
	I40E_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(143),
	I40E_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
	I40E_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
	I40E_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),

	/* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */
	I40E_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
	I40E_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
	I40E_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(150),
	I40E_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
	I40E_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
	I40E_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),

	/* unused entries */
	I40E_PTT_UNUSED_ENTRY(154),
	I40E_PTT_UNUSED_ENTRY(155),
	I40E_PTT_UNUSED_ENTRY(156),
	I40E_PTT_UNUSED_ENTRY(157),
	I40E_PTT_UNUSED_ENTRY(158),
	I40E_PTT_UNUSED_ENTRY(159),

	I40E_PTT_UNUSED_ENTRY(160),
	I40E_PTT_UNUSED_ENTRY(161),
	I40E_PTT_UNUSED_ENTRY(162),
	I40E_PTT_UNUSED_ENTRY(163),
	I40E_PTT_UNUSED_ENTRY(164),
	I40E_PTT_UNUSED_ENTRY(165),
	I40E_PTT_UNUSED_ENTRY(166),
	I40E_PTT_UNUSED_ENTRY(167),
	I40E_PTT_UNUSED_ENTRY(168),
	I40E_PTT_UNUSED_ENTRY(169),

	I40E_PTT_UNUSED_ENTRY(170),
	I40E_PTT_UNUSED_ENTRY(171),
	I40E_PTT_UNUSED_ENTRY(172),
	I40E_PTT_UNUSED_ENTRY(173),
	I40E_PTT_UNUSED_ENTRY(174),
	I40E_PTT_UNUSED_ENTRY(175),
	I40E_PTT_UNUSED_ENTRY(176),
	I40E_PTT_UNUSED_ENTRY(177),
	I40E_PTT_UNUSED_ENTRY(178),
	I40E_PTT_UNUSED_ENTRY(179),

	I40E_PTT_UNUSED_ENTRY(180),
	I40E_PTT_UNUSED_ENTRY(181),
	I40E_PTT_UNUSED_ENTRY(182),
	I40E_PTT_UNUSED_ENTRY(183),
	I40E_PTT_UNUSED_ENTRY(184),
	I40E_PTT_UNUSED_ENTRY(185),
	I40E_PTT_UNUSED_ENTRY(186),
	I40E_PTT_UNUSED_ENTRY(187),
	I40E_PTT_UNUSED_ENTRY(188),
	I40E_PTT_UNUSED_ENTRY(189),

	I40E_PTT_UNUSED_ENTRY(190),
	I40E_PTT_UNUSED_ENTRY(191),
	I40E_PTT_UNUSED_ENTRY(192),
	I40E_PTT_UNUSED_ENTRY(193),
	I40E_PTT_UNUSED_ENTRY(194),
	I40E_PTT_UNUSED_ENTRY(195),
	I40E_PTT_UNUSED_ENTRY(196),
	I40E_PTT_UNUSED_ENTRY(197),
	I40E_PTT_UNUSED_ENTRY(198),
	I40E_PTT_UNUSED_ENTRY(199),

	I40E_PTT_UNUSED_ENTRY(200),
	I40E_PTT_UNUSED_ENTRY(201),
	I40E_PTT_UNUSED_ENTRY(202),
	I40E_PTT_UNUSED_ENTRY(203),
	I40E_PTT_UNUSED_ENTRY(204),
	I40E_PTT_UNUSED_ENTRY(205),
	I40E_PTT_UNUSED_ENTRY(206),
	I40E_PTT_UNUSED_ENTRY(207),
	I40E_PTT_UNUSED_ENTRY(208),
	I40E_PTT_UNUSED_ENTRY(209),

	I40E_PTT_UNUSED_ENTRY(210),
	I40E_PTT_UNUSED_ENTRY(211),
	I40E_PTT_UNUSED_ENTRY(212),
	I40E_PTT_UNUSED_ENTRY(213),
	I40E_PTT_UNUSED_ENTRY(214),
	I40E_PTT_UNUSED_ENTRY(215),
	I40E_PTT_UNUSED_ENTRY(216),
	I40E_PTT_UNUSED_ENTRY(217),
	I40E_PTT_UNUSED_ENTRY(218),
	I40E_PTT_UNUSED_ENTRY(219),

	I40E_PTT_UNUSED_ENTRY(220),
	I40E_PTT_UNUSED_ENTRY(221),
	I40E_PTT_UNUSED_ENTRY(222),
	I40E_PTT_UNUSED_ENTRY(223),
	I40E_PTT_UNUSED_ENTRY(224),
	I40E_PTT_UNUSED_ENTRY(225),
	I40E_PTT_UNUSED_ENTRY(226),
	I40E_PTT_UNUSED_ENTRY(227),
	I40E_PTT_UNUSED_ENTRY(228),
	I40E_PTT_UNUSED_ENTRY(229),

	I40E_PTT_UNUSED_ENTRY(230),
	I40E_PTT_UNUSED_ENTRY(231),
	I40E_PTT_UNUSED_ENTRY(232),
	I40E_PTT_UNUSED_ENTRY(233),
	I40E_PTT_UNUSED_ENTRY(234),
	I40E_PTT_UNUSED_ENTRY(235),
	I40E_PTT_UNUSED_ENTRY(236),
	I40E_PTT_UNUSED_ENTRY(237),
	I40E_PTT_UNUSED_ENTRY(238),
	I40E_PTT_UNUSED_ENTRY(239),

	I40E_PTT_UNUSED_ENTRY(240),
	I40E_PTT_UNUSED_ENTRY(241),
	I40E_PTT_UNUSED_ENTRY(242),
	I40E_PTT_UNUSED_ENTRY(243),
	I40E_PTT_UNUSED_ENTRY(244),
	I40E_PTT_UNUSED_ENTRY(245),
	I40E_PTT_UNUSED_ENTRY(246),
	I40E_PTT_UNUSED_ENTRY(247),
	I40E_PTT_UNUSED_ENTRY(248),
	I40E_PTT_UNUSED_ENTRY(249),

	I40E_PTT_UNUSED_ENTRY(250),
	I40E_PTT_UNUSED_ENTRY(251),
	I40E_PTT_UNUSED_ENTRY(252),
	I40E_PTT_UNUSED_ENTRY(253),
	I40E_PTT_UNUSED_ENTRY(254),
	I40E_PTT_UNUSED_ENTRY(255)
};
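
/*
 * Illustrative sketch (not part of the driver): the "typical work flow"
 * described above the table translates to code along these lines, where
 * "ptype" is the 8-bit packet type taken from the Rx descriptor:
 *
 *	struct i40e_rx_ptype_decoded decoded = i40e_ptype_lookup[ptype];
 *
 *	if (!decoded.known)
 *		return;		// unrecognized packet type
 *	if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
 *	    decoded.inner_prot == I40E_RX_PTYPE_INNER_PROT_TCP)
 *		;		// e.g. an inner TCP segment, checksum candidate
 */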

/**
 * i40e_init_shared_code - Initialize the shared code
 * @hw: pointer to hardware structure
 *
 * This assigns the MAC type and PHY code and inits the NVM.
 * Does not touch the hardware. This function must be called prior to any
 * other function in the shared code. The i40e_hw structure should be
 * memset to 0 prior to calling this function. The following fields in
 * hw structure should be filled in prior to calling this function:
 * hw_addr, back, device_id, vendor_id, subsystem_device_id,
 * subsystem_vendor_id, and revision_id
 **/
i40e_status i40e_init_shared_code(struct i40e_hw *hw)
{
	i40e_status status = 0;
	u32 port, ari, func_rid;

	i40e_set_mac_type(hw);

	switch (hw->mac.type) {
	case I40E_MAC_XL710:
	case I40E_MAC_X722:
		break;
	default:
		return I40E_ERR_DEVICE_NOT_SUPPORTED;
	}

	hw->phy.get_link_info = true;

	/* Determine port number and PF number*/
	port = (rd32(hw, I40E_PFGEN_PORTNUM) & I40E_PFGEN_PORTNUM_PORT_NUM_MASK)
					   >> I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT;
	hw->port = (u8)port;
	ari = (rd32(hw, I40E_GLPCI_CAPSUP) & I40E_GLPCI_CAPSUP_ARI_EN_MASK) >>
						 I40E_GLPCI_CAPSUP_ARI_EN_SHIFT;
	func_rid = rd32(hw, I40E_PF_FUNC_RID);
	if (ari)
		hw->pf_id = (u8)(func_rid & 0xff);
	else
		hw->pf_id = (u8)(func_rid & 0x7);

	status = i40e_init_nvm(hw);
	return status;
}

/**
 * i40e_aq_mac_address_read - Retrieve the MAC addresses
 * @hw: pointer to the hw struct
 * @flags: a return indicator of what addresses were added to the addr store
 * @addrs: the requestor's mac addr store
 * @cmd_details: pointer to command details structure or NULL
 **/
static i40e_status i40e_aq_mac_address_read(struct i40e_hw *hw,
				u16 *flags,
				struct i40e_aqc_mac_address_read_data *addrs,
				struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_mac_address_read *cmd_data =
		(struct i40e_aqc_mac_address_read *)&desc.params.raw;
	i40e_status status;

	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_mac_address_read);
	desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF);

	status = i40e_asq_send_command(hw, &desc, addrs,
				       sizeof(*addrs), cmd_details);
	*flags = le16_to_cpu(cmd_data->command_flags);

	return status;
}

/**
 * i40e_aq_mac_address_write - Change the MAC addresses
 * @hw: pointer to the hw struct
 * @flags: indicates which MAC to be written
 * @mac_addr: address to write
 * @cmd_details: pointer to command details structure or NULL
 **/
i40e_status i40e_aq_mac_address_write(struct i40e_hw *hw,
				      u16 flags, u8 *mac_addr,
				      struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_mac_address_write *cmd_data =
		(struct i40e_aqc_mac_address_write *)&desc.params.raw;
	i40e_status status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_mac_address_write);
	cmd_data->command_flags = cpu_to_le16(flags);
	cmd_data->mac_sah = cpu_to_le16((u16)mac_addr[0] << 8 | mac_addr[1]);
	cmd_data->mac_sal = cpu_to_le32(((u32)mac_addr[2] << 24) |
					((u32)mac_addr[3] << 16) |
					((u32)mac_addr[4] << 8) |
					mac_addr[5]);

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	return status;
}

/**
 * i40e_get_mac_addr - get MAC address
 * @hw: pointer to the HW structure
 * @mac_addr: pointer to MAC address
 *
 * Reads the adapter's MAC address from register
 **/
i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
{
	struct i40e_aqc_mac_address_read_data addrs;
	i40e_status status;
	u16 flags = 0;

	status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);

	if (flags & I40E_AQC_LAN_ADDR_VALID)
		ether_addr_copy(mac_addr, addrs.pf_lan_mac);

	return status;
}

/**
 * i40e_get_port_mac_addr - get Port MAC address
 * @hw: pointer to the HW structure
 * @mac_addr: pointer to Port MAC address
 *
 * Reads the adapter's Port MAC address
 **/
i40e_status i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
{
	struct i40e_aqc_mac_address_read_data addrs;
	i40e_status status;
	u16 flags = 0;

	status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);
	if (status)
		return status;

	if (flags & I40E_AQC_PORT_ADDR_VALID)
		ether_addr_copy(mac_addr, addrs.port_mac);
	else
		status = I40E_ERR_INVALID_MAC_ADDR;

	return status;
}

/**
 * i40e_pre_tx_queue_cfg - pre tx queue configure
 * @hw: pointer to the HW structure
 * @queue: target PF queue index
 * @enable: state change request
 *
 * Handles hw requirement to indicate intention to enable
 * or disable target queue.
 **/
void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable)
{
	u32 abs_queue_idx = hw->func_caps.base_queue + queue;
	u32 reg_block = 0;
	u32 reg_val;

	if (abs_queue_idx >= 128) {
		reg_block = abs_queue_idx / 128;
		abs_queue_idx %= 128;
	}

	reg_val = rd32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block));
	reg_val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;
	reg_val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);

	if (enable)
		reg_val |= I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK;
	else
		reg_val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK;

	wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), reg_val);
}

/**
 * i40e_read_pba_string - Reads part number string from EEPROM
 * @hw: pointer to hardware structure
 * @pba_num: stores the part number string from the EEPROM
 * @pba_num_size: part number string buffer length
 *
 * Reads the part number string from the EEPROM.
 **/
i40e_status i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num,
				 u32 pba_num_size)
{
	i40e_status status = 0;
	u16 pba_word = 0;
	u16 pba_size = 0;
	u16 pba_ptr = 0;
	u16 i = 0;

	status = i40e_read_nvm_word(hw, I40E_SR_PBA_FLAGS, &pba_word);
	if (status || (pba_word != 0xFAFA)) {
		hw_dbg(hw, "Failed to read PBA flags or flag is invalid.\n");
		return status;
	}

	status = i40e_read_nvm_word(hw, I40E_SR_PBA_BLOCK_PTR, &pba_ptr);
	if (status) {
		hw_dbg(hw, "Failed to read PBA Block pointer.\n");
		return status;
	}

	status = i40e_read_nvm_word(hw, pba_ptr, &pba_size);
	if (status) {
		hw_dbg(hw, "Failed to read PBA Block size.\n");
		return status;
	}

	/* Subtract one to get PBA word count (PBA Size word is included in
	 * total size)
	 */
	pba_size--;
	if (pba_num_size < (((u32)pba_size * 2) + 1)) {
		hw_dbg(hw, "Buffer too small for PBA data.\n");
		return I40E_ERR_PARAM;
	}

	for (i = 0; i < pba_size; i++) {
		status = i40e_read_nvm_word(hw, (pba_ptr + 1) + i, &pba_word);
		if (status) {
			hw_dbg(hw, "Failed to read PBA Block word %d.\n", i);
			return status;
		}

		pba_num[(i * 2)] = (pba_word >> 8) & 0xFF;
		pba_num[(i * 2) + 1] = pba_word & 0xFF;
	}
	pba_num[(pba_size * 2)] = '\0';

	return status;
}
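
/*
 * Illustrative usage sketch (not part of the driver): per the size check
 * above, the caller's buffer must hold two bytes per PBA data word plus a
 * terminating '\0'; a generously sized fixed buffer is the simplest way to
 * satisfy that:
 *
 *	u8 pba[64];	// arbitrary size chosen for illustration
 *
 *	if (!i40e_read_pba_string(hw, pba, sizeof(pba)))
 *		pr_info("PBA: %s\n", pba);
 */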

/**
 * i40e_get_media_type - Gets media type
 * @hw: pointer to the hardware structure
 **/
static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
{
	enum i40e_media_type media;

	switch (hw->phy.link_info.phy_type) {
	case I40E_PHY_TYPE_10GBASE_SR:
	case I40E_PHY_TYPE_10GBASE_LR:
	case I40E_PHY_TYPE_1000BASE_SX:
	case I40E_PHY_TYPE_1000BASE_LX:
	case I40E_PHY_TYPE_40GBASE_SR4:
	case I40E_PHY_TYPE_40GBASE_LR4:
	case I40E_PHY_TYPE_25GBASE_LR:
	case I40E_PHY_TYPE_25GBASE_SR:
		media = I40E_MEDIA_TYPE_FIBER;
		break;
	case I40E_PHY_TYPE_100BASE_TX:
	case I40E_PHY_TYPE_1000BASE_T:
	case I40E_PHY_TYPE_2_5GBASE_T:
	case I40E_PHY_TYPE_5GBASE_T:
	case I40E_PHY_TYPE_10GBASE_T:
		media = I40E_MEDIA_TYPE_BASET;
		break;
	case I40E_PHY_TYPE_10GBASE_CR1_CU:
	case I40E_PHY_TYPE_40GBASE_CR4_CU:
	case I40E_PHY_TYPE_10GBASE_CR1:
	case I40E_PHY_TYPE_40GBASE_CR4:
	case I40E_PHY_TYPE_10GBASE_SFPP_CU:
	case I40E_PHY_TYPE_40GBASE_AOC:
	case I40E_PHY_TYPE_10GBASE_AOC:
	case I40E_PHY_TYPE_25GBASE_CR:
	case I40E_PHY_TYPE_25GBASE_AOC:
	case I40E_PHY_TYPE_25GBASE_ACC:
		media = I40E_MEDIA_TYPE_DA;
		break;
	case I40E_PHY_TYPE_1000BASE_KX:
	case I40E_PHY_TYPE_10GBASE_KX4:
	case I40E_PHY_TYPE_10GBASE_KR:
	case I40E_PHY_TYPE_40GBASE_KR4:
	case I40E_PHY_TYPE_20GBASE_KR2:
	case I40E_PHY_TYPE_25GBASE_KR:
		media = I40E_MEDIA_TYPE_BACKPLANE;
		break;
	case I40E_PHY_TYPE_SGMII:
	case I40E_PHY_TYPE_XAUI:
	case I40E_PHY_TYPE_XFI:
	case I40E_PHY_TYPE_XLAUI:
	case I40E_PHY_TYPE_XLPPI:
	default:
		media = I40E_MEDIA_TYPE_UNKNOWN;
		break;
	}

	return media;
}

/**
 * i40e_poll_globr - Poll for Global Reset completion
 * @hw: pointer to the hardware structure
 * @retry_limit: how many times to retry before failure
 **/
static i40e_status i40e_poll_globr(struct i40e_hw *hw,
				   u32 retry_limit)
{
	u32 cnt, reg = 0;

	for (cnt = 0; cnt < retry_limit; cnt++) {
		reg = rd32(hw, I40E_GLGEN_RSTAT);
		if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
			return 0;
		msleep(100);
	}

	hw_dbg(hw, "Global reset failed.\n");
	hw_dbg(hw, "I40E_GLGEN_RSTAT = 0x%x\n", reg);

	return I40E_ERR_RESET_FAILED;
}

#define I40E_PF_RESET_WAIT_COUNT_A0	200
#define I40E_PF_RESET_WAIT_COUNT	200
/**
 * i40e_pf_reset - Reset the PF
 * @hw: pointer to the hardware structure
 *
 * Assuming someone else has triggered a global reset,
 * assure the global reset is complete and then reset the PF
 **/
i40e_status i40e_pf_reset(struct i40e_hw *hw)
{
	u32 cnt = 0;
	u32 cnt1 = 0;
	u32 reg = 0;
	u32 grst_del;

	/* Poll for Global Reset steady state in case of recent GRST.
	 * The grst delay value is in 100ms units, and we'll wait a
	 * couple counts longer to be sure we don't just miss the end.
	 */
	grst_del = (rd32(hw, I40E_GLGEN_RSTCTL) &
		    I40E_GLGEN_RSTCTL_GRSTDEL_MASK) >>
		    I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;

	/* It can take up to 15 secs for GRST steady state.
	 * Bump it to 16 secs max to be safe.
	 */
	grst_del = grst_del * 20;

	for (cnt = 0; cnt < grst_del; cnt++) {
		reg = rd32(hw, I40E_GLGEN_RSTAT);
		if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
			break;
		msleep(100);
	}
	if (reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
		hw_dbg(hw, "Global reset polling failed to complete.\n");
		return I40E_ERR_RESET_FAILED;
	}

	/* Now Wait for the FW to be ready */
	for (cnt1 = 0; cnt1 < I40E_PF_RESET_WAIT_COUNT; cnt1++) {
		reg = rd32(hw, I40E_GLNVM_ULD);
		reg &= (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
			I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK);
		if (reg == (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
			    I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK)) {
			hw_dbg(hw, "Core and Global modules ready %d\n", cnt1);
			break;
		}
		usleep_range(10000, 20000);
	}
	if (!(reg & (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
		     I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))) {
		hw_dbg(hw, "wait for FW Reset complete timed out\n");
		hw_dbg(hw, "I40E_GLNVM_ULD = 0x%x\n", reg);
		return I40E_ERR_RESET_FAILED;
	}

	/* If there was a Global Reset in progress when we got here,
	 * we don't need to do the PF Reset
	 */
	if (!cnt) {
		u32 reg2 = 0;

		if (hw->revision_id == 0)
			cnt = I40E_PF_RESET_WAIT_COUNT_A0;
		else
			cnt = I40E_PF_RESET_WAIT_COUNT;
		reg = rd32(hw, I40E_PFGEN_CTRL);
		wr32(hw, I40E_PFGEN_CTRL,
		     (reg | I40E_PFGEN_CTRL_PFSWR_MASK));
		for (; cnt; cnt--) {
			reg = rd32(hw, I40E_PFGEN_CTRL);
			if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK))
				break;
			reg2 = rd32(hw, I40E_GLGEN_RSTAT);
			if (reg2 & I40E_GLGEN_RSTAT_DEVSTATE_MASK)
				break;
			usleep_range(1000, 2000);
		}
		if (reg2 & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
			if (i40e_poll_globr(hw, grst_del))
				return I40E_ERR_RESET_FAILED;
		} else if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) {
			hw_dbg(hw, "PF reset polling failed to complete.\n");
			return I40E_ERR_RESET_FAILED;
		}
	}

	i40e_clear_pxe_mode(hw);

	return 0;
}

/**
 * i40e_clear_hw - clear out any left over hw state
 * @hw: pointer to the hw struct
 *
 * Clear queues and interrupts, typically called at init time,
 * but after the capabilities have been found so we know how many
 * queues and msix vectors have been allocated.
 **/
void i40e_clear_hw(struct i40e_hw *hw)
{
	u32 num_queues, base_queue;
	u32 num_pf_int;
	u32 num_vf_int;
	u32 num_vfs;
	u32 i, j;
	u32 val;
	u32 eol = 0x7ff;

	/* get number of interrupts, queues, and VFs */
	val = rd32(hw, I40E_GLPCI_CNF2);
	num_pf_int = (val & I40E_GLPCI_CNF2_MSI_X_PF_N_MASK) >>
		     I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT;
	num_vf_int = (val & I40E_GLPCI_CNF2_MSI_X_VF_N_MASK) >>
		     I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT;

	val = rd32(hw, I40E_PFLAN_QALLOC);
	base_queue = (val & I40E_PFLAN_QALLOC_FIRSTQ_MASK) >>
		     I40E_PFLAN_QALLOC_FIRSTQ_SHIFT;
	j = (val & I40E_PFLAN_QALLOC_LASTQ_MASK) >>
	    I40E_PFLAN_QALLOC_LASTQ_SHIFT;
	if (val & I40E_PFLAN_QALLOC_VALID_MASK)
		num_queues = (j - base_queue) + 1;
	else
		num_queues = 0;

	val = rd32(hw, I40E_PF_VT_PFALLOC);
	i = (val & I40E_PF_VT_PFALLOC_FIRSTVF_MASK) >>
	    I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT;
	j = (val & I40E_PF_VT_PFALLOC_LASTVF_MASK) >>
	    I40E_PF_VT_PFALLOC_LASTVF_SHIFT;
	if (val & I40E_PF_VT_PFALLOC_VALID_MASK)
		num_vfs = (j - i) + 1;
	else
		num_vfs = 0;

	/* stop all the interrupts */
	wr32(hw, I40E_PFINT_ICR0_ENA, 0);
	val = 0x3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
	for (i = 0; i < num_pf_int - 2; i++)
		wr32(hw, I40E_PFINT_DYN_CTLN(i), val);

	/* Set the FIRSTQ_INDX field to 0x7FF in PFINT_LNKLSTx */
	val = eol << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
	wr32(hw, I40E_PFINT_LNKLST0, val);
	for (i = 0; i < num_pf_int - 2; i++)
		wr32(hw, I40E_PFINT_LNKLSTN(i), val);
	val = eol << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT;
	for (i = 0; i < num_vfs; i++)
		wr32(hw, I40E_VPINT_LNKLST0(i), val);
	for (i = 0; i < num_vf_int - 2; i++)
		wr32(hw, I40E_VPINT_LNKLSTN(i), val);

	/* warn the HW of the coming Tx disables */
	for (i = 0; i < num_queues; i++) {
		u32 abs_queue_idx = base_queue + i;
		u32 reg_block = 0;

		if (abs_queue_idx >= 128) {
			reg_block = abs_queue_idx / 128;
			abs_queue_idx %= 128;
		}

		val = rd32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block));
		val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;
		val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
		val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK;

		wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), val);
	}
	udelay(400);

	/* stop all the queues */
	for (i = 0; i < num_queues; i++) {
		wr32(hw, I40E_QINT_TQCTL(i), 0);
		wr32(hw, I40E_QTX_ENA(i), 0);
		wr32(hw, I40E_QINT_RQCTL(i), 0);
		wr32(hw, I40E_QRX_ENA(i), 0);
	}

	/* short wait for all queue disables to settle */
	udelay(50);
}

/**
 * i40e_clear_pxe_mode - clear pxe operations mode
 * @hw: pointer to the hw struct
 *
 * Make sure all PXE mode settings are cleared, including things
 * like descriptor fetch/write-back mode.
 **/
void i40e_clear_pxe_mode(struct i40e_hw *hw)
{
	u32 reg;

	if (i40e_check_asq_alive(hw))
		i40e_aq_clear_pxe_mode(hw, NULL);

	/* Clear single descriptor fetch/write-back mode */
	reg = rd32(hw, I40E_GLLAN_RCTL_0);

	if (hw->revision_id == 0) {
		/* As a work around clear PXE_MODE instead of setting it */
		wr32(hw, I40E_GLLAN_RCTL_0, (reg & (~I40E_GLLAN_RCTL_0_PXE_MODE_MASK)));
	} else {
		wr32(hw, I40E_GLLAN_RCTL_0, (reg | I40E_GLLAN_RCTL_0_PXE_MODE_MASK));
	}
}

/**
 * i40e_led_is_mine - helper to find matching led
 * @hw: pointer to the hw struct
 * @idx: index into GPIO registers
 *
 * returns: 0 if no match, otherwise the value of the GPIO_CTL register
 */
static u32 i40e_led_is_mine(struct i40e_hw *hw, int idx)
{
	u32 gpio_val = 0;
	u32 port;

	if (!I40E_IS_X710TL_DEVICE(hw->device_id) &&
	    !hw->func_caps.led[idx])
		return 0;
	gpio_val = rd32(hw, I40E_GLGEN_GPIO_CTL(idx));
	port = (gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK) >>
		I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT;

	/* if PRT_NUM_NA is 1 then this LED is not port specific, OR
	 * if it is not our port then ignore
	 */
	if ((gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_MASK) ||
	    (port != hw->port))
		return 0;

	return gpio_val;
}

#define I40E_COMBINED_ACTIVITY 0xA
#define I40E_FILTER_ACTIVITY 0xE
#define I40E_LINK_ACTIVITY 0xC
#define I40E_MAC_ACTIVITY 0xD
#define I40E_FW_LED BIT(4)
#define I40E_LED_MODE_VALID (I40E_GLGEN_GPIO_CTL_LED_MODE_MASK >> \
			     I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT)

#define I40E_LED0 22

#define I40E_PIN_FUNC_SDP 0x0
#define I40E_PIN_FUNC_LED 0x1

/**
 * i40e_led_get - return current on/off mode
 * @hw: pointer to the hw struct
 *
 * The value returned is the 'mode' field as defined in the
 * GPIO register definitions: 0x0 = off, 0xf = on, and other
 * values are variations of possible behaviors relating to
 * blink, link, and wire.
 **/
u32 i40e_led_get(struct i40e_hw *hw)
{
	u32 mode = 0;
	int i;

	/* as per the documentation GPIO 22-29 are the LED
	 * GPIO pins named LED0..LED7
	 */
	for (i = I40E_LED0; i <= I40E_GLGEN_GPIO_CTL_MAX_INDEX; i++) {
		u32 gpio_val = i40e_led_is_mine(hw, i);

		if (!gpio_val)
			continue;

		mode = (gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK) >>
			I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT;
		break;
	}

	return mode;
}

/**
 * i40e_led_set - set new on/off mode
 * @hw: pointer to the hw struct
 * @mode: 0=off, 0xf=on (else see manual for mode details)
 * @blink: true if the LED should blink when on, false if steady
 *
 * If this function is used to turn on the blink, it should also
 * be used to disable the blink when restoring the original state.
 **/
void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
{
	int i;

	if (mode & ~I40E_LED_MODE_VALID) {
		hw_dbg(hw, "invalid mode passed in %X\n", mode);
		return;
	}

	/* as per the documentation GPIO 22-29 are the LED
	 * GPIO pins named LED0..LED7
	 */
	for (i = I40E_LED0; i <= I40E_GLGEN_GPIO_CTL_MAX_INDEX; i++) {
		u32 gpio_val = i40e_led_is_mine(hw, i);

		if (!gpio_val)
			continue;

		if (I40E_IS_X710TL_DEVICE(hw->device_id)) {
			u32 pin_func = 0;

			if (mode & I40E_FW_LED)
				pin_func = I40E_PIN_FUNC_SDP;
			else
				pin_func = I40E_PIN_FUNC_LED;

			gpio_val &= ~I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK;
			gpio_val |= ((pin_func <<
				     I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT) &
				     I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK);
		}
		gpio_val &= ~I40E_GLGEN_GPIO_CTL_LED_MODE_MASK;
		/* this & is a bit of paranoia, but serves as a range check */
		gpio_val |= ((mode << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) &
			     I40E_GLGEN_GPIO_CTL_LED_MODE_MASK);

		if (blink)
			gpio_val |= BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
		else
			gpio_val &= ~BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);

		wr32(hw, I40E_GLGEN_GPIO_CTL(i), gpio_val);
		break;
	}
}
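
/*
 * Illustrative sketch (not part of the driver): an identify-LED sequence
 * built on the two helpers above saves the current mode, turns the LED on
 * with blinking, and later restores the saved value with blinking disabled:
 *
 *	u32 orig_mode = i40e_led_get(hw);
 *
 *	i40e_led_set(hw, 0xf, true);		// on, blinking
 *	// ... identification period ...
 *	i40e_led_set(hw, orig_mode, false);	// restore original state
 */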

/* Admin command wrappers */

/**
 * i40e_aq_get_phy_capabilities
 * @hw: pointer to the hw struct
 * @abilities: structure for PHY capabilities to be filled
 * @qualified_modules: report Qualified Modules
 * @report_init: report init capabilities (active are default)
 * @cmd_details: pointer to command details structure or NULL
 *
 * Returns the various PHY abilities supported on the Port.
 **/
i40e_status i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
			bool qualified_modules, bool report_init,
			struct i40e_aq_get_phy_abilities_resp *abilities,
			struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	i40e_status status;
	u16 abilities_size = sizeof(struct i40e_aq_get_phy_abilities_resp);
	u16 max_delay = I40E_MAX_PHY_TIMEOUT, total_delay = 0;

	if (!abilities)
		return I40E_ERR_PARAM;

	do {
		i40e_fill_default_direct_cmd_desc(&desc,
					       i40e_aqc_opc_get_phy_abilities);

		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
		if (abilities_size > I40E_AQ_LARGE_BUF)
			desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);

		if (qualified_modules)
			desc.params.external.param0 |=
			cpu_to_le32(I40E_AQ_PHY_REPORT_QUALIFIED_MODULES);

		if (report_init)
			desc.params.external.param0 |=
			cpu_to_le32(I40E_AQ_PHY_REPORT_INITIAL_VALUES);

		status = i40e_asq_send_command(hw, &desc, abilities,
					       abilities_size, cmd_details);

		switch (hw->aq.asq_last_status) {
		case I40E_AQ_RC_EIO:
			status = I40E_ERR_UNKNOWN_PHY;
			break;
		case I40E_AQ_RC_EAGAIN:
			usleep_range(1000, 2000);
			total_delay++;
			status = I40E_ERR_TIMEOUT;
			break;
		/* also covers I40E_AQ_RC_OK */
		default:
			break;
		}

	} while ((hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN) &&
		 (total_delay < max_delay));

	if (status)
		return status;

	if (report_init) {
		if (hw->mac.type == I40E_MAC_XL710 &&
		    hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
		    hw->aq.api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710) {
			status = i40e_aq_get_link_info(hw, true, NULL, NULL);
		} else {
			hw->phy.phy_types = le32_to_cpu(abilities->phy_type);
			hw->phy.phy_types |=
					((u64)abilities->phy_type_ext << 32);
		}
	}

	return status;
}

/**
 * i40e_aq_set_phy_config
 * @hw: pointer to the hw struct
 * @config: structure with PHY configuration to be set
 * @cmd_details: pointer to command details structure or NULL
 *
 * Set the various PHY configuration parameters
 * supported on the Port. One or more of the Set PHY config parameters may be
 * ignored in an MFP mode as the PF may not have the privilege to set some
 * of the PHY Config parameters. This status will be indicated by the
 * command response.
 **/
enum i40e_status_code i40e_aq_set_phy_config(struct i40e_hw *hw,
				struct i40e_aq_set_phy_config *config,
				struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aq_set_phy_config *cmd =
		(struct i40e_aq_set_phy_config *)&desc.params.raw;
	enum i40e_status_code status;

	if (!config)
		return I40E_ERR_PARAM;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_set_phy_config);

	*cmd = *config;

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	return status;
}

static noinline_for_stack enum i40e_status_code
i40e_set_fc_status(struct i40e_hw *hw,
		   struct i40e_aq_get_phy_abilities_resp *abilities,
		   bool atomic_restart)
{
	struct i40e_aq_set_phy_config config;
	enum i40e_fc_mode fc_mode = hw->fc.requested_mode;
	u8 pause_mask = 0x0;

	switch (fc_mode) {
	case I40E_FC_FULL:
		pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_TX;
		pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_RX;
		break;
	case I40E_FC_RX_PAUSE:
		pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_RX;
		break;
	case I40E_FC_TX_PAUSE:
		pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_TX;
		break;
	default:
		break;
	}

	memset(&config, 0, sizeof(struct i40e_aq_set_phy_config));
	/* clear the old pause settings */
	config.abilities = abilities->abilities & ~(I40E_AQ_PHY_FLAG_PAUSE_TX) &
			   ~(I40E_AQ_PHY_FLAG_PAUSE_RX);
	/* set the new abilities */
	config.abilities |= pause_mask;
	/* If the abilities have changed, then set the new config */
	if (config.abilities == abilities->abilities)
		return 0;

	/* Auto restart link so settings take effect */
	if (atomic_restart)
		config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
	/* Copy over all the old settings */
	config.phy_type = abilities->phy_type;
	config.phy_type_ext = abilities->phy_type_ext;
	config.link_speed = abilities->link_speed;
	config.eee_capability = abilities->eee_capability;
	config.eeer = abilities->eeer_val;
	config.low_power_ctrl = abilities->d3_lpan;
	config.fec_config = abilities->fec_cfg_curr_mod_ext_info &
			    I40E_AQ_PHY_FEC_CONFIG_MASK;

	return i40e_aq_set_phy_config(hw, &config, NULL);
}

/**
 * i40e_set_fc
 * @hw: pointer to the hw struct
 * @aq_failures: buffer to return AdminQ failure information
 * @atomic_restart: whether to enable atomic link restart
 *
 * Set the requested flow control mode using set_phy_config.
 **/
1727 **/ 1728 enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures, 1729 bool atomic_restart) 1730 { 1731 struct i40e_aq_get_phy_abilities_resp abilities; 1732 enum i40e_status_code status; 1733 1734 *aq_failures = 0x0; 1735 1736 /* Get the current phy config */ 1737 status = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, 1738 NULL); 1739 if (status) { 1740 *aq_failures |= I40E_SET_FC_AQ_FAIL_GET; 1741 return status; 1742 } 1743 1744 status = i40e_set_fc_status(hw, &abilities, atomic_restart); 1745 if (status) 1746 *aq_failures |= I40E_SET_FC_AQ_FAIL_SET; 1747 1748 /* Update the link info */ 1749 status = i40e_update_link_info(hw); 1750 if (status) { 1751 /* Wait a little bit (on 40G cards it sometimes takes a really 1752 * long time for link to come back from the atomic reset) 1753 * and try once more 1754 */ 1755 msleep(1000); 1756 status = i40e_update_link_info(hw); 1757 } 1758 if (status) 1759 *aq_failures |= I40E_SET_FC_AQ_FAIL_UPDATE; 1760 1761 return status; 1762 } 1763 1764 /** 1765 * i40e_aq_clear_pxe_mode 1766 * @hw: pointer to the hw struct 1767 * @cmd_details: pointer to command details structure or NULL 1768 * 1769 * Tell the firmware that the driver is taking over from PXE 1770 **/ 1771 i40e_status i40e_aq_clear_pxe_mode(struct i40e_hw *hw, 1772 struct i40e_asq_cmd_details *cmd_details) 1773 { 1774 i40e_status status; 1775 struct i40e_aq_desc desc; 1776 struct i40e_aqc_clear_pxe *cmd = 1777 (struct i40e_aqc_clear_pxe *)&desc.params.raw; 1778 1779 i40e_fill_default_direct_cmd_desc(&desc, 1780 i40e_aqc_opc_clear_pxe_mode); 1781 1782 cmd->rx_cnt = 0x2; 1783 1784 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 1785 1786 wr32(hw, I40E_GLLAN_RCTL_0, 0x1); 1787 1788 return status; 1789 } 1790 1791 /** 1792 * i40e_aq_set_link_restart_an 1793 * @hw: pointer to the hw struct 1794 * @enable_link: if true: enable link, if false: disable link 1795 * @cmd_details: pointer to command details structure or NULL 1796 * 1797 * Sets up the link and restarts the Auto-Negotiation over the link. 1798 **/ 1799 i40e_status i40e_aq_set_link_restart_an(struct i40e_hw *hw, 1800 bool enable_link, 1801 struct i40e_asq_cmd_details *cmd_details) 1802 { 1803 struct i40e_aq_desc desc; 1804 struct i40e_aqc_set_link_restart_an *cmd = 1805 (struct i40e_aqc_set_link_restart_an *)&desc.params.raw; 1806 i40e_status status; 1807 1808 i40e_fill_default_direct_cmd_desc(&desc, 1809 i40e_aqc_opc_set_link_restart_an); 1810 1811 cmd->command = I40E_AQ_PHY_RESTART_AN; 1812 if (enable_link) 1813 cmd->command |= I40E_AQ_PHY_LINK_ENABLE; 1814 else 1815 cmd->command &= ~I40E_AQ_PHY_LINK_ENABLE; 1816 1817 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 1818 1819 return status; 1820 } 1821 1822 /** 1823 * i40e_aq_get_link_info 1824 * @hw: pointer to the hw struct 1825 * @enable_lse: enable/disable LinkStatusEvent reporting 1826 * @link: pointer to link status structure - optional 1827 * @cmd_details: pointer to command details structure or NULL 1828 * 1829 * Returns the link status of the adapter. 
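 *
 * Illustrative usage sketch (hypothetical caller; shows how the optional
 * @link argument can be used to snapshot the reported status):
 *
 *	struct i40e_link_status link;
 *
 *	if (!i40e_aq_get_link_info(hw, true, &link, NULL) &&
 *	    (link.link_info & I40E_AQ_LINK_UP))
 *		; /* link is up, link.link_speed holds the negotiated speed */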
1830 **/ 1831 i40e_status i40e_aq_get_link_info(struct i40e_hw *hw, 1832 bool enable_lse, struct i40e_link_status *link, 1833 struct i40e_asq_cmd_details *cmd_details) 1834 { 1835 struct i40e_aq_desc desc; 1836 struct i40e_aqc_get_link_status *resp = 1837 (struct i40e_aqc_get_link_status *)&desc.params.raw; 1838 struct i40e_link_status *hw_link_info = &hw->phy.link_info; 1839 i40e_status status; 1840 bool tx_pause, rx_pause; 1841 u16 command_flags; 1842 1843 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status); 1844 1845 if (enable_lse) 1846 command_flags = I40E_AQ_LSE_ENABLE; 1847 else 1848 command_flags = I40E_AQ_LSE_DISABLE; 1849 resp->command_flags = cpu_to_le16(command_flags); 1850 1851 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 1852 1853 if (status) 1854 goto aq_get_link_info_exit; 1855 1856 /* save off old link status information */ 1857 hw->phy.link_info_old = *hw_link_info; 1858 1859 /* update link status */ 1860 hw_link_info->phy_type = (enum i40e_aq_phy_type)resp->phy_type; 1861 hw->phy.media_type = i40e_get_media_type(hw); 1862 hw_link_info->link_speed = (enum i40e_aq_link_speed)resp->link_speed; 1863 hw_link_info->link_info = resp->link_info; 1864 hw_link_info->an_info = resp->an_info; 1865 hw_link_info->fec_info = resp->config & (I40E_AQ_CONFIG_FEC_KR_ENA | 1866 I40E_AQ_CONFIG_FEC_RS_ENA); 1867 hw_link_info->ext_info = resp->ext_info; 1868 hw_link_info->loopback = resp->loopback & I40E_AQ_LOOPBACK_MASK; 1869 hw_link_info->max_frame_size = le16_to_cpu(resp->max_frame_size); 1870 hw_link_info->pacing = resp->config & I40E_AQ_CONFIG_PACING_MASK; 1871 1872 /* update fc info */ 1873 tx_pause = !!(resp->an_info & I40E_AQ_LINK_PAUSE_TX); 1874 rx_pause = !!(resp->an_info & I40E_AQ_LINK_PAUSE_RX); 1875 if (tx_pause & rx_pause) 1876 hw->fc.current_mode = I40E_FC_FULL; 1877 else if (tx_pause) 1878 hw->fc.current_mode = I40E_FC_TX_PAUSE; 1879 else if (rx_pause) 1880 hw->fc.current_mode = I40E_FC_RX_PAUSE; 1881 else 1882 hw->fc.current_mode = I40E_FC_NONE; 1883 1884 if (resp->config & I40E_AQ_CONFIG_CRC_ENA) 1885 hw_link_info->crc_enable = true; 1886 else 1887 hw_link_info->crc_enable = false; 1888 1889 if (resp->command_flags & cpu_to_le16(I40E_AQ_LSE_IS_ENABLED)) 1890 hw_link_info->lse_enable = true; 1891 else 1892 hw_link_info->lse_enable = false; 1893 1894 if ((hw->mac.type == I40E_MAC_XL710) && 1895 (hw->aq.fw_maj_ver < 4 || (hw->aq.fw_maj_ver == 4 && 1896 hw->aq.fw_min_ver < 40)) && hw_link_info->phy_type == 0xE) 1897 hw_link_info->phy_type = I40E_PHY_TYPE_10GBASE_SFPP_CU; 1898 1899 if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE && 1900 hw->mac.type != I40E_MAC_X722) { 1901 __le32 tmp; 1902 1903 memcpy(&tmp, resp->link_type, sizeof(tmp)); 1904 hw->phy.phy_types = le32_to_cpu(tmp); 1905 hw->phy.phy_types |= ((u64)resp->link_type_ext << 32); 1906 } 1907 1908 /* save link status information */ 1909 if (link) 1910 *link = *hw_link_info; 1911 1912 /* flag cleared so helper functions don't call AQ again */ 1913 hw->phy.get_link_info = false; 1914 1915 aq_get_link_info_exit: 1916 return status; 1917 } 1918 1919 /** 1920 * i40e_aq_set_phy_int_mask 1921 * @hw: pointer to the hw struct 1922 * @mask: interrupt mask to be set 1923 * @cmd_details: pointer to command details structure or NULL 1924 * 1925 * Set link interrupt mask. 
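 *
 * The mask bits correspond to the I40E_AQ_EVENT_* link event flags.
 * Illustrative sketch (the particular event selection is an assumption,
 * not a requirement of this function):
 *
 *	i40e_aq_set_phy_int_mask(hw,
 *				 ~(I40E_AQ_EVENT_LINK_UPDOWN |
 *				   I40E_AQ_EVENT_MEDIA_NA |
 *				   I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);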
1926 **/ 1927 i40e_status i40e_aq_set_phy_int_mask(struct i40e_hw *hw, 1928 u16 mask, 1929 struct i40e_asq_cmd_details *cmd_details) 1930 { 1931 struct i40e_aq_desc desc; 1932 struct i40e_aqc_set_phy_int_mask *cmd = 1933 (struct i40e_aqc_set_phy_int_mask *)&desc.params.raw; 1934 i40e_status status; 1935 1936 i40e_fill_default_direct_cmd_desc(&desc, 1937 i40e_aqc_opc_set_phy_int_mask); 1938 1939 cmd->event_mask = cpu_to_le16(mask); 1940 1941 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 1942 1943 return status; 1944 } 1945 1946 /** 1947 * i40e_aq_set_phy_debug 1948 * @hw: pointer to the hw struct 1949 * @cmd_flags: debug command flags 1950 * @cmd_details: pointer to command details structure or NULL 1951 * 1952 * Reset the external PHY. 1953 **/ 1954 i40e_status i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags, 1955 struct i40e_asq_cmd_details *cmd_details) 1956 { 1957 struct i40e_aq_desc desc; 1958 struct i40e_aqc_set_phy_debug *cmd = 1959 (struct i40e_aqc_set_phy_debug *)&desc.params.raw; 1960 i40e_status status; 1961 1962 i40e_fill_default_direct_cmd_desc(&desc, 1963 i40e_aqc_opc_set_phy_debug); 1964 1965 cmd->command_flags = cmd_flags; 1966 1967 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 1968 1969 return status; 1970 } 1971 1972 /** 1973 * i40e_aq_add_vsi 1974 * @hw: pointer to the hw struct 1975 * @vsi_ctx: pointer to a vsi context struct 1976 * @cmd_details: pointer to command details structure or NULL 1977 * 1978 * Add a VSI context to the hardware. 1979 **/ 1980 i40e_status i40e_aq_add_vsi(struct i40e_hw *hw, 1981 struct i40e_vsi_context *vsi_ctx, 1982 struct i40e_asq_cmd_details *cmd_details) 1983 { 1984 struct i40e_aq_desc desc; 1985 struct i40e_aqc_add_get_update_vsi *cmd = 1986 (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw; 1987 struct i40e_aqc_add_get_update_vsi_completion *resp = 1988 (struct i40e_aqc_add_get_update_vsi_completion *) 1989 &desc.params.raw; 1990 i40e_status status; 1991 1992 i40e_fill_default_direct_cmd_desc(&desc, 1993 i40e_aqc_opc_add_vsi); 1994 1995 cmd->uplink_seid = cpu_to_le16(vsi_ctx->uplink_seid); 1996 cmd->connection_type = vsi_ctx->connection_type; 1997 cmd->vf_id = vsi_ctx->vf_num; 1998 cmd->vsi_flags = cpu_to_le16(vsi_ctx->flags); 1999 2000 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); 2001 2002 status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info, 2003 sizeof(vsi_ctx->info), cmd_details); 2004 2005 if (status) 2006 goto aq_add_vsi_exit; 2007 2008 vsi_ctx->seid = le16_to_cpu(resp->seid); 2009 vsi_ctx->vsi_number = le16_to_cpu(resp->vsi_number); 2010 vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used); 2011 vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free); 2012 2013 aq_add_vsi_exit: 2014 return status; 2015 } 2016 2017 /** 2018 * i40e_aq_set_default_vsi 2019 * @hw: pointer to the hw struct 2020 * @seid: vsi number 2021 * @cmd_details: pointer to command details structure or NULL 2022 **/ 2023 i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw, 2024 u16 seid, 2025 struct i40e_asq_cmd_details *cmd_details) 2026 { 2027 struct i40e_aq_desc desc; 2028 struct i40e_aqc_set_vsi_promiscuous_modes *cmd = 2029 (struct i40e_aqc_set_vsi_promiscuous_modes *) 2030 &desc.params.raw; 2031 i40e_status status; 2032 2033 i40e_fill_default_direct_cmd_desc(&desc, 2034 i40e_aqc_opc_set_vsi_promiscuous_modes); 2035 2036 cmd->promiscuous_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT); 2037 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT); 2038 cmd->seid = 
cpu_to_le16(seid); 2039 2040 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2041 2042 return status; 2043 } 2044 2045 /** 2046 * i40e_aq_clear_default_vsi 2047 * @hw: pointer to the hw struct 2048 * @seid: vsi number 2049 * @cmd_details: pointer to command details structure or NULL 2050 **/ 2051 i40e_status i40e_aq_clear_default_vsi(struct i40e_hw *hw, 2052 u16 seid, 2053 struct i40e_asq_cmd_details *cmd_details) 2054 { 2055 struct i40e_aq_desc desc; 2056 struct i40e_aqc_set_vsi_promiscuous_modes *cmd = 2057 (struct i40e_aqc_set_vsi_promiscuous_modes *) 2058 &desc.params.raw; 2059 i40e_status status; 2060 2061 i40e_fill_default_direct_cmd_desc(&desc, 2062 i40e_aqc_opc_set_vsi_promiscuous_modes); 2063 2064 cmd->promiscuous_flags = cpu_to_le16(0); 2065 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT); 2066 cmd->seid = cpu_to_le16(seid); 2067 2068 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2069 2070 return status; 2071 } 2072 2073 /** 2074 * i40e_aq_set_vsi_unicast_promiscuous 2075 * @hw: pointer to the hw struct 2076 * @seid: vsi number 2077 * @set: set unicast promiscuous enable/disable 2078 * @cmd_details: pointer to command details structure or NULL 2079 * @rx_only_promisc: flag to decide if egress traffic gets mirrored in promisc 2080 **/ 2081 i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw, 2082 u16 seid, bool set, 2083 struct i40e_asq_cmd_details *cmd_details, 2084 bool rx_only_promisc) 2085 { 2086 struct i40e_aq_desc desc; 2087 struct i40e_aqc_set_vsi_promiscuous_modes *cmd = 2088 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; 2089 i40e_status status; 2090 u16 flags = 0; 2091 2092 i40e_fill_default_direct_cmd_desc(&desc, 2093 i40e_aqc_opc_set_vsi_promiscuous_modes); 2094 2095 if (set) { 2096 flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST; 2097 if (rx_only_promisc && 2098 (((hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver >= 5)) || 2099 (hw->aq.api_maj_ver > 1))) 2100 flags |= I40E_AQC_SET_VSI_PROMISC_TX; 2101 } 2102 2103 cmd->promiscuous_flags = cpu_to_le16(flags); 2104 2105 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST); 2106 if (((hw->aq.api_maj_ver >= 1) && (hw->aq.api_min_ver >= 5)) || 2107 (hw->aq.api_maj_ver > 1)) 2108 cmd->valid_flags |= cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_TX); 2109 2110 cmd->seid = cpu_to_le16(seid); 2111 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2112 2113 return status; 2114 } 2115 2116 /** 2117 * i40e_aq_set_vsi_multicast_promiscuous 2118 * @hw: pointer to the hw struct 2119 * @seid: vsi number 2120 * @set: set multicast promiscuous enable/disable 2121 * @cmd_details: pointer to command details structure or NULL 2122 **/ 2123 i40e_status i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw, 2124 u16 seid, bool set, struct i40e_asq_cmd_details *cmd_details) 2125 { 2126 struct i40e_aq_desc desc; 2127 struct i40e_aqc_set_vsi_promiscuous_modes *cmd = 2128 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; 2129 i40e_status status; 2130 u16 flags = 0; 2131 2132 i40e_fill_default_direct_cmd_desc(&desc, 2133 i40e_aqc_opc_set_vsi_promiscuous_modes); 2134 2135 if (set) 2136 flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST; 2137 2138 cmd->promiscuous_flags = cpu_to_le16(flags); 2139 2140 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_MULTICAST); 2141 2142 cmd->seid = cpu_to_le16(seid); 2143 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2144 2145 return status; 2146 } 2147 2148 /** 2149 * 
i40e_aq_set_vsi_mc_promisc_on_vlan
 * @hw: pointer to the hw struct
 * @seid: vsi number
 * @enable: set MAC L2 layer multicast promiscuous enable/disable for a given VLAN
 * @vid: The VLAN tag filter - capture any multicast packet with this VLAN tag
 * @cmd_details: pointer to command details structure or NULL
 **/
enum i40e_status_code i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
					u16 seid, bool enable,
					u16 vid,
					struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
		(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
	enum i40e_status_code status;
	u16 flags = 0;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_set_vsi_promiscuous_modes);

	if (enable)
		flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST;

	cmd->promiscuous_flags = cpu_to_le16(flags);
	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_MULTICAST);
	cmd->seid = cpu_to_le16(seid);
	cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID);

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	return status;
}

/**
 * i40e_aq_set_vsi_uc_promisc_on_vlan
 * @hw: pointer to the hw struct
 * @seid: vsi number
 * @enable: set MAC L2 layer unicast promiscuous enable/disable for a given VLAN
 * @vid: The VLAN tag filter - capture any unicast packet with this VLAN tag
 * @cmd_details: pointer to command details structure or NULL
 **/
enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
					u16 seid, bool enable,
					u16 vid,
					struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
		(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
	enum i40e_status_code status;
	u16 flags = 0;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_set_vsi_promiscuous_modes);

	if (enable)
		flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;

	cmd->promiscuous_flags = cpu_to_le16(flags);
	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
	cmd->seid = cpu_to_le16(seid);
	cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID);

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	return status;
}

/**
 * i40e_aq_set_vsi_bc_promisc_on_vlan
 * @hw: pointer to the hw struct
 * @seid: vsi number
 * @enable: set broadcast promiscuous enable/disable for a given VLAN
 * @vid: The VLAN tag filter - capture any broadcast packet with this VLAN tag
 * @cmd_details: pointer to command details structure or NULL
 **/
i40e_status i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw,
					u16 seid, bool enable, u16 vid,
					struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
		(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
	i40e_status status;
	u16 flags = 0;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_set_vsi_promiscuous_modes);

	if (enable)
		flags |= I40E_AQC_SET_VSI_PROMISC_BROADCAST;

	cmd->promiscuous_flags = cpu_to_le16(flags);
	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
	cmd->seid = cpu_to_le16(seid);
	cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID);

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	return status;
}

/**
 * i40e_aq_set_vsi_broadcast
 * @hw: pointer to the hw struct
 * @seid: vsi number
 * @set_filter: true to set filter, false to clear filter
 * @cmd_details: pointer to command details structure or NULL
 *
 * Set or clear the broadcast promiscuous flag (filter) for a given VSI.
 **/
i40e_status i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
				u16 seid, bool set_filter,
				struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
		(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
	i40e_status status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_set_vsi_promiscuous_modes);

	if (set_filter)
		cmd->promiscuous_flags
			|= cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
	else
		cmd->promiscuous_flags
			&= cpu_to_le16(~I40E_AQC_SET_VSI_PROMISC_BROADCAST);

	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
	cmd->seid = cpu_to_le16(seid);
	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	return status;
}

/**
 * i40e_aq_set_vsi_vlan_promisc - control the VLAN promiscuous setting
 * @hw: pointer to the hw struct
 * @seid: vsi number
 * @enable: enable/disable VLAN promiscuous mode for the VSI
 * @cmd_details: pointer to command details structure or NULL
 **/
i40e_status i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw,
				u16 seid, bool enable,
				struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
		(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
	i40e_status status;
	u16 flags = 0;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_set_vsi_promiscuous_modes);
	if (enable)
		flags |= I40E_AQC_SET_VSI_PROMISC_VLAN;

	cmd->promiscuous_flags = cpu_to_le16(flags);
	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_VLAN);
	cmd->seid = cpu_to_le16(seid);

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	return status;
}

/**
 * i40e_aq_get_vsi_params - get VSI configuration info
 * @hw: pointer to the hw struct
 * @vsi_ctx: pointer to a vsi context struct
 * @cmd_details: pointer to command details structure or NULL
 **/
i40e_status i40e_aq_get_vsi_params(struct i40e_hw *hw,
				struct i40e_vsi_context *vsi_ctx,
				struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_add_get_update_vsi *cmd =
		(struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
	struct i40e_aqc_add_get_update_vsi_completion *resp =
		(struct i40e_aqc_add_get_update_vsi_completion *)
		&desc.params.raw;
	i40e_status status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_get_vsi_parameters);

	cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid);

	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);

	status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
				       sizeof(vsi_ctx->info), NULL);

	if (status)
		goto aq_get_vsi_params_exit;

	vsi_ctx->seid = le16_to_cpu(resp->seid);
	vsi_ctx->vsi_number = le16_to_cpu(resp->vsi_number);
	vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used);
	vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);

aq_get_vsi_params_exit:
	return status;
}

/**
 * i40e_aq_update_vsi_params
 * @hw: pointer to the hw struct
 * @vsi_ctx: pointer to a vsi context struct
 * @cmd_details: pointer to command details structure or NULL
 *
 * Update a VSI context.
 **/
i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw,
				struct i40e_vsi_context *vsi_ctx,
				struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_add_get_update_vsi *cmd =
		(struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
	struct i40e_aqc_add_get_update_vsi_completion *resp =
		(struct i40e_aqc_add_get_update_vsi_completion *)
		&desc.params.raw;
	i40e_status status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_update_vsi_parameters);
	cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid);

	desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));

	status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
				       sizeof(vsi_ctx->info), cmd_details);

	vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used);
	vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);

	return status;
}

/**
 * i40e_aq_get_switch_config
 * @hw: pointer to the hardware structure
 * @buf: pointer to the result buffer
 * @buf_size: length of input buffer
 * @start_seid: seid to start for the report, 0 == beginning
 * @cmd_details: pointer to command details structure or NULL
 *
 * Fill the buf with switch configuration returned from AdminQ command
 **/
i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw,
				struct i40e_aqc_get_switch_config_resp *buf,
				u16 buf_size, u16 *start_seid,
				struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_switch_seid *scfg =
		(struct i40e_aqc_switch_seid *)&desc.params.raw;
	i40e_status status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_get_switch_config);
	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
	if (buf_size > I40E_AQ_LARGE_BUF)
		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
	scfg->seid = cpu_to_le16(*start_seid);

	status = i40e_asq_send_command(hw, &desc, buf, buf_size, cmd_details);
	*start_seid = le16_to_cpu(scfg->seid);

	return status;
}

/**
 * i40e_aq_set_switch_config
 * @hw: pointer to the hardware structure
 * @flags: bit flag values to set
 * @valid_flags: which bit flags to set
 * @mode: cloud filter mode
 * @cmd_details: pointer to command details structure or NULL
 *
 * Set switch configuration bits
 **/
enum i40e_status_code i40e_aq_set_switch_config(struct i40e_hw *hw,
				u16 flags,
				u16 valid_flags, u8 mode,
				struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_set_switch_config *scfg =
		(struct i40e_aqc_set_switch_config *)&desc.params.raw;
	enum i40e_status_code status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_set_switch_config);
	scfg->flags = cpu_to_le16(flags);
	scfg->valid_flags = cpu_to_le16(valid_flags);
	scfg->mode = mode;
	if (hw->flags &
I40E_HW_FLAG_802_1AD_CAPABLE) { 2453 scfg->switch_tag = cpu_to_le16(hw->switch_tag); 2454 scfg->first_tag = cpu_to_le16(hw->first_tag); 2455 scfg->second_tag = cpu_to_le16(hw->second_tag); 2456 } 2457 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2458 2459 return status; 2460 } 2461 2462 /** 2463 * i40e_aq_get_firmware_version 2464 * @hw: pointer to the hw struct 2465 * @fw_major_version: firmware major version 2466 * @fw_minor_version: firmware minor version 2467 * @fw_build: firmware build number 2468 * @api_major_version: major queue version 2469 * @api_minor_version: minor queue version 2470 * @cmd_details: pointer to command details structure or NULL 2471 * 2472 * Get the firmware version from the admin queue commands 2473 **/ 2474 i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw, 2475 u16 *fw_major_version, u16 *fw_minor_version, 2476 u32 *fw_build, 2477 u16 *api_major_version, u16 *api_minor_version, 2478 struct i40e_asq_cmd_details *cmd_details) 2479 { 2480 struct i40e_aq_desc desc; 2481 struct i40e_aqc_get_version *resp = 2482 (struct i40e_aqc_get_version *)&desc.params.raw; 2483 i40e_status status; 2484 2485 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_version); 2486 2487 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2488 2489 if (!status) { 2490 if (fw_major_version) 2491 *fw_major_version = le16_to_cpu(resp->fw_major); 2492 if (fw_minor_version) 2493 *fw_minor_version = le16_to_cpu(resp->fw_minor); 2494 if (fw_build) 2495 *fw_build = le32_to_cpu(resp->fw_build); 2496 if (api_major_version) 2497 *api_major_version = le16_to_cpu(resp->api_major); 2498 if (api_minor_version) 2499 *api_minor_version = le16_to_cpu(resp->api_minor); 2500 } 2501 2502 return status; 2503 } 2504 2505 /** 2506 * i40e_aq_send_driver_version 2507 * @hw: pointer to the hw struct 2508 * @dv: driver's major, minor version 2509 * @cmd_details: pointer to command details structure or NULL 2510 * 2511 * Send the driver version to the firmware 2512 **/ 2513 i40e_status i40e_aq_send_driver_version(struct i40e_hw *hw, 2514 struct i40e_driver_version *dv, 2515 struct i40e_asq_cmd_details *cmd_details) 2516 { 2517 struct i40e_aq_desc desc; 2518 struct i40e_aqc_driver_version *cmd = 2519 (struct i40e_aqc_driver_version *)&desc.params.raw; 2520 i40e_status status; 2521 u16 len; 2522 2523 if (dv == NULL) 2524 return I40E_ERR_PARAM; 2525 2526 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_driver_version); 2527 2528 desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD); 2529 cmd->driver_major_ver = dv->major_version; 2530 cmd->driver_minor_ver = dv->minor_version; 2531 cmd->driver_build_ver = dv->build_version; 2532 cmd->driver_subbuild_ver = dv->subbuild_version; 2533 2534 len = 0; 2535 while (len < sizeof(dv->driver_string) && 2536 (dv->driver_string[len] < 0x80) && 2537 dv->driver_string[len]) 2538 len++; 2539 status = i40e_asq_send_command(hw, &desc, dv->driver_string, 2540 len, cmd_details); 2541 2542 return status; 2543 } 2544 2545 /** 2546 * i40e_get_link_status - get status of the HW network link 2547 * @hw: pointer to the hw struct 2548 * @link_up: pointer to bool (true/false = linkup/linkdown) 2549 * 2550 * Variable link_up true if link is up, false if link is down. 
 * The variable link_up is invalid if the returned status != 0
 *
 * Side effect: LinkStatusEvent reporting becomes enabled
 **/
i40e_status i40e_get_link_status(struct i40e_hw *hw, bool *link_up)
{
	i40e_status status = 0;

	if (hw->phy.get_link_info) {
		status = i40e_update_link_info(hw);

		if (status)
			i40e_debug(hw, I40E_DEBUG_LINK, "get link failed: status %d\n",
				   status);
	}

	*link_up = hw->phy.link_info.link_info & I40E_AQ_LINK_UP;

	return status;
}

/**
 * i40e_update_link_info - update status of the HW network link
 * @hw: pointer to the hw struct
 **/
noinline_for_stack i40e_status i40e_update_link_info(struct i40e_hw *hw)
{
	struct i40e_aq_get_phy_abilities_resp abilities;
	i40e_status status = 0;

	status = i40e_aq_get_link_info(hw, true, NULL, NULL);
	if (status)
		return status;

	/* extra checking needed to ensure link info to user is timely */
	if ((hw->phy.link_info.link_info & I40E_AQ_MEDIA_AVAILABLE) &&
	    ((hw->phy.link_info.link_info & I40E_AQ_LINK_UP) ||
	     !(hw->phy.link_info_old.link_info & I40E_AQ_LINK_UP))) {
		status = i40e_aq_get_phy_capabilities(hw, false, false,
						      &abilities, NULL);
		if (status)
			return status;

		if (abilities.fec_cfg_curr_mod_ext_info &
		    I40E_AQ_ENABLE_FEC_AUTO)
			hw->phy.link_info.req_fec_info =
				(I40E_AQ_REQUEST_FEC_KR |
				 I40E_AQ_REQUEST_FEC_RS);
		else
			hw->phy.link_info.req_fec_info =
				abilities.fec_cfg_curr_mod_ext_info &
				(I40E_AQ_REQUEST_FEC_KR |
				 I40E_AQ_REQUEST_FEC_RS);

		memcpy(hw->phy.link_info.module_type, &abilities.module_type,
		       sizeof(hw->phy.link_info.module_type));
	}

	return status;
}

/**
 * i40e_aq_add_veb - Insert a VEB between the VSI and the MAC
 * @hw: pointer to the hw struct
 * @uplink_seid: the MAC or other gizmo SEID
 * @downlink_seid: the VSI SEID
 * @enabled_tc: bitmap of TCs to be enabled
 * @default_port: true for default port VSI, false for control port
 * @veb_seid: pointer to where to put the resulting VEB SEID
 * @enable_stats: true to turn on VEB stats
 * @cmd_details: pointer to command details structure or NULL
 *
 * This asks the FW to add a VEB between the uplink and downlink
 * elements. If the uplink SEID is 0, this will be a floating VEB.
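 *
 * Illustrative sketch (hypothetical SEIDs and TC map; error handling
 * omitted):
 *
 *	u16 veb_seid = 0;
 *	i40e_status status;
 *
 *	status = i40e_aq_add_veb(hw, uplink_seid, vsi_seid, enabled_tc,
 *				 true, &veb_seid, true, NULL);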
2625 **/ 2626 i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid, 2627 u16 downlink_seid, u8 enabled_tc, 2628 bool default_port, u16 *veb_seid, 2629 bool enable_stats, 2630 struct i40e_asq_cmd_details *cmd_details) 2631 { 2632 struct i40e_aq_desc desc; 2633 struct i40e_aqc_add_veb *cmd = 2634 (struct i40e_aqc_add_veb *)&desc.params.raw; 2635 struct i40e_aqc_add_veb_completion *resp = 2636 (struct i40e_aqc_add_veb_completion *)&desc.params.raw; 2637 i40e_status status; 2638 u16 veb_flags = 0; 2639 2640 /* SEIDs need to either both be set or both be 0 for floating VEB */ 2641 if (!!uplink_seid != !!downlink_seid) 2642 return I40E_ERR_PARAM; 2643 2644 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_veb); 2645 2646 cmd->uplink_seid = cpu_to_le16(uplink_seid); 2647 cmd->downlink_seid = cpu_to_le16(downlink_seid); 2648 cmd->enable_tcs = enabled_tc; 2649 if (!uplink_seid) 2650 veb_flags |= I40E_AQC_ADD_VEB_FLOATING; 2651 if (default_port) 2652 veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT; 2653 else 2654 veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DATA; 2655 2656 /* reverse logic here: set the bitflag to disable the stats */ 2657 if (!enable_stats) 2658 veb_flags |= I40E_AQC_ADD_VEB_ENABLE_DISABLE_STATS; 2659 2660 cmd->veb_flags = cpu_to_le16(veb_flags); 2661 2662 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2663 2664 if (!status && veb_seid) 2665 *veb_seid = le16_to_cpu(resp->veb_seid); 2666 2667 return status; 2668 } 2669 2670 /** 2671 * i40e_aq_get_veb_parameters - Retrieve VEB parameters 2672 * @hw: pointer to the hw struct 2673 * @veb_seid: the SEID of the VEB to query 2674 * @switch_id: the uplink switch id 2675 * @floating: set to true if the VEB is floating 2676 * @statistic_index: index of the stats counter block for this VEB 2677 * @vebs_used: number of VEB's used by function 2678 * @vebs_free: total VEB's not reserved by any function 2679 * @cmd_details: pointer to command details structure or NULL 2680 * 2681 * This retrieves the parameters for a particular VEB, specified by 2682 * uplink_seid, and returns them to the caller. 
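 *
 * Illustrative sketch (each output pointer may be NULL if the caller does
 * not need that value):
 *
 *	u16 switch_id, stats_idx, vebs_used, vebs_free;
 *	bool floating;
 *
 *	i40e_aq_get_veb_parameters(hw, veb_seid, &switch_id, &floating,
 *				   &stats_idx, &vebs_used, &vebs_free, NULL);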
2683 **/ 2684 i40e_status i40e_aq_get_veb_parameters(struct i40e_hw *hw, 2685 u16 veb_seid, u16 *switch_id, 2686 bool *floating, u16 *statistic_index, 2687 u16 *vebs_used, u16 *vebs_free, 2688 struct i40e_asq_cmd_details *cmd_details) 2689 { 2690 struct i40e_aq_desc desc; 2691 struct i40e_aqc_get_veb_parameters_completion *cmd_resp = 2692 (struct i40e_aqc_get_veb_parameters_completion *) 2693 &desc.params.raw; 2694 i40e_status status; 2695 2696 if (veb_seid == 0) 2697 return I40E_ERR_PARAM; 2698 2699 i40e_fill_default_direct_cmd_desc(&desc, 2700 i40e_aqc_opc_get_veb_parameters); 2701 cmd_resp->seid = cpu_to_le16(veb_seid); 2702 2703 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2704 if (status) 2705 goto get_veb_exit; 2706 2707 if (switch_id) 2708 *switch_id = le16_to_cpu(cmd_resp->switch_id); 2709 if (statistic_index) 2710 *statistic_index = le16_to_cpu(cmd_resp->statistic_index); 2711 if (vebs_used) 2712 *vebs_used = le16_to_cpu(cmd_resp->vebs_used); 2713 if (vebs_free) 2714 *vebs_free = le16_to_cpu(cmd_resp->vebs_free); 2715 if (floating) { 2716 u16 flags = le16_to_cpu(cmd_resp->veb_flags); 2717 2718 if (flags & I40E_AQC_ADD_VEB_FLOATING) 2719 *floating = true; 2720 else 2721 *floating = false; 2722 } 2723 2724 get_veb_exit: 2725 return status; 2726 } 2727 2728 /** 2729 * i40e_aq_add_macvlan 2730 * @hw: pointer to the hw struct 2731 * @seid: VSI for the mac address 2732 * @mv_list: list of macvlans to be added 2733 * @count: length of the list 2734 * @cmd_details: pointer to command details structure or NULL 2735 * 2736 * Add MAC/VLAN addresses to the HW filtering 2737 **/ 2738 i40e_status i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid, 2739 struct i40e_aqc_add_macvlan_element_data *mv_list, 2740 u16 count, struct i40e_asq_cmd_details *cmd_details) 2741 { 2742 struct i40e_aq_desc desc; 2743 struct i40e_aqc_macvlan *cmd = 2744 (struct i40e_aqc_macvlan *)&desc.params.raw; 2745 i40e_status status; 2746 u16 buf_size; 2747 int i; 2748 2749 if (count == 0 || !mv_list || !hw) 2750 return I40E_ERR_PARAM; 2751 2752 buf_size = count * sizeof(*mv_list); 2753 2754 /* prep the rest of the request */ 2755 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_macvlan); 2756 cmd->num_addresses = cpu_to_le16(count); 2757 cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid); 2758 cmd->seid[1] = 0; 2759 cmd->seid[2] = 0; 2760 2761 for (i = 0; i < count; i++) 2762 if (is_multicast_ether_addr(mv_list[i].mac_addr)) 2763 mv_list[i].flags |= 2764 cpu_to_le16(I40E_AQC_MACVLAN_ADD_USE_SHARED_MAC); 2765 2766 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); 2767 if (buf_size > I40E_AQ_LARGE_BUF) 2768 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 2769 2770 status = i40e_asq_send_command(hw, &desc, mv_list, buf_size, 2771 cmd_details); 2772 2773 return status; 2774 } 2775 2776 /** 2777 * i40e_aq_remove_macvlan 2778 * @hw: pointer to the hw struct 2779 * @seid: VSI for the mac address 2780 * @mv_list: list of macvlans to be removed 2781 * @count: length of the list 2782 * @cmd_details: pointer to command details structure or NULL 2783 * 2784 * Remove MAC/VLAN addresses from the HW filtering 2785 **/ 2786 i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid, 2787 struct i40e_aqc_remove_macvlan_element_data *mv_list, 2788 u16 count, struct i40e_asq_cmd_details *cmd_details) 2789 { 2790 struct i40e_aq_desc desc; 2791 struct i40e_aqc_macvlan *cmd = 2792 (struct i40e_aqc_macvlan *)&desc.params.raw; 2793 i40e_status status; 2794 u16 buf_size; 2795 
2796 if (count == 0 || !mv_list || !hw) 2797 return I40E_ERR_PARAM; 2798 2799 buf_size = count * sizeof(*mv_list); 2800 2801 /* prep the rest of the request */ 2802 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_macvlan); 2803 cmd->num_addresses = cpu_to_le16(count); 2804 cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid); 2805 cmd->seid[1] = 0; 2806 cmd->seid[2] = 0; 2807 2808 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); 2809 if (buf_size > I40E_AQ_LARGE_BUF) 2810 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 2811 2812 status = i40e_asq_send_command(hw, &desc, mv_list, buf_size, 2813 cmd_details); 2814 2815 return status; 2816 } 2817 2818 /** 2819 * i40e_mirrorrule_op - Internal helper function to add/delete mirror rule 2820 * @hw: pointer to the hw struct 2821 * @opcode: AQ opcode for add or delete mirror rule 2822 * @sw_seid: Switch SEID (to which rule refers) 2823 * @rule_type: Rule Type (ingress/egress/VLAN) 2824 * @id: Destination VSI SEID or Rule ID 2825 * @count: length of the list 2826 * @mr_list: list of mirrored VSI SEIDs or VLAN IDs 2827 * @cmd_details: pointer to command details structure or NULL 2828 * @rule_id: Rule ID returned from FW 2829 * @rules_used: Number of rules used in internal switch 2830 * @rules_free: Number of rules free in internal switch 2831 * 2832 * Add/Delete a mirror rule to a specific switch. Mirror rules are supported for 2833 * VEBs/VEPA elements only 2834 **/ 2835 static i40e_status i40e_mirrorrule_op(struct i40e_hw *hw, 2836 u16 opcode, u16 sw_seid, u16 rule_type, u16 id, 2837 u16 count, __le16 *mr_list, 2838 struct i40e_asq_cmd_details *cmd_details, 2839 u16 *rule_id, u16 *rules_used, u16 *rules_free) 2840 { 2841 struct i40e_aq_desc desc; 2842 struct i40e_aqc_add_delete_mirror_rule *cmd = 2843 (struct i40e_aqc_add_delete_mirror_rule *)&desc.params.raw; 2844 struct i40e_aqc_add_delete_mirror_rule_completion *resp = 2845 (struct i40e_aqc_add_delete_mirror_rule_completion *)&desc.params.raw; 2846 i40e_status status; 2847 u16 buf_size; 2848 2849 buf_size = count * sizeof(*mr_list); 2850 2851 /* prep the rest of the request */ 2852 i40e_fill_default_direct_cmd_desc(&desc, opcode); 2853 cmd->seid = cpu_to_le16(sw_seid); 2854 cmd->rule_type = cpu_to_le16(rule_type & 2855 I40E_AQC_MIRROR_RULE_TYPE_MASK); 2856 cmd->num_entries = cpu_to_le16(count); 2857 /* Dest VSI for add, rule_id for delete */ 2858 cmd->destination = cpu_to_le16(id); 2859 if (mr_list) { 2860 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | 2861 I40E_AQ_FLAG_RD)); 2862 if (buf_size > I40E_AQ_LARGE_BUF) 2863 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 2864 } 2865 2866 status = i40e_asq_send_command(hw, &desc, mr_list, buf_size, 2867 cmd_details); 2868 if (!status || 2869 hw->aq.asq_last_status == I40E_AQ_RC_ENOSPC) { 2870 if (rule_id) 2871 *rule_id = le16_to_cpu(resp->rule_id); 2872 if (rules_used) 2873 *rules_used = le16_to_cpu(resp->mirror_rules_used); 2874 if (rules_free) 2875 *rules_free = le16_to_cpu(resp->mirror_rules_free); 2876 } 2877 return status; 2878 } 2879 2880 /** 2881 * i40e_aq_add_mirrorrule - add a mirror rule 2882 * @hw: pointer to the hw struct 2883 * @sw_seid: Switch SEID (to which rule refers) 2884 * @rule_type: Rule Type (ingress/egress/VLAN) 2885 * @dest_vsi: SEID of VSI to which packets will be mirrored 2886 * @count: length of the list 2887 * @mr_list: list of mirrored VSI SEIDs or VLAN IDs 2888 * @cmd_details: pointer to command details structure or NULL 2889 * @rule_id: Rule ID returned from FW 2890 * 
@rules_used: Number of rules used in internal switch 2891 * @rules_free: Number of rules free in internal switch 2892 * 2893 * Add mirror rule. Mirror rules are supported for VEBs or VEPA elements only 2894 **/ 2895 i40e_status i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid, 2896 u16 rule_type, u16 dest_vsi, u16 count, __le16 *mr_list, 2897 struct i40e_asq_cmd_details *cmd_details, 2898 u16 *rule_id, u16 *rules_used, u16 *rules_free) 2899 { 2900 if (!(rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS || 2901 rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS)) { 2902 if (count == 0 || !mr_list) 2903 return I40E_ERR_PARAM; 2904 } 2905 2906 return i40e_mirrorrule_op(hw, i40e_aqc_opc_add_mirror_rule, sw_seid, 2907 rule_type, dest_vsi, count, mr_list, 2908 cmd_details, rule_id, rules_used, rules_free); 2909 } 2910 2911 /** 2912 * i40e_aq_delete_mirrorrule - delete a mirror rule 2913 * @hw: pointer to the hw struct 2914 * @sw_seid: Switch SEID (to which rule refers) 2915 * @rule_type: Rule Type (ingress/egress/VLAN) 2916 * @count: length of the list 2917 * @rule_id: Rule ID that is returned in the receive desc as part of 2918 * add_mirrorrule. 2919 * @mr_list: list of mirrored VLAN IDs to be removed 2920 * @cmd_details: pointer to command details structure or NULL 2921 * @rules_used: Number of rules used in internal switch 2922 * @rules_free: Number of rules free in internal switch 2923 * 2924 * Delete a mirror rule. Mirror rules are supported for VEBs/VEPA elements only 2925 **/ 2926 i40e_status i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid, 2927 u16 rule_type, u16 rule_id, u16 count, __le16 *mr_list, 2928 struct i40e_asq_cmd_details *cmd_details, 2929 u16 *rules_used, u16 *rules_free) 2930 { 2931 /* Rule ID has to be valid except rule_type: INGRESS VLAN mirroring */ 2932 if (rule_type == I40E_AQC_MIRROR_RULE_TYPE_VLAN) { 2933 /* count and mr_list shall be valid for rule_type INGRESS VLAN 2934 * mirroring. For other rule_type, count and rule_type should 2935 * not matter. 
2936 */ 2937 if (count == 0 || !mr_list) 2938 return I40E_ERR_PARAM; 2939 } 2940 2941 return i40e_mirrorrule_op(hw, i40e_aqc_opc_delete_mirror_rule, sw_seid, 2942 rule_type, rule_id, count, mr_list, 2943 cmd_details, NULL, rules_used, rules_free); 2944 } 2945 2946 /** 2947 * i40e_aq_send_msg_to_vf 2948 * @hw: pointer to the hardware structure 2949 * @vfid: VF id to send msg 2950 * @v_opcode: opcodes for VF-PF communication 2951 * @v_retval: return error code 2952 * @msg: pointer to the msg buffer 2953 * @msglen: msg length 2954 * @cmd_details: pointer to command details 2955 * 2956 * send msg to vf 2957 **/ 2958 i40e_status i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid, 2959 u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen, 2960 struct i40e_asq_cmd_details *cmd_details) 2961 { 2962 struct i40e_aq_desc desc; 2963 struct i40e_aqc_pf_vf_message *cmd = 2964 (struct i40e_aqc_pf_vf_message *)&desc.params.raw; 2965 i40e_status status; 2966 2967 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_send_msg_to_vf); 2968 cmd->id = cpu_to_le32(vfid); 2969 desc.cookie_high = cpu_to_le32(v_opcode); 2970 desc.cookie_low = cpu_to_le32(v_retval); 2971 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_SI); 2972 if (msglen) { 2973 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | 2974 I40E_AQ_FLAG_RD)); 2975 if (msglen > I40E_AQ_LARGE_BUF) 2976 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 2977 desc.datalen = cpu_to_le16(msglen); 2978 } 2979 status = i40e_asq_send_command(hw, &desc, msg, msglen, cmd_details); 2980 2981 return status; 2982 } 2983 2984 /** 2985 * i40e_aq_debug_read_register 2986 * @hw: pointer to the hw struct 2987 * @reg_addr: register address 2988 * @reg_val: register value 2989 * @cmd_details: pointer to command details structure or NULL 2990 * 2991 * Read the register using the admin queue commands 2992 **/ 2993 i40e_status i40e_aq_debug_read_register(struct i40e_hw *hw, 2994 u32 reg_addr, u64 *reg_val, 2995 struct i40e_asq_cmd_details *cmd_details) 2996 { 2997 struct i40e_aq_desc desc; 2998 struct i40e_aqc_debug_reg_read_write *cmd_resp = 2999 (struct i40e_aqc_debug_reg_read_write *)&desc.params.raw; 3000 i40e_status status; 3001 3002 if (reg_val == NULL) 3003 return I40E_ERR_PARAM; 3004 3005 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_read_reg); 3006 3007 cmd_resp->address = cpu_to_le32(reg_addr); 3008 3009 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3010 3011 if (!status) { 3012 *reg_val = ((u64)le32_to_cpu(cmd_resp->value_high) << 32) | 3013 (u64)le32_to_cpu(cmd_resp->value_low); 3014 } 3015 3016 return status; 3017 } 3018 3019 /** 3020 * i40e_aq_debug_write_register 3021 * @hw: pointer to the hw struct 3022 * @reg_addr: register address 3023 * @reg_val: register value 3024 * @cmd_details: pointer to command details structure or NULL 3025 * 3026 * Write to a register using the admin queue commands 3027 **/ 3028 i40e_status i40e_aq_debug_write_register(struct i40e_hw *hw, 3029 u32 reg_addr, u64 reg_val, 3030 struct i40e_asq_cmd_details *cmd_details) 3031 { 3032 struct i40e_aq_desc desc; 3033 struct i40e_aqc_debug_reg_read_write *cmd = 3034 (struct i40e_aqc_debug_reg_read_write *)&desc.params.raw; 3035 i40e_status status; 3036 3037 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_write_reg); 3038 3039 cmd->address = cpu_to_le32(reg_addr); 3040 cmd->value_high = cpu_to_le32((u32)(reg_val >> 32)); 3041 cmd->value_low = cpu_to_le32((u32)(reg_val & 0xFFFFFFFF)); 3042 3043 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 
3044 3045 return status; 3046 } 3047 3048 /** 3049 * i40e_aq_request_resource 3050 * @hw: pointer to the hw struct 3051 * @resource: resource id 3052 * @access: access type 3053 * @sdp_number: resource number 3054 * @timeout: the maximum time in ms that the driver may hold the resource 3055 * @cmd_details: pointer to command details structure or NULL 3056 * 3057 * requests common resource using the admin queue commands 3058 **/ 3059 i40e_status i40e_aq_request_resource(struct i40e_hw *hw, 3060 enum i40e_aq_resources_ids resource, 3061 enum i40e_aq_resource_access_type access, 3062 u8 sdp_number, u64 *timeout, 3063 struct i40e_asq_cmd_details *cmd_details) 3064 { 3065 struct i40e_aq_desc desc; 3066 struct i40e_aqc_request_resource *cmd_resp = 3067 (struct i40e_aqc_request_resource *)&desc.params.raw; 3068 i40e_status status; 3069 3070 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_request_resource); 3071 3072 cmd_resp->resource_id = cpu_to_le16(resource); 3073 cmd_resp->access_type = cpu_to_le16(access); 3074 cmd_resp->resource_number = cpu_to_le32(sdp_number); 3075 3076 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3077 /* The completion specifies the maximum time in ms that the driver 3078 * may hold the resource in the Timeout field. 3079 * If the resource is held by someone else, the command completes with 3080 * busy return value and the timeout field indicates the maximum time 3081 * the current owner of the resource has to free it. 3082 */ 3083 if (!status || hw->aq.asq_last_status == I40E_AQ_RC_EBUSY) 3084 *timeout = le32_to_cpu(cmd_resp->timeout); 3085 3086 return status; 3087 } 3088 3089 /** 3090 * i40e_aq_release_resource 3091 * @hw: pointer to the hw struct 3092 * @resource: resource id 3093 * @sdp_number: resource number 3094 * @cmd_details: pointer to command details structure or NULL 3095 * 3096 * release common resource using the admin queue commands 3097 **/ 3098 i40e_status i40e_aq_release_resource(struct i40e_hw *hw, 3099 enum i40e_aq_resources_ids resource, 3100 u8 sdp_number, 3101 struct i40e_asq_cmd_details *cmd_details) 3102 { 3103 struct i40e_aq_desc desc; 3104 struct i40e_aqc_request_resource *cmd = 3105 (struct i40e_aqc_request_resource *)&desc.params.raw; 3106 i40e_status status; 3107 3108 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_release_resource); 3109 3110 cmd->resource_id = cpu_to_le16(resource); 3111 cmd->resource_number = cpu_to_le32(sdp_number); 3112 3113 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3114 3115 return status; 3116 } 3117 3118 /** 3119 * i40e_aq_read_nvm 3120 * @hw: pointer to the hw struct 3121 * @module_pointer: module pointer location in words from the NVM beginning 3122 * @offset: byte offset from the module beginning 3123 * @length: length of the section to be read (in bytes from the offset) 3124 * @data: command buffer (size [bytes] = length) 3125 * @last_command: tells if this is the last command in a series 3126 * @cmd_details: pointer to command details structure or NULL 3127 * 3128 * Read the NVM using the admin queue commands 3129 **/ 3130 i40e_status i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer, 3131 u32 offset, u16 length, void *data, 3132 bool last_command, 3133 struct i40e_asq_cmd_details *cmd_details) 3134 { 3135 struct i40e_aq_desc desc; 3136 struct i40e_aqc_nvm_update *cmd = 3137 (struct i40e_aqc_nvm_update *)&desc.params.raw; 3138 i40e_status status; 3139 3140 /* In offset the highest byte must be zeroed. 
*/ 3141 if (offset & 0xFF000000) { 3142 status = I40E_ERR_PARAM; 3143 goto i40e_aq_read_nvm_exit; 3144 } 3145 3146 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_read); 3147 3148 /* If this is the last command in a series, set the proper flag. */ 3149 if (last_command) 3150 cmd->command_flags |= I40E_AQ_NVM_LAST_CMD; 3151 cmd->module_pointer = module_pointer; 3152 cmd->offset = cpu_to_le32(offset); 3153 cmd->length = cpu_to_le16(length); 3154 3155 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 3156 if (length > I40E_AQ_LARGE_BUF) 3157 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 3158 3159 status = i40e_asq_send_command(hw, &desc, data, length, cmd_details); 3160 3161 i40e_aq_read_nvm_exit: 3162 return status; 3163 } 3164 3165 /** 3166 * i40e_aq_erase_nvm 3167 * @hw: pointer to the hw struct 3168 * @module_pointer: module pointer location in words from the NVM beginning 3169 * @offset: offset in the module (expressed in 4 KB from module's beginning) 3170 * @length: length of the section to be erased (expressed in 4 KB) 3171 * @last_command: tells if this is the last command in a series 3172 * @cmd_details: pointer to command details structure or NULL 3173 * 3174 * Erase the NVM sector using the admin queue commands 3175 **/ 3176 i40e_status i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer, 3177 u32 offset, u16 length, bool last_command, 3178 struct i40e_asq_cmd_details *cmd_details) 3179 { 3180 struct i40e_aq_desc desc; 3181 struct i40e_aqc_nvm_update *cmd = 3182 (struct i40e_aqc_nvm_update *)&desc.params.raw; 3183 i40e_status status; 3184 3185 /* In offset the highest byte must be zeroed. */ 3186 if (offset & 0xFF000000) { 3187 status = I40E_ERR_PARAM; 3188 goto i40e_aq_erase_nvm_exit; 3189 } 3190 3191 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_erase); 3192 3193 /* If this is the last command in a series, set the proper flag. */ 3194 if (last_command) 3195 cmd->command_flags |= I40E_AQ_NVM_LAST_CMD; 3196 cmd->module_pointer = module_pointer; 3197 cmd->offset = cpu_to_le32(offset); 3198 cmd->length = cpu_to_le16(length); 3199 3200 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3201 3202 i40e_aq_erase_nvm_exit: 3203 return status; 3204 } 3205 3206 /** 3207 * i40e_parse_discover_capabilities 3208 * @hw: pointer to the hw struct 3209 * @buff: pointer to a buffer containing device/function capability records 3210 * @cap_count: number of capability records in the list 3211 * @list_type_opc: type of capabilities list to parse 3212 * 3213 * Parse the device/function capabilities list. 
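 *
 * This parser is driven by i40e_aq_discover_capabilities() below; a minimal
 * sketch of that flow (buffer size is an assumption):
 *
 *	u16 needed = 0;
 *	void *buf = kzalloc(4096, GFP_KERNEL);
 *
 *	if (buf && !i40e_aq_discover_capabilities(hw, buf, 4096, &needed,
 *			i40e_aqc_opc_list_func_capabilities, NULL))
 *		; /* hw->func_caps has been filled in by this parser */
 *	kfree(buf);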
3214 **/ 3215 static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff, 3216 u32 cap_count, 3217 enum i40e_admin_queue_opc list_type_opc) 3218 { 3219 struct i40e_aqc_list_capabilities_element_resp *cap; 3220 u32 valid_functions, num_functions; 3221 u32 number, logical_id, phys_id; 3222 struct i40e_hw_capabilities *p; 3223 u16 id, ocp_cfg_word0; 3224 i40e_status status; 3225 u8 major_rev; 3226 u32 i = 0; 3227 3228 cap = (struct i40e_aqc_list_capabilities_element_resp *) buff; 3229 3230 if (list_type_opc == i40e_aqc_opc_list_dev_capabilities) 3231 p = &hw->dev_caps; 3232 else if (list_type_opc == i40e_aqc_opc_list_func_capabilities) 3233 p = &hw->func_caps; 3234 else 3235 return; 3236 3237 for (i = 0; i < cap_count; i++, cap++) { 3238 id = le16_to_cpu(cap->id); 3239 number = le32_to_cpu(cap->number); 3240 logical_id = le32_to_cpu(cap->logical_id); 3241 phys_id = le32_to_cpu(cap->phys_id); 3242 major_rev = cap->major_rev; 3243 3244 switch (id) { 3245 case I40E_AQ_CAP_ID_SWITCH_MODE: 3246 p->switch_mode = number; 3247 break; 3248 case I40E_AQ_CAP_ID_MNG_MODE: 3249 p->management_mode = number; 3250 if (major_rev > 1) { 3251 p->mng_protocols_over_mctp = logical_id; 3252 i40e_debug(hw, I40E_DEBUG_INIT, 3253 "HW Capability: Protocols over MCTP = %d\n", 3254 p->mng_protocols_over_mctp); 3255 } else { 3256 p->mng_protocols_over_mctp = 0; 3257 } 3258 break; 3259 case I40E_AQ_CAP_ID_NPAR_ACTIVE: 3260 p->npar_enable = number; 3261 break; 3262 case I40E_AQ_CAP_ID_OS2BMC_CAP: 3263 p->os2bmc = number; 3264 break; 3265 case I40E_AQ_CAP_ID_FUNCTIONS_VALID: 3266 p->valid_functions = number; 3267 break; 3268 case I40E_AQ_CAP_ID_SRIOV: 3269 if (number == 1) 3270 p->sr_iov_1_1 = true; 3271 break; 3272 case I40E_AQ_CAP_ID_VF: 3273 p->num_vfs = number; 3274 p->vf_base_id = logical_id; 3275 break; 3276 case I40E_AQ_CAP_ID_VMDQ: 3277 if (number == 1) 3278 p->vmdq = true; 3279 break; 3280 case I40E_AQ_CAP_ID_8021QBG: 3281 if (number == 1) 3282 p->evb_802_1_qbg = true; 3283 break; 3284 case I40E_AQ_CAP_ID_8021QBR: 3285 if (number == 1) 3286 p->evb_802_1_qbh = true; 3287 break; 3288 case I40E_AQ_CAP_ID_VSI: 3289 p->num_vsis = number; 3290 break; 3291 case I40E_AQ_CAP_ID_DCB: 3292 if (number == 1) { 3293 p->dcb = true; 3294 p->enabled_tcmap = logical_id; 3295 p->maxtc = phys_id; 3296 } 3297 break; 3298 case I40E_AQ_CAP_ID_FCOE: 3299 if (number == 1) 3300 p->fcoe = true; 3301 break; 3302 case I40E_AQ_CAP_ID_ISCSI: 3303 if (number == 1) 3304 p->iscsi = true; 3305 break; 3306 case I40E_AQ_CAP_ID_RSS: 3307 p->rss = true; 3308 p->rss_table_size = number; 3309 p->rss_table_entry_width = logical_id; 3310 break; 3311 case I40E_AQ_CAP_ID_RXQ: 3312 p->num_rx_qp = number; 3313 p->base_queue = phys_id; 3314 break; 3315 case I40E_AQ_CAP_ID_TXQ: 3316 p->num_tx_qp = number; 3317 p->base_queue = phys_id; 3318 break; 3319 case I40E_AQ_CAP_ID_MSIX: 3320 p->num_msix_vectors = number; 3321 i40e_debug(hw, I40E_DEBUG_INIT, 3322 "HW Capability: MSIX vector count = %d\n", 3323 p->num_msix_vectors); 3324 break; 3325 case I40E_AQ_CAP_ID_VF_MSIX: 3326 p->num_msix_vectors_vf = number; 3327 break; 3328 case I40E_AQ_CAP_ID_FLEX10: 3329 if (major_rev == 1) { 3330 if (number == 1) { 3331 p->flex10_enable = true; 3332 p->flex10_capable = true; 3333 } 3334 } else { 3335 /* Capability revision >= 2 */ 3336 if (number & 1) 3337 p->flex10_enable = true; 3338 if (number & 2) 3339 p->flex10_capable = true; 3340 } 3341 p->flex10_mode = logical_id; 3342 p->flex10_status = phys_id; 3343 break; 3344 case I40E_AQ_CAP_ID_CEM: 3345 if (number == 
1) 3346 p->mgmt_cem = true; 3347 break; 3348 case I40E_AQ_CAP_ID_IWARP: 3349 if (number == 1) 3350 p->iwarp = true; 3351 break; 3352 case I40E_AQ_CAP_ID_LED: 3353 if (phys_id < I40E_HW_CAP_MAX_GPIO) 3354 p->led[phys_id] = true; 3355 break; 3356 case I40E_AQ_CAP_ID_SDP: 3357 if (phys_id < I40E_HW_CAP_MAX_GPIO) 3358 p->sdp[phys_id] = true; 3359 break; 3360 case I40E_AQ_CAP_ID_MDIO: 3361 if (number == 1) { 3362 p->mdio_port_num = phys_id; 3363 p->mdio_port_mode = logical_id; 3364 } 3365 break; 3366 case I40E_AQ_CAP_ID_1588: 3367 if (number == 1) 3368 p->ieee_1588 = true; 3369 break; 3370 case I40E_AQ_CAP_ID_FLOW_DIRECTOR: 3371 p->fd = true; 3372 p->fd_filters_guaranteed = number; 3373 p->fd_filters_best_effort = logical_id; 3374 break; 3375 case I40E_AQ_CAP_ID_WSR_PROT: 3376 p->wr_csr_prot = (u64)number; 3377 p->wr_csr_prot |= (u64)logical_id << 32; 3378 break; 3379 case I40E_AQ_CAP_ID_NVM_MGMT: 3380 if (number & I40E_NVM_MGMT_SEC_REV_DISABLED) 3381 p->sec_rev_disabled = true; 3382 if (number & I40E_NVM_MGMT_UPDATE_DISABLED) 3383 p->update_disabled = true; 3384 break; 3385 default: 3386 break; 3387 } 3388 } 3389 3390 if (p->fcoe) 3391 i40e_debug(hw, I40E_DEBUG_ALL, "device is FCoE capable\n"); 3392 3393 /* Software override ensuring FCoE is disabled if npar or mfp 3394 * mode because it is not supported in these modes. 3395 */ 3396 if (p->npar_enable || p->flex10_enable) 3397 p->fcoe = false; 3398 3399 /* count the enabled ports (aka the "not disabled" ports) */ 3400 hw->num_ports = 0; 3401 for (i = 0; i < 4; i++) { 3402 u32 port_cfg_reg = I40E_PRTGEN_CNF + (4 * i); 3403 u64 port_cfg = 0; 3404 3405 /* use AQ read to get the physical register offset instead 3406 * of the port relative offset 3407 */ 3408 i40e_aq_debug_read_register(hw, port_cfg_reg, &port_cfg, NULL); 3409 if (!(port_cfg & I40E_PRTGEN_CNF_PORT_DIS_MASK)) 3410 hw->num_ports++; 3411 } 3412 3413 /* OCP cards case: if a mezz is removed the Ethernet port is at 3414 * disabled state in PRTGEN_CNF register. Additional NVM read is 3415 * needed in order to check if we are dealing with OCP card. 3416 * Those cards have 4 PFs at minimum, so using PRTGEN_CNF for counting 3417 * physical ports results in wrong partition id calculation and thus 3418 * not supporting WoL. 
3419 */ 3420 if (hw->mac.type == I40E_MAC_X722) { 3421 if (!i40e_acquire_nvm(hw, I40E_RESOURCE_READ)) { 3422 status = i40e_aq_read_nvm(hw, I40E_SR_EMP_MODULE_PTR, 3423 2 * I40E_SR_OCP_CFG_WORD0, 3424 sizeof(ocp_cfg_word0), 3425 &ocp_cfg_word0, true, NULL); 3426 if (!status && 3427 (ocp_cfg_word0 & I40E_SR_OCP_ENABLED)) 3428 hw->num_ports = 4; 3429 i40e_release_nvm(hw); 3430 } 3431 } 3432 3433 valid_functions = p->valid_functions; 3434 num_functions = 0; 3435 while (valid_functions) { 3436 if (valid_functions & 1) 3437 num_functions++; 3438 valid_functions >>= 1; 3439 } 3440 3441 /* partition id is 1-based, and functions are evenly spread 3442 * across the ports as partitions 3443 */ 3444 if (hw->num_ports != 0) { 3445 hw->partition_id = (hw->pf_id / hw->num_ports) + 1; 3446 hw->num_partitions = num_functions / hw->num_ports; 3447 } 3448 3449 /* additional HW specific goodies that might 3450 * someday be HW version specific 3451 */ 3452 p->rx_buf_chain_len = I40E_MAX_CHAINED_RX_BUFFERS; 3453 } 3454 3455 /** 3456 * i40e_aq_discover_capabilities 3457 * @hw: pointer to the hw struct 3458 * @buff: a virtual buffer to hold the capabilities 3459 * @buff_size: Size of the virtual buffer 3460 * @data_size: Size of the returned data, or buff size needed if AQ err==ENOMEM 3461 * @list_type_opc: capabilities type to discover - pass in the command opcode 3462 * @cmd_details: pointer to command details structure or NULL 3463 * 3464 * Get the device capabilities descriptions from the firmware 3465 **/ 3466 i40e_status i40e_aq_discover_capabilities(struct i40e_hw *hw, 3467 void *buff, u16 buff_size, u16 *data_size, 3468 enum i40e_admin_queue_opc list_type_opc, 3469 struct i40e_asq_cmd_details *cmd_details) 3470 { 3471 struct i40e_aqc_list_capabilites *cmd; 3472 struct i40e_aq_desc desc; 3473 i40e_status status = 0; 3474 3475 cmd = (struct i40e_aqc_list_capabilites *)&desc.params.raw; 3476 3477 if (list_type_opc != i40e_aqc_opc_list_func_capabilities && 3478 list_type_opc != i40e_aqc_opc_list_dev_capabilities) { 3479 status = I40E_ERR_PARAM; 3480 goto exit; 3481 } 3482 3483 i40e_fill_default_direct_cmd_desc(&desc, list_type_opc); 3484 3485 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 3486 if (buff_size > I40E_AQ_LARGE_BUF) 3487 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 3488 3489 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); 3490 *data_size = le16_to_cpu(desc.datalen); 3491 3492 if (status) 3493 goto exit; 3494 3495 i40e_parse_discover_capabilities(hw, buff, le32_to_cpu(cmd->count), 3496 list_type_opc); 3497 3498 exit: 3499 return status; 3500 } 3501 3502 /** 3503 * i40e_aq_update_nvm 3504 * @hw: pointer to the hw struct 3505 * @module_pointer: module pointer location in words from the NVM beginning 3506 * @offset: byte offset from the module beginning 3507 * @length: length of the section to be written (in bytes from the offset) 3508 * @data: command buffer (size [bytes] = length) 3509 * @last_command: tells if this is the last command in a series 3510 * @preservation_flags: Preservation mode flags 3511 * @cmd_details: pointer to command details structure or NULL 3512 * 3513 * Update the NVM using the admin queue commands 3514 **/ 3515 i40e_status i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer, 3516 u32 offset, u16 length, void *data, 3517 bool last_command, u8 preservation_flags, 3518 struct i40e_asq_cmd_details *cmd_details) 3519 { 3520 struct i40e_aq_desc desc; 3521 struct i40e_aqc_nvm_update *cmd = 3522 (struct i40e_aqc_nvm_update *)&desc.params.raw; 
3523 i40e_status status; 3524 3525 /* In offset the highest byte must be zeroed. */ 3526 if (offset & 0xFF000000) { 3527 status = I40E_ERR_PARAM; 3528 goto i40e_aq_update_nvm_exit; 3529 } 3530 3531 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_update); 3532 3533 /* If this is the last command in a series, set the proper flag. */ 3534 if (last_command) 3535 cmd->command_flags |= I40E_AQ_NVM_LAST_CMD; 3536 if (hw->mac.type == I40E_MAC_X722) { 3537 if (preservation_flags == I40E_NVM_PRESERVATION_FLAGS_SELECTED) 3538 cmd->command_flags |= 3539 (I40E_AQ_NVM_PRESERVATION_FLAGS_SELECTED << 3540 I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT); 3541 else if (preservation_flags == I40E_NVM_PRESERVATION_FLAGS_ALL) 3542 cmd->command_flags |= 3543 (I40E_AQ_NVM_PRESERVATION_FLAGS_ALL << 3544 I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT); 3545 } 3546 cmd->module_pointer = module_pointer; 3547 cmd->offset = cpu_to_le32(offset); 3548 cmd->length = cpu_to_le16(length); 3549 3550 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); 3551 if (length > I40E_AQ_LARGE_BUF) 3552 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 3553 3554 status = i40e_asq_send_command(hw, &desc, data, length, cmd_details); 3555 3556 i40e_aq_update_nvm_exit: 3557 return status; 3558 } 3559 3560 /** 3561 * i40e_aq_rearrange_nvm 3562 * @hw: pointer to the hw struct 3563 * @rearrange_nvm: defines direction of rearrangement 3564 * @cmd_details: pointer to command details structure or NULL 3565 * 3566 * Rearrange NVM structure, available only for transition FW 3567 **/ 3568 i40e_status i40e_aq_rearrange_nvm(struct i40e_hw *hw, 3569 u8 rearrange_nvm, 3570 struct i40e_asq_cmd_details *cmd_details) 3571 { 3572 struct i40e_aqc_nvm_update *cmd; 3573 i40e_status status; 3574 struct i40e_aq_desc desc; 3575 3576 cmd = (struct i40e_aqc_nvm_update *)&desc.params.raw; 3577 3578 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_update); 3579 3580 rearrange_nvm &= (I40E_AQ_NVM_REARRANGE_TO_FLAT | 3581 I40E_AQ_NVM_REARRANGE_TO_STRUCT); 3582 3583 if (!rearrange_nvm) { 3584 status = I40E_ERR_PARAM; 3585 goto i40e_aq_rearrange_nvm_exit; 3586 } 3587 3588 cmd->command_flags |= rearrange_nvm; 3589 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3590 3591 i40e_aq_rearrange_nvm_exit: 3592 return status; 3593 } 3594 3595 /** 3596 * i40e_aq_get_lldp_mib 3597 * @hw: pointer to the hw struct 3598 * @bridge_type: type of bridge requested 3599 * @mib_type: Local, Remote or both Local and Remote MIBs 3600 * @buff: pointer to a user supplied buffer to store the MIB block 3601 * @buff_size: size of the buffer (in bytes) 3602 * @local_len : length of the returned Local LLDP MIB 3603 * @remote_len: length of the returned Remote LLDP MIB 3604 * @cmd_details: pointer to command details structure or NULL 3605 * 3606 * Requests the complete LLDP MIB (entire packet). 
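 *
 * Illustrative call sequence only (not a caller from this file); the
 * buffer size below assumes I40E_LLDPDU_SIZE is a suitable MIB buffer
 * length:
 *
 *     u8 mib[I40E_LLDPDU_SIZE];
 *     u16 local_len = 0, remote_len = 0;
 *     i40e_status ret;
 *
 *     ret = i40e_aq_get_lldp_mib(hw, I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
 *                                I40E_AQ_LLDP_MIB_LOCAL, mib, sizeof(mib),
 *                                &local_len, &remote_len, NULL);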
3607 **/ 3608 i40e_status i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type, 3609 u8 mib_type, void *buff, u16 buff_size, 3610 u16 *local_len, u16 *remote_len, 3611 struct i40e_asq_cmd_details *cmd_details) 3612 { 3613 struct i40e_aq_desc desc; 3614 struct i40e_aqc_lldp_get_mib *cmd = 3615 (struct i40e_aqc_lldp_get_mib *)&desc.params.raw; 3616 struct i40e_aqc_lldp_get_mib *resp = 3617 (struct i40e_aqc_lldp_get_mib *)&desc.params.raw; 3618 i40e_status status; 3619 3620 if (buff_size == 0 || !buff) 3621 return I40E_ERR_PARAM; 3622 3623 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_get_mib); 3624 /* Indirect Command */ 3625 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 3626 3627 cmd->type = mib_type & I40E_AQ_LLDP_MIB_TYPE_MASK; 3628 cmd->type |= ((bridge_type << I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) & 3629 I40E_AQ_LLDP_BRIDGE_TYPE_MASK); 3630 3631 desc.datalen = cpu_to_le16(buff_size); 3632 3633 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 3634 if (buff_size > I40E_AQ_LARGE_BUF) 3635 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 3636 3637 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); 3638 if (!status) { 3639 if (local_len != NULL) 3640 *local_len = le16_to_cpu(resp->local_len); 3641 if (remote_len != NULL) 3642 *remote_len = le16_to_cpu(resp->remote_len); 3643 } 3644 3645 return status; 3646 } 3647 3648 /** 3649 * i40e_aq_cfg_lldp_mib_change_event 3650 * @hw: pointer to the hw struct 3651 * @enable_update: Enable or Disable event posting 3652 * @cmd_details: pointer to command details structure or NULL 3653 * 3654 * Enable or Disable posting of an event on ARQ when LLDP MIB 3655 * associated with the interface changes 3656 **/ 3657 i40e_status i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw, 3658 bool enable_update, 3659 struct i40e_asq_cmd_details *cmd_details) 3660 { 3661 struct i40e_aq_desc desc; 3662 struct i40e_aqc_lldp_update_mib *cmd = 3663 (struct i40e_aqc_lldp_update_mib *)&desc.params.raw; 3664 i40e_status status; 3665 3666 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_update_mib); 3667 3668 if (!enable_update) 3669 cmd->command |= I40E_AQ_LLDP_MIB_UPDATE_DISABLE; 3670 3671 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3672 3673 return status; 3674 } 3675 3676 /** 3677 * i40e_aq_restore_lldp 3678 * @hw: pointer to the hw struct 3679 * @setting: pointer to factory setting variable or NULL 3680 * @restore: True if factory settings should be restored 3681 * @cmd_details: pointer to command details structure or NULL 3682 * 3683 * Restore LLDP Agent factory settings if @restore set to True. In other case 3684 * only returns factory setting in AQ response. 
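 *
 * Usage sketch (illustrative only; the local variable is hypothetical):
 *
 *     u8 factory_setting = 0;
 *
 *     to query the factory default without touching the agent:
 *     ret = i40e_aq_restore_lldp(hw, &factory_setting, false, NULL);
 *     or to restore the factory default and read it back:
 *     ret = i40e_aq_restore_lldp(hw, &factory_setting, true, NULL);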
3685 **/ 3686 enum i40e_status_code 3687 i40e_aq_restore_lldp(struct i40e_hw *hw, u8 *setting, bool restore, 3688 struct i40e_asq_cmd_details *cmd_details) 3689 { 3690 struct i40e_aq_desc desc; 3691 struct i40e_aqc_lldp_restore *cmd = 3692 (struct i40e_aqc_lldp_restore *)&desc.params.raw; 3693 i40e_status status; 3694 3695 if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT)) { 3696 i40e_debug(hw, I40E_DEBUG_ALL, 3697 "Restore LLDP not supported by current FW version.\n"); 3698 return I40E_ERR_DEVICE_NOT_SUPPORTED; 3699 } 3700 3701 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_restore); 3702 3703 if (restore) 3704 cmd->command |= I40E_AQ_LLDP_AGENT_RESTORE; 3705 3706 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3707 3708 if (setting) 3709 *setting = cmd->command & 1; 3710 3711 return status; 3712 } 3713 3714 /** 3715 * i40e_aq_stop_lldp 3716 * @hw: pointer to the hw struct 3717 * @shutdown_agent: True if LLDP Agent needs to be Shutdown 3718 * @persist: True if stop of LLDP should be persistent across power cycles 3719 * @cmd_details: pointer to command details structure or NULL 3720 * 3721 * Stop or Shutdown the embedded LLDP Agent 3722 **/ 3723 i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent, 3724 bool persist, 3725 struct i40e_asq_cmd_details *cmd_details) 3726 { 3727 struct i40e_aq_desc desc; 3728 struct i40e_aqc_lldp_stop *cmd = 3729 (struct i40e_aqc_lldp_stop *)&desc.params.raw; 3730 i40e_status status; 3731 3732 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_stop); 3733 3734 if (shutdown_agent) 3735 cmd->command |= I40E_AQ_LLDP_AGENT_SHUTDOWN; 3736 3737 if (persist) { 3738 if (hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT) 3739 cmd->command |= I40E_AQ_LLDP_AGENT_STOP_PERSIST; 3740 else 3741 i40e_debug(hw, I40E_DEBUG_ALL, 3742 "Persistent Stop LLDP not supported by current FW version.\n"); 3743 } 3744 3745 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3746 3747 return status; 3748 } 3749 3750 /** 3751 * i40e_aq_start_lldp 3752 * @hw: pointer to the hw struct 3754 * @persist: True if start of LLDP should be persistent across power cycles 3756 * @cmd_details: pointer to command details structure or NULL 3757 * 3758 * Start the embedded LLDP Agent on all ports.
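 *
 * Typical pairing with i40e_aq_stop_lldp() looks roughly like this
 * (sketch only; whether to request persistence depends on FW support):
 *
 *     ret = i40e_aq_stop_lldp(hw, true, false, NULL);
 *     ... reconfigure DCB locally ...
 *     ret = i40e_aq_start_lldp(hw, false, NULL);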
3759 **/ 3760 i40e_status i40e_aq_start_lldp(struct i40e_hw *hw, bool persist, 3761 struct i40e_asq_cmd_details *cmd_details) 3762 { 3763 struct i40e_aq_desc desc; 3764 struct i40e_aqc_lldp_start *cmd = 3765 (struct i40e_aqc_lldp_start *)&desc.params.raw; 3766 i40e_status status; 3767 3768 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_start); 3769 3770 cmd->command = I40E_AQ_LLDP_AGENT_START; 3771 3772 if (persist) { 3773 if (hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT) 3774 cmd->command |= I40E_AQ_LLDP_AGENT_START_PERSIST; 3775 else 3776 i40e_debug(hw, I40E_DEBUG_ALL, 3777 "Persistent Start LLDP not supported by current FW version.\n"); 3778 } 3779 3780 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3781 3782 return status; 3783 } 3784 3785 /** 3786 * i40e_aq_set_dcb_parameters 3787 * @hw: pointer to the hw struct 3788 * @cmd_details: pointer to command details structure or NULL 3789 * @dcb_enable: True if DCB configuration needs to be applied 3790 * 3791 **/ 3792 enum i40e_status_code 3793 i40e_aq_set_dcb_parameters(struct i40e_hw *hw, bool dcb_enable, 3794 struct i40e_asq_cmd_details *cmd_details) 3795 { 3796 struct i40e_aq_desc desc; 3797 struct i40e_aqc_set_dcb_parameters *cmd = 3798 (struct i40e_aqc_set_dcb_parameters *)&desc.params.raw; 3799 i40e_status status; 3800 3801 if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE)) 3802 return I40E_ERR_DEVICE_NOT_SUPPORTED; 3803 3804 i40e_fill_default_direct_cmd_desc(&desc, 3805 i40e_aqc_opc_set_dcb_parameters); 3806 3807 if (dcb_enable) { 3808 cmd->valid_flags = I40E_DCB_VALID; 3809 cmd->command = I40E_AQ_DCB_SET_AGENT; 3810 } 3811 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3812 3813 return status; 3814 } 3815 3816 /** 3817 * i40e_aq_get_cee_dcb_config 3818 * @hw: pointer to the hw struct 3819 * @buff: response buffer that stores CEE operational configuration 3820 * @buff_size: size of the buffer passed 3821 * @cmd_details: pointer to command details structure or NULL 3822 * 3823 * Get CEE DCBX mode operational configuration from firmware 3824 **/ 3825 i40e_status i40e_aq_get_cee_dcb_config(struct i40e_hw *hw, 3826 void *buff, u16 buff_size, 3827 struct i40e_asq_cmd_details *cmd_details) 3828 { 3829 struct i40e_aq_desc desc; 3830 i40e_status status; 3831 3832 if (buff_size == 0 || !buff) 3833 return I40E_ERR_PARAM; 3834 3835 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_cee_dcb_cfg); 3836 3837 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 3838 status = i40e_asq_send_command(hw, &desc, (void *)buff, buff_size, 3839 cmd_details); 3840 3841 return status; 3842 } 3843 3844 /** 3845 * i40e_aq_add_udp_tunnel 3846 * @hw: pointer to the hw struct 3847 * @udp_port: the UDP port to add in Host byte order 3848 * @protocol_index: protocol index type 3849 * @filter_index: pointer to filter index 3850 * @cmd_details: pointer to command details structure or NULL 3851 * 3852 * Note: Firmware expects the udp_port value to be in Little Endian format, 3853 * and this function will call cpu_to_le16 to convert from Host byte order to 3854 * Little Endian order. 
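 *
 * Example sketch (the port number and cleanup path are illustrative):
 *
 *     u8 filter_index;
 *
 *     ret = i40e_aq_add_udp_tunnel(hw, 4789, I40E_AQC_TUNNEL_TYPE_VXLAN,
 *                                  &filter_index, NULL);
 *     and later, to remove the filter again:
 *     ret = i40e_aq_del_udp_tunnel(hw, filter_index, NULL);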
3855 **/ 3856 i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw, 3857 u16 udp_port, u8 protocol_index, 3858 u8 *filter_index, 3859 struct i40e_asq_cmd_details *cmd_details) 3860 { 3861 struct i40e_aq_desc desc; 3862 struct i40e_aqc_add_udp_tunnel *cmd = 3863 (struct i40e_aqc_add_udp_tunnel *)&desc.params.raw; 3864 struct i40e_aqc_del_udp_tunnel_completion *resp = 3865 (struct i40e_aqc_del_udp_tunnel_completion *)&desc.params.raw; 3866 i40e_status status; 3867 3868 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_udp_tunnel); 3869 3870 cmd->udp_port = cpu_to_le16(udp_port); 3871 cmd->protocol_type = protocol_index; 3872 3873 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3874 3875 if (!status && filter_index) 3876 *filter_index = resp->index; 3877 3878 return status; 3879 } 3880 3881 /** 3882 * i40e_aq_del_udp_tunnel 3883 * @hw: pointer to the hw struct 3884 * @index: filter index 3885 * @cmd_details: pointer to command details structure or NULL 3886 **/ 3887 i40e_status i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index, 3888 struct i40e_asq_cmd_details *cmd_details) 3889 { 3890 struct i40e_aq_desc desc; 3891 struct i40e_aqc_remove_udp_tunnel *cmd = 3892 (struct i40e_aqc_remove_udp_tunnel *)&desc.params.raw; 3893 i40e_status status; 3894 3895 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_del_udp_tunnel); 3896 3897 cmd->index = index; 3898 3899 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3900 3901 return status; 3902 } 3903 3904 /** 3905 * i40e_aq_delete_element - Delete switch element 3906 * @hw: pointer to the hw struct 3907 * @seid: the SEID to delete from the switch 3908 * @cmd_details: pointer to command details structure or NULL 3909 * 3910 * This deletes a switch element from the switch. 3911 **/ 3912 i40e_status i40e_aq_delete_element(struct i40e_hw *hw, u16 seid, 3913 struct i40e_asq_cmd_details *cmd_details) 3914 { 3915 struct i40e_aq_desc desc; 3916 struct i40e_aqc_switch_seid *cmd = 3917 (struct i40e_aqc_switch_seid *)&desc.params.raw; 3918 i40e_status status; 3919 3920 if (seid == 0) 3921 return I40E_ERR_PARAM; 3922 3923 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_delete_element); 3924 3925 cmd->seid = cpu_to_le16(seid); 3926 3927 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3928 3929 return status; 3930 } 3931 3932 /** 3933 * i40e_aq_dcb_updated - DCB Updated Command 3934 * @hw: pointer to the hw struct 3935 * @cmd_details: pointer to command details structure or NULL 3936 * 3937 * EMP will return when the shared RPB settings have been 3938 * recomputed and modified. The retval field in the descriptor 3939 * will be set to 0 when RPB is modified. 
3940 **/ 3941 i40e_status i40e_aq_dcb_updated(struct i40e_hw *hw, 3942 struct i40e_asq_cmd_details *cmd_details) 3943 { 3944 struct i40e_aq_desc desc; 3945 i40e_status status; 3946 3947 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_dcb_updated); 3948 3949 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3950 3951 return status; 3952 } 3953 3954 /** 3955 * i40e_aq_tx_sched_cmd - generic Tx scheduler AQ command handler 3956 * @hw: pointer to the hw struct 3957 * @seid: seid for the physical port/switching component/vsi 3958 * @buff: Indirect buffer to hold data parameters and response 3959 * @buff_size: Indirect buffer size 3960 * @opcode: Tx scheduler AQ command opcode 3961 * @cmd_details: pointer to command details structure or NULL 3962 * 3963 * Generic command handler for Tx scheduler AQ commands 3964 **/ 3965 static i40e_status i40e_aq_tx_sched_cmd(struct i40e_hw *hw, u16 seid, 3966 void *buff, u16 buff_size, 3967 enum i40e_admin_queue_opc opcode, 3968 struct i40e_asq_cmd_details *cmd_details) 3969 { 3970 struct i40e_aq_desc desc; 3971 struct i40e_aqc_tx_sched_ind *cmd = 3972 (struct i40e_aqc_tx_sched_ind *)&desc.params.raw; 3973 i40e_status status; 3974 bool cmd_param_flag = false; 3975 3976 switch (opcode) { 3977 case i40e_aqc_opc_configure_vsi_ets_sla_bw_limit: 3978 case i40e_aqc_opc_configure_vsi_tc_bw: 3979 case i40e_aqc_opc_enable_switching_comp_ets: 3980 case i40e_aqc_opc_modify_switching_comp_ets: 3981 case i40e_aqc_opc_disable_switching_comp_ets: 3982 case i40e_aqc_opc_configure_switching_comp_ets_bw_limit: 3983 case i40e_aqc_opc_configure_switching_comp_bw_config: 3984 cmd_param_flag = true; 3985 break; 3986 case i40e_aqc_opc_query_vsi_bw_config: 3987 case i40e_aqc_opc_query_vsi_ets_sla_config: 3988 case i40e_aqc_opc_query_switching_comp_ets_config: 3989 case i40e_aqc_opc_query_port_ets_config: 3990 case i40e_aqc_opc_query_switching_comp_bw_config: 3991 cmd_param_flag = false; 3992 break; 3993 default: 3994 return I40E_ERR_PARAM; 3995 } 3996 3997 i40e_fill_default_direct_cmd_desc(&desc, opcode); 3998 3999 /* Indirect command */ 4000 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 4001 if (cmd_param_flag) 4002 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD); 4003 if (buff_size > I40E_AQ_LARGE_BUF) 4004 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 4005 4006 desc.datalen = cpu_to_le16(buff_size); 4007 4008 cmd->vsi_seid = cpu_to_le16(seid); 4009 4010 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); 4011 4012 return status; 4013 } 4014 4015 /** 4016 * i40e_aq_config_vsi_bw_limit - Configure VSI BW Limit 4017 * @hw: pointer to the hw struct 4018 * @seid: VSI seid 4019 * @credit: BW limit credits (0 = disabled) 4020 * @max_credit: Max BW limit credits 4021 * @cmd_details: pointer to command details structure or NULL 4022 **/ 4023 i40e_status i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw, 4024 u16 seid, u16 credit, u8 max_credit, 4025 struct i40e_asq_cmd_details *cmd_details) 4026 { 4027 struct i40e_aq_desc desc; 4028 struct i40e_aqc_configure_vsi_bw_limit *cmd = 4029 (struct i40e_aqc_configure_vsi_bw_limit *)&desc.params.raw; 4030 i40e_status status; 4031 4032 i40e_fill_default_direct_cmd_desc(&desc, 4033 i40e_aqc_opc_configure_vsi_bw_limit); 4034 4035 cmd->vsi_seid = cpu_to_le16(seid); 4036 cmd->credit = cpu_to_le16(credit); 4037 cmd->max_credit = max_credit; 4038 4039 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 4040 4041 return status; 4042 } 4043 4044 /** 4045 * i40e_aq_config_vsi_tc_bw - Config VSI BW 
Allocation per TC 4046 * @hw: pointer to the hw struct 4047 * @seid: VSI seid 4048 * @bw_data: Buffer holding enabled TCs, relative TC BW limit/credits 4049 * @cmd_details: pointer to command details structure or NULL 4050 **/ 4051 i40e_status i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw, 4052 u16 seid, 4053 struct i40e_aqc_configure_vsi_tc_bw_data *bw_data, 4054 struct i40e_asq_cmd_details *cmd_details) 4055 { 4056 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), 4057 i40e_aqc_opc_configure_vsi_tc_bw, 4058 cmd_details); 4059 } 4060 4061 /** 4062 * i40e_aq_config_switch_comp_ets - Enable/Disable/Modify ETS on the port 4063 * @hw: pointer to the hw struct 4064 * @seid: seid of the switching component connected to Physical Port 4065 * @ets_data: Buffer holding ETS parameters 4066 * @opcode: Tx scheduler AQ command opcode 4067 * @cmd_details: pointer to command details structure or NULL 4068 **/ 4069 i40e_status i40e_aq_config_switch_comp_ets(struct i40e_hw *hw, 4070 u16 seid, 4071 struct i40e_aqc_configure_switching_comp_ets_data *ets_data, 4072 enum i40e_admin_queue_opc opcode, 4073 struct i40e_asq_cmd_details *cmd_details) 4074 { 4075 return i40e_aq_tx_sched_cmd(hw, seid, (void *)ets_data, 4076 sizeof(*ets_data), opcode, cmd_details); 4077 } 4078 4079 /** 4080 * i40e_aq_config_switch_comp_bw_config - Config Switch comp BW Alloc per TC 4081 * @hw: pointer to the hw struct 4082 * @seid: seid of the switching component 4083 * @bw_data: Buffer holding enabled TCs, relative/absolute TC BW limit/credits 4084 * @cmd_details: pointer to command details structure or NULL 4085 **/ 4086 i40e_status i40e_aq_config_switch_comp_bw_config(struct i40e_hw *hw, 4087 u16 seid, 4088 struct i40e_aqc_configure_switching_comp_bw_config_data *bw_data, 4089 struct i40e_asq_cmd_details *cmd_details) 4090 { 4091 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), 4092 i40e_aqc_opc_configure_switching_comp_bw_config, 4093 cmd_details); 4094 } 4095 4096 /** 4097 * i40e_aq_query_vsi_bw_config - Query VSI BW configuration 4098 * @hw: pointer to the hw struct 4099 * @seid: seid of the VSI 4100 * @bw_data: Buffer to hold VSI BW configuration 4101 * @cmd_details: pointer to command details structure or NULL 4102 **/ 4103 i40e_status i40e_aq_query_vsi_bw_config(struct i40e_hw *hw, 4104 u16 seid, 4105 struct i40e_aqc_query_vsi_bw_config_resp *bw_data, 4106 struct i40e_asq_cmd_details *cmd_details) 4107 { 4108 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), 4109 i40e_aqc_opc_query_vsi_bw_config, 4110 cmd_details); 4111 } 4112 4113 /** 4114 * i40e_aq_query_vsi_ets_sla_config - Query VSI BW configuration per TC 4115 * @hw: pointer to the hw struct 4116 * @seid: seid of the VSI 4117 * @bw_data: Buffer to hold VSI BW configuration per TC 4118 * @cmd_details: pointer to command details structure or NULL 4119 **/ 4120 i40e_status i40e_aq_query_vsi_ets_sla_config(struct i40e_hw *hw, 4121 u16 seid, 4122 struct i40e_aqc_query_vsi_ets_sla_config_resp *bw_data, 4123 struct i40e_asq_cmd_details *cmd_details) 4124 { 4125 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), 4126 i40e_aqc_opc_query_vsi_ets_sla_config, 4127 cmd_details); 4128 } 4129 4130 /** 4131 * i40e_aq_query_switch_comp_ets_config - Query Switch comp BW config per TC 4132 * @hw: pointer to the hw struct 4133 * @seid: seid of the switching component 4134 * @bw_data: Buffer to hold switching component's per TC BW config 4135 * @cmd_details: pointer to command details structure or NULL 
4136 **/ 4137 i40e_status i40e_aq_query_switch_comp_ets_config(struct i40e_hw *hw, 4138 u16 seid, 4139 struct i40e_aqc_query_switching_comp_ets_config_resp *bw_data, 4140 struct i40e_asq_cmd_details *cmd_details) 4141 { 4142 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), 4143 i40e_aqc_opc_query_switching_comp_ets_config, 4144 cmd_details); 4145 } 4146 4147 /** 4148 * i40e_aq_query_port_ets_config - Query Physical Port ETS configuration 4149 * @hw: pointer to the hw struct 4150 * @seid: seid of the VSI or switching component connected to Physical Port 4151 * @bw_data: Buffer to hold current ETS configuration for the Physical Port 4152 * @cmd_details: pointer to command details structure or NULL 4153 **/ 4154 i40e_status i40e_aq_query_port_ets_config(struct i40e_hw *hw, 4155 u16 seid, 4156 struct i40e_aqc_query_port_ets_config_resp *bw_data, 4157 struct i40e_asq_cmd_details *cmd_details) 4158 { 4159 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), 4160 i40e_aqc_opc_query_port_ets_config, 4161 cmd_details); 4162 } 4163 4164 /** 4165 * i40e_aq_query_switch_comp_bw_config - Query Switch comp BW configuration 4166 * @hw: pointer to the hw struct 4167 * @seid: seid of the switching component 4168 * @bw_data: Buffer to hold switching component's BW configuration 4169 * @cmd_details: pointer to command details structure or NULL 4170 **/ 4171 i40e_status i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw, 4172 u16 seid, 4173 struct i40e_aqc_query_switching_comp_bw_config_resp *bw_data, 4174 struct i40e_asq_cmd_details *cmd_details) 4175 { 4176 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), 4177 i40e_aqc_opc_query_switching_comp_bw_config, 4178 cmd_details); 4179 } 4180 4181 /** 4182 * i40e_validate_filter_settings 4183 * @hw: pointer to the hardware structure 4184 * @settings: Filter control settings 4185 * 4186 * Check and validate the filter control settings passed. 4187 * The function checks for the valid filter/context sizes being 4188 * passed for FCoE and PE. 4189 * 4190 * Returns 0 if the values passed are valid and within 4191 * range else returns an error. 
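 *
 * The *_filt_num and *_cntx_num fields are shift encodings, not raw
 * counts: the effective size is the base size shifted left by the enum
 * value, e.g. fcoe_filt_size = I40E_HASH_FILTER_BASE_SIZE <<
 * settings->fcoe_filt_num, as computed below.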
4192 **/ 4193 static i40e_status i40e_validate_filter_settings(struct i40e_hw *hw, 4194 struct i40e_filter_control_settings *settings) 4195 { 4196 u32 fcoe_cntx_size, fcoe_filt_size; 4197 u32 pe_cntx_size, pe_filt_size; 4198 u32 fcoe_fmax; 4199 u32 val; 4200 4201 /* Validate FCoE settings passed */ 4202 switch (settings->fcoe_filt_num) { 4203 case I40E_HASH_FILTER_SIZE_1K: 4204 case I40E_HASH_FILTER_SIZE_2K: 4205 case I40E_HASH_FILTER_SIZE_4K: 4206 case I40E_HASH_FILTER_SIZE_8K: 4207 case I40E_HASH_FILTER_SIZE_16K: 4208 case I40E_HASH_FILTER_SIZE_32K: 4209 fcoe_filt_size = I40E_HASH_FILTER_BASE_SIZE; 4210 fcoe_filt_size <<= (u32)settings->fcoe_filt_num; 4211 break; 4212 default: 4213 return I40E_ERR_PARAM; 4214 } 4215 4216 switch (settings->fcoe_cntx_num) { 4217 case I40E_DMA_CNTX_SIZE_512: 4218 case I40E_DMA_CNTX_SIZE_1K: 4219 case I40E_DMA_CNTX_SIZE_2K: 4220 case I40E_DMA_CNTX_SIZE_4K: 4221 fcoe_cntx_size = I40E_DMA_CNTX_BASE_SIZE; 4222 fcoe_cntx_size <<= (u32)settings->fcoe_cntx_num; 4223 break; 4224 default: 4225 return I40E_ERR_PARAM; 4226 } 4227 4228 /* Validate PE settings passed */ 4229 switch (settings->pe_filt_num) { 4230 case I40E_HASH_FILTER_SIZE_1K: 4231 case I40E_HASH_FILTER_SIZE_2K: 4232 case I40E_HASH_FILTER_SIZE_4K: 4233 case I40E_HASH_FILTER_SIZE_8K: 4234 case I40E_HASH_FILTER_SIZE_16K: 4235 case I40E_HASH_FILTER_SIZE_32K: 4236 case I40E_HASH_FILTER_SIZE_64K: 4237 case I40E_HASH_FILTER_SIZE_128K: 4238 case I40E_HASH_FILTER_SIZE_256K: 4239 case I40E_HASH_FILTER_SIZE_512K: 4240 case I40E_HASH_FILTER_SIZE_1M: 4241 pe_filt_size = I40E_HASH_FILTER_BASE_SIZE; 4242 pe_filt_size <<= (u32)settings->pe_filt_num; 4243 break; 4244 default: 4245 return I40E_ERR_PARAM; 4246 } 4247 4248 switch (settings->pe_cntx_num) { 4249 case I40E_DMA_CNTX_SIZE_512: 4250 case I40E_DMA_CNTX_SIZE_1K: 4251 case I40E_DMA_CNTX_SIZE_2K: 4252 case I40E_DMA_CNTX_SIZE_4K: 4253 case I40E_DMA_CNTX_SIZE_8K: 4254 case I40E_DMA_CNTX_SIZE_16K: 4255 case I40E_DMA_CNTX_SIZE_32K: 4256 case I40E_DMA_CNTX_SIZE_64K: 4257 case I40E_DMA_CNTX_SIZE_128K: 4258 case I40E_DMA_CNTX_SIZE_256K: 4259 pe_cntx_size = I40E_DMA_CNTX_BASE_SIZE; 4260 pe_cntx_size <<= (u32)settings->pe_cntx_num; 4261 break; 4262 default: 4263 return I40E_ERR_PARAM; 4264 } 4265 4266 /* FCHSIZE + FCDSIZE should not be greater than PMFCOEFMAX */ 4267 val = rd32(hw, I40E_GLHMC_FCOEFMAX); 4268 fcoe_fmax = (val & I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_MASK) 4269 >> I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT; 4270 if (fcoe_filt_size + fcoe_cntx_size > fcoe_fmax) 4271 return I40E_ERR_INVALID_SIZE; 4272 4273 return 0; 4274 } 4275 4276 /** 4277 * i40e_set_filter_control 4278 * @hw: pointer to the hardware structure 4279 * @settings: Filter control settings 4280 * 4281 * Set the Queue Filters for PE/FCoE and enable filters required 4282 * for a single PF. It is expected that these settings are programmed 4283 * at the driver initialization time. 
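 *
 * Caller sketch (field values are illustrative defaults, not
 * recommendations; a zeroed structure passes validation):
 *
 *     struct i40e_filter_control_settings filter_control = {};
 *
 *     filter_control.hash_lut_size = I40E_HASH_LUT_SIZE_128;
 *     filter_control.enable_fdir = true;
 *     filter_control.enable_ethtype = true;
 *     filter_control.enable_macvlan = true;
 *     ret = i40e_set_filter_control(hw, &filter_control);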
4284 **/ 4285 i40e_status i40e_set_filter_control(struct i40e_hw *hw, 4286 struct i40e_filter_control_settings *settings) 4287 { 4288 i40e_status ret = 0; 4289 u32 hash_lut_size = 0; 4290 u32 val; 4291 4292 if (!settings) 4293 return I40E_ERR_PARAM; 4294 4295 /* Validate the input settings */ 4296 ret = i40e_validate_filter_settings(hw, settings); 4297 if (ret) 4298 return ret; 4299 4300 /* Read the PF Queue Filter control register */ 4301 val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0); 4302 4303 /* Program required PE hash buckets for the PF */ 4304 val &= ~I40E_PFQF_CTL_0_PEHSIZE_MASK; 4305 val |= ((u32)settings->pe_filt_num << I40E_PFQF_CTL_0_PEHSIZE_SHIFT) & 4306 I40E_PFQF_CTL_0_PEHSIZE_MASK; 4307 /* Program required PE contexts for the PF */ 4308 val &= ~I40E_PFQF_CTL_0_PEDSIZE_MASK; 4309 val |= ((u32)settings->pe_cntx_num << I40E_PFQF_CTL_0_PEDSIZE_SHIFT) & 4310 I40E_PFQF_CTL_0_PEDSIZE_MASK; 4311 4312 /* Program required FCoE hash buckets for the PF */ 4313 val &= ~I40E_PFQF_CTL_0_PFFCHSIZE_MASK; 4314 val |= ((u32)settings->fcoe_filt_num << 4315 I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT) & 4316 I40E_PFQF_CTL_0_PFFCHSIZE_MASK; 4317 /* Program required FCoE DDP contexts for the PF */ 4318 val &= ~I40E_PFQF_CTL_0_PFFCDSIZE_MASK; 4319 val |= ((u32)settings->fcoe_cntx_num << 4320 I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT) & 4321 I40E_PFQF_CTL_0_PFFCDSIZE_MASK; 4322 4323 /* Program Hash LUT size for the PF */ 4324 val &= ~I40E_PFQF_CTL_0_HASHLUTSIZE_MASK; 4325 if (settings->hash_lut_size == I40E_HASH_LUT_SIZE_512) 4326 hash_lut_size = 1; 4327 val |= (hash_lut_size << I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT) & 4328 I40E_PFQF_CTL_0_HASHLUTSIZE_MASK; 4329 4330 /* Enable FDIR, Ethertype and MACVLAN filters for PF and VFs */ 4331 if (settings->enable_fdir) 4332 val |= I40E_PFQF_CTL_0_FD_ENA_MASK; 4333 if (settings->enable_ethtype) 4334 val |= I40E_PFQF_CTL_0_ETYPE_ENA_MASK; 4335 if (settings->enable_macvlan) 4336 val |= I40E_PFQF_CTL_0_MACVLAN_ENA_MASK; 4337 4338 i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, val); 4339 4340 return 0; 4341 } 4342 4343 /** 4344 * i40e_aq_add_rem_control_packet_filter - Add or Remove Control Packet Filter 4345 * @hw: pointer to the hw struct 4346 * @mac_addr: MAC address to use in the filter 4347 * @ethtype: Ethertype to use in the filter 4348 * @flags: Flags that needs to be applied to the filter 4349 * @vsi_seid: seid of the control VSI 4350 * @queue: VSI queue number to send the packet to 4351 * @is_add: Add control packet filter if True else remove 4352 * @stats: Structure to hold information on control filter counts 4353 * @cmd_details: pointer to command details structure or NULL 4354 * 4355 * This command will Add or Remove control packet filter for a control VSI. 4356 * In return it will update the total number of perfect filter count in 4357 * the stats member. 
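 *
 * For a concrete caller, see i40e_add_filter_to_drop_tx_flow_control_frames()
 * below, which uses this command to install a Tx drop filter for the
 * flow control ethertype.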
4358 **/ 4359 i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw, 4360 u8 *mac_addr, u16 ethtype, u16 flags, 4361 u16 vsi_seid, u16 queue, bool is_add, 4362 struct i40e_control_filter_stats *stats, 4363 struct i40e_asq_cmd_details *cmd_details) 4364 { 4365 struct i40e_aq_desc desc; 4366 struct i40e_aqc_add_remove_control_packet_filter *cmd = 4367 (struct i40e_aqc_add_remove_control_packet_filter *) 4368 &desc.params.raw; 4369 struct i40e_aqc_add_remove_control_packet_filter_completion *resp = 4370 (struct i40e_aqc_add_remove_control_packet_filter_completion *) 4371 &desc.params.raw; 4372 i40e_status status; 4373 4374 if (vsi_seid == 0) 4375 return I40E_ERR_PARAM; 4376 4377 if (is_add) { 4378 i40e_fill_default_direct_cmd_desc(&desc, 4379 i40e_aqc_opc_add_control_packet_filter); 4380 cmd->queue = cpu_to_le16(queue); 4381 } else { 4382 i40e_fill_default_direct_cmd_desc(&desc, 4383 i40e_aqc_opc_remove_control_packet_filter); 4384 } 4385 4386 if (mac_addr) 4387 ether_addr_copy(cmd->mac, mac_addr); 4388 4389 cmd->etype = cpu_to_le16(ethtype); 4390 cmd->flags = cpu_to_le16(flags); 4391 cmd->seid = cpu_to_le16(vsi_seid); 4392 4393 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 4394 4395 if (!status && stats) { 4396 stats->mac_etype_used = le16_to_cpu(resp->mac_etype_used); 4397 stats->etype_used = le16_to_cpu(resp->etype_used); 4398 stats->mac_etype_free = le16_to_cpu(resp->mac_etype_free); 4399 stats->etype_free = le16_to_cpu(resp->etype_free); 4400 } 4401 4402 return status; 4403 } 4404 4405 /** 4406 * i40e_add_filter_to_drop_tx_flow_control_frames- filter to drop flow control 4407 * @hw: pointer to the hw struct 4408 * @seid: VSI seid to add ethertype filter from 4409 **/ 4410 void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw, 4411 u16 seid) 4412 { 4413 #define I40E_FLOW_CONTROL_ETHTYPE 0x8808 4414 u16 flag = I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC | 4415 I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP | 4416 I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX; 4417 u16 ethtype = I40E_FLOW_CONTROL_ETHTYPE; 4418 i40e_status status; 4419 4420 status = i40e_aq_add_rem_control_packet_filter(hw, NULL, ethtype, flag, 4421 seid, 0, true, NULL, 4422 NULL); 4423 if (status) 4424 hw_dbg(hw, "Ethtype Filter Add failed: Error pruning Tx flow control frames\n"); 4425 } 4426 4427 /** 4428 * i40e_aq_alternate_read 4429 * @hw: pointer to the hardware structure 4430 * @reg_addr0: address of first dword to be read 4431 * @reg_val0: pointer for data read from 'reg_addr0' 4432 * @reg_addr1: address of second dword to be read 4433 * @reg_val1: pointer for data read from 'reg_addr1' 4434 * 4435 * Read one or two dwords from alternate structure. Fields are indicated 4436 * by 'reg_addr0' and 'reg_addr1' register numbers. If 'reg_val1' pointer 4437 * is not passed then only register at 'reg_addr0' is read. 
4438 * 4439 **/ 4440 static i40e_status i40e_aq_alternate_read(struct i40e_hw *hw, 4441 u32 reg_addr0, u32 *reg_val0, 4442 u32 reg_addr1, u32 *reg_val1) 4443 { 4444 struct i40e_aq_desc desc; 4445 struct i40e_aqc_alternate_write *cmd_resp = 4446 (struct i40e_aqc_alternate_write *)&desc.params.raw; 4447 i40e_status status; 4448 4449 if (!reg_val0) 4450 return I40E_ERR_PARAM; 4451 4452 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_alternate_read); 4453 cmd_resp->address0 = cpu_to_le32(reg_addr0); 4454 cmd_resp->address1 = cpu_to_le32(reg_addr1); 4455 4456 status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL); 4457 4458 if (!status) { 4459 *reg_val0 = le32_to_cpu(cmd_resp->data0); 4460 4461 if (reg_val1) 4462 *reg_val1 = le32_to_cpu(cmd_resp->data1); 4463 } 4464 4465 return status; 4466 } 4467 4468 /** 4469 * i40e_aq_resume_port_tx 4470 * @hw: pointer to the hardware structure 4471 * @cmd_details: pointer to command details structure or NULL 4472 * 4473 * Resume port's Tx traffic 4474 **/ 4475 i40e_status i40e_aq_resume_port_tx(struct i40e_hw *hw, 4476 struct i40e_asq_cmd_details *cmd_details) 4477 { 4478 struct i40e_aq_desc desc; 4479 i40e_status status; 4480 4481 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_resume_port_tx); 4482 4483 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 4484 4485 return status; 4486 } 4487 4488 /** 4489 * i40e_set_pci_config_data - store PCI bus info 4490 * @hw: pointer to hardware structure 4491 * @link_status: the link status word from PCI config space 4492 * 4493 * Stores the PCI bus info (speed, width, type) within the i40e_hw structure 4494 **/ 4495 void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status) 4496 { 4497 hw->bus.type = i40e_bus_type_pci_express; 4498 4499 switch (link_status & PCI_EXP_LNKSTA_NLW) { 4500 case PCI_EXP_LNKSTA_NLW_X1: 4501 hw->bus.width = i40e_bus_width_pcie_x1; 4502 break; 4503 case PCI_EXP_LNKSTA_NLW_X2: 4504 hw->bus.width = i40e_bus_width_pcie_x2; 4505 break; 4506 case PCI_EXP_LNKSTA_NLW_X4: 4507 hw->bus.width = i40e_bus_width_pcie_x4; 4508 break; 4509 case PCI_EXP_LNKSTA_NLW_X8: 4510 hw->bus.width = i40e_bus_width_pcie_x8; 4511 break; 4512 default: 4513 hw->bus.width = i40e_bus_width_unknown; 4514 break; 4515 } 4516 4517 switch (link_status & PCI_EXP_LNKSTA_CLS) { 4518 case PCI_EXP_LNKSTA_CLS_2_5GB: 4519 hw->bus.speed = i40e_bus_speed_2500; 4520 break; 4521 case PCI_EXP_LNKSTA_CLS_5_0GB: 4522 hw->bus.speed = i40e_bus_speed_5000; 4523 break; 4524 case PCI_EXP_LNKSTA_CLS_8_0GB: 4525 hw->bus.speed = i40e_bus_speed_8000; 4526 break; 4527 default: 4528 hw->bus.speed = i40e_bus_speed_unknown; 4529 break; 4530 } 4531 } 4532 4533 /** 4534 * i40e_aq_debug_dump 4535 * @hw: pointer to the hardware structure 4536 * @cluster_id: specific cluster to dump 4537 * @table_id: table id within cluster 4538 * @start_index: index of line in the block to read 4539 * @buff_size: dump buffer size 4540 * @buff: dump buffer 4541 * @ret_buff_size: actual buffer size returned 4542 * @ret_next_table: next block to read 4543 * @ret_next_index: next index to read 4544 * @cmd_details: pointer to command details structure or NULL 4545 * 4546 * Dump internal FW/HW data for debug purposes. 
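 *
 * Sketch of a single dump request (the cluster/table ids, buffer and its
 * size are illustrative placeholders):
 *
 *     u8 next_table = 0;
 *     u32 next_index = 0;
 *     u16 rlen = 0;
 *
 *     ret = i40e_aq_debug_dump(hw, cluster_id, table_id, 0, buf_size, buf,
 *                              &rlen, &next_table, &next_index, NULL);
 *     next_table/next_index can then seed a follow-up request.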
4547 * 4548 **/ 4549 i40e_status i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id, 4550 u8 table_id, u32 start_index, u16 buff_size, 4551 void *buff, u16 *ret_buff_size, 4552 u8 *ret_next_table, u32 *ret_next_index, 4553 struct i40e_asq_cmd_details *cmd_details) 4554 { 4555 struct i40e_aq_desc desc; 4556 struct i40e_aqc_debug_dump_internals *cmd = 4557 (struct i40e_aqc_debug_dump_internals *)&desc.params.raw; 4558 struct i40e_aqc_debug_dump_internals *resp = 4559 (struct i40e_aqc_debug_dump_internals *)&desc.params.raw; 4560 i40e_status status; 4561 4562 if (buff_size == 0 || !buff) 4563 return I40E_ERR_PARAM; 4564 4565 i40e_fill_default_direct_cmd_desc(&desc, 4566 i40e_aqc_opc_debug_dump_internals); 4567 /* Indirect Command */ 4568 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 4569 if (buff_size > I40E_AQ_LARGE_BUF) 4570 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 4571 4572 cmd->cluster_id = cluster_id; 4573 cmd->table_id = table_id; 4574 cmd->idx = cpu_to_le32(start_index); 4575 4576 desc.datalen = cpu_to_le16(buff_size); 4577 4578 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); 4579 if (!status) { 4580 if (ret_buff_size) 4581 *ret_buff_size = le16_to_cpu(desc.datalen); 4582 if (ret_next_table) 4583 *ret_next_table = resp->table_id; 4584 if (ret_next_index) 4585 *ret_next_index = le32_to_cpu(resp->idx); 4586 } 4587 4588 return status; 4589 } 4590 4591 /** 4592 * i40e_read_bw_from_alt_ram 4593 * @hw: pointer to the hardware structure 4594 * @max_bw: pointer for max_bw read 4595 * @min_bw: pointer for min_bw read 4596 * @min_valid: pointer for bool that is true if min_bw is a valid value 4597 * @max_valid: pointer for bool that is true if max_bw is a valid value 4598 * 4599 * Read bw from the alternate ram for the given pf 4600 **/ 4601 i40e_status i40e_read_bw_from_alt_ram(struct i40e_hw *hw, 4602 u32 *max_bw, u32 *min_bw, 4603 bool *min_valid, bool *max_valid) 4604 { 4605 i40e_status status; 4606 u32 max_bw_addr, min_bw_addr; 4607 4608 /* Calculate the address of the min/max bw registers */ 4609 max_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET + 4610 I40E_ALT_STRUCT_MAX_BW_OFFSET + 4611 (I40E_ALT_STRUCT_DWORDS_PER_PF * hw->pf_id); 4612 min_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET + 4613 I40E_ALT_STRUCT_MIN_BW_OFFSET + 4614 (I40E_ALT_STRUCT_DWORDS_PER_PF * hw->pf_id); 4615 4616 /* Read the bandwidths from alt ram */ 4617 status = i40e_aq_alternate_read(hw, max_bw_addr, max_bw, 4618 min_bw_addr, min_bw); 4619 4620 if (*min_bw & I40E_ALT_BW_VALID_MASK) 4621 *min_valid = true; 4622 else 4623 *min_valid = false; 4624 4625 if (*max_bw & I40E_ALT_BW_VALID_MASK) 4626 *max_valid = true; 4627 else 4628 *max_valid = false; 4629 4630 return status; 4631 } 4632 4633 /** 4634 * i40e_aq_configure_partition_bw 4635 * @hw: pointer to the hardware structure 4636 * @bw_data: Buffer holding valid pfs and bw limits 4637 * @cmd_details: pointer to command details 4638 * 4639 * Configure partitions guaranteed/max bw 4640 **/ 4641 i40e_status i40e_aq_configure_partition_bw(struct i40e_hw *hw, 4642 struct i40e_aqc_configure_partition_bw_data *bw_data, 4643 struct i40e_asq_cmd_details *cmd_details) 4644 { 4645 i40e_status status; 4646 struct i40e_aq_desc desc; 4647 u16 bwd_size = sizeof(*bw_data); 4648 4649 i40e_fill_default_direct_cmd_desc(&desc, 4650 i40e_aqc_opc_configure_partition_bw); 4651 4652 /* Indirect command */ 4653 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 4654 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD); 4655 4656 if (bwd_size > I40E_AQ_LARGE_BUF) 4657 
desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 4658 4659 desc.datalen = cpu_to_le16(bwd_size); 4660 4661 status = i40e_asq_send_command(hw, &desc, bw_data, bwd_size, 4662 cmd_details); 4663 4664 return status; 4665 } 4666 4667 /** 4668 * i40e_read_phy_register_clause22 4669 * @hw: pointer to the HW structure 4670 * @reg: register address in the page 4671 * @phy_addr: PHY address on MDIO interface 4672 * @value: PHY register value 4673 * 4674 * Reads specified PHY register value 4675 **/ 4676 i40e_status i40e_read_phy_register_clause22(struct i40e_hw *hw, 4677 u16 reg, u8 phy_addr, u16 *value) 4678 { 4679 i40e_status status = I40E_ERR_TIMEOUT; 4680 u8 port_num = (u8)hw->func_caps.mdio_port_num; 4681 u32 command = 0; 4682 u16 retry = 1000; 4683 4684 command = (reg << I40E_GLGEN_MSCA_DEVADD_SHIFT) | 4685 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | 4686 (I40E_MDIO_CLAUSE22_OPCODE_READ_MASK) | 4687 (I40E_MDIO_CLAUSE22_STCODE_MASK) | 4688 (I40E_GLGEN_MSCA_MDICMD_MASK); 4689 wr32(hw, I40E_GLGEN_MSCA(port_num), command); 4690 do { 4691 command = rd32(hw, I40E_GLGEN_MSCA(port_num)); 4692 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { 4693 status = 0; 4694 break; 4695 } 4696 udelay(10); 4697 retry--; 4698 } while (retry); 4699 4700 if (status) { 4701 i40e_debug(hw, I40E_DEBUG_PHY, 4702 "PHY: Can't write command to external PHY.\n"); 4703 } else { 4704 command = rd32(hw, I40E_GLGEN_MSRWD(port_num)); 4705 *value = (command & I40E_GLGEN_MSRWD_MDIRDDATA_MASK) >> 4706 I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT; 4707 } 4708 4709 return status; 4710 } 4711 4712 /** 4713 * i40e_write_phy_register_clause22 4714 * @hw: pointer to the HW structure 4715 * @reg: register address in the page 4716 * @phy_addr: PHY address on MDIO interface 4717 * @value: PHY register value 4718 * 4719 * Writes specified PHY register value 4720 **/ 4721 i40e_status i40e_write_phy_register_clause22(struct i40e_hw *hw, 4722 u16 reg, u8 phy_addr, u16 value) 4723 { 4724 i40e_status status = I40E_ERR_TIMEOUT; 4725 u8 port_num = (u8)hw->func_caps.mdio_port_num; 4726 u32 command = 0; 4727 u16 retry = 1000; 4728 4729 command = value << I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT; 4730 wr32(hw, I40E_GLGEN_MSRWD(port_num), command); 4731 4732 command = (reg << I40E_GLGEN_MSCA_DEVADD_SHIFT) | 4733 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | 4734 (I40E_MDIO_CLAUSE22_OPCODE_WRITE_MASK) | 4735 (I40E_MDIO_CLAUSE22_STCODE_MASK) | 4736 (I40E_GLGEN_MSCA_MDICMD_MASK); 4737 4738 wr32(hw, I40E_GLGEN_MSCA(port_num), command); 4739 do { 4740 command = rd32(hw, I40E_GLGEN_MSCA(port_num)); 4741 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { 4742 status = 0; 4743 break; 4744 } 4745 udelay(10); 4746 retry--; 4747 } while (retry); 4748 4749 return status; 4750 } 4751 4752 /** 4753 * i40e_read_phy_register_clause45 4754 * @hw: pointer to the HW structure 4755 * @page: registers page number 4756 * @reg: register address in the page 4757 * @phy_addr: PHY address on MDIO interface 4758 * @value: PHY register value 4759 * 4760 * Reads specified PHY register value 4761 **/ 4762 i40e_status i40e_read_phy_register_clause45(struct i40e_hw *hw, 4763 u8 page, u16 reg, u8 phy_addr, u16 *value) 4764 { 4765 i40e_status status = I40E_ERR_TIMEOUT; 4766 u32 command = 0; 4767 u16 retry = 1000; 4768 u8 port_num = hw->func_caps.mdio_port_num; 4769 4770 command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) | 4771 (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) | 4772 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | 4773 (I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK) | 4774 (I40E_MDIO_CLAUSE45_STCODE_MASK) | 4775 
(I40E_GLGEN_MSCA_MDICMD_MASK) | 4776 (I40E_GLGEN_MSCA_MDIINPROGEN_MASK); 4777 wr32(hw, I40E_GLGEN_MSCA(port_num), command); 4778 do { 4779 command = rd32(hw, I40E_GLGEN_MSCA(port_num)); 4780 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { 4781 status = 0; 4782 break; 4783 } 4784 usleep_range(10, 20); 4785 retry--; 4786 } while (retry); 4787 4788 if (status) { 4789 i40e_debug(hw, I40E_DEBUG_PHY, 4790 "PHY: Can't write command to external PHY.\n"); 4791 goto phy_read_end; 4792 } 4793 4794 command = (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) | 4795 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | 4796 (I40E_MDIO_CLAUSE45_OPCODE_READ_MASK) | 4797 (I40E_MDIO_CLAUSE45_STCODE_MASK) | 4798 (I40E_GLGEN_MSCA_MDICMD_MASK) | 4799 (I40E_GLGEN_MSCA_MDIINPROGEN_MASK); 4800 status = I40E_ERR_TIMEOUT; 4801 retry = 1000; 4802 wr32(hw, I40E_GLGEN_MSCA(port_num), command); 4803 do { 4804 command = rd32(hw, I40E_GLGEN_MSCA(port_num)); 4805 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { 4806 status = 0; 4807 break; 4808 } 4809 usleep_range(10, 20); 4810 retry--; 4811 } while (retry); 4812 4813 if (!status) { 4814 command = rd32(hw, I40E_GLGEN_MSRWD(port_num)); 4815 *value = (command & I40E_GLGEN_MSRWD_MDIRDDATA_MASK) >> 4816 I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT; 4817 } else { 4818 i40e_debug(hw, I40E_DEBUG_PHY, 4819 "PHY: Can't read register value from external PHY.\n"); 4820 } 4821 4822 phy_read_end: 4823 return status; 4824 } 4825 4826 /** 4827 * i40e_write_phy_register_clause45 4828 * @hw: pointer to the HW structure 4829 * @page: registers page number 4830 * @reg: register address in the page 4831 * @phy_addr: PHY address on MDIO interface 4832 * @value: PHY register value 4833 * 4834 * Writes value to specified PHY register 4835 **/ 4836 i40e_status i40e_write_phy_register_clause45(struct i40e_hw *hw, 4837 u8 page, u16 reg, u8 phy_addr, u16 value) 4838 { 4839 i40e_status status = I40E_ERR_TIMEOUT; 4840 u32 command = 0; 4841 u16 retry = 1000; 4842 u8 port_num = hw->func_caps.mdio_port_num; 4843 4844 command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) | 4845 (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) | 4846 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | 4847 (I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK) | 4848 (I40E_MDIO_CLAUSE45_STCODE_MASK) | 4849 (I40E_GLGEN_MSCA_MDICMD_MASK) | 4850 (I40E_GLGEN_MSCA_MDIINPROGEN_MASK); 4851 wr32(hw, I40E_GLGEN_MSCA(port_num), command); 4852 do { 4853 command = rd32(hw, I40E_GLGEN_MSCA(port_num)); 4854 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { 4855 status = 0; 4856 break; 4857 } 4858 usleep_range(10, 20); 4859 retry--; 4860 } while (retry); 4861 if (status) { 4862 i40e_debug(hw, I40E_DEBUG_PHY, 4863 "PHY: Can't write command to external PHY.\n"); 4864 goto phy_write_end; 4865 } 4866 4867 command = value << I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT; 4868 wr32(hw, I40E_GLGEN_MSRWD(port_num), command); 4869 4870 command = (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) | 4871 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | 4872 (I40E_MDIO_CLAUSE45_OPCODE_WRITE_MASK) | 4873 (I40E_MDIO_CLAUSE45_STCODE_MASK) | 4874 (I40E_GLGEN_MSCA_MDICMD_MASK) | 4875 (I40E_GLGEN_MSCA_MDIINPROGEN_MASK); 4876 status = I40E_ERR_TIMEOUT; 4877 retry = 1000; 4878 wr32(hw, I40E_GLGEN_MSCA(port_num), command); 4879 do { 4880 command = rd32(hw, I40E_GLGEN_MSCA(port_num)); 4881 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { 4882 status = 0; 4883 break; 4884 } 4885 usleep_range(10, 20); 4886 retry--; 4887 } while (retry); 4888 4889 phy_write_end: 4890 return status; 4891 } 4892 4893 /** 4894 * i40e_write_phy_register 4895 * @hw: pointer to the 
HW structure 4896 * @page: registers page number 4897 * @reg: register address in the page 4898 * @phy_addr: PHY address on MDIO interface 4899 * @value: PHY register value 4900 * 4901 * Writes value to specified PHY register 4902 **/ 4903 i40e_status i40e_write_phy_register(struct i40e_hw *hw, 4904 u8 page, u16 reg, u8 phy_addr, u16 value) 4905 { 4906 i40e_status status; 4907 4908 switch (hw->device_id) { 4909 case I40E_DEV_ID_1G_BASE_T_X722: 4910 status = i40e_write_phy_register_clause22(hw, reg, phy_addr, 4911 value); 4912 break; 4913 case I40E_DEV_ID_10G_BASE_T: 4914 case I40E_DEV_ID_10G_BASE_T4: 4915 case I40E_DEV_ID_10G_BASE_T_BC: 4916 case I40E_DEV_ID_10G_BASE_T_X722: 4917 case I40E_DEV_ID_25G_B: 4918 case I40E_DEV_ID_25G_SFP28: 4919 status = i40e_write_phy_register_clause45(hw, page, reg, 4920 phy_addr, value); 4921 break; 4922 default: 4923 status = I40E_ERR_UNKNOWN_PHY; 4924 break; 4925 } 4926 4927 return status; 4928 } 4929 4930 /** 4931 * i40e_read_phy_register 4932 * @hw: pointer to the HW structure 4933 * @page: registers page number 4934 * @reg: register address in the page 4935 * @phy_addr: PHY address on MDIO interface 4936 * @value: PHY register value 4937 * 4938 * Reads specified PHY register value 4939 **/ 4940 i40e_status i40e_read_phy_register(struct i40e_hw *hw, 4941 u8 page, u16 reg, u8 phy_addr, u16 *value) 4942 { 4943 i40e_status status; 4944 4945 switch (hw->device_id) { 4946 case I40E_DEV_ID_1G_BASE_T_X722: 4947 status = i40e_read_phy_register_clause22(hw, reg, phy_addr, 4948 value); 4949 break; 4950 case I40E_DEV_ID_10G_BASE_T: 4951 case I40E_DEV_ID_10G_BASE_T4: 4952 case I40E_DEV_ID_10G_BASE_T_BC: 4953 case I40E_DEV_ID_10G_BASE_T_X722: 4954 case I40E_DEV_ID_25G_B: 4955 case I40E_DEV_ID_25G_SFP28: 4956 status = i40e_read_phy_register_clause45(hw, page, reg, 4957 phy_addr, value); 4958 break; 4959 default: 4960 status = I40E_ERR_UNKNOWN_PHY; 4961 break; 4962 } 4963 4964 return status; 4965 } 4966 4967 /** 4968 * i40e_get_phy_address 4969 * @hw: pointer to the HW structure 4970 * @dev_num: PHY port number whose address we want 4971 * 4972 * Gets PHY address for current port 4973 **/ 4974 u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num) 4975 { 4976 u8 port_num = hw->func_caps.mdio_port_num; 4977 u32 reg_val = rd32(hw, I40E_GLGEN_MDIO_I2C_SEL(port_num)); 4978 4979 return (u8)(reg_val >> ((dev_num + 1) * 5)) & 0x1f; 4980 } 4981 4982 /** 4983 * i40e_blink_phy_link_led 4984 * @hw: pointer to the HW structure 4985 * @time: how long the LED should blink, in seconds 4986 * @interval: gap between LED on and off in msecs 4987 * 4988 * Blinks PHY link LED 4989 **/ 4990 i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw, 4991 u32 time, u32 interval) 4992 { 4993 i40e_status status = 0; 4994 u32 i; 4995 u16 led_ctl; 4996 u16 gpio_led_port; 4997 u16 led_reg; 4998 u16 led_addr = I40E_PHY_LED_PROV_REG_1; 4999 u8 phy_addr = 0; 5000 u8 port_num; 5001 5002 i = rd32(hw, I40E_PFGEN_PORTNUM); 5003 port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK); 5004 phy_addr = i40e_get_phy_address(hw, port_num); 5005 5006 for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++, 5007 led_addr++) { 5008 status = i40e_read_phy_register_clause45(hw, 5009 I40E_PHY_COM_REG_PAGE, 5010 led_addr, phy_addr, 5011 &led_reg); 5012 if (status) 5013 goto phy_blinking_end; 5014 led_ctl = led_reg; 5015 if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) { 5016 led_reg = 0; 5017 status = i40e_write_phy_register_clause45(hw, 5018 I40E_PHY_COM_REG_PAGE, 5019 led_addr, phy_addr, 5020 led_reg); 5021 if (status) 5022 goto
phy_blinking_end; 5023 break; 5024 } 5025 } 5026 5027 if (time > 0 && interval > 0) { 5028 for (i = 0; i < time * 1000; i += interval) { 5029 status = i40e_read_phy_register_clause45(hw, 5030 I40E_PHY_COM_REG_PAGE, 5031 led_addr, phy_addr, &led_reg); 5032 if (status) 5033 goto restore_config; 5034 if (led_reg & I40E_PHY_LED_MANUAL_ON) 5035 led_reg = 0; 5036 else 5037 led_reg = I40E_PHY_LED_MANUAL_ON; 5038 status = i40e_write_phy_register_clause45(hw, 5039 I40E_PHY_COM_REG_PAGE, 5040 led_addr, phy_addr, led_reg); 5041 if (status) 5042 goto restore_config; 5043 msleep(interval); 5044 } 5045 } 5046 5047 restore_config: 5048 status = i40e_write_phy_register_clause45(hw, 5049 I40E_PHY_COM_REG_PAGE, 5050 led_addr, phy_addr, led_ctl); 5051 5052 phy_blinking_end: 5053 return status; 5054 } 5055 5056 /** 5057 * i40e_led_get_reg - read LED register 5058 * @hw: pointer to the HW structure 5059 * @led_addr: LED register address 5060 * @reg_val: read register value 5061 **/ 5062 static enum i40e_status_code i40e_led_get_reg(struct i40e_hw *hw, u16 led_addr, 5063 u32 *reg_val) 5064 { 5065 enum i40e_status_code status; 5066 u8 phy_addr = 0; 5067 u8 port_num; 5068 u32 i; 5069 5070 *reg_val = 0; 5071 if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) { 5072 status = 5073 i40e_aq_get_phy_register(hw, 5074 I40E_AQ_PHY_REG_ACCESS_EXTERNAL, 5075 I40E_PHY_COM_REG_PAGE, true, 5076 I40E_PHY_LED_PROV_REG_1, 5077 reg_val, NULL); 5078 } else { 5079 i = rd32(hw, I40E_PFGEN_PORTNUM); 5080 port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK); 5081 phy_addr = i40e_get_phy_address(hw, port_num); 5082 status = i40e_read_phy_register_clause45(hw, 5083 I40E_PHY_COM_REG_PAGE, 5084 led_addr, phy_addr, 5085 (u16 *)reg_val); 5086 } 5087 return status; 5088 } 5089 5090 /** 5091 * i40e_led_set_reg - write LED register 5092 * @hw: pointer to the HW structure 5093 * @led_addr: LED register address 5094 * @reg_val: register value to write 5095 **/ 5096 static enum i40e_status_code i40e_led_set_reg(struct i40e_hw *hw, u16 led_addr, 5097 u32 reg_val) 5098 { 5099 enum i40e_status_code status; 5100 u8 phy_addr = 0; 5101 u8 port_num; 5102 u32 i; 5103 5104 if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) { 5105 status = 5106 i40e_aq_set_phy_register(hw, 5107 I40E_AQ_PHY_REG_ACCESS_EXTERNAL, 5108 I40E_PHY_COM_REG_PAGE, true, 5109 I40E_PHY_LED_PROV_REG_1, 5110 reg_val, NULL); 5111 } else { 5112 i = rd32(hw, I40E_PFGEN_PORTNUM); 5113 port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK); 5114 phy_addr = i40e_get_phy_address(hw, port_num); 5115 status = i40e_write_phy_register_clause45(hw, 5116 I40E_PHY_COM_REG_PAGE, 5117 led_addr, phy_addr, 5118 (u16)reg_val); 5119 } 5120 5121 return status; 5122 } 5123 5124 /** 5125 * i40e_led_get_phy - return current on/off mode 5126 * @hw: pointer to the hw struct 5127 * @led_addr: address of led register to use 5128 * @val: original value of register to use 5129 * 5130 **/ 5131 i40e_status i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr, 5132 u16 *val) 5133 { 5134 i40e_status status = 0; 5135 u16 gpio_led_port; 5136 u8 phy_addr = 0; 5137 u16 reg_val; 5138 u16 temp_addr; 5139 u8 port_num; 5140 u32 i; 5141 u32 reg_val_aq; 5142 5143 if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) { 5144 status = 5145 i40e_aq_get_phy_register(hw, 5146 I40E_AQ_PHY_REG_ACCESS_EXTERNAL, 5147 I40E_PHY_COM_REG_PAGE, true, 5148 I40E_PHY_LED_PROV_REG_1, 5149 &reg_val_aq, NULL); 5150 if (status == I40E_SUCCESS) 5151 *val = (u16)reg_val_aq; 5152 return status; 5153 } 5154 temp_addr = I40E_PHY_LED_PROV_REG_1; 5155 i =
rd32(hw, I40E_PFGEN_PORTNUM); 5156 port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK); 5157 phy_addr = i40e_get_phy_address(hw, port_num); 5158 5159 for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++, 5160 temp_addr++) { 5161 status = i40e_read_phy_register_clause45(hw, 5162 I40E_PHY_COM_REG_PAGE, 5163 temp_addr, phy_addr, 5164 &reg_val); 5165 if (status) 5166 return status; 5167 *val = reg_val; 5168 if (reg_val & I40E_PHY_LED_LINK_MODE_MASK) { 5169 *led_addr = temp_addr; 5170 break; 5171 } 5172 } 5173 return status; 5174 } 5175 5176 /** 5177 * i40e_led_set_phy 5178 * @hw: pointer to the HW structure 5179 * @on: true or false 5180 * @led_addr: address of led register to use 5181 * @mode: original val plus bit for set or ignore 5182 * 5183 * Set the LED on or off when controlled by the PHY 5184 * 5185 **/ 5186 i40e_status i40e_led_set_phy(struct i40e_hw *hw, bool on, 5187 u16 led_addr, u32 mode) 5188 { 5189 i40e_status status = 0; 5190 u32 led_ctl = 0; 5191 u32 led_reg = 0; 5192 5193 status = i40e_led_get_reg(hw, led_addr, &led_reg); 5194 if (status) 5195 return status; 5196 led_ctl = led_reg; 5197 if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) { 5198 led_reg = 0; 5199 status = i40e_led_set_reg(hw, led_addr, led_reg); 5200 if (status) 5201 return status; 5202 } 5203 status = i40e_led_get_reg(hw, led_addr, &led_reg); 5204 if (status) 5205 goto restore_config; 5206 if (on) 5207 led_reg = I40E_PHY_LED_MANUAL_ON; 5208 else 5209 led_reg = 0; 5210 5211 status = i40e_led_set_reg(hw, led_addr, led_reg); 5212 if (status) 5213 goto restore_config; 5214 if (mode & I40E_PHY_LED_MODE_ORIG) { 5215 led_ctl = (mode & I40E_PHY_LED_MODE_MASK); 5216 status = i40e_led_set_reg(hw, led_addr, led_ctl); 5217 } 5218 return status; 5219 5220 restore_config: 5221 status = i40e_led_set_reg(hw, led_addr, led_ctl); 5222 return status; 5223 } 5224 5225 /** 5226 * i40e_aq_rx_ctl_read_register - use FW to read from an Rx control register 5227 * @hw: pointer to the hw struct 5228 * @reg_addr: register address 5229 * @reg_val: ptr to register value 5230 * @cmd_details: pointer to command details structure or NULL 5231 * 5232 * Use the firmware to read the Rx control register, 5233 * especially useful if the Rx unit is under heavy pressure 5234 **/ 5235 i40e_status i40e_aq_rx_ctl_read_register(struct i40e_hw *hw, 5236 u32 reg_addr, u32 *reg_val, 5237 struct i40e_asq_cmd_details *cmd_details) 5238 { 5239 struct i40e_aq_desc desc; 5240 struct i40e_aqc_rx_ctl_reg_read_write *cmd_resp = 5241 (struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw; 5242 i40e_status status; 5243 5244 if (!reg_val) 5245 return I40E_ERR_PARAM; 5246 5247 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_rx_ctl_reg_read); 5248 5249 cmd_resp->address = cpu_to_le32(reg_addr); 5250 5251 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 5252 5253 if (status == 0) 5254 *reg_val = le32_to_cpu(cmd_resp->value); 5255 5256 return status; 5257 } 5258 5259 /** 5260 * i40e_read_rx_ctl - read from an Rx control register 5261 * @hw: pointer to the hw struct 5262 * @reg_addr: register address 5263 **/ 5264 u32 i40e_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr) 5265 { 5266 i40e_status status = 0; 5267 bool use_register; 5268 int retry = 5; 5269 u32 val = 0; 5270 5271 use_register = (((hw->aq.api_maj_ver == 1) && 5272 (hw->aq.api_min_ver < 5)) || 5273 (hw->mac.type == I40E_MAC_X722)); 5274 if (!use_register) { 5275 do_retry: 5276 status = i40e_aq_rx_ctl_read_register(hw, reg_addr, &val, NULL); 5277 if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN
&& retry) { 5278 usleep_range(1000, 2000); 5279 retry--; 5280 goto do_retry; 5281 } 5282 } 5283 5284 /* if the AQ access failed, try the old-fashioned way */ 5285 if (status || use_register) 5286 val = rd32(hw, reg_addr); 5287 5288 return val; 5289 } 5290 5291 /** 5292 * i40e_aq_rx_ctl_write_register 5293 * @hw: pointer to the hw struct 5294 * @reg_addr: register address 5295 * @reg_val: register value 5296 * @cmd_details: pointer to command details structure or NULL 5297 * 5298 * Use the firmware to write to an Rx control register, 5299 * especially useful if the Rx unit is under heavy pressure 5300 **/ 5301 i40e_status i40e_aq_rx_ctl_write_register(struct i40e_hw *hw, 5302 u32 reg_addr, u32 reg_val, 5303 struct i40e_asq_cmd_details *cmd_details) 5304 { 5305 struct i40e_aq_desc desc; 5306 struct i40e_aqc_rx_ctl_reg_read_write *cmd = 5307 (struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw; 5308 i40e_status status; 5309 5310 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_rx_ctl_reg_write); 5311 5312 cmd->address = cpu_to_le32(reg_addr); 5313 cmd->value = cpu_to_le32(reg_val); 5314 5315 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 5316 5317 return status; 5318 } 5319 5320 /** 5321 * i40e_write_rx_ctl - write to an Rx control register 5322 * @hw: pointer to the hw struct 5323 * @reg_addr: register address 5324 * @reg_val: register value 5325 **/ 5326 void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val) 5327 { 5328 i40e_status status = 0; 5329 bool use_register; 5330 int retry = 5; 5331 5332 use_register = (((hw->aq.api_maj_ver == 1) && 5333 (hw->aq.api_min_ver < 5)) || 5334 (hw->mac.type == I40E_MAC_X722)); 5335 if (!use_register) { 5336 do_retry: 5337 status = i40e_aq_rx_ctl_write_register(hw, reg_addr, 5338 reg_val, NULL); 5339 if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) { 5340 usleep_range(1000, 2000); 5341 retry--; 5342 goto do_retry; 5343 } 5344 } 5345 5346 /* if the AQ access failed, try the old-fashioned way */ 5347 if (status || use_register) 5348 wr32(hw, reg_addr, reg_val); 5349 } 5350 5351 /** 5352 * i40e_mdio_if_number_selection - MDIO I/F number selection 5353 * @hw: pointer to the hw struct 5354 * @set_mdio: use MDIO I/F number specified by mdio_num 5355 * @mdio_num: MDIO I/F number 5356 * @cmd: pointer to PHY Register command structure 5357 **/ 5358 static void i40e_mdio_if_number_selection(struct i40e_hw *hw, bool set_mdio, 5359 u8 mdio_num, 5360 struct i40e_aqc_phy_register_access *cmd) 5361 { 5362 if (set_mdio && cmd->phy_interface == I40E_AQ_PHY_REG_ACCESS_EXTERNAL) { 5363 if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_EXTENDED) 5364 cmd->cmd_flags |= 5365 I40E_AQ_PHY_REG_ACCESS_SET_MDIO_IF_NUMBER | 5366 ((mdio_num << 5367 I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_SHIFT) & 5368 I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_MASK); 5369 else 5370 i40e_debug(hw, I40E_DEBUG_PHY, 5371 "MDIO I/F number selection not supported by current FW version.\n"); 5372 } 5373 } 5374 5375 /** 5376 * i40e_aq_set_phy_register_ext 5377 * @hw: pointer to the hw struct 5378 * @phy_select: select which phy should be accessed 5379 * @dev_addr: PHY device address 5380 * @set_mdio: use MDIO I/F number specified by mdio_num 5381 * @mdio_num: MDIO I/F number 5382 * @reg_addr: PHY register address 5383 * @reg_val: new register value 5384 * @cmd_details: pointer to command details structure or NULL 5385 * 5386 * Write the external PHY register. 
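 *
 * Direct-call sketch (the register, value and page handling are
 * illustrative; most callers use the plain wrapper mentioned below):
 *
 *     ret = i40e_aq_set_phy_register_ext(hw,
 *                                        I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
 *                                        I40E_PHY_COM_REG_PAGE, true,
 *                                        false, 0, reg_addr, reg_val, NULL);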
/**
 * i40e_aq_set_phy_register_ext - write to an external PHY register
 * @hw: pointer to the hw struct
 * @phy_select: select which phy should be accessed
 * @dev_addr: PHY device address
 * @page_change: flag to indicate if the PHY page should be updated
 * @set_mdio: use MDIO I/F number specified by mdio_num
 * @mdio_num: MDIO I/F number
 * @reg_addr: PHY register address
 * @reg_val: new register value
 * @cmd_details: pointer to command details structure or NULL
 *
 * Write the external PHY register.
 * NOTE: In common cases the MDIO I/F number should not be changed, that's why
 * you may use the simple wrapper i40e_aq_set_phy_register.
 **/
enum i40e_status_code i40e_aq_set_phy_register_ext(struct i40e_hw *hw,
			     u8 phy_select, u8 dev_addr, bool page_change,
			     bool set_mdio, u8 mdio_num,
			     u32 reg_addr, u32 reg_val,
			     struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_phy_register_access *cmd =
		(struct i40e_aqc_phy_register_access *)&desc.params.raw;
	i40e_status status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_set_phy_register);

	cmd->phy_interface = phy_select;
	cmd->dev_address = dev_addr;
	cmd->reg_address = cpu_to_le32(reg_addr);
	cmd->reg_value = cpu_to_le32(reg_val);

	i40e_mdio_if_number_selection(hw, set_mdio, mdio_num, cmd);

	if (!page_change)
		cmd->cmd_flags = I40E_AQ_PHY_REG_ACCESS_DONT_CHANGE_QSFP_PAGE;

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	return status;
}

/**
 * i40e_aq_get_phy_register_ext - read from an external PHY register
 * @hw: pointer to the hw struct
 * @phy_select: select which phy should be accessed
 * @dev_addr: PHY device address
 * @page_change: flag to indicate if the PHY page should be updated
 * @set_mdio: use MDIO I/F number specified by mdio_num
 * @mdio_num: MDIO I/F number
 * @reg_addr: PHY register address
 * @reg_val: read register value
 * @cmd_details: pointer to command details structure or NULL
 *
 * Read the external PHY register.
 * NOTE: In common cases the MDIO I/F number should not be changed, that's why
 * you may use the simple wrapper i40e_aq_get_phy_register.
 **/
enum i40e_status_code i40e_aq_get_phy_register_ext(struct i40e_hw *hw,
			     u8 phy_select, u8 dev_addr, bool page_change,
			     bool set_mdio, u8 mdio_num,
			     u32 reg_addr, u32 *reg_val,
			     struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_phy_register_access *cmd =
		(struct i40e_aqc_phy_register_access *)&desc.params.raw;
	i40e_status status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_get_phy_register);

	cmd->phy_interface = phy_select;
	cmd->dev_address = dev_addr;
	cmd->reg_address = cpu_to_le32(reg_addr);

	i40e_mdio_if_number_selection(hw, set_mdio, mdio_num, cmd);

	if (!page_change)
		cmd->cmd_flags = I40E_AQ_PHY_REG_ACCESS_DONT_CHANGE_QSFP_PAGE;

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
	if (!status)
		*reg_val = le32_to_cpu(cmd->reg_value);

	return status;
}

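/*
 * Editorial sketch: the "simple wrappers" referenced in the NOTEs above are
 * assumed to be thin inline helpers declared alongside these prototypes; the
 * exact shape should be confirmed in i40e_prototype.h.  A minimal version
 * that keeps the default MDIO I/F selection could look like:
 *
 *	static inline enum i40e_status_code
 *	i40e_aq_set_phy_register(struct i40e_hw *hw, u8 phy_select,
 *				 u8 dev_addr, bool page_change,
 *				 u32 reg_addr, u32 reg_val,
 *				 struct i40e_asq_cmd_details *cmd_details)
 *	{
 *		return i40e_aq_set_phy_register_ext(hw, phy_select, dev_addr,
 *						    page_change, false, 0,
 *						    reg_addr, reg_val,
 *						    cmd_details);
 *	}
 *
 * i40e_aq_get_phy_register() would mirror this around the _ext read call.
 */
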
/**
 * i40e_aq_write_ddp - Write dynamic device personalization (ddp)
 * @hw: pointer to the hw struct
 * @buff: command buffer (size in bytes = buff_size)
 * @buff_size: buffer size in bytes
 * @track_id: package tracking id
 * @error_offset: returns error offset
 * @error_info: returns error information
 * @cmd_details: pointer to command details structure or NULL
 **/
enum
i40e_status_code i40e_aq_write_ddp(struct i40e_hw *hw, void *buff,
				   u16 buff_size, u32 track_id,
				   u32 *error_offset, u32 *error_info,
				   struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_write_personalization_profile *cmd =
		(struct i40e_aqc_write_personalization_profile *)
		&desc.params.raw;
	struct i40e_aqc_write_ddp_resp *resp;
	i40e_status status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_write_personalization_profile);

	desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD);
	if (buff_size > I40E_AQ_LARGE_BUF)
		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);

	desc.datalen = cpu_to_le16(buff_size);

	cmd->profile_track_id = cpu_to_le32(track_id);

	status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
	if (!status) {
		resp = (struct i40e_aqc_write_ddp_resp *)&desc.params.raw;
		if (error_offset)
			*error_offset = le32_to_cpu(resp->error_offset);
		if (error_info)
			*error_info = le32_to_cpu(resp->error_info);
	}

	return status;
}

/**
 * i40e_aq_get_ddp_list - Read dynamic device personalization (ddp)
 * @hw: pointer to the hw struct
 * @buff: command buffer (size in bytes = buff_size)
 * @buff_size: buffer size in bytes
 * @flags: AdminQ command flags
 * @cmd_details: pointer to command details structure or NULL
 **/
enum
i40e_status_code i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
				      u16 buff_size, u8 flags,
				      struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_get_applied_profiles *cmd =
		(struct i40e_aqc_get_applied_profiles *)&desc.params.raw;
	i40e_status status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_get_personalization_profile_list);

	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
	if (buff_size > I40E_AQ_LARGE_BUF)
		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
	desc.datalen = cpu_to_le16(buff_size);

	cmd->flags = flags;

	status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);

	return status;
}

/**
 * i40e_find_segment_in_package - find a segment of a given type in a package
 * @segment_type: the segment type to search for (i.e., SEGMENT_TYPE_I40E)
 * @pkg_hdr: pointer to the package header to be searched
 *
 * This function searches a package file for a particular segment type. On
 * success it returns a pointer to the segment header, otherwise it will
 * return NULL.
 **/
struct i40e_generic_seg_header *
i40e_find_segment_in_package(u32 segment_type,
			     struct i40e_package_header *pkg_hdr)
{
	struct i40e_generic_seg_header *segment;
	u32 i;

	/* Search all package segments for the requested segment type */
	for (i = 0; i < pkg_hdr->segment_count; i++) {
		segment =
			(struct i40e_generic_seg_header *)((u8 *)pkg_hdr +
			 pkg_hdr->segment_offset[i]);

		if (segment->type == segment_type)
			return segment;
	}

	return NULL;
}

/* Get section table in profile */
#define I40E_SECTION_TABLE(profile, sec_tbl)				\
	do {								\
		struct i40e_profile_segment *p = (profile);		\
		u32 count;						\
		u32 *nvm;						\
		count = p->device_table_count;				\
		nvm = (u32 *)&p->device_table[count];			\
		sec_tbl = (struct i40e_section_table *)&nvm[nvm[0] + 1]; \
	} while (0)

/* Get section header in profile */
#define I40E_SECTION_HEADER(profile, offset)				\
	(struct i40e_profile_section_header *)((u8 *)(profile) + (offset))

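/*
 * Editorial note on the layout the two macros above assume (derived from
 * their pointer arithmetic, not from a separate specification): an i40e
 * profile segment is laid out roughly as
 *
 *	struct i40e_profile_segment	header, version, name
 *	device_table[device_table_count]	supported vendor/device ids
 *	u32 nvm[nvm[0] + 1]			NVM table, first word = count
 *	struct i40e_section_table		section_count + section_offset[]
 *
 * I40E_SECTION_TABLE() walks past the device and NVM tables to reach the
 * section table, and I40E_SECTION_HEADER() turns a byte offset from that
 * table into a section header pointer relative to the segment start.
 */
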
On 5593 * success it returns a pointer to the section header, otherwise it will 5594 * return NULL. 5595 **/ 5596 struct i40e_profile_section_header * 5597 i40e_find_section_in_profile(u32 section_type, 5598 struct i40e_profile_segment *profile) 5599 { 5600 struct i40e_profile_section_header *sec; 5601 struct i40e_section_table *sec_tbl; 5602 u32 sec_off; 5603 u32 i; 5604 5605 if (profile->header.type != SEGMENT_TYPE_I40E) 5606 return NULL; 5607 5608 I40E_SECTION_TABLE(profile, sec_tbl); 5609 5610 for (i = 0; i < sec_tbl->section_count; i++) { 5611 sec_off = sec_tbl->section_offset[i]; 5612 sec = I40E_SECTION_HEADER(profile, sec_off); 5613 if (sec->section.type == section_type) 5614 return sec; 5615 } 5616 5617 return NULL; 5618 } 5619 5620 /** 5621 * i40e_ddp_exec_aq_section - Execute generic AQ for DDP 5622 * @hw: pointer to the hw struct 5623 * @aq: command buffer containing all data to execute AQ 5624 **/ 5625 static enum 5626 i40e_status_code i40e_ddp_exec_aq_section(struct i40e_hw *hw, 5627 struct i40e_profile_aq_section *aq) 5628 { 5629 i40e_status status; 5630 struct i40e_aq_desc desc; 5631 u8 *msg = NULL; 5632 u16 msglen; 5633 5634 i40e_fill_default_direct_cmd_desc(&desc, aq->opcode); 5635 desc.flags |= cpu_to_le16(aq->flags); 5636 memcpy(desc.params.raw, aq->param, sizeof(desc.params.raw)); 5637 5638 msglen = aq->datalen; 5639 if (msglen) { 5640 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | 5641 I40E_AQ_FLAG_RD)); 5642 if (msglen > I40E_AQ_LARGE_BUF) 5643 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 5644 desc.datalen = cpu_to_le16(msglen); 5645 msg = &aq->data[0]; 5646 } 5647 5648 status = i40e_asq_send_command(hw, &desc, msg, msglen, NULL); 5649 5650 if (status) { 5651 i40e_debug(hw, I40E_DEBUG_PACKAGE, 5652 "unable to exec DDP AQ opcode %u, error %d\n", 5653 aq->opcode, status); 5654 return status; 5655 } 5656 5657 /* copy returned desc to aq_buf */ 5658 memcpy(aq->param, desc.params.raw, sizeof(desc.params.raw)); 5659 5660 return 0; 5661 } 5662 5663 /** 5664 * i40e_validate_profile 5665 * @hw: pointer to the hardware structure 5666 * @profile: pointer to the profile segment of the package to be validated 5667 * @track_id: package tracking id 5668 * @rollback: flag if the profile is for rollback. 5669 * 5670 * Validates supported devices and profile's sections. 
/**
 * i40e_ddp_exec_aq_section - Execute generic AQ for DDP
 * @hw: pointer to the hw struct
 * @aq: command buffer containing all data to execute AQ
 **/
static enum
i40e_status_code i40e_ddp_exec_aq_section(struct i40e_hw *hw,
					  struct i40e_profile_aq_section *aq)
{
	i40e_status status;
	struct i40e_aq_desc desc;
	u8 *msg = NULL;
	u16 msglen;

	i40e_fill_default_direct_cmd_desc(&desc, aq->opcode);
	desc.flags |= cpu_to_le16(aq->flags);
	memcpy(desc.params.raw, aq->param, sizeof(desc.params.raw));

	msglen = aq->datalen;
	if (msglen) {
		desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF |
						I40E_AQ_FLAG_RD));
		if (msglen > I40E_AQ_LARGE_BUF)
			desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
		desc.datalen = cpu_to_le16(msglen);
		msg = &aq->data[0];
	}

	status = i40e_asq_send_command(hw, &desc, msg, msglen, NULL);

	if (status) {
		i40e_debug(hw, I40E_DEBUG_PACKAGE,
			   "unable to exec DDP AQ opcode %u, error %d\n",
			   aq->opcode, status);
		return status;
	}

	/* copy returned desc to aq_buf */
	memcpy(aq->param, desc.params.raw, sizeof(desc.params.raw));

	return 0;
}

/**
 * i40e_validate_profile - validate a DDP profile segment
 * @hw: pointer to the hardware structure
 * @profile: pointer to the profile segment of the package to be validated
 * @track_id: package tracking id
 * @rollback: flag if the profile is for rollback.
 *
 * Validates supported devices and profile's sections.
 */
static enum i40e_status_code
i40e_validate_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
		      u32 track_id, bool rollback)
{
	struct i40e_profile_section_header *sec = NULL;
	i40e_status status = 0;
	struct i40e_section_table *sec_tbl;
	u32 vendor_dev_id;
	u32 dev_cnt;
	u32 sec_off;
	u32 i;

	if (track_id == I40E_DDP_TRACKID_INVALID) {
		i40e_debug(hw, I40E_DEBUG_PACKAGE, "Invalid track_id\n");
		return I40E_NOT_SUPPORTED;
	}

	dev_cnt = profile->device_table_count;
	for (i = 0; i < dev_cnt; i++) {
		vendor_dev_id = profile->device_table[i].vendor_dev_id;
		if ((vendor_dev_id >> 16) == PCI_VENDOR_ID_INTEL &&
		    hw->device_id == (vendor_dev_id & 0xFFFF))
			break;
	}
	if (dev_cnt && i == dev_cnt) {
		i40e_debug(hw, I40E_DEBUG_PACKAGE,
			   "Device doesn't support DDP\n");
		return I40E_ERR_DEVICE_NOT_SUPPORTED;
	}

	I40E_SECTION_TABLE(profile, sec_tbl);

	/* Validate section types */
	for (i = 0; i < sec_tbl->section_count; i++) {
		sec_off = sec_tbl->section_offset[i];
		sec = I40E_SECTION_HEADER(profile, sec_off);
		if (rollback) {
			if (sec->section.type == SECTION_TYPE_MMIO ||
			    sec->section.type == SECTION_TYPE_AQ ||
			    sec->section.type == SECTION_TYPE_RB_AQ) {
				i40e_debug(hw, I40E_DEBUG_PACKAGE,
					   "Not a roll-back package\n");
				return I40E_NOT_SUPPORTED;
			}
		} else {
			if (sec->section.type == SECTION_TYPE_RB_AQ ||
			    sec->section.type == SECTION_TYPE_RB_MMIO) {
				i40e_debug(hw, I40E_DEBUG_PACKAGE,
					   "Not an original package\n");
				return I40E_NOT_SUPPORTED;
			}
		}
	}

	return status;
}

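/*
 * Editorial worked example for the device-table check above: each
 * vendor_dev_id packs the PCI vendor ID in the upper 16 bits and the device
 * ID in the lower 16 bits, so an entry matching an XL710 10G SFP+ part
 * (device ID 0x1572) would be
 *
 *	(PCI_VENDOR_ID_INTEL << 16) | 0x1572  ==  0x80861572
 *
 * and a PF whose hw->device_id is 0x1572 passes the check.  An empty device
 * table (dev_cnt == 0) is treated as "applies to every device".
 */
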
/**
 * i40e_write_profile - write a DDP profile to the device
 * @hw: pointer to the hardware structure
 * @profile: pointer to the profile segment of the package to be downloaded
 * @track_id: package tracking id
 *
 * Handles the download of a complete package.
 */
enum i40e_status_code
i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
		   u32 track_id)
{
	i40e_status status = 0;
	struct i40e_section_table *sec_tbl;
	struct i40e_profile_section_header *sec = NULL;
	struct i40e_profile_aq_section *ddp_aq;
	u32 section_size = 0;
	u32 offset = 0, info = 0;
	u32 sec_off;
	u32 i;

	status = i40e_validate_profile(hw, profile, track_id, false);
	if (status)
		return status;

	I40E_SECTION_TABLE(profile, sec_tbl);

	for (i = 0; i < sec_tbl->section_count; i++) {
		sec_off = sec_tbl->section_offset[i];
		sec = I40E_SECTION_HEADER(profile, sec_off);
		/* Process generic admin command */
		if (sec->section.type == SECTION_TYPE_AQ) {
			ddp_aq = (struct i40e_profile_aq_section *)&sec[1];
			status = i40e_ddp_exec_aq_section(hw, ddp_aq);
			if (status) {
				i40e_debug(hw, I40E_DEBUG_PACKAGE,
					   "Failed to execute aq: section %d, opcode %u\n",
					   i, ddp_aq->opcode);
				break;
			}
			sec->section.type = SECTION_TYPE_RB_AQ;
		}

		/* Skip any non-mmio sections */
		if (sec->section.type != SECTION_TYPE_MMIO)
			continue;

		section_size = sec->section.size +
			sizeof(struct i40e_profile_section_header);

		/* Write MMIO section */
		status = i40e_aq_write_ddp(hw, (void *)sec, (u16)section_size,
					   track_id, &offset, &info, NULL);
		if (status) {
			i40e_debug(hw, I40E_DEBUG_PACKAGE,
				   "Failed to write profile: section %d, offset %d, info %d\n",
				   i, offset, info);
			break;
		}
	}
	return status;
}

/**
 * i40e_rollback_profile - roll back a previously loaded DDP profile
 * @hw: pointer to the hardware structure
 * @profile: pointer to the profile segment of the package to be removed
 * @track_id: package tracking id
 *
 * Rolls back a previously loaded package.
 */
enum i40e_status_code
i40e_rollback_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
		      u32 track_id)
{
	struct i40e_profile_section_header *sec = NULL;
	i40e_status status = 0;
	struct i40e_section_table *sec_tbl;
	u32 offset = 0, info = 0;
	u32 section_size = 0;
	u32 sec_off;
	int i;

	status = i40e_validate_profile(hw, profile, track_id, true);
	if (status)
		return status;

	I40E_SECTION_TABLE(profile, sec_tbl);

	/* For rollback write sections in reverse */
	for (i = sec_tbl->section_count - 1; i >= 0; i--) {
		sec_off = sec_tbl->section_offset[i];
		sec = I40E_SECTION_HEADER(profile, sec_off);

		/* Skip any non-rollback sections */
		if (sec->section.type != SECTION_TYPE_RB_MMIO)
			continue;

		section_size = sec->section.size +
			sizeof(struct i40e_profile_section_header);

		/* Write roll-back MMIO section */
		status = i40e_aq_write_ddp(hw, (void *)sec, (u16)section_size,
					   track_id, &offset, &info, NULL);
		if (status) {
			i40e_debug(hw, I40E_DEBUG_PACKAGE,
				   "Failed to write profile: section %d, offset %d, info %d\n",
				   i, offset, info);
			break;
		}
	}
	return status;
}

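/*
 * Editorial sketch of how the pieces above are typically combined (local
 * names are hypothetical and error handling is trimmed; this is not the
 * driver's load path verbatim):
 *
 *	struct i40e_generic_seg_header *seg;
 *	struct i40e_profile_segment *profile;
 *	u8 pinfo_sec[sizeof(struct i40e_profile_section_header) +
 *		     sizeof(struct i40e_profile_info)];
 *
 *	seg = i40e_find_segment_in_package(SEGMENT_TYPE_I40E, pkg_hdr);
 *	profile = (struct i40e_profile_segment *)seg;
 *	status = i40e_write_profile(hw, profile, track_id);
 *	if (!status)
 *		status = i40e_add_pinfo_to_list(hw, profile, pinfo_sec,
 *						track_id);
 *
 * i40e_rollback_profile() is the mirror operation for a roll-back package.
 */
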
/**
 * i40e_add_pinfo_to_list - register a loaded DDP profile
 * @hw: pointer to the hardware structure
 * @profile: pointer to the profile segment of the package
 * @profile_info_sec: buffer for information section
 * @track_id: package tracking id
 *
 * Register a profile to the list of loaded profiles.
 */
enum i40e_status_code
i40e_add_pinfo_to_list(struct i40e_hw *hw,
		       struct i40e_profile_segment *profile,
		       u8 *profile_info_sec, u32 track_id)
{
	i40e_status status = 0;
	struct i40e_profile_section_header *sec = NULL;
	struct i40e_profile_info *pinfo;
	u32 offset = 0, info = 0;

	sec = (struct i40e_profile_section_header *)profile_info_sec;
	sec->tbl_size = 1;
	sec->data_end = sizeof(struct i40e_profile_section_header) +
			sizeof(struct i40e_profile_info);
	sec->section.type = SECTION_TYPE_INFO;
	sec->section.offset = sizeof(struct i40e_profile_section_header);
	sec->section.size = sizeof(struct i40e_profile_info);
	pinfo = (struct i40e_profile_info *)(profile_info_sec +
					     sec->section.offset);
	pinfo->track_id = track_id;
	pinfo->version = profile->version;
	pinfo->op = I40E_DDP_ADD_TRACKID;
	memcpy(pinfo->name, profile->name, I40E_DDP_NAME_SIZE);

	status = i40e_aq_write_ddp(hw, (void *)sec, sec->data_end,
				   track_id, &offset, &info, NULL);

	return status;
}

/**
 * i40e_aq_add_cloud_filters - add cloud filters for a VSI
 * @hw: pointer to the hardware structure
 * @seid: VSI seid to add cloud filters to
 * @filters: Buffer which contains the filters to be added
 * @filter_count: number of filters contained in the buffer
 *
 * Set the cloud filters for a given VSI. The contents of the
 * i40e_aqc_cloud_filters_element_data are filled in by the caller
 * of the function.
 *
 **/
enum i40e_status_code
i40e_aq_add_cloud_filters(struct i40e_hw *hw, u16 seid,
			  struct i40e_aqc_cloud_filters_element_data *filters,
			  u8 filter_count)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_add_remove_cloud_filters *cmd =
		(struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
	enum i40e_status_code status;
	u16 buff_len;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_add_cloud_filters);

	buff_len = filter_count * sizeof(*filters);
	desc.datalen = cpu_to_le16(buff_len);
	desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
	cmd->num_filters = filter_count;
	cmd->seid = cpu_to_le16(seid);

	status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);

	return status;
}

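/*
 * Illustrative caller sketch (editorial addition): the element contents are
 * entirely caller-defined, so adding a single filter is "fill one element,
 * send it".  The field and flag names below follow the pattern of the
 * driver's cloud-filter code but should be checked against
 * i40e_adminq_cmd.h before use:
 *
 *	struct i40e_aqc_cloud_filters_element_data cld = {};
 *
 *	ether_addr_copy(cld.outer_mac, mac);		hypothetical "mac"
 *	cld.flags = cpu_to_le16(filter_flags);		hypothetical flags
 *	cld.queue_number = cpu_to_le16(queue);		hypothetical queue
 *	status = i40e_aq_add_cloud_filters(hw, vsi_seid, &cld, 1);
 *
 * The _bb variant below is the same idea with the larger element layout.
 */
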
/**
 * i40e_aq_add_cloud_filters_bb - add big buffer cloud filters for a VSI
 * @hw: pointer to the hardware structure
 * @seid: VSI seid to add cloud filters to
 * @filters: Buffer which contains the filters in big buffer to be added
 * @filter_count: number of filters contained in the buffer
 *
 * Set the big buffer cloud filters for a given VSI. The contents of the
 * i40e_aqc_cloud_filters_element_bb are filled in by the caller of the
 * function.
 *
 **/
enum i40e_status_code
i40e_aq_add_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
			     struct i40e_aqc_cloud_filters_element_bb *filters,
			     u8 filter_count)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_add_remove_cloud_filters *cmd =
		(struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
	i40e_status status;
	u16 buff_len;
	int i;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_add_cloud_filters);

	buff_len = filter_count * sizeof(*filters);
	desc.datalen = cpu_to_le16(buff_len);
	desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
	cmd->num_filters = filter_count;
	cmd->seid = cpu_to_le16(seid);
	cmd->big_buffer_flag = I40E_AQC_ADD_CLOUD_CMD_BB;

	for (i = 0; i < filter_count; i++) {
		u16 tnl_type;
		u32 ti;

		tnl_type = (le16_to_cpu(filters[i].element.flags) &
			    I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >>
			   I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT;

		/* Due to hardware eccentricities, the VNI for Geneve is shifted
		 * one byte further than the Tenant ID used by other tunnel
		 * types.
		 */
		if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) {
			ti = le32_to_cpu(filters[i].element.tenant_id);
			filters[i].element.tenant_id = cpu_to_le32(ti << 8);
		}
	}

	status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);

	return status;
}

/**
 * i40e_aq_rem_cloud_filters - remove cloud filters for a VSI
 * @hw: pointer to the hardware structure
 * @seid: VSI seid to remove cloud filters from
 * @filters: Buffer which contains the filters to be removed
 * @filter_count: number of filters contained in the buffer
 *
 * Remove the cloud filters for a given VSI. The contents of the
 * i40e_aqc_cloud_filters_element_data are filled in by the caller
 * of the function.
 *
 **/
enum i40e_status_code
i40e_aq_rem_cloud_filters(struct i40e_hw *hw, u16 seid,
			  struct i40e_aqc_cloud_filters_element_data *filters,
			  u8 filter_count)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_add_remove_cloud_filters *cmd =
		(struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
	enum i40e_status_code status;
	u16 buff_len;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_remove_cloud_filters);

	buff_len = filter_count * sizeof(*filters);
	desc.datalen = cpu_to_le16(buff_len);
	desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
	cmd->num_filters = filter_count;
	cmd->seid = cpu_to_le16(seid);

	status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);

	return status;
}

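/*
 * Editorial worked example for the Geneve adjustment in the _bb add and
 * remove paths (above and below): a 24-bit VNI of 0x000012 arrives from the
 * caller as tenant_id 0x00000012; shifting it left by one byte stores
 * 0x00001200, which is where the hardware expects the Geneve VNI, while
 * VXLAN and the other tunnel types keep the unshifted Tenant ID.
 */
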
/**
 * i40e_aq_rem_cloud_filters_bb - remove big buffer cloud filters for a VSI
 * @hw: pointer to the hardware structure
 * @seid: VSI seid to remove cloud filters from
 * @filters: Buffer which contains the filters in big buffer to be removed
 * @filter_count: number of filters contained in the buffer
 *
 * Remove the big buffer cloud filters for a given VSI. The contents of the
 * i40e_aqc_cloud_filters_element_bb are filled in by the caller of the
 * function.
 *
 **/
enum i40e_status_code
i40e_aq_rem_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
			     struct i40e_aqc_cloud_filters_element_bb *filters,
			     u8 filter_count)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_add_remove_cloud_filters *cmd =
		(struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
	i40e_status status;
	u16 buff_len;
	int i;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_remove_cloud_filters);

	buff_len = filter_count * sizeof(*filters);
	desc.datalen = cpu_to_le16(buff_len);
	desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
	cmd->num_filters = filter_count;
	cmd->seid = cpu_to_le16(seid);
	cmd->big_buffer_flag = I40E_AQC_ADD_CLOUD_CMD_BB;

	for (i = 0; i < filter_count; i++) {
		u16 tnl_type;
		u32 ti;

		tnl_type = (le16_to_cpu(filters[i].element.flags) &
			    I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >>
			   I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT;

		/* Due to hardware eccentricities, the VNI for Geneve is shifted
		 * one byte further than the Tenant ID used by other tunnel
		 * types.
		 */
		if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) {
			ti = le32_to_cpu(filters[i].element.tenant_id);
			filters[i].element.tenant_id = cpu_to_le32(ti << 8);
		}
	}

	status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);

	return status;
}