// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#include "i40e_type.h"
#include "i40e_adminq.h"
#include "i40e_prototype.h"
#include <linux/avf/virtchnl.h>

/**
 * i40e_set_mac_type - Sets MAC type
 * @hw: pointer to the HW structure
 *
 * This function sets the mac type of the adapter based on the
 * vendor ID and device ID stored in the hw structure.
 **/
i40e_status i40e_set_mac_type(struct i40e_hw *hw)
{
	i40e_status status = 0;

	if (hw->vendor_id == PCI_VENDOR_ID_INTEL) {
		switch (hw->device_id) {
		case I40E_DEV_ID_SFP_XL710:
		case I40E_DEV_ID_QEMU:
		case I40E_DEV_ID_KX_B:
		case I40E_DEV_ID_KX_C:
		case I40E_DEV_ID_QSFP_A:
		case I40E_DEV_ID_QSFP_B:
		case I40E_DEV_ID_QSFP_C:
		case I40E_DEV_ID_10G_BASE_T:
		case I40E_DEV_ID_10G_BASE_T4:
		case I40E_DEV_ID_10G_B:
		case I40E_DEV_ID_10G_SFP:
		case I40E_DEV_ID_20G_KR2:
		case I40E_DEV_ID_20G_KR2_A:
		case I40E_DEV_ID_25G_B:
		case I40E_DEV_ID_25G_SFP28:
		case I40E_DEV_ID_X710_N3000:
		case I40E_DEV_ID_XXV710_N3000:
			hw->mac.type = I40E_MAC_XL710;
			break;
		case I40E_DEV_ID_KX_X722:
		case I40E_DEV_ID_QSFP_X722:
		case I40E_DEV_ID_SFP_X722:
		case I40E_DEV_ID_1G_BASE_T_X722:
		case I40E_DEV_ID_10G_BASE_T_X722:
		case I40E_DEV_ID_SFP_I_X722:
			hw->mac.type = I40E_MAC_X722;
			break;
		default:
			hw->mac.type = I40E_MAC_GENERIC;
			break;
		}
	} else {
		status = I40E_ERR_DEVICE_NOT_SUPPORTED;
	}

	hw_dbg(hw, "i40e_set_mac_type found mac: %d, returns: %d\n",
	       hw->mac.type, status);
	return status;
}

/**
 * i40e_aq_str - convert AQ err code to a string
 * @hw: pointer to the HW structure
 * @aq_err: the AQ error code to convert
 **/
const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err)
{
	switch (aq_err) {
	case I40E_AQ_RC_OK:
		return "OK";
	case I40E_AQ_RC_EPERM:
		return "I40E_AQ_RC_EPERM";
	case I40E_AQ_RC_ENOENT:
		return "I40E_AQ_RC_ENOENT";
	case I40E_AQ_RC_ESRCH:
		return "I40E_AQ_RC_ESRCH";
	case I40E_AQ_RC_EINTR:
		return "I40E_AQ_RC_EINTR";
	case I40E_AQ_RC_EIO:
		return "I40E_AQ_RC_EIO";
	case I40E_AQ_RC_ENXIO:
		return "I40E_AQ_RC_ENXIO";
	case I40E_AQ_RC_E2BIG:
		return "I40E_AQ_RC_E2BIG";
	case I40E_AQ_RC_EAGAIN:
		return "I40E_AQ_RC_EAGAIN";
	case I40E_AQ_RC_ENOMEM:
		return "I40E_AQ_RC_ENOMEM";
	case I40E_AQ_RC_EACCES:
		return "I40E_AQ_RC_EACCES";
	case I40E_AQ_RC_EFAULT:
		return "I40E_AQ_RC_EFAULT";
	case I40E_AQ_RC_EBUSY:
		return "I40E_AQ_RC_EBUSY";
	case I40E_AQ_RC_EEXIST:
		return "I40E_AQ_RC_EEXIST";
	case I40E_AQ_RC_EINVAL:
		return "I40E_AQ_RC_EINVAL";
	case I40E_AQ_RC_ENOTTY:
		return "I40E_AQ_RC_ENOTTY";
	case I40E_AQ_RC_ENOSPC:
		return "I40E_AQ_RC_ENOSPC";
	case I40E_AQ_RC_ENOSYS:
		return "I40E_AQ_RC_ENOSYS";
	case I40E_AQ_RC_ERANGE:
		return "I40E_AQ_RC_ERANGE";
	case I40E_AQ_RC_EFLUSHED:
		return "I40E_AQ_RC_EFLUSHED";
	case I40E_AQ_RC_BAD_ADDR:
		return "I40E_AQ_RC_BAD_ADDR";
	case I40E_AQ_RC_EMODE:
		return "I40E_AQ_RC_EMODE";
	case I40E_AQ_RC_EFBIG:
		return "I40E_AQ_RC_EFBIG";
	}

	snprintf(hw->err_str, sizeof(hw->err_str), "%d", aq_err);
	return hw->err_str;
}

/**
 * i40e_stat_str - convert status err code to a string
 * @hw: pointer to the HW structure
 * @stat_err: the status error code to convert
 **/
const char *i40e_stat_str(struct i40e_hw *hw, i40e_status stat_err)
{
	switch (stat_err) {
	case 0:
		return "OK";
	case I40E_ERR_NVM:
		return "I40E_ERR_NVM";
	case I40E_ERR_NVM_CHECKSUM:
		return "I40E_ERR_NVM_CHECKSUM";
	case I40E_ERR_PHY:
		return "I40E_ERR_PHY";
	case I40E_ERR_CONFIG:
		return "I40E_ERR_CONFIG";
	case I40E_ERR_PARAM:
		return "I40E_ERR_PARAM";
	case I40E_ERR_MAC_TYPE:
		return "I40E_ERR_MAC_TYPE";
	case I40E_ERR_UNKNOWN_PHY:
		return "I40E_ERR_UNKNOWN_PHY";
	case I40E_ERR_LINK_SETUP:
		return "I40E_ERR_LINK_SETUP";
	case I40E_ERR_ADAPTER_STOPPED:
		return "I40E_ERR_ADAPTER_STOPPED";
	case I40E_ERR_INVALID_MAC_ADDR:
		return "I40E_ERR_INVALID_MAC_ADDR";
	case I40E_ERR_DEVICE_NOT_SUPPORTED:
		return "I40E_ERR_DEVICE_NOT_SUPPORTED";
	case I40E_ERR_MASTER_REQUESTS_PENDING:
		return "I40E_ERR_MASTER_REQUESTS_PENDING";
	case I40E_ERR_INVALID_LINK_SETTINGS:
		return "I40E_ERR_INVALID_LINK_SETTINGS";
	case I40E_ERR_AUTONEG_NOT_COMPLETE:
		return "I40E_ERR_AUTONEG_NOT_COMPLETE";
	case I40E_ERR_RESET_FAILED:
		return "I40E_ERR_RESET_FAILED";
	case I40E_ERR_SWFW_SYNC:
		return "I40E_ERR_SWFW_SYNC";
	case I40E_ERR_NO_AVAILABLE_VSI:
		return "I40E_ERR_NO_AVAILABLE_VSI";
	case I40E_ERR_NO_MEMORY:
		return "I40E_ERR_NO_MEMORY";
	case I40E_ERR_BAD_PTR:
		return "I40E_ERR_BAD_PTR";
	case I40E_ERR_RING_FULL:
		return "I40E_ERR_RING_FULL";
	case I40E_ERR_INVALID_PD_ID:
		return "I40E_ERR_INVALID_PD_ID";
	case I40E_ERR_INVALID_QP_ID:
		return "I40E_ERR_INVALID_QP_ID";
	case I40E_ERR_INVALID_CQ_ID:
		return "I40E_ERR_INVALID_CQ_ID";
	case I40E_ERR_INVALID_CEQ_ID:
		return "I40E_ERR_INVALID_CEQ_ID";
	case I40E_ERR_INVALID_AEQ_ID:
		return "I40E_ERR_INVALID_AEQ_ID";
	case I40E_ERR_INVALID_SIZE:
		return "I40E_ERR_INVALID_SIZE";
	case I40E_ERR_INVALID_ARP_INDEX:
		return "I40E_ERR_INVALID_ARP_INDEX";
	case I40E_ERR_INVALID_FPM_FUNC_ID:
		return "I40E_ERR_INVALID_FPM_FUNC_ID";
	case I40E_ERR_QP_INVALID_MSG_SIZE:
		return "I40E_ERR_QP_INVALID_MSG_SIZE";
	case I40E_ERR_QP_TOOMANY_WRS_POSTED:
		return "I40E_ERR_QP_TOOMANY_WRS_POSTED";
	case I40E_ERR_INVALID_FRAG_COUNT:
		return "I40E_ERR_INVALID_FRAG_COUNT";
	case I40E_ERR_QUEUE_EMPTY:
		return "I40E_ERR_QUEUE_EMPTY";
	case I40E_ERR_INVALID_ALIGNMENT:
		return "I40E_ERR_INVALID_ALIGNMENT";
	case I40E_ERR_FLUSHED_QUEUE:
		return "I40E_ERR_FLUSHED_QUEUE";
	case I40E_ERR_INVALID_PUSH_PAGE_INDEX:
		return "I40E_ERR_INVALID_PUSH_PAGE_INDEX";
	case I40E_ERR_INVALID_IMM_DATA_SIZE:
		return "I40E_ERR_INVALID_IMM_DATA_SIZE";
	case I40E_ERR_TIMEOUT:
		return "I40E_ERR_TIMEOUT";
	case I40E_ERR_OPCODE_MISMATCH:
		return "I40E_ERR_OPCODE_MISMATCH";
	case I40E_ERR_CQP_COMPL_ERROR:
		return "I40E_ERR_CQP_COMPL_ERROR";
	case I40E_ERR_INVALID_VF_ID:
		return "I40E_ERR_INVALID_VF_ID";
	case I40E_ERR_INVALID_HMCFN_ID:
		return "I40E_ERR_INVALID_HMCFN_ID";
	case I40E_ERR_BACKING_PAGE_ERROR:
		return "I40E_ERR_BACKING_PAGE_ERROR";
	case I40E_ERR_NO_PBLCHUNKS_AVAILABLE:
		return "I40E_ERR_NO_PBLCHUNKS_AVAILABLE";
	case I40E_ERR_INVALID_PBLE_INDEX:
		return "I40E_ERR_INVALID_PBLE_INDEX";
	case I40E_ERR_INVALID_SD_INDEX:
		return "I40E_ERR_INVALID_SD_INDEX";
	case I40E_ERR_INVALID_PAGE_DESC_INDEX:
		return "I40E_ERR_INVALID_PAGE_DESC_INDEX";
	case I40E_ERR_INVALID_SD_TYPE:
		return "I40E_ERR_INVALID_SD_TYPE";
	case I40E_ERR_MEMCPY_FAILED:
		return "I40E_ERR_MEMCPY_FAILED";
	case I40E_ERR_INVALID_HMC_OBJ_INDEX:
return "I40E_ERR_INVALID_HMC_OBJ_INDEX"; 230 case I40E_ERR_INVALID_HMC_OBJ_COUNT: 231 return "I40E_ERR_INVALID_HMC_OBJ_COUNT"; 232 case I40E_ERR_INVALID_SRQ_ARM_LIMIT: 233 return "I40E_ERR_INVALID_SRQ_ARM_LIMIT"; 234 case I40E_ERR_SRQ_ENABLED: 235 return "I40E_ERR_SRQ_ENABLED"; 236 case I40E_ERR_ADMIN_QUEUE_ERROR: 237 return "I40E_ERR_ADMIN_QUEUE_ERROR"; 238 case I40E_ERR_ADMIN_QUEUE_TIMEOUT: 239 return "I40E_ERR_ADMIN_QUEUE_TIMEOUT"; 240 case I40E_ERR_BUF_TOO_SHORT: 241 return "I40E_ERR_BUF_TOO_SHORT"; 242 case I40E_ERR_ADMIN_QUEUE_FULL: 243 return "I40E_ERR_ADMIN_QUEUE_FULL"; 244 case I40E_ERR_ADMIN_QUEUE_NO_WORK: 245 return "I40E_ERR_ADMIN_QUEUE_NO_WORK"; 246 case I40E_ERR_BAD_IWARP_CQE: 247 return "I40E_ERR_BAD_IWARP_CQE"; 248 case I40E_ERR_NVM_BLANK_MODE: 249 return "I40E_ERR_NVM_BLANK_MODE"; 250 case I40E_ERR_NOT_IMPLEMENTED: 251 return "I40E_ERR_NOT_IMPLEMENTED"; 252 case I40E_ERR_PE_DOORBELL_NOT_ENABLED: 253 return "I40E_ERR_PE_DOORBELL_NOT_ENABLED"; 254 case I40E_ERR_DIAG_TEST_FAILED: 255 return "I40E_ERR_DIAG_TEST_FAILED"; 256 case I40E_ERR_NOT_READY: 257 return "I40E_ERR_NOT_READY"; 258 case I40E_NOT_SUPPORTED: 259 return "I40E_NOT_SUPPORTED"; 260 case I40E_ERR_FIRMWARE_API_VERSION: 261 return "I40E_ERR_FIRMWARE_API_VERSION"; 262 case I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR: 263 return "I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR"; 264 } 265 266 snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err); 267 return hw->err_str; 268 } 269 270 /** 271 * i40e_debug_aq 272 * @hw: debug mask related to admin queue 273 * @mask: debug mask 274 * @desc: pointer to admin queue descriptor 275 * @buffer: pointer to command buffer 276 * @buf_len: max length of buffer 277 * 278 * Dumps debug log about adminq command with descriptor contents. 279 **/ 280 void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc, 281 void *buffer, u16 buf_len) 282 { 283 struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc; 284 u32 effective_mask = hw->debug_mask & mask; 285 char prefix[27]; 286 u16 len; 287 u8 *buf = (u8 *)buffer; 288 289 if (!effective_mask || !desc) 290 return; 291 292 len = le16_to_cpu(aq_desc->datalen); 293 294 i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR, 295 "AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n", 296 le16_to_cpu(aq_desc->opcode), 297 le16_to_cpu(aq_desc->flags), 298 le16_to_cpu(aq_desc->datalen), 299 le16_to_cpu(aq_desc->retval)); 300 i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR, 301 "\tcookie (h,l) 0x%08X 0x%08X\n", 302 le32_to_cpu(aq_desc->cookie_high), 303 le32_to_cpu(aq_desc->cookie_low)); 304 i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR, 305 "\tparam (0,1) 0x%08X 0x%08X\n", 306 le32_to_cpu(aq_desc->params.internal.param0), 307 le32_to_cpu(aq_desc->params.internal.param1)); 308 i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR, 309 "\taddr (h,l) 0x%08X 0x%08X\n", 310 le32_to_cpu(aq_desc->params.external.addr_high), 311 le32_to_cpu(aq_desc->params.external.addr_low)); 312 313 if (buffer && buf_len != 0 && len != 0 && 314 (effective_mask & I40E_DEBUG_AQ_DESC_BUFFER)) { 315 i40e_debug(hw, mask, "AQ CMD Buffer:\n"); 316 if (buf_len < len) 317 len = buf_len; 318 319 snprintf(prefix, sizeof(prefix), 320 "i40e %02x:%02x.%x: \t0x", 321 hw->bus.bus_id, 322 hw->bus.device, 323 hw->bus.func); 324 325 print_hex_dump(KERN_INFO, prefix, DUMP_PREFIX_OFFSET, 326 16, 1, buf, len, false); 327 } 328 } 329 330 /** 331 * i40e_check_asq_alive 332 * @hw: pointer to the hw struct 333 * 334 * Returns true if Queue is enabled else false. 
/**
 * i40e_check_asq_alive
 * @hw: pointer to the hw struct
 *
 * Returns true if Queue is enabled else false.
 **/
bool i40e_check_asq_alive(struct i40e_hw *hw)
{
	if (hw->aq.asq.len)
		return !!(rd32(hw, hw->aq.asq.len) &
			  I40E_PF_ATQLEN_ATQENABLE_MASK);
	else
		return false;
}

/**
 * i40e_aq_queue_shutdown
 * @hw: pointer to the hw struct
 * @unloading: is the driver unloading itself
 *
 * Tell the Firmware that we're shutting down the AdminQ and whether
 * or not the driver is unloading as well.
 **/
i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw,
				   bool unloading)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_queue_shutdown *cmd =
		(struct i40e_aqc_queue_shutdown *)&desc.params.raw;
	i40e_status status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_queue_shutdown);

	if (unloading)
		cmd->driver_unloading = cpu_to_le32(I40E_AQ_DRIVER_UNLOADING);
	status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);

	return status;
}

/**
 * i40e_aq_get_set_rss_lut
 * @hw: pointer to the hardware structure
 * @vsi_id: vsi fw index
 * @pf_lut: for PF table set true, for VSI table set false
 * @lut: pointer to the lut buffer provided by the caller
 * @lut_size: size of the lut buffer
 * @set: set true to set the table, false to get the table
 *
 * Internal function to get or set RSS look up table
 **/
static i40e_status i40e_aq_get_set_rss_lut(struct i40e_hw *hw,
					   u16 vsi_id, bool pf_lut,
					   u8 *lut, u16 lut_size,
					   bool set)
{
	i40e_status status;
	struct i40e_aq_desc desc;
	struct i40e_aqc_get_set_rss_lut *cmd_resp =
		(struct i40e_aqc_get_set_rss_lut *)&desc.params.raw;

	if (set)
		i40e_fill_default_direct_cmd_desc(&desc,
						  i40e_aqc_opc_set_rss_lut);
	else
		i40e_fill_default_direct_cmd_desc(&desc,
						  i40e_aqc_opc_get_rss_lut);

	/* Indirect command */
	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);

	cmd_resp->vsi_id =
			cpu_to_le16((u16)((vsi_id <<
					  I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT) &
					  I40E_AQC_SET_RSS_LUT_VSI_ID_MASK));
	cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_LUT_VSI_VALID);

	if (pf_lut)
		cmd_resp->flags |= cpu_to_le16((u16)
				((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF <<
				  I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
				  I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
	else
		cmd_resp->flags |= cpu_to_le16((u16)
				((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI <<
				  I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
				  I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));

	status = i40e_asq_send_command(hw, &desc, lut, lut_size, NULL);

	return status;
}

/**
 * i40e_aq_get_rss_lut
 * @hw: pointer to the hardware structure
 * @vsi_id: vsi fw index
 * @pf_lut: for PF table set true, for VSI table set false
 * @lut: pointer to the lut buffer provided by the caller
 * @lut_size: size of the lut buffer
 *
 * get the RSS lookup table, PF or VSI type
 **/
i40e_status i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 vsi_id,
				bool pf_lut, u8 *lut, u16 lut_size)
{
	return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size,
				       false);
}

/**
 * i40e_aq_set_rss_lut
 * @hw: pointer to the hardware structure
 * @vsi_id: vsi fw index
 * @pf_lut: for PF table set true, for VSI table set false
 * @lut: pointer to the lut buffer provided by the caller
 * @lut_size: size of the lut buffer
 *
 * set the RSS lookup table, PF or VSI type
 **/
i40e_status i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 vsi_id,
				bool pf_lut, u8 *lut, u16 lut_size)
{
	return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, true);
}

/**
 * i40e_aq_get_set_rss_key
 * @hw: pointer to the hw struct
 * @vsi_id: vsi fw index
 * @key: pointer to key info struct
 * @set: set true to set the key, false to get the key
 *
 * get the RSS key per VSI
 **/
static i40e_status i40e_aq_get_set_rss_key(struct i40e_hw *hw,
					   u16 vsi_id,
					   struct i40e_aqc_get_set_rss_key_data *key,
					   bool set)
{
	i40e_status status;
	struct i40e_aq_desc desc;
	struct i40e_aqc_get_set_rss_key *cmd_resp =
		(struct i40e_aqc_get_set_rss_key *)&desc.params.raw;
	u16 key_size = sizeof(struct i40e_aqc_get_set_rss_key_data);

	if (set)
		i40e_fill_default_direct_cmd_desc(&desc,
						  i40e_aqc_opc_set_rss_key);
	else
		i40e_fill_default_direct_cmd_desc(&desc,
						  i40e_aqc_opc_get_rss_key);

	/* Indirect command */
	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);

	cmd_resp->vsi_id =
			cpu_to_le16((u16)((vsi_id <<
					  I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) &
					  I40E_AQC_SET_RSS_KEY_VSI_ID_MASK));
	cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_KEY_VSI_VALID);

	status = i40e_asq_send_command(hw, &desc, key, key_size, NULL);

	return status;
}

/**
 * i40e_aq_get_rss_key
 * @hw: pointer to the hw struct
 * @vsi_id: vsi fw index
 * @key: pointer to key info struct
 *
 **/
i40e_status i40e_aq_get_rss_key(struct i40e_hw *hw,
				u16 vsi_id,
				struct i40e_aqc_get_set_rss_key_data *key)
{
	return i40e_aq_get_set_rss_key(hw, vsi_id, key, false);
}

/**
 * i40e_aq_set_rss_key
 * @hw: pointer to the hw struct
 * @vsi_id: vsi fw index
 * @key: pointer to key info struct
 *
 * set the RSS key per VSI
 **/
i40e_status i40e_aq_set_rss_key(struct i40e_hw *hw,
				u16 vsi_id,
				struct i40e_aqc_get_set_rss_key_data *key)
{
	return i40e_aq_get_set_rss_key(hw, vsi_id, key, true);
}

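/*
 * Usage sketch (illustrative only): programming the RSS key and LUT of a
 * hypothetical VSI with the four wrappers above. The vsi_id, LUT size and
 * queue count are assumptions for the example, not values defined here:
 *
 *	struct i40e_aqc_get_set_rss_key_data key;
 *	u8 lut[128];
 *	u16 vsi_id = 0x10;	// hypothetical FW VSI index
 *	int i;
 *
 *	get_random_bytes(&key, sizeof(key));
 *	for (i = 0; i < sizeof(lut); i++)
 *		lut[i] = i % 4;	// spread traffic over 4 Rx queues
 *	if (!i40e_aq_set_rss_key(hw, vsi_id, &key))
 *		i40e_aq_set_rss_lut(hw, vsi_id, false, lut, sizeof(lut));
 */
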
/* The i40e_ptype_lookup table is used to convert from the 8-bit ptype in the
 * hardware to a bit-field that can be used by SW to more easily determine the
 * packet type.
 *
 * Macros are used to shorten the table lines and make this table human
 * readable.
 *
 * We store the PTYPE in the top byte of the bit field - this is just so that
 * we can check that the table doesn't have a row missing, as the index into
 * the table should be the PTYPE.
 *
 * Typical work flow:
 *
 * IF NOT i40e_ptype_lookup[ptype].known
 * THEN
 *	Packet is unknown
 * ELSE IF i40e_ptype_lookup[ptype].outer_ip == I40E_RX_PTYPE_OUTER_IP
 *	Use the rest of the fields to look at the tunnels, inner protocols, etc
 * ELSE
 *	Use the enum i40e_rx_l2_ptype to decode the packet type
 * ENDIF
 *
 * (An illustrative decode sketch follows the table below.)
 */

/* macro to make the table lines short */
#define I40E_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\
	{	PTYPE, \
		1, \
		I40E_RX_PTYPE_OUTER_##OUTER_IP, \
		I40E_RX_PTYPE_OUTER_##OUTER_IP_VER, \
		I40E_RX_PTYPE_##OUTER_FRAG, \
		I40E_RX_PTYPE_TUNNEL_##T, \
		I40E_RX_PTYPE_TUNNEL_END_##TE, \
		I40E_RX_PTYPE_##TEF, \
		I40E_RX_PTYPE_INNER_PROT_##I, \
		I40E_RX_PTYPE_PAYLOAD_LAYER_##PL }

#define I40E_PTT_UNUSED_ENTRY(PTYPE) \
	{ PTYPE, 0, 0, 0, 0, 0, 0, 0, 0, 0 }

/* shorter macros makes the table fit but are terse */
#define I40E_RX_PTYPE_NOF		I40E_RX_PTYPE_NOT_FRAG
#define I40E_RX_PTYPE_FRG		I40E_RX_PTYPE_FRAG
#define I40E_RX_PTYPE_INNER_PROT_TS	I40E_RX_PTYPE_INNER_PROT_TIMESYNC

/* Lookup table mapping the HW PTYPE to the bit field for decoding */
struct i40e_rx_ptype_decoded i40e_ptype_lookup[] = {
	/* L2 Packet types */
	I40E_PTT_UNUSED_ENTRY(0),
	I40E_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
	I40E_PTT(2, L2, NONE, NOF, NONE, NONE, NOF, TS, PAY2),
	I40E_PTT(3, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
	I40E_PTT_UNUSED_ENTRY(4),
	I40E_PTT_UNUSED_ENTRY(5),
	I40E_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
	I40E_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
	I40E_PTT_UNUSED_ENTRY(8),
	I40E_PTT_UNUSED_ENTRY(9),
	I40E_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
	I40E_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
	I40E_PTT(12, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(13, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(14, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(15, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(16, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(17, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(18, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(19, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(20, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(21, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),

	/* Non Tunneled IPv4 */
	I40E_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(25),
	I40E_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4),
	I40E_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4),
	I40E_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4),

	/* IPv4 --> IPv4 */
	I40E_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
	I40E_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
	I40E_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(32),
	I40E_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
	I40E_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
	I40E_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),

	/* IPv4 --> IPv6 */
	I40E_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
	I40E_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
	I40E_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(39),
	I40E_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
	I40E_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
	I40E_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),

	/* IPv4 --> GRE/NAT */
	I40E_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),

	/* IPv4 --> GRE/NAT --> IPv4 */
	I40E_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
	I40E_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
	I40E_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(47),
	I40E_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
	I40E_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
	I40E_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),

	/* IPv4 --> GRE/NAT --> IPv6 */
	I40E_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
	I40E_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
	I40E_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(54),
	I40E_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
	I40E_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
	I40E_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),

	/* IPv4 --> GRE/NAT --> MAC */
	I40E_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),

	/* IPv4 --> GRE/NAT --> MAC --> IPv4 */
	I40E_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
	I40E_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
	I40E_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(62),
	I40E_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
	I40E_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
	I40E_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),

	/* IPv4 --> GRE/NAT -> MAC --> IPv6 */
	I40E_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
	I40E_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
	I40E_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(69),
	I40E_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
	I40E_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
	I40E_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),

	/* IPv4 --> GRE/NAT --> MAC/VLAN */
	I40E_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),

	/* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */
	I40E_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
	I40E_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
	I40E_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(77),
	I40E_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
	I40E_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
	I40E_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),

	/* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */
	I40E_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
	I40E_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
	I40E_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(84),
	I40E_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
	I40E_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
	I40E_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
	/* Non Tunneled IPv6 */
	I40E_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(91),
	I40E_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4),
	I40E_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4),
	I40E_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4),

	/* IPv6 --> IPv4 */
	I40E_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
	I40E_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
	I40E_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(98),
	I40E_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
	I40E_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
	I40E_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),

	/* IPv6 --> IPv6 */
	I40E_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
	I40E_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
	I40E_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(105),
	I40E_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
	I40E_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
	I40E_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),

	/* IPv6 --> GRE/NAT */
	I40E_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),

	/* IPv6 --> GRE/NAT -> IPv4 */
	I40E_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
	I40E_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
	I40E_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(113),
	I40E_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
	I40E_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
	I40E_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),

	/* IPv6 --> GRE/NAT -> IPv6 */
	I40E_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
	I40E_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
	I40E_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(120),
	I40E_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
	I40E_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
	I40E_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),

	/* IPv6 --> GRE/NAT -> MAC */
	I40E_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),

	/* IPv6 --> GRE/NAT -> MAC -> IPv4 */
	I40E_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
	I40E_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
	I40E_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(128),
	I40E_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
	I40E_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
	I40E_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),

	/* IPv6 --> GRE/NAT -> MAC -> IPv6 */
	I40E_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
	I40E_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
	I40E_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(135),
	I40E_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
	I40E_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
	I40E_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),

	/* IPv6 --> GRE/NAT -> MAC/VLAN */
	I40E_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),

	/* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */
	I40E_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
	I40E_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
	I40E_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(143),
	I40E_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
	I40E_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
	I40E_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),

	/* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */
	I40E_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
	I40E_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
	I40E_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(150),
	I40E_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
	I40E_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
	I40E_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),

	/* unused entries */
	I40E_PTT_UNUSED_ENTRY(154),
	I40E_PTT_UNUSED_ENTRY(155),
	I40E_PTT_UNUSED_ENTRY(156),
	I40E_PTT_UNUSED_ENTRY(157),
	I40E_PTT_UNUSED_ENTRY(158),
	I40E_PTT_UNUSED_ENTRY(159),

	I40E_PTT_UNUSED_ENTRY(160),
	I40E_PTT_UNUSED_ENTRY(161),
	I40E_PTT_UNUSED_ENTRY(162),
	I40E_PTT_UNUSED_ENTRY(163),
	I40E_PTT_UNUSED_ENTRY(164),
	I40E_PTT_UNUSED_ENTRY(165),
	I40E_PTT_UNUSED_ENTRY(166),
	I40E_PTT_UNUSED_ENTRY(167),
	I40E_PTT_UNUSED_ENTRY(168),
	I40E_PTT_UNUSED_ENTRY(169),

	I40E_PTT_UNUSED_ENTRY(170),
	I40E_PTT_UNUSED_ENTRY(171),
	I40E_PTT_UNUSED_ENTRY(172),
	I40E_PTT_UNUSED_ENTRY(173),
	I40E_PTT_UNUSED_ENTRY(174),
	I40E_PTT_UNUSED_ENTRY(175),
	I40E_PTT_UNUSED_ENTRY(176),
	I40E_PTT_UNUSED_ENTRY(177),
	I40E_PTT_UNUSED_ENTRY(178),
	I40E_PTT_UNUSED_ENTRY(179),

	I40E_PTT_UNUSED_ENTRY(180),
	I40E_PTT_UNUSED_ENTRY(181),
	I40E_PTT_UNUSED_ENTRY(182),
	I40E_PTT_UNUSED_ENTRY(183),
	I40E_PTT_UNUSED_ENTRY(184),
	I40E_PTT_UNUSED_ENTRY(185),
	I40E_PTT_UNUSED_ENTRY(186),
	I40E_PTT_UNUSED_ENTRY(187),
	I40E_PTT_UNUSED_ENTRY(188),
	I40E_PTT_UNUSED_ENTRY(189),

	I40E_PTT_UNUSED_ENTRY(190),
	I40E_PTT_UNUSED_ENTRY(191),
	I40E_PTT_UNUSED_ENTRY(192),
	I40E_PTT_UNUSED_ENTRY(193),
	I40E_PTT_UNUSED_ENTRY(194),
	I40E_PTT_UNUSED_ENTRY(195),
	I40E_PTT_UNUSED_ENTRY(196),
	I40E_PTT_UNUSED_ENTRY(197),
	I40E_PTT_UNUSED_ENTRY(198),
	I40E_PTT_UNUSED_ENTRY(199),

	I40E_PTT_UNUSED_ENTRY(200),
	I40E_PTT_UNUSED_ENTRY(201),
	I40E_PTT_UNUSED_ENTRY(202),
	I40E_PTT_UNUSED_ENTRY(203),
	I40E_PTT_UNUSED_ENTRY(204),
	I40E_PTT_UNUSED_ENTRY(205),
	I40E_PTT_UNUSED_ENTRY(206),
	I40E_PTT_UNUSED_ENTRY(207),
	I40E_PTT_UNUSED_ENTRY(208),
	I40E_PTT_UNUSED_ENTRY(209),

	I40E_PTT_UNUSED_ENTRY(210),
	I40E_PTT_UNUSED_ENTRY(211),
	I40E_PTT_UNUSED_ENTRY(212),
	I40E_PTT_UNUSED_ENTRY(213),
	I40E_PTT_UNUSED_ENTRY(214),
	I40E_PTT_UNUSED_ENTRY(215),
	I40E_PTT_UNUSED_ENTRY(216),
	I40E_PTT_UNUSED_ENTRY(217),
	I40E_PTT_UNUSED_ENTRY(218),
	I40E_PTT_UNUSED_ENTRY(219),

	I40E_PTT_UNUSED_ENTRY(220),
	I40E_PTT_UNUSED_ENTRY(221),
	I40E_PTT_UNUSED_ENTRY(222),
	I40E_PTT_UNUSED_ENTRY(223),
	I40E_PTT_UNUSED_ENTRY(224),
	I40E_PTT_UNUSED_ENTRY(225),
	I40E_PTT_UNUSED_ENTRY(226),
	I40E_PTT_UNUSED_ENTRY(227),
	I40E_PTT_UNUSED_ENTRY(228),
	I40E_PTT_UNUSED_ENTRY(229),

	I40E_PTT_UNUSED_ENTRY(230),
	I40E_PTT_UNUSED_ENTRY(231),
	I40E_PTT_UNUSED_ENTRY(232),
	I40E_PTT_UNUSED_ENTRY(233),
	I40E_PTT_UNUSED_ENTRY(234),
	I40E_PTT_UNUSED_ENTRY(235),
	I40E_PTT_UNUSED_ENTRY(236),
	I40E_PTT_UNUSED_ENTRY(237),
	I40E_PTT_UNUSED_ENTRY(238),
	I40E_PTT_UNUSED_ENTRY(239),

	I40E_PTT_UNUSED_ENTRY(240),
	I40E_PTT_UNUSED_ENTRY(241),
	I40E_PTT_UNUSED_ENTRY(242),
	I40E_PTT_UNUSED_ENTRY(243),
	I40E_PTT_UNUSED_ENTRY(244),
	I40E_PTT_UNUSED_ENTRY(245),
	I40E_PTT_UNUSED_ENTRY(246),
	I40E_PTT_UNUSED_ENTRY(247),
	I40E_PTT_UNUSED_ENTRY(248),
	I40E_PTT_UNUSED_ENTRY(249),

	I40E_PTT_UNUSED_ENTRY(250),
	I40E_PTT_UNUSED_ENTRY(251),
	I40E_PTT_UNUSED_ENTRY(252),
	I40E_PTT_UNUSED_ENTRY(253),
	I40E_PTT_UNUSED_ENTRY(254),
	I40E_PTT_UNUSED_ENTRY(255)
};

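/*
 * Decode sketch (illustrative only), following the work flow documented
 * above the table. "rx_ptype" stands for the 8-bit ptype a caller pulls out
 * of an Rx descriptor; it is not defined in this file:
 *
 *	struct i40e_rx_ptype_decoded decoded = i40e_ptype_lookup[rx_ptype];
 *
 *	if (!decoded.known)
 *		return;				// unknown packet type
 *	if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
 *	    decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4 &&
 *	    decoded.inner_prot == I40E_RX_PTYPE_INNER_PROT_TCP)
 *		;				// e.g. IPv4/TCP checksum handling
 *	else
 *		;				// fall back to the L2 ptype
 */
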
/**
 * i40e_init_shared_code - Initialize the shared code
 * @hw: pointer to hardware structure
 *
 * This assigns the MAC type and PHY code and inits the NVM.
 * Does not touch the hardware. This function must be called prior to any
 * other function in the shared code. The i40e_hw structure should be
 * memset to 0 prior to calling this function. The following fields in
 * hw structure should be filled in prior to calling this function:
 * hw_addr, back, device_id, vendor_id, subsystem_device_id,
 * subsystem_vendor_id, and revision_id
 **/
i40e_status i40e_init_shared_code(struct i40e_hw *hw)
{
	i40e_status status = 0;
	u32 port, ari, func_rid;

	i40e_set_mac_type(hw);

	switch (hw->mac.type) {
	case I40E_MAC_XL710:
	case I40E_MAC_X722:
		break;
	default:
		return I40E_ERR_DEVICE_NOT_SUPPORTED;
	}

	hw->phy.get_link_info = true;

	/* Determine port number and PF number */
	port = (rd32(hw, I40E_PFGEN_PORTNUM) & I40E_PFGEN_PORTNUM_PORT_NUM_MASK)
					   >> I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT;
	hw->port = (u8)port;
	ari = (rd32(hw, I40E_GLPCI_CAPSUP) & I40E_GLPCI_CAPSUP_ARI_EN_MASK) >>
						 I40E_GLPCI_CAPSUP_ARI_EN_SHIFT;
	func_rid = rd32(hw, I40E_PF_FUNC_RID);
	if (ari)
		hw->pf_id = (u8)(func_rid & 0xff);
	else
		hw->pf_id = (u8)(func_rid & 0x7);

	if (hw->mac.type == I40E_MAC_X722)
		hw->flags |= I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE |
			     I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;

	status = i40e_init_nvm(hw);
	return status;
}

/**
 * i40e_aq_mac_address_read - Retrieve the MAC addresses
 * @hw: pointer to the hw struct
 * @flags: a return indicator of what addresses were added to the addr store
 * @addrs: the requestor's mac addr store
 * @cmd_details: pointer to command details structure or NULL
 **/
static i40e_status i40e_aq_mac_address_read(struct i40e_hw *hw,
					    u16 *flags,
					    struct i40e_aqc_mac_address_read_data *addrs,
					    struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_mac_address_read *cmd_data =
		(struct i40e_aqc_mac_address_read *)&desc.params.raw;
	i40e_status status;

	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_mac_address_read);
	desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF);

	status = i40e_asq_send_command(hw, &desc, addrs,
				       sizeof(*addrs), cmd_details);
	*flags = le16_to_cpu(cmd_data->command_flags);

	return status;
}

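/*
 * Layout note with a worked example (illustrative only): the write command
 * below carries the MAC split across two fields, mac_sah for bytes 0-1 and
 * mac_sal for bytes 2-5. For 00:1b:21:aa:bb:cc that is mac_sah = 0x001b and
 * mac_sal = 0x21aabbcc, so a caller only passes the flags and the raw byte
 * array (the address and the LAA/WOL flag choice here are hypothetical):
 *
 *	u8 addr[ETH_ALEN] = { 0x00, 0x1b, 0x21, 0xaa, 0xbb, 0xcc };
 *
 *	i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_WOL, addr, NULL);
 */
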
/**
 * i40e_aq_mac_address_write - Change the MAC addresses
 * @hw: pointer to the hw struct
 * @flags: indicates which MAC to be written
 * @mac_addr: address to write
 * @cmd_details: pointer to command details structure or NULL
 **/
i40e_status i40e_aq_mac_address_write(struct i40e_hw *hw,
				      u16 flags, u8 *mac_addr,
				      struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_mac_address_write *cmd_data =
		(struct i40e_aqc_mac_address_write *)&desc.params.raw;
	i40e_status status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_mac_address_write);
	cmd_data->command_flags = cpu_to_le16(flags);
	cmd_data->mac_sah = cpu_to_le16((u16)mac_addr[0] << 8 | mac_addr[1]);
	cmd_data->mac_sal = cpu_to_le32(((u32)mac_addr[2] << 24) |
					((u32)mac_addr[3] << 16) |
					((u32)mac_addr[4] << 8) |
					mac_addr[5]);

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	return status;
}

/**
 * i40e_get_mac_addr - get MAC address
 * @hw: pointer to the HW structure
 * @mac_addr: pointer to MAC address
 *
 * Reads the adapter's MAC address from register
 **/
i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
{
	struct i40e_aqc_mac_address_read_data addrs;
	i40e_status status;
	u16 flags = 0;

	status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);

	if (flags & I40E_AQC_LAN_ADDR_VALID)
		ether_addr_copy(mac_addr, addrs.pf_lan_mac);

	return status;
}

/**
 * i40e_get_port_mac_addr - get Port MAC address
 * @hw: pointer to the HW structure
 * @mac_addr: pointer to Port MAC address
 *
 * Reads the adapter's Port MAC address
 **/
i40e_status i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
{
	struct i40e_aqc_mac_address_read_data addrs;
	i40e_status status;
	u16 flags = 0;

	status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);
	if (status)
		return status;

	if (flags & I40E_AQC_PORT_ADDR_VALID)
		ether_addr_copy(mac_addr, addrs.port_mac);
	else
		status = I40E_ERR_INVALID_MAC_ADDR;

	return status;
}

/**
 * i40e_pre_tx_queue_cfg - pre tx queue configure
 * @hw: pointer to the HW structure
 * @queue: target PF queue index
 * @enable: state change request
 *
 * Handles hw requirement to indicate intention to enable
 * or disable target queue.
 **/
void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable)
{
	u32 abs_queue_idx = hw->func_caps.base_queue + queue;
	u32 reg_block = 0;
	u32 reg_val;

	if (abs_queue_idx >= 128) {
		reg_block = abs_queue_idx / 128;
		abs_queue_idx %= 128;
	}

	reg_val = rd32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block));
	reg_val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;
	reg_val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);

	if (enable)
		reg_val |= I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK;
	else
		reg_val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK;

	wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), reg_val);
}

/**
 * i40e_read_pba_string - Reads part number string from EEPROM
 * @hw: pointer to hardware structure
 * @pba_num: stores the part number string from the EEPROM
 * @pba_num_size: part number string buffer length
 *
 * Reads the part number string from the EEPROM.
 **/
i40e_status i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num,
				 u32 pba_num_size)
{
	i40e_status status = 0;
	u16 pba_word = 0;
	u16 pba_size = 0;
	u16 pba_ptr = 0;
	u16 i = 0;

	status = i40e_read_nvm_word(hw, I40E_SR_PBA_FLAGS, &pba_word);
	if (status || (pba_word != 0xFAFA)) {
		hw_dbg(hw, "Failed to read PBA flags or flag is invalid.\n");
		return status;
	}

	status = i40e_read_nvm_word(hw, I40E_SR_PBA_BLOCK_PTR, &pba_ptr);
	if (status) {
		hw_dbg(hw, "Failed to read PBA Block pointer.\n");
		return status;
	}

	status = i40e_read_nvm_word(hw, pba_ptr, &pba_size);
	if (status) {
		hw_dbg(hw, "Failed to read PBA Block size.\n");
		return status;
	}

	/* Subtract one to get PBA word count (PBA Size word is included in
	 * total size)
	 */
	pba_size--;
	if (pba_num_size < (((u32)pba_size * 2) + 1)) {
		hw_dbg(hw, "Buffer too small for PBA data.\n");
		return I40E_ERR_PARAM;
	}

	for (i = 0; i < pba_size; i++) {
		status = i40e_read_nvm_word(hw, (pba_ptr + 1) + i, &pba_word);
		if (status) {
			hw_dbg(hw, "Failed to read PBA Block word %d.\n", i);
			return status;
		}

		pba_num[(i * 2)] = (pba_word >> 8) & 0xFF;
		pba_num[(i * 2) + 1] = pba_word & 0xFF;
	}
	pba_num[(pba_size * 2)] = '\0';

	return status;
}

/**
 * i40e_get_media_type - Gets media type
 * @hw: pointer to the hardware structure
 **/
static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
{
	enum i40e_media_type media;

	switch (hw->phy.link_info.phy_type) {
	case I40E_PHY_TYPE_10GBASE_SR:
	case I40E_PHY_TYPE_10GBASE_LR:
	case I40E_PHY_TYPE_1000BASE_SX:
	case I40E_PHY_TYPE_1000BASE_LX:
	case I40E_PHY_TYPE_40GBASE_SR4:
	case I40E_PHY_TYPE_40GBASE_LR4:
	case I40E_PHY_TYPE_25GBASE_LR:
	case I40E_PHY_TYPE_25GBASE_SR:
		media = I40E_MEDIA_TYPE_FIBER;
		break;
	case I40E_PHY_TYPE_100BASE_TX:
	case I40E_PHY_TYPE_1000BASE_T:
	case I40E_PHY_TYPE_2_5GBASE_T:
	case I40E_PHY_TYPE_5GBASE_T:
	case I40E_PHY_TYPE_10GBASE_T:
		media = I40E_MEDIA_TYPE_BASET;
		break;
	case I40E_PHY_TYPE_10GBASE_CR1_CU:
	case I40E_PHY_TYPE_40GBASE_CR4_CU:
	case I40E_PHY_TYPE_10GBASE_CR1:
	case I40E_PHY_TYPE_40GBASE_CR4:
	case I40E_PHY_TYPE_10GBASE_SFPP_CU:
	case I40E_PHY_TYPE_40GBASE_AOC:
	case I40E_PHY_TYPE_10GBASE_AOC:
	case I40E_PHY_TYPE_25GBASE_CR:
	case I40E_PHY_TYPE_25GBASE_AOC:
	case I40E_PHY_TYPE_25GBASE_ACC:
		media = I40E_MEDIA_TYPE_DA;
		break;
	case I40E_PHY_TYPE_1000BASE_KX:
	case I40E_PHY_TYPE_10GBASE_KX4:
	case I40E_PHY_TYPE_10GBASE_KR:
	case I40E_PHY_TYPE_40GBASE_KR4:
	case I40E_PHY_TYPE_20GBASE_KR2:
	case I40E_PHY_TYPE_25GBASE_KR:
		media = I40E_MEDIA_TYPE_BACKPLANE;
		break;
	case I40E_PHY_TYPE_SGMII:
	case I40E_PHY_TYPE_XAUI:
	case I40E_PHY_TYPE_XFI:
	case I40E_PHY_TYPE_XLAUI:
	case I40E_PHY_TYPE_XLPPI:
	default:
		media = I40E_MEDIA_TYPE_UNKNOWN;
		break;
	}

	return media;
}

/**
 * i40e_poll_globr - Poll for Global Reset completion
 * @hw: pointer to the hardware structure
 * @retry_limit: how many times to retry before failure
 **/
static i40e_status i40e_poll_globr(struct i40e_hw *hw,
				   u32 retry_limit)
{
	u32 cnt, reg = 0;

	for (cnt = 0; cnt < retry_limit; cnt++) {
		reg = rd32(hw, I40E_GLGEN_RSTAT);
		if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
			return 0;
		msleep(100);
	}

	hw_dbg(hw, "Global reset failed.\n");
	hw_dbg(hw, "I40E_GLGEN_RSTAT = 0x%x\n", reg);

	return I40E_ERR_RESET_FAILED;
}

#define I40E_PF_RESET_WAIT_COUNT_A0	200
#define I40E_PF_RESET_WAIT_COUNT	200
/**
 * i40e_pf_reset - Reset the PF
 * @hw: pointer to the hardware structure
 *
 * Assuming someone else has triggered a global reset,
 * assure the global reset is complete and then reset the PF
 **/
i40e_status i40e_pf_reset(struct i40e_hw *hw)
{
	u32 cnt = 0;
	u32 cnt1 = 0;
	u32 reg = 0;
	u32 grst_del;

	/* Poll for Global Reset steady state in case of recent GRST.
	 * The grst delay value is in 100ms units, and we'll wait a
	 * couple counts longer to be sure we don't just miss the end.
	 */
	grst_del = (rd32(hw, I40E_GLGEN_RSTCTL) &
		    I40E_GLGEN_RSTCTL_GRSTDEL_MASK) >>
		    I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;

	/* It can take up to 15 secs for GRST steady state.
	 * Bump it to 16 secs max to be safe.
	 */
	grst_del = grst_del * 20;

	for (cnt = 0; cnt < grst_del; cnt++) {
		reg = rd32(hw, I40E_GLGEN_RSTAT);
		if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
			break;
		msleep(100);
	}
	if (reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
		hw_dbg(hw, "Global reset polling failed to complete.\n");
		return I40E_ERR_RESET_FAILED;
	}

	/* Now wait for the FW to be ready */
	for (cnt1 = 0; cnt1 < I40E_PF_RESET_WAIT_COUNT; cnt1++) {
		reg = rd32(hw, I40E_GLNVM_ULD);
		reg &= (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
			I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK);
		if (reg == (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
			    I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK)) {
			hw_dbg(hw, "Core and Global modules ready %d\n", cnt1);
			break;
		}
		usleep_range(10000, 20000);
	}
	if (!(reg & (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
		     I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))) {
		hw_dbg(hw, "wait for FW Reset complete timed out\n");
		hw_dbg(hw, "I40E_GLNVM_ULD = 0x%x\n", reg);
		return I40E_ERR_RESET_FAILED;
	}

	/* If there was a Global Reset in progress when we got here,
	 * we don't need to do the PF Reset
	 */
	if (!cnt) {
		u32 reg2 = 0;

		if (hw->revision_id == 0)
			cnt = I40E_PF_RESET_WAIT_COUNT_A0;
		else
			cnt = I40E_PF_RESET_WAIT_COUNT;
		reg = rd32(hw, I40E_PFGEN_CTRL);
		wr32(hw, I40E_PFGEN_CTRL,
		     (reg | I40E_PFGEN_CTRL_PFSWR_MASK));
		for (; cnt; cnt--) {
			reg = rd32(hw, I40E_PFGEN_CTRL);
			if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK))
				break;
			reg2 = rd32(hw, I40E_GLGEN_RSTAT);
			if (reg2 & I40E_GLGEN_RSTAT_DEVSTATE_MASK)
				break;
			usleep_range(1000, 2000);
		}
		if (reg2 & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
			if (i40e_poll_globr(hw, grst_del))
				return I40E_ERR_RESET_FAILED;
		} else if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) {
			hw_dbg(hw, "PF reset polling failed to complete.\n");
			return I40E_ERR_RESET_FAILED;
		}
	}

	i40e_clear_pxe_mode(hw);

	return 0;
}

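/*
 * Worked example for the GRST wait in i40e_pf_reset() above (the field value
 * is illustrative): with GRSTDEL reading 8, i.e. 800 ms programmed in 100 ms
 * units, grst_del becomes 8 * 20 = 160 poll iterations at 100 ms each, so the
 * loop allows up to 16 seconds before returning I40E_ERR_RESET_FAILED,
 * matching the "bump it to 16 secs max" note in the function.
 */
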
/**
 * i40e_clear_hw - clear out any left over hw state
 * @hw: pointer to the hw struct
 *
 * Clear queues and interrupts, typically called at init time,
 * but after the capabilities have been found so we know how many
 * queues and msix vectors have been allocated.
 **/
void i40e_clear_hw(struct i40e_hw *hw)
{
	u32 num_queues, base_queue;
	u32 num_pf_int;
	u32 num_vf_int;
	u32 num_vfs;
	u32 i, j;
	u32 val;
	u32 eol = 0x7ff;

	/* get number of interrupts, queues, and VFs */
	val = rd32(hw, I40E_GLPCI_CNF2);
	num_pf_int = (val & I40E_GLPCI_CNF2_MSI_X_PF_N_MASK) >>
		     I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT;
	num_vf_int = (val & I40E_GLPCI_CNF2_MSI_X_VF_N_MASK) >>
		     I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT;

	val = rd32(hw, I40E_PFLAN_QALLOC);
	base_queue = (val & I40E_PFLAN_QALLOC_FIRSTQ_MASK) >>
		     I40E_PFLAN_QALLOC_FIRSTQ_SHIFT;
	j = (val & I40E_PFLAN_QALLOC_LASTQ_MASK) >>
	    I40E_PFLAN_QALLOC_LASTQ_SHIFT;
	if (val & I40E_PFLAN_QALLOC_VALID_MASK)
		num_queues = (j - base_queue) + 1;
	else
		num_queues = 0;

	val = rd32(hw, I40E_PF_VT_PFALLOC);
	i = (val & I40E_PF_VT_PFALLOC_FIRSTVF_MASK) >>
	    I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT;
	j = (val & I40E_PF_VT_PFALLOC_LASTVF_MASK) >>
	    I40E_PF_VT_PFALLOC_LASTVF_SHIFT;
	if (val & I40E_PF_VT_PFALLOC_VALID_MASK)
		num_vfs = (j - i) + 1;
	else
		num_vfs = 0;

	/* stop all the interrupts */
	wr32(hw, I40E_PFINT_ICR0_ENA, 0);
	val = 0x3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
	for (i = 0; i < num_pf_int - 2; i++)
		wr32(hw, I40E_PFINT_DYN_CTLN(i), val);

	/* Set the FIRSTQ_INDX field to 0x7FF in PFINT_LNKLSTx */
	val = eol << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
	wr32(hw, I40E_PFINT_LNKLST0, val);
	for (i = 0; i < num_pf_int - 2; i++)
		wr32(hw, I40E_PFINT_LNKLSTN(i), val);
	val = eol << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT;
	for (i = 0; i < num_vfs; i++)
		wr32(hw, I40E_VPINT_LNKLST0(i), val);
	for (i = 0; i < num_vf_int - 2; i++)
		wr32(hw, I40E_VPINT_LNKLSTN(i), val);

	/* warn the HW of the coming Tx disables */
	for (i = 0; i < num_queues; i++) {
		u32 abs_queue_idx = base_queue + i;
		u32 reg_block = 0;

		if (abs_queue_idx >= 128) {
			reg_block = abs_queue_idx / 128;
			abs_queue_idx %= 128;
		}

		val = rd32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block));
		val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;
		val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
		val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK;

		wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), val);
	}
	udelay(400);

	/* stop all the queues */
	for (i = 0; i < num_queues; i++) {
		wr32(hw, I40E_QINT_TQCTL(i), 0);
		wr32(hw, I40E_QTX_ENA(i), 0);
		wr32(hw, I40E_QINT_RQCTL(i), 0);
		wr32(hw, I40E_QRX_ENA(i), 0);
	}

	/* short wait for all queue disables to settle */
	udelay(50);
}

/**
 * i40e_clear_pxe_mode - clear pxe operations mode
 * @hw: pointer to the hw struct
 *
 * Make sure all PXE mode settings are cleared, including things
 * like descriptor fetch/write-back mode.
 **/
void i40e_clear_pxe_mode(struct i40e_hw *hw)
{
	u32 reg;

	if (i40e_check_asq_alive(hw))
		i40e_aq_clear_pxe_mode(hw, NULL);

	/* Clear single descriptor fetch/write-back mode */
	reg = rd32(hw, I40E_GLLAN_RCTL_0);

	if (hw->revision_id == 0) {
		/* As a work around clear PXE_MODE instead of setting it */
		wr32(hw, I40E_GLLAN_RCTL_0, (reg & (~I40E_GLLAN_RCTL_0_PXE_MODE_MASK)));
	} else {
		wr32(hw, I40E_GLLAN_RCTL_0, (reg | I40E_GLLAN_RCTL_0_PXE_MODE_MASK));
	}
}

/**
 * i40e_led_is_mine - helper to find matching led
 * @hw: pointer to the hw struct
 * @idx: index into GPIO registers
 *
 * returns: 0 if no match, otherwise the value of the GPIO_CTL register
 */
static u32 i40e_led_is_mine(struct i40e_hw *hw, int idx)
{
	u32 gpio_val = 0;
	u32 port;

	if (!hw->func_caps.led[idx])
		return 0;

	gpio_val = rd32(hw, I40E_GLGEN_GPIO_CTL(idx));
	port = (gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK) >>
		I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT;

	/* if PRT_NUM_NA is 1 then this LED is not port specific, OR
	 * if it is not our port then ignore
	 */
	if ((gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_MASK) ||
	    (port != hw->port))
		return 0;

	return gpio_val;
}

#define I40E_COMBINED_ACTIVITY	0xA
#define I40E_FILTER_ACTIVITY	0xE
#define I40E_LINK_ACTIVITY	0xC
#define I40E_MAC_ACTIVITY	0xD
#define I40E_LED0		22

/**
 * i40e_led_get - return current on/off mode
 * @hw: pointer to the hw struct
 *
 * The value returned is the 'mode' field as defined in the
 * GPIO register definitions: 0x0 = off, 0xf = on, and other
 * values are variations of possible behaviors relating to
 * blink, link, and wire.
 **/
u32 i40e_led_get(struct i40e_hw *hw)
{
	u32 mode = 0;
	int i;

	/* as per the documentation GPIO 22-29 are the LED
	 * GPIO pins named LED0..LED7
	 */
	for (i = I40E_LED0; i <= I40E_GLGEN_GPIO_CTL_MAX_INDEX; i++) {
		u32 gpio_val = i40e_led_is_mine(hw, i);

		if (!gpio_val)
			continue;

		mode = (gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK) >>
		       I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT;
		break;
	}

	return mode;
}

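/*
 * Usage sketch (illustrative only): the usual identify-port pattern built on
 * i40e_led_get() above and i40e_led_set() below, e.g. for port
 * identification. Per the i40e_led_set() note, a caller that turns the blink
 * on is expected to turn it off again when restoring the saved mode:
 *
 *	u32 orig = i40e_led_get(hw);
 *
 *	i40e_led_set(hw, 0xf, true);	// LED on, blinking
 *	// ... let the operator locate the port ...
 *	i40e_led_set(hw, orig, false);	// restore saved mode, blink off
 */
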
/**
 * i40e_led_set - set new on/off mode
 * @hw: pointer to the hw struct
 * @mode: 0=off, 0xf=on (else see manual for mode details)
 * @blink: true if the LED should blink when on, false if steady
 *
 * if this function is used to turn on the blink it should
 * be used to disable the blink when restoring the original state.
 **/
void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
{
	int i;

	if (mode & 0xfffffff0)
		hw_dbg(hw, "invalid mode passed in %X\n", mode);

	/* as per the documentation GPIO 22-29 are the LED
	 * GPIO pins named LED0..LED7
	 */
	for (i = I40E_LED0; i <= I40E_GLGEN_GPIO_CTL_MAX_INDEX; i++) {
		u32 gpio_val = i40e_led_is_mine(hw, i);

		if (!gpio_val)
			continue;
		gpio_val &= ~I40E_GLGEN_GPIO_CTL_LED_MODE_MASK;
		/* this & is a bit of paranoia, but serves as a range check */
		gpio_val |= ((mode << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) &
			     I40E_GLGEN_GPIO_CTL_LED_MODE_MASK);

		if (blink)
			gpio_val |= BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
		else
			gpio_val &= ~BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);

		wr32(hw, I40E_GLGEN_GPIO_CTL(i), gpio_val);
		break;
	}
}

/* Admin command wrappers */

/**
 * i40e_aq_get_phy_capabilities
 * @hw: pointer to the hw struct
 * @abilities: structure for PHY capabilities to be filled
 * @qualified_modules: report Qualified Modules
 * @report_init: report init capabilities (active are default)
 * @cmd_details: pointer to command details structure or NULL
 *
 * Returns the various PHY abilities supported on the Port.
 **/
i40e_status i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
					 bool qualified_modules,
					 bool report_init,
					 struct i40e_aq_get_phy_abilities_resp *abilities,
					 struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	i40e_status status;
	u16 abilities_size = sizeof(struct i40e_aq_get_phy_abilities_resp);
	u16 max_delay = I40E_MAX_PHY_TIMEOUT, total_delay = 0;

	if (!abilities)
		return I40E_ERR_PARAM;

	do {
		i40e_fill_default_direct_cmd_desc(&desc,
						  i40e_aqc_opc_get_phy_abilities);

		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
		if (abilities_size > I40E_AQ_LARGE_BUF)
			desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);

		if (qualified_modules)
			desc.params.external.param0 |=
			cpu_to_le32(I40E_AQ_PHY_REPORT_QUALIFIED_MODULES);

		if (report_init)
			desc.params.external.param0 |=
			cpu_to_le32(I40E_AQ_PHY_REPORT_INITIAL_VALUES);

		status = i40e_asq_send_command(hw, &desc, abilities,
					       abilities_size, cmd_details);

		switch (hw->aq.asq_last_status) {
		case I40E_AQ_RC_EIO:
			status = I40E_ERR_UNKNOWN_PHY;
			break;
		case I40E_AQ_RC_EAGAIN:
			usleep_range(1000, 2000);
			total_delay++;
			status = I40E_ERR_TIMEOUT;
			break;
		/* also covers I40E_AQ_RC_OK */
		default:
			break;
		}

	} while ((hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN) &&
		 (total_delay < max_delay));

	if (status)
		return status;

	if (report_init) {
		if (hw->mac.type == I40E_MAC_XL710 &&
		    hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
		    hw->aq.api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710) {
			status = i40e_aq_get_link_info(hw, true, NULL, NULL);
		} else {
			hw->phy.phy_types = le32_to_cpu(abilities->phy_type);
			hw->phy.phy_types |=
					((u64)abilities->phy_type_ext << 32);
		}
	}

	return status;
}

/**
 * i40e_aq_set_phy_config
 * @hw: pointer to the hw struct
 * @config: structure with PHY configuration to be set
 * @cmd_details: pointer to command details structure or NULL
 *
 * Set the various PHY configuration parameters
 * supported on the Port. One or more of the Set PHY config parameters may be
 * ignored in an MFP mode as the PF may not have the privilege to set some
 * of the PHY Config parameters. This status will be indicated by the
 * command response.
 **/
enum i40e_status_code i40e_aq_set_phy_config(struct i40e_hw *hw,
				struct i40e_aq_set_phy_config *config,
				struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aq_set_phy_config *cmd =
		(struct i40e_aq_set_phy_config *)&desc.params.raw;
	enum i40e_status_code status;

	if (!config)
		return I40E_ERR_PARAM;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_set_phy_config);

	*cmd = *config;

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	return status;
}

static noinline_for_stack enum i40e_status_code
i40e_set_fc_status(struct i40e_hw *hw,
		   struct i40e_aq_get_phy_abilities_resp *abilities,
		   bool atomic_restart)
{
	struct i40e_aq_set_phy_config config;
	enum i40e_fc_mode fc_mode = hw->fc.requested_mode;
	u8 pause_mask = 0x0;

	switch (fc_mode) {
	case I40E_FC_FULL:
		pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_TX;
		pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_RX;
		break;
	case I40E_FC_RX_PAUSE:
		pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_RX;
		break;
	case I40E_FC_TX_PAUSE:
		pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_TX;
		break;
	default:
		break;
	}

	memset(&config, 0, sizeof(struct i40e_aq_set_phy_config));
	/* clear the old pause settings */
	config.abilities = abilities->abilities & ~(I40E_AQ_PHY_FLAG_PAUSE_TX) &
			   ~(I40E_AQ_PHY_FLAG_PAUSE_RX);
	/* set the new abilities */
	config.abilities |= pause_mask;
	/* If the abilities have changed, then set the new config */
	if (config.abilities == abilities->abilities)
		return 0;

	/* Auto restart link so settings take effect */
	if (atomic_restart)
		config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
	/* Copy over all the old settings */
	config.phy_type = abilities->phy_type;
	config.phy_type_ext = abilities->phy_type_ext;
	config.link_speed = abilities->link_speed;
	config.eee_capability = abilities->eee_capability;
	config.eeer = abilities->eeer_val;
	config.low_power_ctrl = abilities->d3_lpan;
	config.fec_config = abilities->fec_cfg_curr_mod_ext_info &
			    I40E_AQ_PHY_FEC_CONFIG_MASK;

	return i40e_aq_set_phy_config(hw, &config, NULL);
}

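/*
 * Usage sketch (illustrative only): the helper above is normally driven
 * through i40e_set_fc() below. A caller picks the wanted mode in
 * hw->fc.requested_mode and inspects aq_failures to see which step failed:
 *
 *	u8 aq_failures = 0;
 *
 *	hw->fc.requested_mode = I40E_FC_FULL;
 *	if (i40e_set_fc(hw, &aq_failures, true) &&
 *	    (aq_failures & I40E_SET_FC_AQ_FAIL_SET))
 *		;	// set_phy_config was rejected, e.g. lacking MFP privilege
 */
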
1706 **/ 1707 enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures, 1708 bool atomic_restart) 1709 { 1710 struct i40e_aq_get_phy_abilities_resp abilities; 1711 enum i40e_status_code status; 1712 1713 *aq_failures = 0x0; 1714 1715 /* Get the current phy config */ 1716 status = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, 1717 NULL); 1718 if (status) { 1719 *aq_failures |= I40E_SET_FC_AQ_FAIL_GET; 1720 return status; 1721 } 1722 1723 status = i40e_set_fc_status(hw, &abilities, atomic_restart); 1724 if (status) 1725 *aq_failures |= I40E_SET_FC_AQ_FAIL_SET; 1726 1727 /* Update the link info */ 1728 status = i40e_update_link_info(hw); 1729 if (status) { 1730 /* Wait a little bit (on 40G cards it sometimes takes a really 1731 * long time for link to come back from the atomic reset) 1732 * and try once more 1733 */ 1734 msleep(1000); 1735 status = i40e_update_link_info(hw); 1736 } 1737 if (status) 1738 *aq_failures |= I40E_SET_FC_AQ_FAIL_UPDATE; 1739 1740 return status; 1741 } 1742 1743 /** 1744 * i40e_aq_clear_pxe_mode 1745 * @hw: pointer to the hw struct 1746 * @cmd_details: pointer to command details structure or NULL 1747 * 1748 * Tell the firmware that the driver is taking over from PXE 1749 **/ 1750 i40e_status i40e_aq_clear_pxe_mode(struct i40e_hw *hw, 1751 struct i40e_asq_cmd_details *cmd_details) 1752 { 1753 i40e_status status; 1754 struct i40e_aq_desc desc; 1755 struct i40e_aqc_clear_pxe *cmd = 1756 (struct i40e_aqc_clear_pxe *)&desc.params.raw; 1757 1758 i40e_fill_default_direct_cmd_desc(&desc, 1759 i40e_aqc_opc_clear_pxe_mode); 1760 1761 cmd->rx_cnt = 0x2; 1762 1763 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 1764 1765 wr32(hw, I40E_GLLAN_RCTL_0, 0x1); 1766 1767 return status; 1768 } 1769 1770 /** 1771 * i40e_aq_set_link_restart_an 1772 * @hw: pointer to the hw struct 1773 * @enable_link: if true: enable link, if false: disable link 1774 * @cmd_details: pointer to command details structure or NULL 1775 * 1776 * Sets up the link and restarts the Auto-Negotiation over the link. 1777 **/ 1778 i40e_status i40e_aq_set_link_restart_an(struct i40e_hw *hw, 1779 bool enable_link, 1780 struct i40e_asq_cmd_details *cmd_details) 1781 { 1782 struct i40e_aq_desc desc; 1783 struct i40e_aqc_set_link_restart_an *cmd = 1784 (struct i40e_aqc_set_link_restart_an *)&desc.params.raw; 1785 i40e_status status; 1786 1787 i40e_fill_default_direct_cmd_desc(&desc, 1788 i40e_aqc_opc_set_link_restart_an); 1789 1790 cmd->command = I40E_AQ_PHY_RESTART_AN; 1791 if (enable_link) 1792 cmd->command |= I40E_AQ_PHY_LINK_ENABLE; 1793 else 1794 cmd->command &= ~I40E_AQ_PHY_LINK_ENABLE; 1795 1796 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 1797 1798 return status; 1799 } 1800 1801 /** 1802 * i40e_aq_get_link_info 1803 * @hw: pointer to the hw struct 1804 * @enable_lse: enable/disable LinkStatusEvent reporting 1805 * @link: pointer to link status structure - optional 1806 * @cmd_details: pointer to command details structure or NULL 1807 * 1808 * Returns the link status of the adapter. 
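 *
 * Illustrative caller sketch (hypothetical; error handling trimmed and @hw
 * assumed to be initialized):
 *
 *	struct i40e_link_status link;
 *
 *	if (!i40e_aq_get_link_info(hw, true, &link, NULL) &&
 *	    (link.link_info & I40E_AQ_LINK_UP))
 *		hw_dbg(hw, "link up, speed %d\n", link.link_speed);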
1809 **/ 1810 i40e_status i40e_aq_get_link_info(struct i40e_hw *hw, 1811 bool enable_lse, struct i40e_link_status *link, 1812 struct i40e_asq_cmd_details *cmd_details) 1813 { 1814 struct i40e_aq_desc desc; 1815 struct i40e_aqc_get_link_status *resp = 1816 (struct i40e_aqc_get_link_status *)&desc.params.raw; 1817 struct i40e_link_status *hw_link_info = &hw->phy.link_info; 1818 i40e_status status; 1819 bool tx_pause, rx_pause; 1820 u16 command_flags; 1821 1822 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status); 1823 1824 if (enable_lse) 1825 command_flags = I40E_AQ_LSE_ENABLE; 1826 else 1827 command_flags = I40E_AQ_LSE_DISABLE; 1828 resp->command_flags = cpu_to_le16(command_flags); 1829 1830 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 1831 1832 if (status) 1833 goto aq_get_link_info_exit; 1834 1835 /* save off old link status information */ 1836 hw->phy.link_info_old = *hw_link_info; 1837 1838 /* update link status */ 1839 hw_link_info->phy_type = (enum i40e_aq_phy_type)resp->phy_type; 1840 hw->phy.media_type = i40e_get_media_type(hw); 1841 hw_link_info->link_speed = (enum i40e_aq_link_speed)resp->link_speed; 1842 hw_link_info->link_info = resp->link_info; 1843 hw_link_info->an_info = resp->an_info; 1844 hw_link_info->fec_info = resp->config & (I40E_AQ_CONFIG_FEC_KR_ENA | 1845 I40E_AQ_CONFIG_FEC_RS_ENA); 1846 hw_link_info->ext_info = resp->ext_info; 1847 hw_link_info->loopback = resp->loopback & I40E_AQ_LOOPBACK_MASK; 1848 hw_link_info->max_frame_size = le16_to_cpu(resp->max_frame_size); 1849 hw_link_info->pacing = resp->config & I40E_AQ_CONFIG_PACING_MASK; 1850 1851 /* update fc info */ 1852 tx_pause = !!(resp->an_info & I40E_AQ_LINK_PAUSE_TX); 1853 rx_pause = !!(resp->an_info & I40E_AQ_LINK_PAUSE_RX); 1854 if (tx_pause & rx_pause) 1855 hw->fc.current_mode = I40E_FC_FULL; 1856 else if (tx_pause) 1857 hw->fc.current_mode = I40E_FC_TX_PAUSE; 1858 else if (rx_pause) 1859 hw->fc.current_mode = I40E_FC_RX_PAUSE; 1860 else 1861 hw->fc.current_mode = I40E_FC_NONE; 1862 1863 if (resp->config & I40E_AQ_CONFIG_CRC_ENA) 1864 hw_link_info->crc_enable = true; 1865 else 1866 hw_link_info->crc_enable = false; 1867 1868 if (resp->command_flags & cpu_to_le16(I40E_AQ_LSE_IS_ENABLED)) 1869 hw_link_info->lse_enable = true; 1870 else 1871 hw_link_info->lse_enable = false; 1872 1873 if ((hw->mac.type == I40E_MAC_XL710) && 1874 (hw->aq.fw_maj_ver < 4 || (hw->aq.fw_maj_ver == 4 && 1875 hw->aq.fw_min_ver < 40)) && hw_link_info->phy_type == 0xE) 1876 hw_link_info->phy_type = I40E_PHY_TYPE_10GBASE_SFPP_CU; 1877 1878 if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) { 1879 __le32 tmp; 1880 1881 memcpy(&tmp, resp->link_type, sizeof(tmp)); 1882 hw->phy.phy_types = le32_to_cpu(tmp); 1883 hw->phy.phy_types |= ((u64)resp->link_type_ext << 32); 1884 } 1885 1886 /* save link status information */ 1887 if (link) 1888 *link = *hw_link_info; 1889 1890 /* flag cleared so helper functions don't call AQ again */ 1891 hw->phy.get_link_info = false; 1892 1893 aq_get_link_info_exit: 1894 return status; 1895 } 1896 1897 /** 1898 * i40e_aq_set_phy_int_mask 1899 * @hw: pointer to the hw struct 1900 * @mask: interrupt mask to be set 1901 * @cmd_details: pointer to command details structure or NULL 1902 * 1903 * Set link interrupt mask. 
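 *
 * Illustrative caller sketch (hypothetical): a set bit in @mask suppresses
 * the corresponding PHY event, so this leaves only link up/down events
 * enabled, mirroring how the PF driver typically calls it; the event bit
 * comes from i40e_adminq_cmd.h.
 *
 *	if (i40e_aq_set_phy_int_mask(hw, (u16)~I40E_AQ_EVENT_LINK_UPDOWN, NULL))
 *		hw_dbg(hw, "failed to set PHY interrupt mask\n");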
1904 **/ 1905 i40e_status i40e_aq_set_phy_int_mask(struct i40e_hw *hw, 1906 u16 mask, 1907 struct i40e_asq_cmd_details *cmd_details) 1908 { 1909 struct i40e_aq_desc desc; 1910 struct i40e_aqc_set_phy_int_mask *cmd = 1911 (struct i40e_aqc_set_phy_int_mask *)&desc.params.raw; 1912 i40e_status status; 1913 1914 i40e_fill_default_direct_cmd_desc(&desc, 1915 i40e_aqc_opc_set_phy_int_mask); 1916 1917 cmd->event_mask = cpu_to_le16(mask); 1918 1919 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 1920 1921 return status; 1922 } 1923 1924 /** 1925 * i40e_aq_set_phy_debug 1926 * @hw: pointer to the hw struct 1927 * @cmd_flags: debug command flags 1928 * @cmd_details: pointer to command details structure or NULL 1929 * 1930 * Reset the external PHY. 1931 **/ 1932 i40e_status i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags, 1933 struct i40e_asq_cmd_details *cmd_details) 1934 { 1935 struct i40e_aq_desc desc; 1936 struct i40e_aqc_set_phy_debug *cmd = 1937 (struct i40e_aqc_set_phy_debug *)&desc.params.raw; 1938 i40e_status status; 1939 1940 i40e_fill_default_direct_cmd_desc(&desc, 1941 i40e_aqc_opc_set_phy_debug); 1942 1943 cmd->command_flags = cmd_flags; 1944 1945 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 1946 1947 return status; 1948 } 1949 1950 /** 1951 * i40e_aq_add_vsi 1952 * @hw: pointer to the hw struct 1953 * @vsi_ctx: pointer to a vsi context struct 1954 * @cmd_details: pointer to command details structure or NULL 1955 * 1956 * Add a VSI context to the hardware. 1957 **/ 1958 i40e_status i40e_aq_add_vsi(struct i40e_hw *hw, 1959 struct i40e_vsi_context *vsi_ctx, 1960 struct i40e_asq_cmd_details *cmd_details) 1961 { 1962 struct i40e_aq_desc desc; 1963 struct i40e_aqc_add_get_update_vsi *cmd = 1964 (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw; 1965 struct i40e_aqc_add_get_update_vsi_completion *resp = 1966 (struct i40e_aqc_add_get_update_vsi_completion *) 1967 &desc.params.raw; 1968 i40e_status status; 1969 1970 i40e_fill_default_direct_cmd_desc(&desc, 1971 i40e_aqc_opc_add_vsi); 1972 1973 cmd->uplink_seid = cpu_to_le16(vsi_ctx->uplink_seid); 1974 cmd->connection_type = vsi_ctx->connection_type; 1975 cmd->vf_id = vsi_ctx->vf_num; 1976 cmd->vsi_flags = cpu_to_le16(vsi_ctx->flags); 1977 1978 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); 1979 1980 status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info, 1981 sizeof(vsi_ctx->info), cmd_details); 1982 1983 if (status) 1984 goto aq_add_vsi_exit; 1985 1986 vsi_ctx->seid = le16_to_cpu(resp->seid); 1987 vsi_ctx->vsi_number = le16_to_cpu(resp->vsi_number); 1988 vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used); 1989 vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free); 1990 1991 aq_add_vsi_exit: 1992 return status; 1993 } 1994 1995 /** 1996 * i40e_aq_set_default_vsi 1997 * @hw: pointer to the hw struct 1998 * @seid: vsi number 1999 * @cmd_details: pointer to command details structure or NULL 2000 **/ 2001 i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw, 2002 u16 seid, 2003 struct i40e_asq_cmd_details *cmd_details) 2004 { 2005 struct i40e_aq_desc desc; 2006 struct i40e_aqc_set_vsi_promiscuous_modes *cmd = 2007 (struct i40e_aqc_set_vsi_promiscuous_modes *) 2008 &desc.params.raw; 2009 i40e_status status; 2010 2011 i40e_fill_default_direct_cmd_desc(&desc, 2012 i40e_aqc_opc_set_vsi_promiscuous_modes); 2013 2014 cmd->promiscuous_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT); 2015 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT); 2016 cmd->seid = 
cpu_to_le16(seid); 2017 2018 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2019 2020 return status; 2021 } 2022 2023 /** 2024 * i40e_aq_clear_default_vsi 2025 * @hw: pointer to the hw struct 2026 * @seid: vsi number 2027 * @cmd_details: pointer to command details structure or NULL 2028 **/ 2029 i40e_status i40e_aq_clear_default_vsi(struct i40e_hw *hw, 2030 u16 seid, 2031 struct i40e_asq_cmd_details *cmd_details) 2032 { 2033 struct i40e_aq_desc desc; 2034 struct i40e_aqc_set_vsi_promiscuous_modes *cmd = 2035 (struct i40e_aqc_set_vsi_promiscuous_modes *) 2036 &desc.params.raw; 2037 i40e_status status; 2038 2039 i40e_fill_default_direct_cmd_desc(&desc, 2040 i40e_aqc_opc_set_vsi_promiscuous_modes); 2041 2042 cmd->promiscuous_flags = cpu_to_le16(0); 2043 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT); 2044 cmd->seid = cpu_to_le16(seid); 2045 2046 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2047 2048 return status; 2049 } 2050 2051 /** 2052 * i40e_aq_set_vsi_unicast_promiscuous 2053 * @hw: pointer to the hw struct 2054 * @seid: vsi number 2055 * @set: set unicast promiscuous enable/disable 2056 * @cmd_details: pointer to command details structure or NULL 2057 * @rx_only_promisc: flag to decide if egress traffic gets mirrored in promisc 2058 **/ 2059 i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw, 2060 u16 seid, bool set, 2061 struct i40e_asq_cmd_details *cmd_details, 2062 bool rx_only_promisc) 2063 { 2064 struct i40e_aq_desc desc; 2065 struct i40e_aqc_set_vsi_promiscuous_modes *cmd = 2066 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; 2067 i40e_status status; 2068 u16 flags = 0; 2069 2070 i40e_fill_default_direct_cmd_desc(&desc, 2071 i40e_aqc_opc_set_vsi_promiscuous_modes); 2072 2073 if (set) { 2074 flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST; 2075 if (rx_only_promisc && 2076 (((hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver >= 5)) || 2077 (hw->aq.api_maj_ver > 1))) 2078 flags |= I40E_AQC_SET_VSI_PROMISC_TX; 2079 } 2080 2081 cmd->promiscuous_flags = cpu_to_le16(flags); 2082 2083 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST); 2084 if (((hw->aq.api_maj_ver >= 1) && (hw->aq.api_min_ver >= 5)) || 2085 (hw->aq.api_maj_ver > 1)) 2086 cmd->valid_flags |= cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_TX); 2087 2088 cmd->seid = cpu_to_le16(seid); 2089 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2090 2091 return status; 2092 } 2093 2094 /** 2095 * i40e_aq_set_vsi_multicast_promiscuous 2096 * @hw: pointer to the hw struct 2097 * @seid: vsi number 2098 * @set: set multicast promiscuous enable/disable 2099 * @cmd_details: pointer to command details structure or NULL 2100 **/ 2101 i40e_status i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw, 2102 u16 seid, bool set, struct i40e_asq_cmd_details *cmd_details) 2103 { 2104 struct i40e_aq_desc desc; 2105 struct i40e_aqc_set_vsi_promiscuous_modes *cmd = 2106 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; 2107 i40e_status status; 2108 u16 flags = 0; 2109 2110 i40e_fill_default_direct_cmd_desc(&desc, 2111 i40e_aqc_opc_set_vsi_promiscuous_modes); 2112 2113 if (set) 2114 flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST; 2115 2116 cmd->promiscuous_flags = cpu_to_le16(flags); 2117 2118 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_MULTICAST); 2119 2120 cmd->seid = cpu_to_le16(seid); 2121 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2122 2123 return status; 2124 } 2125 2126 /** 2127 * 
i40e_aq_set_vsi_mc_promisc_on_vlan 2128 * @hw: pointer to the hw struct 2129 * @seid: vsi number 2130 * @enable: set MAC L2 layer multicast promiscuous enable/disable for a given VLAN 2131 * @vid: The VLAN tag filter - capture any multicast packet with this VLAN tag 2132 * @cmd_details: pointer to command details structure or NULL 2133 **/ 2134 enum i40e_status_code i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw, 2135 u16 seid, bool enable, 2136 u16 vid, 2137 struct i40e_asq_cmd_details *cmd_details) 2138 { 2139 struct i40e_aq_desc desc; 2140 struct i40e_aqc_set_vsi_promiscuous_modes *cmd = 2141 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; 2142 enum i40e_status_code status; 2143 u16 flags = 0; 2144 2145 i40e_fill_default_direct_cmd_desc(&desc, 2146 i40e_aqc_opc_set_vsi_promiscuous_modes); 2147 2148 if (enable) 2149 flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST; 2150 2151 cmd->promiscuous_flags = cpu_to_le16(flags); 2152 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_MULTICAST); 2153 cmd->seid = cpu_to_le16(seid); 2154 cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID); 2155 2156 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2157 2158 return status; 2159 } 2160 2161
/** 2162 * i40e_aq_set_vsi_uc_promisc_on_vlan 2163 * @hw: pointer to the hw struct 2164 * @seid: vsi number 2165 * @enable: set MAC L2 layer unicast promiscuous enable/disable for a given VLAN 2166 * @vid: The VLAN tag filter - capture any unicast packet with this VLAN tag 2167 * @cmd_details: pointer to command details structure or NULL 2168 **/ 2169 enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw, 2170 u16 seid, bool enable, 2171 u16 vid, 2172 struct i40e_asq_cmd_details *cmd_details) 2173 { 2174 struct i40e_aq_desc desc; 2175 struct i40e_aqc_set_vsi_promiscuous_modes *cmd = 2176 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; 2177 enum i40e_status_code status; 2178 u16 flags = 0; 2179 2180 i40e_fill_default_direct_cmd_desc(&desc, 2181 i40e_aqc_opc_set_vsi_promiscuous_modes); 2182 2183 if (enable) 2184 flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST; 2185 2186 cmd->promiscuous_flags = cpu_to_le16(flags); 2187 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST); 2188 cmd->seid = cpu_to_le16(seid); 2189 cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID); 2190 2191 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2192 2193 return status; 2194 } 2195 2196
/** 2197 * i40e_aq_set_vsi_bc_promisc_on_vlan 2198 * @hw: pointer to the hw struct 2199 * @seid: vsi number 2200 * @enable: set broadcast promiscuous enable/disable for a given VLAN 2201 * @vid: The VLAN tag filter - capture any broadcast packet with this VLAN tag 2202 * @cmd_details: pointer to command details structure or NULL 2203 **/ 2204 i40e_status i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw, 2205 u16 seid, bool enable, u16 vid, 2206 struct i40e_asq_cmd_details *cmd_details) 2207 { 2208 struct i40e_aq_desc desc; 2209 struct i40e_aqc_set_vsi_promiscuous_modes *cmd = 2210 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; 2211 i40e_status status; 2212 u16 flags = 0; 2213 2214 i40e_fill_default_direct_cmd_desc(&desc, 2215 i40e_aqc_opc_set_vsi_promiscuous_modes); 2216 2217 if (enable) 2218 flags |= I40E_AQC_SET_VSI_PROMISC_BROADCAST; 2219 2220 cmd->promiscuous_flags = cpu_to_le16(flags); 2221 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST); 2222 cmd->seid = cpu_to_le16(seid); 2223 cmd->vlan_tag = 
cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID); 2224 2225 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2226 2227 return status; 2228 } 2229 2230
/** 2231 * i40e_aq_set_vsi_broadcast 2232 * @hw: pointer to the hw struct 2233 * @seid: vsi number 2234 * @set_filter: true to set filter, false to clear filter 2235 * @cmd_details: pointer to command details structure or NULL 2236 * 2237 * Set or clear the broadcast promiscuous flag (filter) for a given VSI. 2238 **/ 2239 i40e_status i40e_aq_set_vsi_broadcast(struct i40e_hw *hw, 2240 u16 seid, bool set_filter, 2241 struct i40e_asq_cmd_details *cmd_details) 2242 { 2243 struct i40e_aq_desc desc; 2244 struct i40e_aqc_set_vsi_promiscuous_modes *cmd = 2245 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; 2246 i40e_status status; 2247 2248 i40e_fill_default_direct_cmd_desc(&desc, 2249 i40e_aqc_opc_set_vsi_promiscuous_modes); 2250 2251 if (set_filter) 2252 cmd->promiscuous_flags 2253 |= cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST); 2254 else 2255 cmd->promiscuous_flags 2256 &= cpu_to_le16(~I40E_AQC_SET_VSI_PROMISC_BROADCAST); 2257 2258 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST); 2259 cmd->seid = cpu_to_le16(seid); 2260 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2261 2262 return status; 2263 } 2264 2265
/** 2266 * i40e_aq_set_vsi_vlan_promisc - control the VLAN promiscuous setting 2267 * @hw: pointer to the hw struct 2268 * @seid: vsi number 2269 * @enable: enable or disable the VLAN promiscuous setting 2270 * @cmd_details: pointer to command details structure or NULL 2271 **/ 2272 i40e_status i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw, 2273 u16 seid, bool enable, 2274 struct i40e_asq_cmd_details *cmd_details) 2275 { 2276 struct i40e_aq_desc desc; 2277 struct i40e_aqc_set_vsi_promiscuous_modes *cmd = 2278 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; 2279 i40e_status status; 2280 u16 flags = 0; 2281 2282 i40e_fill_default_direct_cmd_desc(&desc, 2283 i40e_aqc_opc_set_vsi_promiscuous_modes); 2284 if (enable) 2285 flags |= I40E_AQC_SET_VSI_PROMISC_VLAN; 2286 2287 cmd->promiscuous_flags = cpu_to_le16(flags); 2288 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_VLAN); 2289 cmd->seid = cpu_to_le16(seid); 2290 2291 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2292 2293 return status; 2294 } 2295 2296
/** 2297 * i40e_aq_get_vsi_params - get VSI configuration info 2298 * @hw: pointer to the hw struct 2299 * @vsi_ctx: pointer to a vsi context struct 2300 * @cmd_details: pointer to command details structure or NULL 2301 **/ 2302 i40e_status i40e_aq_get_vsi_params(struct i40e_hw *hw, 2303 struct i40e_vsi_context *vsi_ctx, 2304 struct i40e_asq_cmd_details *cmd_details) 2305 { 2306 struct i40e_aq_desc desc; 2307 struct i40e_aqc_add_get_update_vsi *cmd = 2308 (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw; 2309 struct i40e_aqc_add_get_update_vsi_completion *resp = 2310 (struct i40e_aqc_add_get_update_vsi_completion *) 2311 &desc.params.raw; 2312 i40e_status status; 2313 2314 i40e_fill_default_direct_cmd_desc(&desc, 2315 i40e_aqc_opc_get_vsi_parameters); 2316 2317 cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid); 2318 2319 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 2320 2321 status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info, 2322 sizeof(vsi_ctx->info), NULL); 2323 2324 if (status) 2325 goto aq_get_vsi_params_exit; 2326 2327 vsi_ctx->seid = le16_to_cpu(resp->seid); 2328 vsi_ctx->vsi_number = le16_to_cpu(resp->vsi_number); 2329 vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used); 2330 vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free); 2331 2332 aq_get_vsi_params_exit: 2333 return status; 2334 } 2335 2336
/** 2337 * i40e_aq_update_vsi_params 2338 * @hw: pointer to the hw struct 2339 * @vsi_ctx: pointer to a vsi context struct 2340 * @cmd_details: pointer to command details structure or NULL 2341 * 2342 * Update a VSI context. 2343 **/ 2344 i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw, 2345 struct i40e_vsi_context *vsi_ctx, 2346 struct i40e_asq_cmd_details *cmd_details) 2347 { 2348 struct i40e_aq_desc desc; 2349 struct i40e_aqc_add_get_update_vsi *cmd = 2350 (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw; 2351 struct i40e_aqc_add_get_update_vsi_completion *resp = 2352 (struct i40e_aqc_add_get_update_vsi_completion *) 2353 &desc.params.raw; 2354 i40e_status status; 2355 2356 i40e_fill_default_direct_cmd_desc(&desc, 2357 i40e_aqc_opc_update_vsi_parameters); 2358 cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid); 2359 2360 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); 2361 2362 status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info, 2363 sizeof(vsi_ctx->info), cmd_details); 2364 2365 vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used); 2366 vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free); 2367 2368 return status; 2369 } 2370 2371
/** 2372 * i40e_aq_get_switch_config 2373 * @hw: pointer to the hardware structure 2374 * @buf: pointer to the result buffer 2375 * @buf_size: length of input buffer 2376 * @start_seid: seid to start for the report, 0 == beginning 2377 * @cmd_details: pointer to command details structure or NULL 2378 * 2379 * Fill the buf with switch configuration returned from AdminQ command 2380 **/ 2381 i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw, 2382 struct i40e_aqc_get_switch_config_resp *buf, 2383 u16 buf_size, u16 *start_seid, 2384 struct i40e_asq_cmd_details *cmd_details) 2385 { 2386 struct i40e_aq_desc desc; 2387 struct i40e_aqc_switch_seid *scfg = 2388 (struct i40e_aqc_switch_seid *)&desc.params.raw; 2389 i40e_status status; 2390 2391 i40e_fill_default_direct_cmd_desc(&desc, 2392 i40e_aqc_opc_get_switch_config); 2393 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 2394 if (buf_size > I40E_AQ_LARGE_BUF) 2395 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 2396 scfg->seid = cpu_to_le16(*start_seid); 2397 2398 status = i40e_asq_send_command(hw, &desc, buf, buf_size, cmd_details); 2399 *start_seid = le16_to_cpu(scfg->seid); 2400 2401 return status; 2402 } 2403 2404
/** 2405 * i40e_aq_set_switch_config 2406 * @hw: pointer to the hardware structure 2407 * @flags: bit flag values to set 2409 * @valid_flags: which bit flags to set 2410 * @mode: cloud filter mode 2411 * @cmd_details: pointer to command details structure or NULL 2412 * 2413 * Set switch configuration bits 2414 **/ 2415 enum i40e_status_code i40e_aq_set_switch_config(struct i40e_hw *hw, 2416 u16 flags, 2417 u16 valid_flags, u8 mode, 2418 struct i40e_asq_cmd_details *cmd_details) 2419 { 2420 struct i40e_aq_desc desc; 2421 struct i40e_aqc_set_switch_config *scfg = 2422 (struct i40e_aqc_set_switch_config *)&desc.params.raw; 2423 enum i40e_status_code status; 2424 2425 i40e_fill_default_direct_cmd_desc(&desc, 2426 i40e_aqc_opc_set_switch_config); 2427 scfg->flags = cpu_to_le16(flags); 2428 scfg->valid_flags = cpu_to_le16(valid_flags); 2429 scfg->mode = mode; 2430 if (hw->flags & 
I40E_HW_FLAG_802_1AD_CAPABLE) { 2431 scfg->switch_tag = cpu_to_le16(hw->switch_tag); 2432 scfg->first_tag = cpu_to_le16(hw->first_tag); 2433 scfg->second_tag = cpu_to_le16(hw->second_tag); 2434 } 2435 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2436 2437 return status; 2438 } 2439 2440 /** 2441 * i40e_aq_get_firmware_version 2442 * @hw: pointer to the hw struct 2443 * @fw_major_version: firmware major version 2444 * @fw_minor_version: firmware minor version 2445 * @fw_build: firmware build number 2446 * @api_major_version: major queue version 2447 * @api_minor_version: minor queue version 2448 * @cmd_details: pointer to command details structure or NULL 2449 * 2450 * Get the firmware version from the admin queue commands 2451 **/ 2452 i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw, 2453 u16 *fw_major_version, u16 *fw_minor_version, 2454 u32 *fw_build, 2455 u16 *api_major_version, u16 *api_minor_version, 2456 struct i40e_asq_cmd_details *cmd_details) 2457 { 2458 struct i40e_aq_desc desc; 2459 struct i40e_aqc_get_version *resp = 2460 (struct i40e_aqc_get_version *)&desc.params.raw; 2461 i40e_status status; 2462 2463 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_version); 2464 2465 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2466 2467 if (!status) { 2468 if (fw_major_version) 2469 *fw_major_version = le16_to_cpu(resp->fw_major); 2470 if (fw_minor_version) 2471 *fw_minor_version = le16_to_cpu(resp->fw_minor); 2472 if (fw_build) 2473 *fw_build = le32_to_cpu(resp->fw_build); 2474 if (api_major_version) 2475 *api_major_version = le16_to_cpu(resp->api_major); 2476 if (api_minor_version) 2477 *api_minor_version = le16_to_cpu(resp->api_minor); 2478 } 2479 2480 return status; 2481 } 2482 2483 /** 2484 * i40e_aq_send_driver_version 2485 * @hw: pointer to the hw struct 2486 * @dv: driver's major, minor version 2487 * @cmd_details: pointer to command details structure or NULL 2488 * 2489 * Send the driver version to the firmware 2490 **/ 2491 i40e_status i40e_aq_send_driver_version(struct i40e_hw *hw, 2492 struct i40e_driver_version *dv, 2493 struct i40e_asq_cmd_details *cmd_details) 2494 { 2495 struct i40e_aq_desc desc; 2496 struct i40e_aqc_driver_version *cmd = 2497 (struct i40e_aqc_driver_version *)&desc.params.raw; 2498 i40e_status status; 2499 u16 len; 2500 2501 if (dv == NULL) 2502 return I40E_ERR_PARAM; 2503 2504 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_driver_version); 2505 2506 desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD); 2507 cmd->driver_major_ver = dv->major_version; 2508 cmd->driver_minor_ver = dv->minor_version; 2509 cmd->driver_build_ver = dv->build_version; 2510 cmd->driver_subbuild_ver = dv->subbuild_version; 2511 2512 len = 0; 2513 while (len < sizeof(dv->driver_string) && 2514 (dv->driver_string[len] < 0x80) && 2515 dv->driver_string[len]) 2516 len++; 2517 status = i40e_asq_send_command(hw, &desc, dv->driver_string, 2518 len, cmd_details); 2519 2520 return status; 2521 } 2522 2523 /** 2524 * i40e_get_link_status - get status of the HW network link 2525 * @hw: pointer to the hw struct 2526 * @link_up: pointer to bool (true/false = linkup/linkdown) 2527 * 2528 * Variable link_up true if link is up, false if link is down. 
2529 * The value of *link_up is not valid if the returned status is non-zero 2530 * 2531 * Side effect: LinkStatusEvent reporting becomes enabled 2532 **/ 2533 i40e_status i40e_get_link_status(struct i40e_hw *hw, bool *link_up) 2534 { 2535 i40e_status status = 0; 2536 2537 if (hw->phy.get_link_info) { 2538 status = i40e_update_link_info(hw); 2539 2540 if (status) 2541 i40e_debug(hw, I40E_DEBUG_LINK, "get link failed: status %d\n", 2542 status); 2543 } 2544 2545 *link_up = hw->phy.link_info.link_info & I40E_AQ_LINK_UP; 2546 2547 return status; 2548 } 2549 2550
/** 2551 * i40e_update_link_info - update status of the HW network link 2552 * @hw: pointer to the hw struct 2553 **/ 2554 noinline_for_stack i40e_status i40e_update_link_info(struct i40e_hw *hw) 2555 { 2556 struct i40e_aq_get_phy_abilities_resp abilities; 2557 i40e_status status = 0; 2558 2559 status = i40e_aq_get_link_info(hw, true, NULL, NULL); 2560 if (status) 2561 return status; 2562 2563 /* extra checking needed to ensure link info to user is timely */ 2564 if ((hw->phy.link_info.link_info & I40E_AQ_MEDIA_AVAILABLE) && 2565 ((hw->phy.link_info.link_info & I40E_AQ_LINK_UP) || 2566 !(hw->phy.link_info_old.link_info & I40E_AQ_LINK_UP))) { 2567 status = i40e_aq_get_phy_capabilities(hw, false, false, 2568 &abilities, NULL); 2569 if (status) 2570 return status; 2571 2572 hw->phy.link_info.req_fec_info = 2573 abilities.fec_cfg_curr_mod_ext_info & 2574 (I40E_AQ_REQUEST_FEC_KR | I40E_AQ_REQUEST_FEC_RS); 2575 2576 memcpy(hw->phy.link_info.module_type, &abilities.module_type, 2577 sizeof(hw->phy.link_info.module_type)); 2578 } 2579 2580 return status; 2581 } 2582 2583
/** 2584 * i40e_aq_add_veb - Insert a VEB between the VSI and the MAC 2585 * @hw: pointer to the hw struct 2586 * @uplink_seid: the MAC or other gizmo SEID 2587 * @downlink_seid: the VSI SEID 2588 * @enabled_tc: bitmap of TCs to be enabled 2589 * @default_port: true for default port VSI, false for control port 2590 * @veb_seid: pointer to where to put the resulting VEB SEID 2591 * @enable_stats: true to turn on VEB stats 2592 * @cmd_details: pointer to command details structure or NULL 2593 * 2594 * This asks the FW to add a VEB between the uplink and downlink 2595 * elements. If the uplink SEID is 0, this will be a floating VEB.
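 *
 * Illustrative caller sketch (hypothetical; mac_seid and vsi_seid are
 * placeholders for SEIDs discovered via the switch configuration, and
 * enabled_tc 0x1 enables TC0 only):
 *
 *	u16 veb_seid = 0;
 *
 *	if (!i40e_aq_add_veb(hw, mac_seid, vsi_seid, 0x1, true,
 *			     &veb_seid, true, NULL))
 *		hw_dbg(hw, "added VEB, seid %d\n", veb_seid);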
2596 **/ 2597 i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid, 2598 u16 downlink_seid, u8 enabled_tc, 2599 bool default_port, u16 *veb_seid, 2600 bool enable_stats, 2601 struct i40e_asq_cmd_details *cmd_details) 2602 { 2603 struct i40e_aq_desc desc; 2604 struct i40e_aqc_add_veb *cmd = 2605 (struct i40e_aqc_add_veb *)&desc.params.raw; 2606 struct i40e_aqc_add_veb_completion *resp = 2607 (struct i40e_aqc_add_veb_completion *)&desc.params.raw; 2608 i40e_status status; 2609 u16 veb_flags = 0; 2610 2611 /* SEIDs need to either both be set or both be 0 for floating VEB */ 2612 if (!!uplink_seid != !!downlink_seid) 2613 return I40E_ERR_PARAM; 2614 2615 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_veb); 2616 2617 cmd->uplink_seid = cpu_to_le16(uplink_seid); 2618 cmd->downlink_seid = cpu_to_le16(downlink_seid); 2619 cmd->enable_tcs = enabled_tc; 2620 if (!uplink_seid) 2621 veb_flags |= I40E_AQC_ADD_VEB_FLOATING; 2622 if (default_port) 2623 veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT; 2624 else 2625 veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DATA; 2626 2627 /* reverse logic here: set the bitflag to disable the stats */ 2628 if (!enable_stats) 2629 veb_flags |= I40E_AQC_ADD_VEB_ENABLE_DISABLE_STATS; 2630 2631 cmd->veb_flags = cpu_to_le16(veb_flags); 2632 2633 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2634 2635 if (!status && veb_seid) 2636 *veb_seid = le16_to_cpu(resp->veb_seid); 2637 2638 return status; 2639 } 2640 2641 /** 2642 * i40e_aq_get_veb_parameters - Retrieve VEB parameters 2643 * @hw: pointer to the hw struct 2644 * @veb_seid: the SEID of the VEB to query 2645 * @switch_id: the uplink switch id 2646 * @floating: set to true if the VEB is floating 2647 * @statistic_index: index of the stats counter block for this VEB 2648 * @vebs_used: number of VEB's used by function 2649 * @vebs_free: total VEB's not reserved by any function 2650 * @cmd_details: pointer to command details structure or NULL 2651 * 2652 * This retrieves the parameters for a particular VEB, specified by 2653 * uplink_seid, and returns them to the caller. 
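 *
 * Illustrative caller sketch (hypothetical; veb_seid would come from a
 * previous i40e_aq_add_veb call, and outputs that are not needed may be
 * passed as NULL):
 *
 *	u16 stat_idx = 0;
 *	bool floating = false;
 *
 *	if (!i40e_aq_get_veb_parameters(hw, veb_seid, NULL, &floating,
 *					&stat_idx, NULL, NULL, NULL))
 *		hw_dbg(hw, "VEB stats index %d, floating %d\n",
 *		       stat_idx, floating);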
2654 **/ 2655 i40e_status i40e_aq_get_veb_parameters(struct i40e_hw *hw, 2656 u16 veb_seid, u16 *switch_id, 2657 bool *floating, u16 *statistic_index, 2658 u16 *vebs_used, u16 *vebs_free, 2659 struct i40e_asq_cmd_details *cmd_details) 2660 { 2661 struct i40e_aq_desc desc; 2662 struct i40e_aqc_get_veb_parameters_completion *cmd_resp = 2663 (struct i40e_aqc_get_veb_parameters_completion *) 2664 &desc.params.raw; 2665 i40e_status status; 2666 2667 if (veb_seid == 0) 2668 return I40E_ERR_PARAM; 2669 2670 i40e_fill_default_direct_cmd_desc(&desc, 2671 i40e_aqc_opc_get_veb_parameters); 2672 cmd_resp->seid = cpu_to_le16(veb_seid); 2673 2674 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2675 if (status) 2676 goto get_veb_exit; 2677 2678 if (switch_id) 2679 *switch_id = le16_to_cpu(cmd_resp->switch_id); 2680 if (statistic_index) 2681 *statistic_index = le16_to_cpu(cmd_resp->statistic_index); 2682 if (vebs_used) 2683 *vebs_used = le16_to_cpu(cmd_resp->vebs_used); 2684 if (vebs_free) 2685 *vebs_free = le16_to_cpu(cmd_resp->vebs_free); 2686 if (floating) { 2687 u16 flags = le16_to_cpu(cmd_resp->veb_flags); 2688 2689 if (flags & I40E_AQC_ADD_VEB_FLOATING) 2690 *floating = true; 2691 else 2692 *floating = false; 2693 } 2694 2695 get_veb_exit: 2696 return status; 2697 } 2698 2699 /** 2700 * i40e_aq_add_macvlan 2701 * @hw: pointer to the hw struct 2702 * @seid: VSI for the mac address 2703 * @mv_list: list of macvlans to be added 2704 * @count: length of the list 2705 * @cmd_details: pointer to command details structure or NULL 2706 * 2707 * Add MAC/VLAN addresses to the HW filtering 2708 **/ 2709 i40e_status i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid, 2710 struct i40e_aqc_add_macvlan_element_data *mv_list, 2711 u16 count, struct i40e_asq_cmd_details *cmd_details) 2712 { 2713 struct i40e_aq_desc desc; 2714 struct i40e_aqc_macvlan *cmd = 2715 (struct i40e_aqc_macvlan *)&desc.params.raw; 2716 i40e_status status; 2717 u16 buf_size; 2718 int i; 2719 2720 if (count == 0 || !mv_list || !hw) 2721 return I40E_ERR_PARAM; 2722 2723 buf_size = count * sizeof(*mv_list); 2724 2725 /* prep the rest of the request */ 2726 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_macvlan); 2727 cmd->num_addresses = cpu_to_le16(count); 2728 cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid); 2729 cmd->seid[1] = 0; 2730 cmd->seid[2] = 0; 2731 2732 for (i = 0; i < count; i++) 2733 if (is_multicast_ether_addr(mv_list[i].mac_addr)) 2734 mv_list[i].flags |= 2735 cpu_to_le16(I40E_AQC_MACVLAN_ADD_USE_SHARED_MAC); 2736 2737 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); 2738 if (buf_size > I40E_AQ_LARGE_BUF) 2739 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 2740 2741 status = i40e_asq_send_command(hw, &desc, mv_list, buf_size, 2742 cmd_details); 2743 2744 return status; 2745 } 2746 2747 /** 2748 * i40e_aq_remove_macvlan 2749 * @hw: pointer to the hw struct 2750 * @seid: VSI for the mac address 2751 * @mv_list: list of macvlans to be removed 2752 * @count: length of the list 2753 * @cmd_details: pointer to command details structure or NULL 2754 * 2755 * Remove MAC/VLAN addresses from the HW filtering 2756 **/ 2757 i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid, 2758 struct i40e_aqc_remove_macvlan_element_data *mv_list, 2759 u16 count, struct i40e_asq_cmd_details *cmd_details) 2760 { 2761 struct i40e_aq_desc desc; 2762 struct i40e_aqc_macvlan *cmd = 2763 (struct i40e_aqc_macvlan *)&desc.params.raw; 2764 i40e_status status; 2765 u16 buf_size; 2766 
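/* validate the arguments before building the AQ request */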
2767 if (count == 0 || !mv_list || !hw) 2768 return I40E_ERR_PARAM; 2769 2770 buf_size = count * sizeof(*mv_list); 2771 2772 /* prep the rest of the request */ 2773 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_macvlan); 2774 cmd->num_addresses = cpu_to_le16(count); 2775 cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid); 2776 cmd->seid[1] = 0; 2777 cmd->seid[2] = 0; 2778 2779 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); 2780 if (buf_size > I40E_AQ_LARGE_BUF) 2781 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 2782 2783 status = i40e_asq_send_command(hw, &desc, mv_list, buf_size, 2784 cmd_details); 2785 2786 return status; 2787 } 2788 2789 /** 2790 * i40e_mirrorrule_op - Internal helper function to add/delete mirror rule 2791 * @hw: pointer to the hw struct 2792 * @opcode: AQ opcode for add or delete mirror rule 2793 * @sw_seid: Switch SEID (to which rule refers) 2794 * @rule_type: Rule Type (ingress/egress/VLAN) 2795 * @id: Destination VSI SEID or Rule ID 2796 * @count: length of the list 2797 * @mr_list: list of mirrored VSI SEIDs or VLAN IDs 2798 * @cmd_details: pointer to command details structure or NULL 2799 * @rule_id: Rule ID returned from FW 2800 * @rules_used: Number of rules used in internal switch 2801 * @rules_free: Number of rules free in internal switch 2802 * 2803 * Add/Delete a mirror rule to a specific switch. Mirror rules are supported for 2804 * VEBs/VEPA elements only 2805 **/ 2806 static i40e_status i40e_mirrorrule_op(struct i40e_hw *hw, 2807 u16 opcode, u16 sw_seid, u16 rule_type, u16 id, 2808 u16 count, __le16 *mr_list, 2809 struct i40e_asq_cmd_details *cmd_details, 2810 u16 *rule_id, u16 *rules_used, u16 *rules_free) 2811 { 2812 struct i40e_aq_desc desc; 2813 struct i40e_aqc_add_delete_mirror_rule *cmd = 2814 (struct i40e_aqc_add_delete_mirror_rule *)&desc.params.raw; 2815 struct i40e_aqc_add_delete_mirror_rule_completion *resp = 2816 (struct i40e_aqc_add_delete_mirror_rule_completion *)&desc.params.raw; 2817 i40e_status status; 2818 u16 buf_size; 2819 2820 buf_size = count * sizeof(*mr_list); 2821 2822 /* prep the rest of the request */ 2823 i40e_fill_default_direct_cmd_desc(&desc, opcode); 2824 cmd->seid = cpu_to_le16(sw_seid); 2825 cmd->rule_type = cpu_to_le16(rule_type & 2826 I40E_AQC_MIRROR_RULE_TYPE_MASK); 2827 cmd->num_entries = cpu_to_le16(count); 2828 /* Dest VSI for add, rule_id for delete */ 2829 cmd->destination = cpu_to_le16(id); 2830 if (mr_list) { 2831 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | 2832 I40E_AQ_FLAG_RD)); 2833 if (buf_size > I40E_AQ_LARGE_BUF) 2834 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 2835 } 2836 2837 status = i40e_asq_send_command(hw, &desc, mr_list, buf_size, 2838 cmd_details); 2839 if (!status || 2840 hw->aq.asq_last_status == I40E_AQ_RC_ENOSPC) { 2841 if (rule_id) 2842 *rule_id = le16_to_cpu(resp->rule_id); 2843 if (rules_used) 2844 *rules_used = le16_to_cpu(resp->mirror_rules_used); 2845 if (rules_free) 2846 *rules_free = le16_to_cpu(resp->mirror_rules_free); 2847 } 2848 return status; 2849 } 2850 2851 /** 2852 * i40e_aq_add_mirrorrule - add a mirror rule 2853 * @hw: pointer to the hw struct 2854 * @sw_seid: Switch SEID (to which rule refers) 2855 * @rule_type: Rule Type (ingress/egress/VLAN) 2856 * @dest_vsi: SEID of VSI to which packets will be mirrored 2857 * @count: length of the list 2858 * @mr_list: list of mirrored VSI SEIDs or VLAN IDs 2859 * @cmd_details: pointer to command details structure or NULL 2860 * @rule_id: Rule ID returned from FW 2861 * 
@rules_used: Number of rules used in internal switch 2862 * @rules_free: Number of rules free in internal switch 2863 * 2864 * Add mirror rule. Mirror rules are supported for VEBs or VEPA elements only 2865 **/ 2866 i40e_status i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid, 2867 u16 rule_type, u16 dest_vsi, u16 count, __le16 *mr_list, 2868 struct i40e_asq_cmd_details *cmd_details, 2869 u16 *rule_id, u16 *rules_used, u16 *rules_free) 2870 { 2871 if (!(rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS || 2872 rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS)) { 2873 if (count == 0 || !mr_list) 2874 return I40E_ERR_PARAM; 2875 } 2876 2877 return i40e_mirrorrule_op(hw, i40e_aqc_opc_add_mirror_rule, sw_seid, 2878 rule_type, dest_vsi, count, mr_list, 2879 cmd_details, rule_id, rules_used, rules_free); 2880 } 2881 2882 /** 2883 * i40e_aq_delete_mirrorrule - delete a mirror rule 2884 * @hw: pointer to the hw struct 2885 * @sw_seid: Switch SEID (to which rule refers) 2886 * @rule_type: Rule Type (ingress/egress/VLAN) 2887 * @count: length of the list 2888 * @rule_id: Rule ID that is returned in the receive desc as part of 2889 * add_mirrorrule. 2890 * @mr_list: list of mirrored VLAN IDs to be removed 2891 * @cmd_details: pointer to command details structure or NULL 2892 * @rules_used: Number of rules used in internal switch 2893 * @rules_free: Number of rules free in internal switch 2894 * 2895 * Delete a mirror rule. Mirror rules are supported for VEBs/VEPA elements only 2896 **/ 2897 i40e_status i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid, 2898 u16 rule_type, u16 rule_id, u16 count, __le16 *mr_list, 2899 struct i40e_asq_cmd_details *cmd_details, 2900 u16 *rules_used, u16 *rules_free) 2901 { 2902 /* Rule ID has to be valid except rule_type: INGRESS VLAN mirroring */ 2903 if (rule_type == I40E_AQC_MIRROR_RULE_TYPE_VLAN) { 2904 /* count and mr_list shall be valid for rule_type INGRESS VLAN 2905 * mirroring. For other rule_type, count and rule_type should 2906 * not matter. 
2907 */ 2908 if (count == 0 || !mr_list) 2909 return I40E_ERR_PARAM; 2910 } 2911 2912 return i40e_mirrorrule_op(hw, i40e_aqc_opc_delete_mirror_rule, sw_seid, 2913 rule_type, rule_id, count, mr_list, 2914 cmd_details, NULL, rules_used, rules_free); 2915 } 2916 2917 /** 2918 * i40e_aq_send_msg_to_vf 2919 * @hw: pointer to the hardware structure 2920 * @vfid: VF id to send msg 2921 * @v_opcode: opcodes for VF-PF communication 2922 * @v_retval: return error code 2923 * @msg: pointer to the msg buffer 2924 * @msglen: msg length 2925 * @cmd_details: pointer to command details 2926 * 2927 * send msg to vf 2928 **/ 2929 i40e_status i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid, 2930 u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen, 2931 struct i40e_asq_cmd_details *cmd_details) 2932 { 2933 struct i40e_aq_desc desc; 2934 struct i40e_aqc_pf_vf_message *cmd = 2935 (struct i40e_aqc_pf_vf_message *)&desc.params.raw; 2936 i40e_status status; 2937 2938 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_send_msg_to_vf); 2939 cmd->id = cpu_to_le32(vfid); 2940 desc.cookie_high = cpu_to_le32(v_opcode); 2941 desc.cookie_low = cpu_to_le32(v_retval); 2942 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_SI); 2943 if (msglen) { 2944 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | 2945 I40E_AQ_FLAG_RD)); 2946 if (msglen > I40E_AQ_LARGE_BUF) 2947 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 2948 desc.datalen = cpu_to_le16(msglen); 2949 } 2950 status = i40e_asq_send_command(hw, &desc, msg, msglen, cmd_details); 2951 2952 return status; 2953 } 2954 2955 /** 2956 * i40e_aq_debug_read_register 2957 * @hw: pointer to the hw struct 2958 * @reg_addr: register address 2959 * @reg_val: register value 2960 * @cmd_details: pointer to command details structure or NULL 2961 * 2962 * Read the register using the admin queue commands 2963 **/ 2964 i40e_status i40e_aq_debug_read_register(struct i40e_hw *hw, 2965 u32 reg_addr, u64 *reg_val, 2966 struct i40e_asq_cmd_details *cmd_details) 2967 { 2968 struct i40e_aq_desc desc; 2969 struct i40e_aqc_debug_reg_read_write *cmd_resp = 2970 (struct i40e_aqc_debug_reg_read_write *)&desc.params.raw; 2971 i40e_status status; 2972 2973 if (reg_val == NULL) 2974 return I40E_ERR_PARAM; 2975 2976 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_read_reg); 2977 2978 cmd_resp->address = cpu_to_le32(reg_addr); 2979 2980 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2981 2982 if (!status) { 2983 *reg_val = ((u64)le32_to_cpu(cmd_resp->value_high) << 32) | 2984 (u64)le32_to_cpu(cmd_resp->value_low); 2985 } 2986 2987 return status; 2988 } 2989 2990 /** 2991 * i40e_aq_debug_write_register 2992 * @hw: pointer to the hw struct 2993 * @reg_addr: register address 2994 * @reg_val: register value 2995 * @cmd_details: pointer to command details structure or NULL 2996 * 2997 * Write to a register using the admin queue commands 2998 **/ 2999 i40e_status i40e_aq_debug_write_register(struct i40e_hw *hw, 3000 u32 reg_addr, u64 reg_val, 3001 struct i40e_asq_cmd_details *cmd_details) 3002 { 3003 struct i40e_aq_desc desc; 3004 struct i40e_aqc_debug_reg_read_write *cmd = 3005 (struct i40e_aqc_debug_reg_read_write *)&desc.params.raw; 3006 i40e_status status; 3007 3008 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_write_reg); 3009 3010 cmd->address = cpu_to_le32(reg_addr); 3011 cmd->value_high = cpu_to_le32((u32)(reg_val >> 32)); 3012 cmd->value_low = cpu_to_le32((u32)(reg_val & 0xFFFFFFFF)); 3013 3014 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 
3015 3016 return status; 3017 } 3018 3019 /** 3020 * i40e_aq_request_resource 3021 * @hw: pointer to the hw struct 3022 * @resource: resource id 3023 * @access: access type 3024 * @sdp_number: resource number 3025 * @timeout: the maximum time in ms that the driver may hold the resource 3026 * @cmd_details: pointer to command details structure or NULL 3027 * 3028 * requests common resource using the admin queue commands 3029 **/ 3030 i40e_status i40e_aq_request_resource(struct i40e_hw *hw, 3031 enum i40e_aq_resources_ids resource, 3032 enum i40e_aq_resource_access_type access, 3033 u8 sdp_number, u64 *timeout, 3034 struct i40e_asq_cmd_details *cmd_details) 3035 { 3036 struct i40e_aq_desc desc; 3037 struct i40e_aqc_request_resource *cmd_resp = 3038 (struct i40e_aqc_request_resource *)&desc.params.raw; 3039 i40e_status status; 3040 3041 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_request_resource); 3042 3043 cmd_resp->resource_id = cpu_to_le16(resource); 3044 cmd_resp->access_type = cpu_to_le16(access); 3045 cmd_resp->resource_number = cpu_to_le32(sdp_number); 3046 3047 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3048 /* The completion specifies the maximum time in ms that the driver 3049 * may hold the resource in the Timeout field. 3050 * If the resource is held by someone else, the command completes with 3051 * busy return value and the timeout field indicates the maximum time 3052 * the current owner of the resource has to free it. 3053 */ 3054 if (!status || hw->aq.asq_last_status == I40E_AQ_RC_EBUSY) 3055 *timeout = le32_to_cpu(cmd_resp->timeout); 3056 3057 return status; 3058 } 3059 3060 /** 3061 * i40e_aq_release_resource 3062 * @hw: pointer to the hw struct 3063 * @resource: resource id 3064 * @sdp_number: resource number 3065 * @cmd_details: pointer to command details structure or NULL 3066 * 3067 * release common resource using the admin queue commands 3068 **/ 3069 i40e_status i40e_aq_release_resource(struct i40e_hw *hw, 3070 enum i40e_aq_resources_ids resource, 3071 u8 sdp_number, 3072 struct i40e_asq_cmd_details *cmd_details) 3073 { 3074 struct i40e_aq_desc desc; 3075 struct i40e_aqc_request_resource *cmd = 3076 (struct i40e_aqc_request_resource *)&desc.params.raw; 3077 i40e_status status; 3078 3079 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_release_resource); 3080 3081 cmd->resource_id = cpu_to_le16(resource); 3082 cmd->resource_number = cpu_to_le32(sdp_number); 3083 3084 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3085 3086 return status; 3087 } 3088 3089 /** 3090 * i40e_aq_read_nvm 3091 * @hw: pointer to the hw struct 3092 * @module_pointer: module pointer location in words from the NVM beginning 3093 * @offset: byte offset from the module beginning 3094 * @length: length of the section to be read (in bytes from the offset) 3095 * @data: command buffer (size [bytes] = length) 3096 * @last_command: tells if this is the last command in a series 3097 * @cmd_details: pointer to command details structure or NULL 3098 * 3099 * Read the NVM using the admin queue commands 3100 **/ 3101 i40e_status i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer, 3102 u32 offset, u16 length, void *data, 3103 bool last_command, 3104 struct i40e_asq_cmd_details *cmd_details) 3105 { 3106 struct i40e_aq_desc desc; 3107 struct i40e_aqc_nvm_update *cmd = 3108 (struct i40e_aqc_nvm_update *)&desc.params.raw; 3109 i40e_status status; 3110 3111 /* In offset the highest byte must be zeroed. 
*/ 3112 if (offset & 0xFF000000) { 3113 status = I40E_ERR_PARAM; 3114 goto i40e_aq_read_nvm_exit; 3115 } 3116 3117 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_read); 3118 3119 /* If this is the last command in a series, set the proper flag. */ 3120 if (last_command) 3121 cmd->command_flags |= I40E_AQ_NVM_LAST_CMD; 3122 cmd->module_pointer = module_pointer; 3123 cmd->offset = cpu_to_le32(offset); 3124 cmd->length = cpu_to_le16(length); 3125 3126 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 3127 if (length > I40E_AQ_LARGE_BUF) 3128 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 3129 3130 status = i40e_asq_send_command(hw, &desc, data, length, cmd_details); 3131 3132 i40e_aq_read_nvm_exit: 3133 return status; 3134 } 3135 3136 /** 3137 * i40e_aq_erase_nvm 3138 * @hw: pointer to the hw struct 3139 * @module_pointer: module pointer location in words from the NVM beginning 3140 * @offset: offset in the module (expressed in 4 KB from module's beginning) 3141 * @length: length of the section to be erased (expressed in 4 KB) 3142 * @last_command: tells if this is the last command in a series 3143 * @cmd_details: pointer to command details structure or NULL 3144 * 3145 * Erase the NVM sector using the admin queue commands 3146 **/ 3147 i40e_status i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer, 3148 u32 offset, u16 length, bool last_command, 3149 struct i40e_asq_cmd_details *cmd_details) 3150 { 3151 struct i40e_aq_desc desc; 3152 struct i40e_aqc_nvm_update *cmd = 3153 (struct i40e_aqc_nvm_update *)&desc.params.raw; 3154 i40e_status status; 3155 3156 /* In offset the highest byte must be zeroed. */ 3157 if (offset & 0xFF000000) { 3158 status = I40E_ERR_PARAM; 3159 goto i40e_aq_erase_nvm_exit; 3160 } 3161 3162 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_erase); 3163 3164 /* If this is the last command in a series, set the proper flag. */ 3165 if (last_command) 3166 cmd->command_flags |= I40E_AQ_NVM_LAST_CMD; 3167 cmd->module_pointer = module_pointer; 3168 cmd->offset = cpu_to_le32(offset); 3169 cmd->length = cpu_to_le16(length); 3170 3171 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3172 3173 i40e_aq_erase_nvm_exit: 3174 return status; 3175 } 3176 3177 /** 3178 * i40e_parse_discover_capabilities 3179 * @hw: pointer to the hw struct 3180 * @buff: pointer to a buffer containing device/function capability records 3181 * @cap_count: number of capability records in the list 3182 * @list_type_opc: type of capabilities list to parse 3183 * 3184 * Parse the device/function capabilities list. 
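 *
 * Each record is a struct i40e_aqc_list_capabilities_element_resp; as an
 * illustrative (hypothetical) example, a record with
 * id == I40E_AQ_CAP_ID_MSIX and number == 129 is folded below into
 * p->num_msix_vectors = 129 in the selected capability structure.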
3185 **/ 3186 static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff, 3187 u32 cap_count, 3188 enum i40e_admin_queue_opc list_type_opc) 3189 { 3190 struct i40e_aqc_list_capabilities_element_resp *cap; 3191 u32 valid_functions, num_functions; 3192 u32 number, logical_id, phys_id; 3193 struct i40e_hw_capabilities *p; 3194 u16 id, ocp_cfg_word0; 3195 i40e_status status; 3196 u8 major_rev; 3197 u32 i = 0; 3198 3199 cap = (struct i40e_aqc_list_capabilities_element_resp *) buff; 3200 3201 if (list_type_opc == i40e_aqc_opc_list_dev_capabilities) 3202 p = &hw->dev_caps; 3203 else if (list_type_opc == i40e_aqc_opc_list_func_capabilities) 3204 p = &hw->func_caps; 3205 else 3206 return; 3207 3208 for (i = 0; i < cap_count; i++, cap++) { 3209 id = le16_to_cpu(cap->id); 3210 number = le32_to_cpu(cap->number); 3211 logical_id = le32_to_cpu(cap->logical_id); 3212 phys_id = le32_to_cpu(cap->phys_id); 3213 major_rev = cap->major_rev; 3214 3215 switch (id) { 3216 case I40E_AQ_CAP_ID_SWITCH_MODE: 3217 p->switch_mode = number; 3218 break; 3219 case I40E_AQ_CAP_ID_MNG_MODE: 3220 p->management_mode = number; 3221 if (major_rev > 1) { 3222 p->mng_protocols_over_mctp = logical_id; 3223 i40e_debug(hw, I40E_DEBUG_INIT, 3224 "HW Capability: Protocols over MCTP = %d\n", 3225 p->mng_protocols_over_mctp); 3226 } else { 3227 p->mng_protocols_over_mctp = 0; 3228 } 3229 break; 3230 case I40E_AQ_CAP_ID_NPAR_ACTIVE: 3231 p->npar_enable = number; 3232 break; 3233 case I40E_AQ_CAP_ID_OS2BMC_CAP: 3234 p->os2bmc = number; 3235 break; 3236 case I40E_AQ_CAP_ID_FUNCTIONS_VALID: 3237 p->valid_functions = number; 3238 break; 3239 case I40E_AQ_CAP_ID_SRIOV: 3240 if (number == 1) 3241 p->sr_iov_1_1 = true; 3242 break; 3243 case I40E_AQ_CAP_ID_VF: 3244 p->num_vfs = number; 3245 p->vf_base_id = logical_id; 3246 break; 3247 case I40E_AQ_CAP_ID_VMDQ: 3248 if (number == 1) 3249 p->vmdq = true; 3250 break; 3251 case I40E_AQ_CAP_ID_8021QBG: 3252 if (number == 1) 3253 p->evb_802_1_qbg = true; 3254 break; 3255 case I40E_AQ_CAP_ID_8021QBR: 3256 if (number == 1) 3257 p->evb_802_1_qbh = true; 3258 break; 3259 case I40E_AQ_CAP_ID_VSI: 3260 p->num_vsis = number; 3261 break; 3262 case I40E_AQ_CAP_ID_DCB: 3263 if (number == 1) { 3264 p->dcb = true; 3265 p->enabled_tcmap = logical_id; 3266 p->maxtc = phys_id; 3267 } 3268 break; 3269 case I40E_AQ_CAP_ID_FCOE: 3270 if (number == 1) 3271 p->fcoe = true; 3272 break; 3273 case I40E_AQ_CAP_ID_ISCSI: 3274 if (number == 1) 3275 p->iscsi = true; 3276 break; 3277 case I40E_AQ_CAP_ID_RSS: 3278 p->rss = true; 3279 p->rss_table_size = number; 3280 p->rss_table_entry_width = logical_id; 3281 break; 3282 case I40E_AQ_CAP_ID_RXQ: 3283 p->num_rx_qp = number; 3284 p->base_queue = phys_id; 3285 break; 3286 case I40E_AQ_CAP_ID_TXQ: 3287 p->num_tx_qp = number; 3288 p->base_queue = phys_id; 3289 break; 3290 case I40E_AQ_CAP_ID_MSIX: 3291 p->num_msix_vectors = number; 3292 i40e_debug(hw, I40E_DEBUG_INIT, 3293 "HW Capability: MSIX vector count = %d\n", 3294 p->num_msix_vectors); 3295 break; 3296 case I40E_AQ_CAP_ID_VF_MSIX: 3297 p->num_msix_vectors_vf = number; 3298 break; 3299 case I40E_AQ_CAP_ID_FLEX10: 3300 if (major_rev == 1) { 3301 if (number == 1) { 3302 p->flex10_enable = true; 3303 p->flex10_capable = true; 3304 } 3305 } else { 3306 /* Capability revision >= 2 */ 3307 if (number & 1) 3308 p->flex10_enable = true; 3309 if (number & 2) 3310 p->flex10_capable = true; 3311 } 3312 p->flex10_mode = logical_id; 3313 p->flex10_status = phys_id; 3314 break; 3315 case I40E_AQ_CAP_ID_CEM: 3316 if (number == 
1) 3317 p->mgmt_cem = true; 3318 break; 3319 case I40E_AQ_CAP_ID_IWARP: 3320 if (number == 1) 3321 p->iwarp = true; 3322 break; 3323 case I40E_AQ_CAP_ID_LED: 3324 if (phys_id < I40E_HW_CAP_MAX_GPIO) 3325 p->led[phys_id] = true; 3326 break; 3327 case I40E_AQ_CAP_ID_SDP: 3328 if (phys_id < I40E_HW_CAP_MAX_GPIO) 3329 p->sdp[phys_id] = true; 3330 break; 3331 case I40E_AQ_CAP_ID_MDIO: 3332 if (number == 1) { 3333 p->mdio_port_num = phys_id; 3334 p->mdio_port_mode = logical_id; 3335 } 3336 break; 3337 case I40E_AQ_CAP_ID_1588: 3338 if (number == 1) 3339 p->ieee_1588 = true; 3340 break; 3341 case I40E_AQ_CAP_ID_FLOW_DIRECTOR: 3342 p->fd = true; 3343 p->fd_filters_guaranteed = number; 3344 p->fd_filters_best_effort = logical_id; 3345 break; 3346 case I40E_AQ_CAP_ID_WSR_PROT: 3347 p->wr_csr_prot = (u64)number; 3348 p->wr_csr_prot |= (u64)logical_id << 32; 3349 break; 3350 case I40E_AQ_CAP_ID_NVM_MGMT: 3351 if (number & I40E_NVM_MGMT_SEC_REV_DISABLED) 3352 p->sec_rev_disabled = true; 3353 if (number & I40E_NVM_MGMT_UPDATE_DISABLED) 3354 p->update_disabled = true; 3355 break; 3356 default: 3357 break; 3358 } 3359 } 3360 3361 if (p->fcoe) 3362 i40e_debug(hw, I40E_DEBUG_ALL, "device is FCoE capable\n"); 3363 3364 /* Software override ensuring FCoE is disabled if npar or mfp 3365 * mode because it is not supported in these modes. 3366 */ 3367 if (p->npar_enable || p->flex10_enable) 3368 p->fcoe = false; 3369 3370 /* count the enabled ports (aka the "not disabled" ports) */ 3371 hw->num_ports = 0; 3372 for (i = 0; i < 4; i++) { 3373 u32 port_cfg_reg = I40E_PRTGEN_CNF + (4 * i); 3374 u64 port_cfg = 0; 3375 3376 /* use AQ read to get the physical register offset instead 3377 * of the port relative offset 3378 */ 3379 i40e_aq_debug_read_register(hw, port_cfg_reg, &port_cfg, NULL); 3380 if (!(port_cfg & I40E_PRTGEN_CNF_PORT_DIS_MASK)) 3381 hw->num_ports++; 3382 } 3383 3384 /* OCP cards case: if a mezz is removed the Ethernet port is at 3385 * disabled state in PRTGEN_CNF register. Additional NVM read is 3386 * needed in order to check if we are dealing with OCP card. 3387 * Those cards have 4 PFs at minimum, so using PRTGEN_CNF for counting 3388 * physical ports results in wrong partition id calculation and thus 3389 * not supporting WoL. 
3390 */ 3391 if (hw->mac.type == I40E_MAC_X722) { 3392 if (!i40e_acquire_nvm(hw, I40E_RESOURCE_READ)) { 3393 status = i40e_aq_read_nvm(hw, I40E_SR_EMP_MODULE_PTR, 3394 2 * I40E_SR_OCP_CFG_WORD0, 3395 sizeof(ocp_cfg_word0), 3396 &ocp_cfg_word0, true, NULL); 3397 if (!status && 3398 (ocp_cfg_word0 & I40E_SR_OCP_ENABLED)) 3399 hw->num_ports = 4; 3400 i40e_release_nvm(hw); 3401 } 3402 } 3403 3404 valid_functions = p->valid_functions; 3405 num_functions = 0; 3406 while (valid_functions) { 3407 if (valid_functions & 1) 3408 num_functions++; 3409 valid_functions >>= 1; 3410 } 3411 3412 /* partition id is 1-based, and functions are evenly spread 3413 * across the ports as partitions 3414 */ 3415 if (hw->num_ports != 0) { 3416 hw->partition_id = (hw->pf_id / hw->num_ports) + 1; 3417 hw->num_partitions = num_functions / hw->num_ports; 3418 } 3419 3420 /* additional HW specific goodies that might 3421 * someday be HW version specific 3422 */ 3423 p->rx_buf_chain_len = I40E_MAX_CHAINED_RX_BUFFERS; 3424 } 3425 3426 /** 3427 * i40e_aq_discover_capabilities 3428 * @hw: pointer to the hw struct 3429 * @buff: a virtual buffer to hold the capabilities 3430 * @buff_size: Size of the virtual buffer 3431 * @data_size: Size of the returned data, or buff size needed if AQ err==ENOMEM 3432 * @list_type_opc: capabilities type to discover - pass in the command opcode 3433 * @cmd_details: pointer to command details structure or NULL 3434 * 3435 * Get the device capabilities descriptions from the firmware 3436 **/ 3437 i40e_status i40e_aq_discover_capabilities(struct i40e_hw *hw, 3438 void *buff, u16 buff_size, u16 *data_size, 3439 enum i40e_admin_queue_opc list_type_opc, 3440 struct i40e_asq_cmd_details *cmd_details) 3441 { 3442 struct i40e_aqc_list_capabilites *cmd; 3443 struct i40e_aq_desc desc; 3444 i40e_status status = 0; 3445 3446 cmd = (struct i40e_aqc_list_capabilites *)&desc.params.raw; 3447 3448 if (list_type_opc != i40e_aqc_opc_list_func_capabilities && 3449 list_type_opc != i40e_aqc_opc_list_dev_capabilities) { 3450 status = I40E_ERR_PARAM; 3451 goto exit; 3452 } 3453 3454 i40e_fill_default_direct_cmd_desc(&desc, list_type_opc); 3455 3456 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 3457 if (buff_size > I40E_AQ_LARGE_BUF) 3458 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 3459 3460 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); 3461 *data_size = le16_to_cpu(desc.datalen); 3462 3463 if (status) 3464 goto exit; 3465 3466 i40e_parse_discover_capabilities(hw, buff, le32_to_cpu(cmd->count), 3467 list_type_opc); 3468 3469 exit: 3470 return status; 3471 } 3472 3473 /** 3474 * i40e_aq_update_nvm 3475 * @hw: pointer to the hw struct 3476 * @module_pointer: module pointer location in words from the NVM beginning 3477 * @offset: byte offset from the module beginning 3478 * @length: length of the section to be written (in bytes from the offset) 3479 * @data: command buffer (size [bytes] = length) 3480 * @last_command: tells if this is the last command in a series 3481 * @preservation_flags: Preservation mode flags 3482 * @cmd_details: pointer to command details structure or NULL 3483 * 3484 * Update the NVM using the admin queue commands 3485 **/ 3486 i40e_status i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer, 3487 u32 offset, u16 length, void *data, 3488 bool last_command, u8 preservation_flags, 3489 struct i40e_asq_cmd_details *cmd_details) 3490 { 3491 struct i40e_aq_desc desc; 3492 struct i40e_aqc_nvm_update *cmd = 3493 (struct i40e_aqc_nvm_update *)&desc.params.raw; 
3494 i40e_status status; 3495 3496 /* In offset the highest byte must be zeroed. */ 3497 if (offset & 0xFF000000) { 3498 status = I40E_ERR_PARAM; 3499 goto i40e_aq_update_nvm_exit; 3500 } 3501 3502 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_update); 3503 3504 /* If this is the last command in a series, set the proper flag. */ 3505 if (last_command) 3506 cmd->command_flags |= I40E_AQ_NVM_LAST_CMD; 3507 if (hw->mac.type == I40E_MAC_X722) { 3508 if (preservation_flags == I40E_NVM_PRESERVATION_FLAGS_SELECTED) 3509 cmd->command_flags |= 3510 (I40E_AQ_NVM_PRESERVATION_FLAGS_SELECTED << 3511 I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT); 3512 else if (preservation_flags == I40E_NVM_PRESERVATION_FLAGS_ALL) 3513 cmd->command_flags |= 3514 (I40E_AQ_NVM_PRESERVATION_FLAGS_ALL << 3515 I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT); 3516 } 3517 cmd->module_pointer = module_pointer; 3518 cmd->offset = cpu_to_le32(offset); 3519 cmd->length = cpu_to_le16(length); 3520 3521 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); 3522 if (length > I40E_AQ_LARGE_BUF) 3523 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 3524 3525 status = i40e_asq_send_command(hw, &desc, data, length, cmd_details); 3526 3527 i40e_aq_update_nvm_exit: 3528 return status; 3529 } 3530 3531 /** 3532 * i40e_aq_rearrange_nvm 3533 * @hw: pointer to the hw struct 3534 * @rearrange_nvm: defines direction of rearrangement 3535 * @cmd_details: pointer to command details structure or NULL 3536 * 3537 * Rearrange NVM structure, available only for transition FW 3538 **/ 3539 i40e_status i40e_aq_rearrange_nvm(struct i40e_hw *hw, 3540 u8 rearrange_nvm, 3541 struct i40e_asq_cmd_details *cmd_details) 3542 { 3543 struct i40e_aqc_nvm_update *cmd; 3544 i40e_status status; 3545 struct i40e_aq_desc desc; 3546 3547 cmd = (struct i40e_aqc_nvm_update *)&desc.params.raw; 3548 3549 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_update); 3550 3551 rearrange_nvm &= (I40E_AQ_NVM_REARRANGE_TO_FLAT | 3552 I40E_AQ_NVM_REARRANGE_TO_STRUCT); 3553 3554 if (!rearrange_nvm) { 3555 status = I40E_ERR_PARAM; 3556 goto i40e_aq_rearrange_nvm_exit; 3557 } 3558 3559 cmd->command_flags |= rearrange_nvm; 3560 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3561 3562 i40e_aq_rearrange_nvm_exit: 3563 return status; 3564 } 3565 3566 /** 3567 * i40e_aq_get_lldp_mib 3568 * @hw: pointer to the hw struct 3569 * @bridge_type: type of bridge requested 3570 * @mib_type: Local, Remote or both Local and Remote MIBs 3571 * @buff: pointer to a user supplied buffer to store the MIB block 3572 * @buff_size: size of the buffer (in bytes) 3573 * @local_len : length of the returned Local LLDP MIB 3574 * @remote_len: length of the returned Remote LLDP MIB 3575 * @cmd_details: pointer to command details structure or NULL 3576 * 3577 * Requests the complete LLDP MIB (entire packet). 
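 *
 * Usage sketch (illustrative only, not taken from the driver; the buffer
 * size is arbitrary and the two zeroes are example values, selecting the
 * nearest bridge and the local MIB respectively):
 *
 *	u8 mib[0x200];
 *	u16 local_len = 0, remote_len = 0;
 *	i40e_status err;
 *
 *	err = i40e_aq_get_lldp_mib(hw, 0, 0, mib, sizeof(mib),
 *				   &local_len, &remote_len, NULL);
 *	if (!err)
 *		hw_dbg(hw, "LLDP MIB: local %u bytes, remote %u bytes\n",
 *		       local_len, remote_len);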
3578 **/ 3579 i40e_status i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type, 3580 u8 mib_type, void *buff, u16 buff_size, 3581 u16 *local_len, u16 *remote_len, 3582 struct i40e_asq_cmd_details *cmd_details) 3583 { 3584 struct i40e_aq_desc desc; 3585 struct i40e_aqc_lldp_get_mib *cmd = 3586 (struct i40e_aqc_lldp_get_mib *)&desc.params.raw; 3587 struct i40e_aqc_lldp_get_mib *resp = 3588 (struct i40e_aqc_lldp_get_mib *)&desc.params.raw; 3589 i40e_status status; 3590 3591 if (buff_size == 0 || !buff) 3592 return I40E_ERR_PARAM; 3593 3594 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_get_mib); 3595 /* Indirect Command */ 3596 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 3597 3598 cmd->type = mib_type & I40E_AQ_LLDP_MIB_TYPE_MASK; 3599 cmd->type |= ((bridge_type << I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) & 3600 I40E_AQ_LLDP_BRIDGE_TYPE_MASK); 3601 3602 desc.datalen = cpu_to_le16(buff_size); 3603 3604 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 3605 if (buff_size > I40E_AQ_LARGE_BUF) 3606 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 3607 3608 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); 3609 if (!status) { 3610 if (local_len != NULL) 3611 *local_len = le16_to_cpu(resp->local_len); 3612 if (remote_len != NULL) 3613 *remote_len = le16_to_cpu(resp->remote_len); 3614 } 3615 3616 return status; 3617 } 3618 3619 /** 3620 * i40e_aq_cfg_lldp_mib_change_event 3621 * @hw: pointer to the hw struct 3622 * @enable_update: Enable or Disable event posting 3623 * @cmd_details: pointer to command details structure or NULL 3624 * 3625 * Enable or Disable posting of an event on ARQ when LLDP MIB 3626 * associated with the interface changes 3627 **/ 3628 i40e_status i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw, 3629 bool enable_update, 3630 struct i40e_asq_cmd_details *cmd_details) 3631 { 3632 struct i40e_aq_desc desc; 3633 struct i40e_aqc_lldp_update_mib *cmd = 3634 (struct i40e_aqc_lldp_update_mib *)&desc.params.raw; 3635 i40e_status status; 3636 3637 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_update_mib); 3638 3639 if (!enable_update) 3640 cmd->command |= I40E_AQ_LLDP_MIB_UPDATE_DISABLE; 3641 3642 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3643 3644 return status; 3645 } 3646 3647 /** 3648 * i40e_aq_restore_lldp 3649 * @hw: pointer to the hw struct 3650 * @setting: pointer to factory setting variable or NULL 3651 * @restore: True if factory settings should be restored 3652 * @cmd_details: pointer to command details structure or NULL 3653 * 3654 * Restore LLDP Agent factory settings if @restore set to True. In other case 3655 * only returns factory setting in AQ response. 
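 *
 * Usage sketch (illustrative only; the local names are assumptions): query
 * the factory default without restoring it by passing restore == false:
 *
 *	u8 factory_setting = 0;
 *	i40e_status err;
 *
 *	err = i40e_aq_restore_lldp(hw, &factory_setting, false, NULL);
 *	if (!err)
 *		hw_dbg(hw, "LLDP factory default: %u\n", factory_setting);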
3656 **/ 3657 enum i40e_status_code 3658 i40e_aq_restore_lldp(struct i40e_hw *hw, u8 *setting, bool restore, 3659 struct i40e_asq_cmd_details *cmd_details) 3660 { 3661 struct i40e_aq_desc desc; 3662 struct i40e_aqc_lldp_restore *cmd = 3663 (struct i40e_aqc_lldp_restore *)&desc.params.raw; 3664 i40e_status status; 3665 3666 if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT)) { 3667 i40e_debug(hw, I40E_DEBUG_ALL, 3668 "Restore LLDP not supported by current FW version.\n"); 3669 return I40E_ERR_DEVICE_NOT_SUPPORTED; 3670 } 3671 3672 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_restore); 3673 3674 if (restore) 3675 cmd->command |= I40E_AQ_LLDP_AGENT_RESTORE; 3676 3677 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3678 3679 if (setting) 3680 *setting = cmd->command & 1; 3681 3682 return status; 3683 } 3684 3685 /** 3686 * i40e_aq_stop_lldp 3687 * @hw: pointer to the hw struct 3688 * @shutdown_agent: True if LLDP Agent needs to be Shutdown 3689 * @persist: True if stop of LLDP should be persistent across power cycles 3690 * @cmd_details: pointer to command details structure or NULL 3691 * 3692 * Stop or Shutdown the embedded LLDP Agent 3693 **/ 3694 i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent, 3695 bool persist, 3696 struct i40e_asq_cmd_details *cmd_details) 3697 { 3698 struct i40e_aq_desc desc; 3699 struct i40e_aqc_lldp_stop *cmd = 3700 (struct i40e_aqc_lldp_stop *)&desc.params.raw; 3701 i40e_status status; 3702 3703 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_stop); 3704 3705 if (shutdown_agent) 3706 cmd->command |= I40E_AQ_LLDP_AGENT_SHUTDOWN; 3707 3708 if (persist) { 3709 if (hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT) 3710 cmd->command |= I40E_AQ_LLDP_AGENT_STOP_PERSIST; 3711 else 3712 i40e_debug(hw, I40E_DEBUG_ALL, 3713 "Persistent Stop LLDP not supported by current FW version.\n"); 3714 } 3715 3716 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3717 3718 return status; 3719 } 3720 3721 /** 3722 * i40e_aq_start_lldp 3723 * @hw: pointer to the hw struct 3725 * @persist: True if start of LLDP should be persistent across power cycles 3727 * @cmd_details: pointer to command details structure or NULL 3728 * 3729 * Start the embedded LLDP Agent on all ports.
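 *
 * Usage sketch (illustrative only; the restart sequence below is an
 * assumption about one possible caller, not code from the driver):
 *
 *	i40e_status err;
 *
 *	err = i40e_aq_stop_lldp(hw, true, false, NULL);
 *	if (!err)
 *		err = i40e_aq_start_lldp(hw, true, NULL);
 *
 * Persistence of the start/stop request is only honored when the firmware
 * advertises I40E_HW_FLAG_FW_LLDP_PERSISTENT, as checked above and below.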
3730 **/ 3731 i40e_status i40e_aq_start_lldp(struct i40e_hw *hw, bool persist, 3732 struct i40e_asq_cmd_details *cmd_details) 3733 { 3734 struct i40e_aq_desc desc; 3735 struct i40e_aqc_lldp_start *cmd = 3736 (struct i40e_aqc_lldp_start *)&desc.params.raw; 3737 i40e_status status; 3738 3739 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_start); 3740 3741 cmd->command = I40E_AQ_LLDP_AGENT_START; 3742 3743 if (persist) { 3744 if (hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT) 3745 cmd->command |= I40E_AQ_LLDP_AGENT_START_PERSIST; 3746 else 3747 i40e_debug(hw, I40E_DEBUG_ALL, 3748 "Persistent Start LLDP not supported by current FW version.\n"); 3749 } 3750 3751 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3752 3753 return status; 3754 } 3755 3756 /** 3757 * i40e_aq_set_dcb_parameters 3758 * @hw: pointer to the hw struct 3759 * @cmd_details: pointer to command details structure or NULL 3760 * @dcb_enable: True if DCB configuration needs to be applied 3761 * 3762 **/ 3763 enum i40e_status_code 3764 i40e_aq_set_dcb_parameters(struct i40e_hw *hw, bool dcb_enable, 3765 struct i40e_asq_cmd_details *cmd_details) 3766 { 3767 struct i40e_aq_desc desc; 3768 struct i40e_aqc_set_dcb_parameters *cmd = 3769 (struct i40e_aqc_set_dcb_parameters *)&desc.params.raw; 3770 i40e_status status; 3771 3772 if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE)) 3773 return I40E_ERR_DEVICE_NOT_SUPPORTED; 3774 3775 i40e_fill_default_direct_cmd_desc(&desc, 3776 i40e_aqc_opc_set_dcb_parameters); 3777 3778 if (dcb_enable) { 3779 cmd->valid_flags = I40E_DCB_VALID; 3780 cmd->command = I40E_AQ_DCB_SET_AGENT; 3781 } 3782 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3783 3784 return status; 3785 } 3786 3787 /** 3788 * i40e_aq_get_cee_dcb_config 3789 * @hw: pointer to the hw struct 3790 * @buff: response buffer that stores CEE operational configuration 3791 * @buff_size: size of the buffer passed 3792 * @cmd_details: pointer to command details structure or NULL 3793 * 3794 * Get CEE DCBX mode operational configuration from firmware 3795 **/ 3796 i40e_status i40e_aq_get_cee_dcb_config(struct i40e_hw *hw, 3797 void *buff, u16 buff_size, 3798 struct i40e_asq_cmd_details *cmd_details) 3799 { 3800 struct i40e_aq_desc desc; 3801 i40e_status status; 3802 3803 if (buff_size == 0 || !buff) 3804 return I40E_ERR_PARAM; 3805 3806 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_cee_dcb_cfg); 3807 3808 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 3809 status = i40e_asq_send_command(hw, &desc, (void *)buff, buff_size, 3810 cmd_details); 3811 3812 return status; 3813 } 3814 3815 /** 3816 * i40e_aq_add_udp_tunnel 3817 * @hw: pointer to the hw struct 3818 * @udp_port: the UDP port to add in Host byte order 3819 * @protocol_index: protocol index type 3820 * @filter_index: pointer to filter index 3821 * @cmd_details: pointer to command details structure or NULL 3822 * 3823 * Note: Firmware expects the udp_port value to be in Little Endian format, 3824 * and this function will call cpu_to_le16 to convert from Host byte order to 3825 * Little Endian order. 
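 *
 * Usage sketch (illustrative only; 4789 is simply the IANA VXLAN port used
 * as an example value in host byte order, and protocol_index 0 is likewise
 * illustrative):
 *
 *	u8 filter_index = 0;
 *	i40e_status err;
 *
 *	err = i40e_aq_add_udp_tunnel(hw, 4789, 0, &filter_index, NULL);
 *	if (!err)
 *		err = i40e_aq_del_udp_tunnel(hw, filter_index, NULL);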
3826 **/ 3827 i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw, 3828 u16 udp_port, u8 protocol_index, 3829 u8 *filter_index, 3830 struct i40e_asq_cmd_details *cmd_details) 3831 { 3832 struct i40e_aq_desc desc; 3833 struct i40e_aqc_add_udp_tunnel *cmd = 3834 (struct i40e_aqc_add_udp_tunnel *)&desc.params.raw; 3835 struct i40e_aqc_del_udp_tunnel_completion *resp = 3836 (struct i40e_aqc_del_udp_tunnel_completion *)&desc.params.raw; 3837 i40e_status status; 3838 3839 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_udp_tunnel); 3840 3841 cmd->udp_port = cpu_to_le16(udp_port); 3842 cmd->protocol_type = protocol_index; 3843 3844 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3845 3846 if (!status && filter_index) 3847 *filter_index = resp->index; 3848 3849 return status; 3850 } 3851 3852 /** 3853 * i40e_aq_del_udp_tunnel 3854 * @hw: pointer to the hw struct 3855 * @index: filter index 3856 * @cmd_details: pointer to command details structure or NULL 3857 **/ 3858 i40e_status i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index, 3859 struct i40e_asq_cmd_details *cmd_details) 3860 { 3861 struct i40e_aq_desc desc; 3862 struct i40e_aqc_remove_udp_tunnel *cmd = 3863 (struct i40e_aqc_remove_udp_tunnel *)&desc.params.raw; 3864 i40e_status status; 3865 3866 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_del_udp_tunnel); 3867 3868 cmd->index = index; 3869 3870 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3871 3872 return status; 3873 } 3874 3875 /** 3876 * i40e_aq_delete_element - Delete switch element 3877 * @hw: pointer to the hw struct 3878 * @seid: the SEID to delete from the switch 3879 * @cmd_details: pointer to command details structure or NULL 3880 * 3881 * This deletes a switch element from the switch. 3882 **/ 3883 i40e_status i40e_aq_delete_element(struct i40e_hw *hw, u16 seid, 3884 struct i40e_asq_cmd_details *cmd_details) 3885 { 3886 struct i40e_aq_desc desc; 3887 struct i40e_aqc_switch_seid *cmd = 3888 (struct i40e_aqc_switch_seid *)&desc.params.raw; 3889 i40e_status status; 3890 3891 if (seid == 0) 3892 return I40E_ERR_PARAM; 3893 3894 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_delete_element); 3895 3896 cmd->seid = cpu_to_le16(seid); 3897 3898 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3899 3900 return status; 3901 } 3902 3903 /** 3904 * i40e_aq_dcb_updated - DCB Updated Command 3905 * @hw: pointer to the hw struct 3906 * @cmd_details: pointer to command details structure or NULL 3907 * 3908 * EMP will return when the shared RPB settings have been 3909 * recomputed and modified. The retval field in the descriptor 3910 * will be set to 0 when RPB is modified. 
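 *
 * Usage sketch (illustrative only): the command carries no payload, so a
 * caller typically just checks the return status:
 *
 *	i40e_status err = i40e_aq_dcb_updated(hw, NULL);
 *
 *	if (err)
 *		hw_dbg(hw, "DCB updated command failed: %s\n",
 *		       i40e_stat_str(hw, err));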
3911 **/ 3912 i40e_status i40e_aq_dcb_updated(struct i40e_hw *hw, 3913 struct i40e_asq_cmd_details *cmd_details) 3914 { 3915 struct i40e_aq_desc desc; 3916 i40e_status status; 3917 3918 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_dcb_updated); 3919 3920 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3921 3922 return status; 3923 } 3924 3925 /** 3926 * i40e_aq_tx_sched_cmd - generic Tx scheduler AQ command handler 3927 * @hw: pointer to the hw struct 3928 * @seid: seid for the physical port/switching component/vsi 3929 * @buff: Indirect buffer to hold data parameters and response 3930 * @buff_size: Indirect buffer size 3931 * @opcode: Tx scheduler AQ command opcode 3932 * @cmd_details: pointer to command details structure or NULL 3933 * 3934 * Generic command handler for Tx scheduler AQ commands 3935 **/ 3936 static i40e_status i40e_aq_tx_sched_cmd(struct i40e_hw *hw, u16 seid, 3937 void *buff, u16 buff_size, 3938 enum i40e_admin_queue_opc opcode, 3939 struct i40e_asq_cmd_details *cmd_details) 3940 { 3941 struct i40e_aq_desc desc; 3942 struct i40e_aqc_tx_sched_ind *cmd = 3943 (struct i40e_aqc_tx_sched_ind *)&desc.params.raw; 3944 i40e_status status; 3945 bool cmd_param_flag = false; 3946 3947 switch (opcode) { 3948 case i40e_aqc_opc_configure_vsi_ets_sla_bw_limit: 3949 case i40e_aqc_opc_configure_vsi_tc_bw: 3950 case i40e_aqc_opc_enable_switching_comp_ets: 3951 case i40e_aqc_opc_modify_switching_comp_ets: 3952 case i40e_aqc_opc_disable_switching_comp_ets: 3953 case i40e_aqc_opc_configure_switching_comp_ets_bw_limit: 3954 case i40e_aqc_opc_configure_switching_comp_bw_config: 3955 cmd_param_flag = true; 3956 break; 3957 case i40e_aqc_opc_query_vsi_bw_config: 3958 case i40e_aqc_opc_query_vsi_ets_sla_config: 3959 case i40e_aqc_opc_query_switching_comp_ets_config: 3960 case i40e_aqc_opc_query_port_ets_config: 3961 case i40e_aqc_opc_query_switching_comp_bw_config: 3962 cmd_param_flag = false; 3963 break; 3964 default: 3965 return I40E_ERR_PARAM; 3966 } 3967 3968 i40e_fill_default_direct_cmd_desc(&desc, opcode); 3969 3970 /* Indirect command */ 3971 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 3972 if (cmd_param_flag) 3973 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD); 3974 if (buff_size > I40E_AQ_LARGE_BUF) 3975 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 3976 3977 desc.datalen = cpu_to_le16(buff_size); 3978 3979 cmd->vsi_seid = cpu_to_le16(seid); 3980 3981 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); 3982 3983 return status; 3984 } 3985 3986 /** 3987 * i40e_aq_config_vsi_bw_limit - Configure VSI BW Limit 3988 * @hw: pointer to the hw struct 3989 * @seid: VSI seid 3990 * @credit: BW limit credits (0 = disabled) 3991 * @max_credit: Max BW limit credits 3992 * @cmd_details: pointer to command details structure or NULL 3993 **/ 3994 i40e_status i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw, 3995 u16 seid, u16 credit, u8 max_credit, 3996 struct i40e_asq_cmd_details *cmd_details) 3997 { 3998 struct i40e_aq_desc desc; 3999 struct i40e_aqc_configure_vsi_bw_limit *cmd = 4000 (struct i40e_aqc_configure_vsi_bw_limit *)&desc.params.raw; 4001 i40e_status status; 4002 4003 i40e_fill_default_direct_cmd_desc(&desc, 4004 i40e_aqc_opc_configure_vsi_bw_limit); 4005 4006 cmd->vsi_seid = cpu_to_le16(seid); 4007 cmd->credit = cpu_to_le16(credit); 4008 cmd->max_credit = max_credit; 4009 4010 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 4011 4012 return status; 4013 } 4014 4015 /** 4016 * i40e_aq_config_vsi_tc_bw - Config VSI BW 
Allocation per TC 4017 * @hw: pointer to the hw struct 4018 * @seid: VSI seid 4019 * @bw_data: Buffer holding enabled TCs, relative TC BW limit/credits 4020 * @cmd_details: pointer to command details structure or NULL 4021 **/ 4022 i40e_status i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw, 4023 u16 seid, 4024 struct i40e_aqc_configure_vsi_tc_bw_data *bw_data, 4025 struct i40e_asq_cmd_details *cmd_details) 4026 { 4027 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), 4028 i40e_aqc_opc_configure_vsi_tc_bw, 4029 cmd_details); 4030 } 4031 4032 /** 4033 * i40e_aq_config_switch_comp_ets - Enable/Disable/Modify ETS on the port 4034 * @hw: pointer to the hw struct 4035 * @seid: seid of the switching component connected to Physical Port 4036 * @ets_data: Buffer holding ETS parameters 4037 * @opcode: Tx scheduler AQ command opcode 4038 * @cmd_details: pointer to command details structure or NULL 4039 **/ 4040 i40e_status i40e_aq_config_switch_comp_ets(struct i40e_hw *hw, 4041 u16 seid, 4042 struct i40e_aqc_configure_switching_comp_ets_data *ets_data, 4043 enum i40e_admin_queue_opc opcode, 4044 struct i40e_asq_cmd_details *cmd_details) 4045 { 4046 return i40e_aq_tx_sched_cmd(hw, seid, (void *)ets_data, 4047 sizeof(*ets_data), opcode, cmd_details); 4048 } 4049 4050 /** 4051 * i40e_aq_config_switch_comp_bw_config - Config Switch comp BW Alloc per TC 4052 * @hw: pointer to the hw struct 4053 * @seid: seid of the switching component 4054 * @bw_data: Buffer holding enabled TCs, relative/absolute TC BW limit/credits 4055 * @cmd_details: pointer to command details structure or NULL 4056 **/ 4057 i40e_status i40e_aq_config_switch_comp_bw_config(struct i40e_hw *hw, 4058 u16 seid, 4059 struct i40e_aqc_configure_switching_comp_bw_config_data *bw_data, 4060 struct i40e_asq_cmd_details *cmd_details) 4061 { 4062 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), 4063 i40e_aqc_opc_configure_switching_comp_bw_config, 4064 cmd_details); 4065 } 4066 4067 /** 4068 * i40e_aq_query_vsi_bw_config - Query VSI BW configuration 4069 * @hw: pointer to the hw struct 4070 * @seid: seid of the VSI 4071 * @bw_data: Buffer to hold VSI BW configuration 4072 * @cmd_details: pointer to command details structure or NULL 4073 **/ 4074 i40e_status i40e_aq_query_vsi_bw_config(struct i40e_hw *hw, 4075 u16 seid, 4076 struct i40e_aqc_query_vsi_bw_config_resp *bw_data, 4077 struct i40e_asq_cmd_details *cmd_details) 4078 { 4079 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), 4080 i40e_aqc_opc_query_vsi_bw_config, 4081 cmd_details); 4082 } 4083 4084 /** 4085 * i40e_aq_query_vsi_ets_sla_config - Query VSI BW configuration per TC 4086 * @hw: pointer to the hw struct 4087 * @seid: seid of the VSI 4088 * @bw_data: Buffer to hold VSI BW configuration per TC 4089 * @cmd_details: pointer to command details structure or NULL 4090 **/ 4091 i40e_status i40e_aq_query_vsi_ets_sla_config(struct i40e_hw *hw, 4092 u16 seid, 4093 struct i40e_aqc_query_vsi_ets_sla_config_resp *bw_data, 4094 struct i40e_asq_cmd_details *cmd_details) 4095 { 4096 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), 4097 i40e_aqc_opc_query_vsi_ets_sla_config, 4098 cmd_details); 4099 } 4100 4101 /** 4102 * i40e_aq_query_switch_comp_ets_config - Query Switch comp BW config per TC 4103 * @hw: pointer to the hw struct 4104 * @seid: seid of the switching component 4105 * @bw_data: Buffer to hold switching component's per TC BW config 4106 * @cmd_details: pointer to command details structure or NULL 
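 *
 * Usage sketch (illustrative only; seid is assumed to be supplied by the
 * caller from its switch configuration, and the response is only
 * status-checked here):
 *
 *	struct i40e_aqc_query_switching_comp_ets_config_resp ets_resp = { 0 };
 *	i40e_status err;
 *
 *	err = i40e_aq_query_switch_comp_ets_config(hw, seid, &ets_resp, NULL);
 *	if (err)
 *		hw_dbg(hw, "query switch ETS config failed: %s, AQ err %s\n",
 *		       i40e_stat_str(hw, err),
 *		       i40e_aq_str(hw, hw->aq.asq_last_status));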
4107 **/ 4108 i40e_status i40e_aq_query_switch_comp_ets_config(struct i40e_hw *hw, 4109 u16 seid, 4110 struct i40e_aqc_query_switching_comp_ets_config_resp *bw_data, 4111 struct i40e_asq_cmd_details *cmd_details) 4112 { 4113 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), 4114 i40e_aqc_opc_query_switching_comp_ets_config, 4115 cmd_details); 4116 } 4117 4118 /** 4119 * i40e_aq_query_port_ets_config - Query Physical Port ETS configuration 4120 * @hw: pointer to the hw struct 4121 * @seid: seid of the VSI or switching component connected to Physical Port 4122 * @bw_data: Buffer to hold current ETS configuration for the Physical Port 4123 * @cmd_details: pointer to command details structure or NULL 4124 **/ 4125 i40e_status i40e_aq_query_port_ets_config(struct i40e_hw *hw, 4126 u16 seid, 4127 struct i40e_aqc_query_port_ets_config_resp *bw_data, 4128 struct i40e_asq_cmd_details *cmd_details) 4129 { 4130 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), 4131 i40e_aqc_opc_query_port_ets_config, 4132 cmd_details); 4133 } 4134 4135 /** 4136 * i40e_aq_query_switch_comp_bw_config - Query Switch comp BW configuration 4137 * @hw: pointer to the hw struct 4138 * @seid: seid of the switching component 4139 * @bw_data: Buffer to hold switching component's BW configuration 4140 * @cmd_details: pointer to command details structure or NULL 4141 **/ 4142 i40e_status i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw, 4143 u16 seid, 4144 struct i40e_aqc_query_switching_comp_bw_config_resp *bw_data, 4145 struct i40e_asq_cmd_details *cmd_details) 4146 { 4147 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), 4148 i40e_aqc_opc_query_switching_comp_bw_config, 4149 cmd_details); 4150 } 4151 4152 /** 4153 * i40e_validate_filter_settings 4154 * @hw: pointer to the hardware structure 4155 * @settings: Filter control settings 4156 * 4157 * Check and validate the filter control settings passed. 4158 * The function checks for the valid filter/context sizes being 4159 * passed for FCoE and PE. 4160 * 4161 * Returns 0 if the values passed are valid and within 4162 * range else returns an error. 
4163 **/ 4164 static i40e_status i40e_validate_filter_settings(struct i40e_hw *hw, 4165 struct i40e_filter_control_settings *settings) 4166 { 4167 u32 fcoe_cntx_size, fcoe_filt_size; 4168 u32 pe_cntx_size, pe_filt_size; 4169 u32 fcoe_fmax; 4170 u32 val; 4171 4172 /* Validate FCoE settings passed */ 4173 switch (settings->fcoe_filt_num) { 4174 case I40E_HASH_FILTER_SIZE_1K: 4175 case I40E_HASH_FILTER_SIZE_2K: 4176 case I40E_HASH_FILTER_SIZE_4K: 4177 case I40E_HASH_FILTER_SIZE_8K: 4178 case I40E_HASH_FILTER_SIZE_16K: 4179 case I40E_HASH_FILTER_SIZE_32K: 4180 fcoe_filt_size = I40E_HASH_FILTER_BASE_SIZE; 4181 fcoe_filt_size <<= (u32)settings->fcoe_filt_num; 4182 break; 4183 default: 4184 return I40E_ERR_PARAM; 4185 } 4186 4187 switch (settings->fcoe_cntx_num) { 4188 case I40E_DMA_CNTX_SIZE_512: 4189 case I40E_DMA_CNTX_SIZE_1K: 4190 case I40E_DMA_CNTX_SIZE_2K: 4191 case I40E_DMA_CNTX_SIZE_4K: 4192 fcoe_cntx_size = I40E_DMA_CNTX_BASE_SIZE; 4193 fcoe_cntx_size <<= (u32)settings->fcoe_cntx_num; 4194 break; 4195 default: 4196 return I40E_ERR_PARAM; 4197 } 4198 4199 /* Validate PE settings passed */ 4200 switch (settings->pe_filt_num) { 4201 case I40E_HASH_FILTER_SIZE_1K: 4202 case I40E_HASH_FILTER_SIZE_2K: 4203 case I40E_HASH_FILTER_SIZE_4K: 4204 case I40E_HASH_FILTER_SIZE_8K: 4205 case I40E_HASH_FILTER_SIZE_16K: 4206 case I40E_HASH_FILTER_SIZE_32K: 4207 case I40E_HASH_FILTER_SIZE_64K: 4208 case I40E_HASH_FILTER_SIZE_128K: 4209 case I40E_HASH_FILTER_SIZE_256K: 4210 case I40E_HASH_FILTER_SIZE_512K: 4211 case I40E_HASH_FILTER_SIZE_1M: 4212 pe_filt_size = I40E_HASH_FILTER_BASE_SIZE; 4213 pe_filt_size <<= (u32)settings->pe_filt_num; 4214 break; 4215 default: 4216 return I40E_ERR_PARAM; 4217 } 4218 4219 switch (settings->pe_cntx_num) { 4220 case I40E_DMA_CNTX_SIZE_512: 4221 case I40E_DMA_CNTX_SIZE_1K: 4222 case I40E_DMA_CNTX_SIZE_2K: 4223 case I40E_DMA_CNTX_SIZE_4K: 4224 case I40E_DMA_CNTX_SIZE_8K: 4225 case I40E_DMA_CNTX_SIZE_16K: 4226 case I40E_DMA_CNTX_SIZE_32K: 4227 case I40E_DMA_CNTX_SIZE_64K: 4228 case I40E_DMA_CNTX_SIZE_128K: 4229 case I40E_DMA_CNTX_SIZE_256K: 4230 pe_cntx_size = I40E_DMA_CNTX_BASE_SIZE; 4231 pe_cntx_size <<= (u32)settings->pe_cntx_num; 4232 break; 4233 default: 4234 return I40E_ERR_PARAM; 4235 } 4236 4237 /* FCHSIZE + FCDSIZE should not be greater than PMFCOEFMAX */ 4238 val = rd32(hw, I40E_GLHMC_FCOEFMAX); 4239 fcoe_fmax = (val & I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_MASK) 4240 >> I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT; 4241 if (fcoe_filt_size + fcoe_cntx_size > fcoe_fmax) 4242 return I40E_ERR_INVALID_SIZE; 4243 4244 return 0; 4245 } 4246 4247 /** 4248 * i40e_set_filter_control 4249 * @hw: pointer to the hardware structure 4250 * @settings: Filter control settings 4251 * 4252 * Set the Queue Filters for PE/FCoE and enable filters required 4253 * for a single PF. It is expected that these settings are programmed 4254 * at the driver initialization time. 
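 *
 * Usage sketch (illustrative only; the sizes and enabled filter types are
 * arbitrary example values, not recommendations):
 *
 *	struct i40e_filter_control_settings settings = { 0 };
 *	i40e_status err;
 *
 *	settings.fcoe_filt_num = I40E_HASH_FILTER_SIZE_1K;
 *	settings.fcoe_cntx_num = I40E_DMA_CNTX_SIZE_512;
 *	settings.pe_filt_num = I40E_HASH_FILTER_SIZE_1K;
 *	settings.pe_cntx_num = I40E_DMA_CNTX_SIZE_512;
 *	settings.hash_lut_size = I40E_HASH_LUT_SIZE_512;
 *	settings.enable_fdir = true;
 *	settings.enable_ethtype = true;
 *	settings.enable_macvlan = true;
 *	err = i40e_set_filter_control(hw, &settings);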
4255 **/ 4256 i40e_status i40e_set_filter_control(struct i40e_hw *hw, 4257 struct i40e_filter_control_settings *settings) 4258 { 4259 i40e_status ret = 0; 4260 u32 hash_lut_size = 0; 4261 u32 val; 4262 4263 if (!settings) 4264 return I40E_ERR_PARAM; 4265 4266 /* Validate the input settings */ 4267 ret = i40e_validate_filter_settings(hw, settings); 4268 if (ret) 4269 return ret; 4270 4271 /* Read the PF Queue Filter control register */ 4272 val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0); 4273 4274 /* Program required PE hash buckets for the PF */ 4275 val &= ~I40E_PFQF_CTL_0_PEHSIZE_MASK; 4276 val |= ((u32)settings->pe_filt_num << I40E_PFQF_CTL_0_PEHSIZE_SHIFT) & 4277 I40E_PFQF_CTL_0_PEHSIZE_MASK; 4278 /* Program required PE contexts for the PF */ 4279 val &= ~I40E_PFQF_CTL_0_PEDSIZE_MASK; 4280 val |= ((u32)settings->pe_cntx_num << I40E_PFQF_CTL_0_PEDSIZE_SHIFT) & 4281 I40E_PFQF_CTL_0_PEDSIZE_MASK; 4282 4283 /* Program required FCoE hash buckets for the PF */ 4284 val &= ~I40E_PFQF_CTL_0_PFFCHSIZE_MASK; 4285 val |= ((u32)settings->fcoe_filt_num << 4286 I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT) & 4287 I40E_PFQF_CTL_0_PFFCHSIZE_MASK; 4288 /* Program required FCoE DDP contexts for the PF */ 4289 val &= ~I40E_PFQF_CTL_0_PFFCDSIZE_MASK; 4290 val |= ((u32)settings->fcoe_cntx_num << 4291 I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT) & 4292 I40E_PFQF_CTL_0_PFFCDSIZE_MASK; 4293 4294 /* Program Hash LUT size for the PF */ 4295 val &= ~I40E_PFQF_CTL_0_HASHLUTSIZE_MASK; 4296 if (settings->hash_lut_size == I40E_HASH_LUT_SIZE_512) 4297 hash_lut_size = 1; 4298 val |= (hash_lut_size << I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT) & 4299 I40E_PFQF_CTL_0_HASHLUTSIZE_MASK; 4300 4301 /* Enable FDIR, Ethertype and MACVLAN filters for PF and VFs */ 4302 if (settings->enable_fdir) 4303 val |= I40E_PFQF_CTL_0_FD_ENA_MASK; 4304 if (settings->enable_ethtype) 4305 val |= I40E_PFQF_CTL_0_ETYPE_ENA_MASK; 4306 if (settings->enable_macvlan) 4307 val |= I40E_PFQF_CTL_0_MACVLAN_ENA_MASK; 4308 4309 i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, val); 4310 4311 return 0; 4312 } 4313 4314 /** 4315 * i40e_aq_add_rem_control_packet_filter - Add or Remove Control Packet Filter 4316 * @hw: pointer to the hw struct 4317 * @mac_addr: MAC address to use in the filter 4318 * @ethtype: Ethertype to use in the filter 4319 * @flags: Flags that need to be applied to the filter 4320 * @vsi_seid: seid of the control VSI 4321 * @queue: VSI queue number to send the packet to 4322 * @is_add: Add a control packet filter if True, else remove it 4323 * @stats: Structure to hold information on control filter counts 4324 * @cmd_details: pointer to command details structure or NULL 4325 * 4326 * This command adds or removes a control packet filter for a control VSI. 4327 * On success it updates the perfect filter counts in 4328 * the stats member.
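 *
 * Usage sketch (illustrative only; it mirrors the Tx flow-control drop
 * filter helper just below, but with is_add == false to remove such a
 * filter; vsi_seid is assumed to be supplied by the caller):
 *
 *	struct i40e_control_filter_stats stats = { 0 };
 *	u16 flags = I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC |
 *		    I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP |
 *		    I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX;
 *	i40e_status err;
 *
 *	err = i40e_aq_add_rem_control_packet_filter(hw, NULL, 0x8808, flags,
 *						    vsi_seid, 0, false,
 *						    &stats, NULL);
 *	if (!err)
 *		hw_dbg(hw, "perfect filters used: %u, free: %u\n",
 *		       stats.mac_etype_used, stats.mac_etype_free);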
4329 **/ 4330 i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw, 4331 u8 *mac_addr, u16 ethtype, u16 flags, 4332 u16 vsi_seid, u16 queue, bool is_add, 4333 struct i40e_control_filter_stats *stats, 4334 struct i40e_asq_cmd_details *cmd_details) 4335 { 4336 struct i40e_aq_desc desc; 4337 struct i40e_aqc_add_remove_control_packet_filter *cmd = 4338 (struct i40e_aqc_add_remove_control_packet_filter *) 4339 &desc.params.raw; 4340 struct i40e_aqc_add_remove_control_packet_filter_completion *resp = 4341 (struct i40e_aqc_add_remove_control_packet_filter_completion *) 4342 &desc.params.raw; 4343 i40e_status status; 4344 4345 if (vsi_seid == 0) 4346 return I40E_ERR_PARAM; 4347 4348 if (is_add) { 4349 i40e_fill_default_direct_cmd_desc(&desc, 4350 i40e_aqc_opc_add_control_packet_filter); 4351 cmd->queue = cpu_to_le16(queue); 4352 } else { 4353 i40e_fill_default_direct_cmd_desc(&desc, 4354 i40e_aqc_opc_remove_control_packet_filter); 4355 } 4356 4357 if (mac_addr) 4358 ether_addr_copy(cmd->mac, mac_addr); 4359 4360 cmd->etype = cpu_to_le16(ethtype); 4361 cmd->flags = cpu_to_le16(flags); 4362 cmd->seid = cpu_to_le16(vsi_seid); 4363 4364 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 4365 4366 if (!status && stats) { 4367 stats->mac_etype_used = le16_to_cpu(resp->mac_etype_used); 4368 stats->etype_used = le16_to_cpu(resp->etype_used); 4369 stats->mac_etype_free = le16_to_cpu(resp->mac_etype_free); 4370 stats->etype_free = le16_to_cpu(resp->etype_free); 4371 } 4372 4373 return status; 4374 } 4375 4376 /** 4377 * i40e_add_filter_to_drop_tx_flow_control_frames- filter to drop flow control 4378 * @hw: pointer to the hw struct 4379 * @seid: VSI seid to add ethertype filter from 4380 **/ 4381 void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw, 4382 u16 seid) 4383 { 4384 #define I40E_FLOW_CONTROL_ETHTYPE 0x8808 4385 u16 flag = I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC | 4386 I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP | 4387 I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX; 4388 u16 ethtype = I40E_FLOW_CONTROL_ETHTYPE; 4389 i40e_status status; 4390 4391 status = i40e_aq_add_rem_control_packet_filter(hw, NULL, ethtype, flag, 4392 seid, 0, true, NULL, 4393 NULL); 4394 if (status) 4395 hw_dbg(hw, "Ethtype Filter Add failed: Error pruning Tx flow control frames\n"); 4396 } 4397 4398 /** 4399 * i40e_aq_alternate_read 4400 * @hw: pointer to the hardware structure 4401 * @reg_addr0: address of first dword to be read 4402 * @reg_val0: pointer for data read from 'reg_addr0' 4403 * @reg_addr1: address of second dword to be read 4404 * @reg_val1: pointer for data read from 'reg_addr1' 4405 * 4406 * Read one or two dwords from alternate structure. Fields are indicated 4407 * by 'reg_addr0' and 'reg_addr1' register numbers. If 'reg_val1' pointer 4408 * is not passed then only register at 'reg_addr0' is read. 
4409 * 4410 **/ 4411 static i40e_status i40e_aq_alternate_read(struct i40e_hw *hw, 4412 u32 reg_addr0, u32 *reg_val0, 4413 u32 reg_addr1, u32 *reg_val1) 4414 { 4415 struct i40e_aq_desc desc; 4416 struct i40e_aqc_alternate_write *cmd_resp = 4417 (struct i40e_aqc_alternate_write *)&desc.params.raw; 4418 i40e_status status; 4419 4420 if (!reg_val0) 4421 return I40E_ERR_PARAM; 4422 4423 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_alternate_read); 4424 cmd_resp->address0 = cpu_to_le32(reg_addr0); 4425 cmd_resp->address1 = cpu_to_le32(reg_addr1); 4426 4427 status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL); 4428 4429 if (!status) { 4430 *reg_val0 = le32_to_cpu(cmd_resp->data0); 4431 4432 if (reg_val1) 4433 *reg_val1 = le32_to_cpu(cmd_resp->data1); 4434 } 4435 4436 return status; 4437 } 4438 4439 /** 4440 * i40e_aq_resume_port_tx 4441 * @hw: pointer to the hardware structure 4442 * @cmd_details: pointer to command details structure or NULL 4443 * 4444 * Resume port's Tx traffic 4445 **/ 4446 i40e_status i40e_aq_resume_port_tx(struct i40e_hw *hw, 4447 struct i40e_asq_cmd_details *cmd_details) 4448 { 4449 struct i40e_aq_desc desc; 4450 i40e_status status; 4451 4452 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_resume_port_tx); 4453 4454 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 4455 4456 return status; 4457 } 4458 4459 /** 4460 * i40e_set_pci_config_data - store PCI bus info 4461 * @hw: pointer to hardware structure 4462 * @link_status: the link status word from PCI config space 4463 * 4464 * Stores the PCI bus info (speed, width, type) within the i40e_hw structure 4465 **/ 4466 void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status) 4467 { 4468 hw->bus.type = i40e_bus_type_pci_express; 4469 4470 switch (link_status & PCI_EXP_LNKSTA_NLW) { 4471 case PCI_EXP_LNKSTA_NLW_X1: 4472 hw->bus.width = i40e_bus_width_pcie_x1; 4473 break; 4474 case PCI_EXP_LNKSTA_NLW_X2: 4475 hw->bus.width = i40e_bus_width_pcie_x2; 4476 break; 4477 case PCI_EXP_LNKSTA_NLW_X4: 4478 hw->bus.width = i40e_bus_width_pcie_x4; 4479 break; 4480 case PCI_EXP_LNKSTA_NLW_X8: 4481 hw->bus.width = i40e_bus_width_pcie_x8; 4482 break; 4483 default: 4484 hw->bus.width = i40e_bus_width_unknown; 4485 break; 4486 } 4487 4488 switch (link_status & PCI_EXP_LNKSTA_CLS) { 4489 case PCI_EXP_LNKSTA_CLS_2_5GB: 4490 hw->bus.speed = i40e_bus_speed_2500; 4491 break; 4492 case PCI_EXP_LNKSTA_CLS_5_0GB: 4493 hw->bus.speed = i40e_bus_speed_5000; 4494 break; 4495 case PCI_EXP_LNKSTA_CLS_8_0GB: 4496 hw->bus.speed = i40e_bus_speed_8000; 4497 break; 4498 default: 4499 hw->bus.speed = i40e_bus_speed_unknown; 4500 break; 4501 } 4502 } 4503 4504 /** 4505 * i40e_aq_debug_dump 4506 * @hw: pointer to the hardware structure 4507 * @cluster_id: specific cluster to dump 4508 * @table_id: table id within cluster 4509 * @start_index: index of line in the block to read 4510 * @buff_size: dump buffer size 4511 * @buff: dump buffer 4512 * @ret_buff_size: actual buffer size returned 4513 * @ret_next_table: next block to read 4514 * @ret_next_index: next index to read 4515 * @cmd_details: pointer to command details structure or NULL 4516 * 4517 * Dump internal FW/HW data for debug purposes. 
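 *
 * Usage sketch (illustrative only; the cluster and table ids, the buffer
 * size and the single call are arbitrary, and a real caller would keep
 * iterating with ret_next_table/ret_next_index):
 *
 *	u8 *buf = kzalloc(0x1000, GFP_KERNEL);
 *	u16 ret_len = 0;
 *	u8 next_table = 0;
 *	u32 next_index = 0;
 *
 *	if (buf) {
 *		if (!i40e_aq_debug_dump(hw, 1, 0, 0, 0x1000, buf, &ret_len,
 *					&next_table, &next_index, NULL))
 *			hw_dbg(hw, "dumped %u bytes, next table %u, index %u\n",
 *			       ret_len, next_table, next_index);
 *		kfree(buf);
 *	}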
4518 * 4519 **/ 4520 i40e_status i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id, 4521 u8 table_id, u32 start_index, u16 buff_size, 4522 void *buff, u16 *ret_buff_size, 4523 u8 *ret_next_table, u32 *ret_next_index, 4524 struct i40e_asq_cmd_details *cmd_details) 4525 { 4526 struct i40e_aq_desc desc; 4527 struct i40e_aqc_debug_dump_internals *cmd = 4528 (struct i40e_aqc_debug_dump_internals *)&desc.params.raw; 4529 struct i40e_aqc_debug_dump_internals *resp = 4530 (struct i40e_aqc_debug_dump_internals *)&desc.params.raw; 4531 i40e_status status; 4532 4533 if (buff_size == 0 || !buff) 4534 return I40E_ERR_PARAM; 4535 4536 i40e_fill_default_direct_cmd_desc(&desc, 4537 i40e_aqc_opc_debug_dump_internals); 4538 /* Indirect Command */ 4539 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 4540 if (buff_size > I40E_AQ_LARGE_BUF) 4541 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 4542 4543 cmd->cluster_id = cluster_id; 4544 cmd->table_id = table_id; 4545 cmd->idx = cpu_to_le32(start_index); 4546 4547 desc.datalen = cpu_to_le16(buff_size); 4548 4549 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); 4550 if (!status) { 4551 if (ret_buff_size) 4552 *ret_buff_size = le16_to_cpu(desc.datalen); 4553 if (ret_next_table) 4554 *ret_next_table = resp->table_id; 4555 if (ret_next_index) 4556 *ret_next_index = le32_to_cpu(resp->idx); 4557 } 4558 4559 return status; 4560 } 4561 4562 /** 4563 * i40e_read_bw_from_alt_ram 4564 * @hw: pointer to the hardware structure 4565 * @max_bw: pointer for max_bw read 4566 * @min_bw: pointer for min_bw read 4567 * @min_valid: pointer for bool that is true if min_bw is a valid value 4568 * @max_valid: pointer for bool that is true if max_bw is a valid value 4569 * 4570 * Read bw from the alternate ram for the given pf 4571 **/ 4572 i40e_status i40e_read_bw_from_alt_ram(struct i40e_hw *hw, 4573 u32 *max_bw, u32 *min_bw, 4574 bool *min_valid, bool *max_valid) 4575 { 4576 i40e_status status; 4577 u32 max_bw_addr, min_bw_addr; 4578 4579 /* Calculate the address of the min/max bw registers */ 4580 max_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET + 4581 I40E_ALT_STRUCT_MAX_BW_OFFSET + 4582 (I40E_ALT_STRUCT_DWORDS_PER_PF * hw->pf_id); 4583 min_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET + 4584 I40E_ALT_STRUCT_MIN_BW_OFFSET + 4585 (I40E_ALT_STRUCT_DWORDS_PER_PF * hw->pf_id); 4586 4587 /* Read the bandwidths from alt ram */ 4588 status = i40e_aq_alternate_read(hw, max_bw_addr, max_bw, 4589 min_bw_addr, min_bw); 4590 4591 if (*min_bw & I40E_ALT_BW_VALID_MASK) 4592 *min_valid = true; 4593 else 4594 *min_valid = false; 4595 4596 if (*max_bw & I40E_ALT_BW_VALID_MASK) 4597 *max_valid = true; 4598 else 4599 *max_valid = false; 4600 4601 return status; 4602 } 4603 4604 /** 4605 * i40e_aq_configure_partition_bw 4606 * @hw: pointer to the hardware structure 4607 * @bw_data: Buffer holding valid pfs and bw limits 4608 * @cmd_details: pointer to command details 4609 * 4610 * Configure partitions guaranteed/max bw 4611 **/ 4612 i40e_status i40e_aq_configure_partition_bw(struct i40e_hw *hw, 4613 struct i40e_aqc_configure_partition_bw_data *bw_data, 4614 struct i40e_asq_cmd_details *cmd_details) 4615 { 4616 i40e_status status; 4617 struct i40e_aq_desc desc; 4618 u16 bwd_size = sizeof(*bw_data); 4619 4620 i40e_fill_default_direct_cmd_desc(&desc, 4621 i40e_aqc_opc_configure_partition_bw); 4622 4623 /* Indirect command */ 4624 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 4625 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD); 4626 4627 if (bwd_size > I40E_AQ_LARGE_BUF) 4628 
desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 4629 4630 desc.datalen = cpu_to_le16(bwd_size); 4631 4632 status = i40e_asq_send_command(hw, &desc, bw_data, bwd_size, 4633 cmd_details); 4634 4635 return status; 4636 } 4637 4638 /** 4639 * i40e_read_phy_register_clause22 4640 * @hw: pointer to the HW structure 4641 * @reg: register address in the page 4642 * @phy_addr: PHY address on MDIO interface 4643 * @value: PHY register value 4644 * 4645 * Reads specified PHY register value 4646 **/ 4647 i40e_status i40e_read_phy_register_clause22(struct i40e_hw *hw, 4648 u16 reg, u8 phy_addr, u16 *value) 4649 { 4650 i40e_status status = I40E_ERR_TIMEOUT; 4651 u8 port_num = (u8)hw->func_caps.mdio_port_num; 4652 u32 command = 0; 4653 u16 retry = 1000; 4654 4655 command = (reg << I40E_GLGEN_MSCA_DEVADD_SHIFT) | 4656 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | 4657 (I40E_MDIO_CLAUSE22_OPCODE_READ_MASK) | 4658 (I40E_MDIO_CLAUSE22_STCODE_MASK) | 4659 (I40E_GLGEN_MSCA_MDICMD_MASK); 4660 wr32(hw, I40E_GLGEN_MSCA(port_num), command); 4661 do { 4662 command = rd32(hw, I40E_GLGEN_MSCA(port_num)); 4663 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { 4664 status = 0; 4665 break; 4666 } 4667 udelay(10); 4668 retry--; 4669 } while (retry); 4670 4671 if (status) { 4672 i40e_debug(hw, I40E_DEBUG_PHY, 4673 "PHY: Can't write command to external PHY.\n"); 4674 } else { 4675 command = rd32(hw, I40E_GLGEN_MSRWD(port_num)); 4676 *value = (command & I40E_GLGEN_MSRWD_MDIRDDATA_MASK) >> 4677 I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT; 4678 } 4679 4680 return status; 4681 } 4682 4683 /** 4684 * i40e_write_phy_register_clause22 4685 * @hw: pointer to the HW structure 4686 * @reg: register address in the page 4687 * @phy_addr: PHY address on MDIO interface 4688 * @value: PHY register value 4689 * 4690 * Writes specified PHY register value 4691 **/ 4692 i40e_status i40e_write_phy_register_clause22(struct i40e_hw *hw, 4693 u16 reg, u8 phy_addr, u16 value) 4694 { 4695 i40e_status status = I40E_ERR_TIMEOUT; 4696 u8 port_num = (u8)hw->func_caps.mdio_port_num; 4697 u32 command = 0; 4698 u16 retry = 1000; 4699 4700 command = value << I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT; 4701 wr32(hw, I40E_GLGEN_MSRWD(port_num), command); 4702 4703 command = (reg << I40E_GLGEN_MSCA_DEVADD_SHIFT) | 4704 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | 4705 (I40E_MDIO_CLAUSE22_OPCODE_WRITE_MASK) | 4706 (I40E_MDIO_CLAUSE22_STCODE_MASK) | 4707 (I40E_GLGEN_MSCA_MDICMD_MASK); 4708 4709 wr32(hw, I40E_GLGEN_MSCA(port_num), command); 4710 do { 4711 command = rd32(hw, I40E_GLGEN_MSCA(port_num)); 4712 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { 4713 status = 0; 4714 break; 4715 } 4716 udelay(10); 4717 retry--; 4718 } while (retry); 4719 4720 return status; 4721 } 4722 4723 /** 4724 * i40e_read_phy_register_clause45 4725 * @hw: pointer to the HW structure 4726 * @page: registers page number 4727 * @reg: register address in the page 4728 * @phy_addr: PHY address on MDIO interface 4729 * @value: PHY register value 4730 * 4731 * Reads specified PHY register value 4732 **/ 4733 i40e_status i40e_read_phy_register_clause45(struct i40e_hw *hw, 4734 u8 page, u16 reg, u8 phy_addr, u16 *value) 4735 { 4736 i40e_status status = I40E_ERR_TIMEOUT; 4737 u32 command = 0; 4738 u16 retry = 1000; 4739 u8 port_num = hw->func_caps.mdio_port_num; 4740 4741 command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) | 4742 (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) | 4743 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | 4744 (I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK) | 4745 (I40E_MDIO_CLAUSE45_STCODE_MASK) | 4746 
(I40E_GLGEN_MSCA_MDICMD_MASK) | 4747 (I40E_GLGEN_MSCA_MDIINPROGEN_MASK); 4748 wr32(hw, I40E_GLGEN_MSCA(port_num), command); 4749 do { 4750 command = rd32(hw, I40E_GLGEN_MSCA(port_num)); 4751 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { 4752 status = 0; 4753 break; 4754 } 4755 usleep_range(10, 20); 4756 retry--; 4757 } while (retry); 4758 4759 if (status) { 4760 i40e_debug(hw, I40E_DEBUG_PHY, 4761 "PHY: Can't write command to external PHY.\n"); 4762 goto phy_read_end; 4763 } 4764 4765 command = (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) | 4766 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | 4767 (I40E_MDIO_CLAUSE45_OPCODE_READ_MASK) | 4768 (I40E_MDIO_CLAUSE45_STCODE_MASK) | 4769 (I40E_GLGEN_MSCA_MDICMD_MASK) | 4770 (I40E_GLGEN_MSCA_MDIINPROGEN_MASK); 4771 status = I40E_ERR_TIMEOUT; 4772 retry = 1000; 4773 wr32(hw, I40E_GLGEN_MSCA(port_num), command); 4774 do { 4775 command = rd32(hw, I40E_GLGEN_MSCA(port_num)); 4776 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { 4777 status = 0; 4778 break; 4779 } 4780 usleep_range(10, 20); 4781 retry--; 4782 } while (retry); 4783 4784 if (!status) { 4785 command = rd32(hw, I40E_GLGEN_MSRWD(port_num)); 4786 *value = (command & I40E_GLGEN_MSRWD_MDIRDDATA_MASK) >> 4787 I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT; 4788 } else { 4789 i40e_debug(hw, I40E_DEBUG_PHY, 4790 "PHY: Can't read register value from external PHY.\n"); 4791 } 4792 4793 phy_read_end: 4794 return status; 4795 } 4796 4797 /** 4798 * i40e_write_phy_register_clause45 4799 * @hw: pointer to the HW structure 4800 * @page: registers page number 4801 * @reg: register address in the page 4802 * @phy_addr: PHY address on MDIO interface 4803 * @value: PHY register value 4804 * 4805 * Writes value to specified PHY register 4806 **/ 4807 i40e_status i40e_write_phy_register_clause45(struct i40e_hw *hw, 4808 u8 page, u16 reg, u8 phy_addr, u16 value) 4809 { 4810 i40e_status status = I40E_ERR_TIMEOUT; 4811 u32 command = 0; 4812 u16 retry = 1000; 4813 u8 port_num = hw->func_caps.mdio_port_num; 4814 4815 command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) | 4816 (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) | 4817 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | 4818 (I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK) | 4819 (I40E_MDIO_CLAUSE45_STCODE_MASK) | 4820 (I40E_GLGEN_MSCA_MDICMD_MASK) | 4821 (I40E_GLGEN_MSCA_MDIINPROGEN_MASK); 4822 wr32(hw, I40E_GLGEN_MSCA(port_num), command); 4823 do { 4824 command = rd32(hw, I40E_GLGEN_MSCA(port_num)); 4825 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { 4826 status = 0; 4827 break; 4828 } 4829 usleep_range(10, 20); 4830 retry--; 4831 } while (retry); 4832 if (status) { 4833 i40e_debug(hw, I40E_DEBUG_PHY, 4834 "PHY: Can't write command to external PHY.\n"); 4835 goto phy_write_end; 4836 } 4837 4838 command = value << I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT; 4839 wr32(hw, I40E_GLGEN_MSRWD(port_num), command); 4840 4841 command = (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) | 4842 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | 4843 (I40E_MDIO_CLAUSE45_OPCODE_WRITE_MASK) | 4844 (I40E_MDIO_CLAUSE45_STCODE_MASK) | 4845 (I40E_GLGEN_MSCA_MDICMD_MASK) | 4846 (I40E_GLGEN_MSCA_MDIINPROGEN_MASK); 4847 status = I40E_ERR_TIMEOUT; 4848 retry = 1000; 4849 wr32(hw, I40E_GLGEN_MSCA(port_num), command); 4850 do { 4851 command = rd32(hw, I40E_GLGEN_MSCA(port_num)); 4852 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { 4853 status = 0; 4854 break; 4855 } 4856 usleep_range(10, 20); 4857 retry--; 4858 } while (retry); 4859 4860 phy_write_end: 4861 return status; 4862 } 4863 4864 /** 4865 * i40e_write_phy_register 4866 * @hw: pointer to the 
HW structure 4867 * @page: registers page number 4868 * @reg: register address in the page 4869 * @phy_addr: PHY address on MDIO interface 4870 * @value: PHY register value 4871 * 4872 * Writes value to specified PHY register 4873 **/ 4874 i40e_status i40e_write_phy_register(struct i40e_hw *hw, 4875 u8 page, u16 reg, u8 phy_addr, u16 value) 4876 { 4877 i40e_status status; 4878 4879 switch (hw->device_id) { 4880 case I40E_DEV_ID_1G_BASE_T_X722: 4881 status = i40e_write_phy_register_clause22(hw, reg, phy_addr, 4882 value); 4883 break; 4884 case I40E_DEV_ID_10G_BASE_T: 4885 case I40E_DEV_ID_10G_BASE_T4: 4886 case I40E_DEV_ID_10G_BASE_T_X722: 4887 case I40E_DEV_ID_25G_B: 4888 case I40E_DEV_ID_25G_SFP28: 4889 status = i40e_write_phy_register_clause45(hw, page, reg, 4890 phy_addr, value); 4891 break; 4892 default: 4893 status = I40E_ERR_UNKNOWN_PHY; 4894 break; 4895 } 4896 4897 return status; 4898 } 4899 4900 /** 4901 * i40e_read_phy_register 4902 * @hw: pointer to the HW structure 4903 * @page: registers page number 4904 * @reg: register address in the page 4905 * @phy_addr: PHY address on MDIO interface 4906 * @value: PHY register value 4907 * 4908 * Reads specified PHY register value 4909 **/ 4910 i40e_status i40e_read_phy_register(struct i40e_hw *hw, 4911 u8 page, u16 reg, u8 phy_addr, u16 *value) 4912 { 4913 i40e_status status; 4914 4915 switch (hw->device_id) { 4916 case I40E_DEV_ID_1G_BASE_T_X722: 4917 status = i40e_read_phy_register_clause22(hw, reg, phy_addr, 4918 value); 4919 break; 4920 case I40E_DEV_ID_10G_BASE_T: 4921 case I40E_DEV_ID_10G_BASE_T4: 4922 case I40E_DEV_ID_10G_BASE_T_BC: 4923 case I40E_DEV_ID_10G_BASE_T_X722: 4924 case I40E_DEV_ID_25G_B: 4925 case I40E_DEV_ID_25G_SFP28: 4926 status = i40e_read_phy_register_clause45(hw, page, reg, 4927 phy_addr, value); 4928 break; 4929 default: 4930 status = I40E_ERR_UNKNOWN_PHY; 4931 break; 4932 } 4933 4934 return status; 4935 } 4936 4937 /** 4938 * i40e_get_phy_address 4939 * @hw: pointer to the HW structure 4940 * @dev_num: PHY port number whose address we want 4941 * 4942 * Gets PHY address for current port 4943 **/ 4944 u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num) 4945 { 4946 u8 port_num = hw->func_caps.mdio_port_num; 4947 u32 reg_val = rd32(hw, I40E_GLGEN_MDIO_I2C_SEL(port_num)); 4948 4949 return (u8)(reg_val >> ((dev_num + 1) * 5)) & 0x1f; 4950 } 4951 4952 /** 4953 * i40e_blink_phy_link_led 4954 * @hw: pointer to the HW structure 4955 * @time: how long the LED will blink, in seconds 4956 * @interval: gap between LED on and off in msecs 4957 * 4958 * Blinks PHY link LED 4959 **/ 4960 i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw, 4961 u32 time, u32 interval) 4962 { 4963 i40e_status status = 0; 4964 u32 i; 4965 u16 led_ctl; 4966 u16 gpio_led_port; 4967 u16 led_reg; 4968 u16 led_addr = I40E_PHY_LED_PROV_REG_1; 4969 u8 phy_addr = 0; 4970 u8 port_num; 4971 4972 i = rd32(hw, I40E_PFGEN_PORTNUM); 4973 port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK); 4974 phy_addr = i40e_get_phy_address(hw, port_num); 4975 4976 for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++, 4977 led_addr++) { 4978 status = i40e_read_phy_register_clause45(hw, 4979 I40E_PHY_COM_REG_PAGE, 4980 led_addr, phy_addr, 4981 &led_reg); 4982 if (status) 4983 goto phy_blinking_end; 4984 led_ctl = led_reg; 4985 if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) { 4986 led_reg = 0; 4987 status = i40e_write_phy_register_clause45(hw, 4988 I40E_PHY_COM_REG_PAGE, 4989 led_addr, phy_addr, 4990 led_reg); 4991 if (status) 4992 goto phy_blinking_end; 4993 break; 4994 }
4995 } 4996 4997 if (time > 0 && interval > 0) { 4998 for (i = 0; i < time * 1000; i += interval) { 4999 status = i40e_read_phy_register_clause45(hw, 5000 I40E_PHY_COM_REG_PAGE, 5001 led_addr, phy_addr, &led_reg); 5002 if (status) 5003 goto restore_config; 5004 if (led_reg & I40E_PHY_LED_MANUAL_ON) 5005 led_reg = 0; 5006 else 5007 led_reg = I40E_PHY_LED_MANUAL_ON; 5008 status = i40e_write_phy_register_clause45(hw, 5009 I40E_PHY_COM_REG_PAGE, 5010 led_addr, phy_addr, led_reg); 5011 if (status) 5012 goto restore_config; 5013 msleep(interval); 5014 } 5015 } 5016 5017 restore_config: 5018 status = i40e_write_phy_register_clause45(hw, 5019 I40E_PHY_COM_REG_PAGE, 5020 led_addr, phy_addr, led_ctl); 5021 5022 phy_blinking_end: 5023 return status; 5024 } 5025 5026 /** 5027 * i40e_led_get_reg - read LED register 5028 * @hw: pointer to the HW structure 5029 * @led_addr: LED register address 5030 * @reg_val: read register value 5031 **/ 5032 static enum i40e_status_code i40e_led_get_reg(struct i40e_hw *hw, u16 led_addr, 5033 u32 *reg_val) 5034 { 5035 enum i40e_status_code status; 5036 u8 phy_addr = 0; 5037 u8 port_num; 5038 u32 i; 5039 5040 *reg_val = 0; 5041 if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) { 5042 status = 5043 i40e_aq_get_phy_register(hw, 5044 I40E_AQ_PHY_REG_ACCESS_EXTERNAL, 5045 I40E_PHY_COM_REG_PAGE, 5046 I40E_PHY_LED_PROV_REG_1, 5047 reg_val, NULL); 5048 } else { 5049 i = rd32(hw, I40E_PFGEN_PORTNUM); 5050 port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK); 5051 phy_addr = i40e_get_phy_address(hw, port_num); 5052 status = i40e_read_phy_register_clause45(hw, 5053 I40E_PHY_COM_REG_PAGE, 5054 led_addr, phy_addr, 5055 (u16 *)reg_val); 5056 } 5057 return status; 5058 } 5059 5060 /** 5061 * i40e_led_set_reg - write LED register 5062 * @hw: pointer to the HW structure 5063 * @led_addr: LED register address 5064 * @reg_val: register value to write 5065 **/ 5066 static enum i40e_status_code i40e_led_set_reg(struct i40e_hw *hw, u16 led_addr, 5067 u32 reg_val) 5068 { 5069 enum i40e_status_code status; 5070 u8 phy_addr = 0; 5071 u8 port_num; 5072 u32 i; 5073 5074 if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) { 5075 status = 5076 i40e_aq_set_phy_register(hw, 5077 I40E_AQ_PHY_REG_ACCESS_EXTERNAL, 5078 I40E_PHY_COM_REG_PAGE, 5079 I40E_PHY_LED_PROV_REG_1, 5080 reg_val, NULL); 5081 } else { 5082 i = rd32(hw, I40E_PFGEN_PORTNUM); 5083 port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK); 5084 phy_addr = i40e_get_phy_address(hw, port_num); 5085 status = i40e_write_phy_register_clause45(hw, 5086 I40E_PHY_COM_REG_PAGE, 5087 led_addr, phy_addr, 5088 (u16)reg_val); 5089 } 5090 5091 return status; 5092 } 5093 5094 /** 5095 * i40e_led_get_phy - return current on/off mode 5096 * @hw: pointer to the hw struct 5097 * @led_addr: address of led register to use 5098 * @val: original value of register to use 5099 * 5100 **/ 5101 i40e_status i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr, 5102 u16 *val) 5103 { 5104 i40e_status status = 0; 5105 u16 gpio_led_port; 5106 u8 phy_addr = 0; 5107 u16 reg_val; 5108 u16 temp_addr; 5109 u8 port_num; 5110 u32 i; 5111 u32 reg_val_aq; 5112 5113 if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) { 5114 status = 5115 i40e_aq_get_phy_register(hw, 5116 I40E_AQ_PHY_REG_ACCESS_EXTERNAL, 5117 I40E_PHY_COM_REG_PAGE, 5118 I40E_PHY_LED_PROV_REG_1, 5119 &reg_val_aq, NULL); 5120 if (status == I40E_SUCCESS) 5121 *val = (u16)reg_val_aq; 5122 return status; 5123 } 5124 temp_addr = I40E_PHY_LED_PROV_REG_1; 5125 i = rd32(hw, I40E_PFGEN_PORTNUM); 5126 port_num = (u8)(i &
I40E_PFGEN_PORTNUM_PORT_NUM_MASK); 5127 phy_addr = i40e_get_phy_address(hw, port_num); 5128 5129 for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++, 5130 temp_addr++) { 5131 status = i40e_read_phy_register_clause45(hw, 5132 I40E_PHY_COM_REG_PAGE, 5133 temp_addr, phy_addr, 5134 &reg_val); 5135 if (status) 5136 return status; 5137 *val = reg_val; 5138 if (reg_val & I40E_PHY_LED_LINK_MODE_MASK) { 5139 *led_addr = temp_addr; 5140 break; 5141 } 5142 } 5143 return status; 5144 } 5145 5146 /** 5147 * i40e_led_set_phy 5148 * @hw: pointer to the HW structure 5149 * @on: true to turn the LED on, false to turn it off 5150 * @led_addr: address of led register to use 5151 * @mode: original val plus bit for set or ignore 5152 * 5153 * Set LEDs on or off when controlled by the PHY 5154 * 5155 **/ 5156 i40e_status i40e_led_set_phy(struct i40e_hw *hw, bool on, 5157 u16 led_addr, u32 mode) 5158 { 5159 i40e_status status = 0; 5160 u32 led_ctl = 0; 5161 u32 led_reg = 0; 5162 5163 status = i40e_led_get_reg(hw, led_addr, &led_reg); 5164 if (status) 5165 return status; 5166 led_ctl = led_reg; 5167 if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) { 5168 led_reg = 0; 5169 status = i40e_led_set_reg(hw, led_addr, led_reg); 5170 if (status) 5171 return status; 5172 } 5173 status = i40e_led_get_reg(hw, led_addr, &led_reg); 5174 if (status) 5175 goto restore_config; 5176 if (on) 5177 led_reg = I40E_PHY_LED_MANUAL_ON; 5178 else 5179 led_reg = 0; 5180 5181 status = i40e_led_set_reg(hw, led_addr, led_reg); 5182 if (status) 5183 goto restore_config; 5184 if (mode & I40E_PHY_LED_MODE_ORIG) { 5185 led_ctl = (mode & I40E_PHY_LED_MODE_MASK); 5186 status = i40e_led_set_reg(hw, led_addr, led_ctl); 5187 } 5188 return status; 5189 5190 restore_config: 5191 status = i40e_led_set_reg(hw, led_addr, led_ctl); 5192 return status; 5193 } 5194 5195 /** 5196 * i40e_aq_rx_ctl_read_register - use FW to read from an Rx control register 5197 * @hw: pointer to the hw struct 5198 * @reg_addr: register address 5199 * @reg_val: ptr to register value 5200 * @cmd_details: pointer to command details structure or NULL 5201 * 5202 * Use the firmware to read the Rx control register, 5203 * especially useful if the Rx unit is under heavy pressure 5204 **/ 5205 i40e_status i40e_aq_rx_ctl_read_register(struct i40e_hw *hw, 5206 u32 reg_addr, u32 *reg_val, 5207 struct i40e_asq_cmd_details *cmd_details) 5208 { 5209 struct i40e_aq_desc desc; 5210 struct i40e_aqc_rx_ctl_reg_read_write *cmd_resp = 5211 (struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw; 5212 i40e_status status; 5213 5214 if (!reg_val) 5215 return I40E_ERR_PARAM; 5216 5217 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_rx_ctl_reg_read); 5218 5219 cmd_resp->address = cpu_to_le32(reg_addr); 5220 5221 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 5222 5223 if (status == 0) 5224 *reg_val = le32_to_cpu(cmd_resp->value); 5225 5226 return status; 5227 } 5228 5229 /** 5230 * i40e_read_rx_ctl - read from an Rx control register 5231 * @hw: pointer to the hw struct 5232 * @reg_addr: register address 5233 **/ 5234 u32 i40e_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr) 5235 { 5236 i40e_status status = 0; 5237 bool use_register; 5238 int retry = 5; 5239 u32 val = 0; 5240 5241 use_register = (((hw->aq.api_maj_ver == 1) && 5242 (hw->aq.api_min_ver < 5)) || 5243 (hw->mac.type == I40E_MAC_X722)); 5244 if (!use_register) { 5245 do_retry: 5246 status = i40e_aq_rx_ctl_read_register(hw, reg_addr, &val, NULL); 5247 if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) { 5248 usleep_range(1000, 2000); 5249
retry--; 5250 goto do_retry; 5251 } 5252 } 5253 5254 /* if the AQ access failed, try the old-fashioned way */ 5255 if (status || use_register) 5256 val = rd32(hw, reg_addr); 5257 5258 return val; 5259 } 5260 5261 /** 5262 * i40e_aq_rx_ctl_write_register 5263 * @hw: pointer to the hw struct 5264 * @reg_addr: register address 5265 * @reg_val: register value 5266 * @cmd_details: pointer to command details structure or NULL 5267 * 5268 * Use the firmware to write to an Rx control register, 5269 * especially useful if the Rx unit is under heavy pressure 5270 **/ 5271 i40e_status i40e_aq_rx_ctl_write_register(struct i40e_hw *hw, 5272 u32 reg_addr, u32 reg_val, 5273 struct i40e_asq_cmd_details *cmd_details) 5274 { 5275 struct i40e_aq_desc desc; 5276 struct i40e_aqc_rx_ctl_reg_read_write *cmd = 5277 (struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw; 5278 i40e_status status; 5279 5280 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_rx_ctl_reg_write); 5281 5282 cmd->address = cpu_to_le32(reg_addr); 5283 cmd->value = cpu_to_le32(reg_val); 5284 5285 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 5286 5287 return status; 5288 } 5289 5290 /** 5291 * i40e_write_rx_ctl - write to an Rx control register 5292 * @hw: pointer to the hw struct 5293 * @reg_addr: register address 5294 * @reg_val: register value 5295 **/ 5296 void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val) 5297 { 5298 i40e_status status = 0; 5299 bool use_register; 5300 int retry = 5; 5301 5302 use_register = (((hw->aq.api_maj_ver == 1) && 5303 (hw->aq.api_min_ver < 5)) || 5304 (hw->mac.type == I40E_MAC_X722)); 5305 if (!use_register) { 5306 do_retry: 5307 status = i40e_aq_rx_ctl_write_register(hw, reg_addr, 5308 reg_val, NULL); 5309 if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) { 5310 usleep_range(1000, 2000); 5311 retry--; 5312 goto do_retry; 5313 } 5314 } 5315 5316 /* if the AQ access failed, try the old-fashioned way */ 5317 if (status || use_register) 5318 wr32(hw, reg_addr, reg_val); 5319 } 5320 5321 /** 5322 * i40e_aq_set_phy_register 5323 * @hw: pointer to the hw struct 5324 * @phy_select: select which phy should be accessed 5325 * @dev_addr: PHY device address 5326 * @reg_addr: PHY register address 5327 * @reg_val: new register value 5328 * @cmd_details: pointer to command details structure or NULL 5329 * 5330 * Write the external PHY register. 5331 **/ 5332 i40e_status i40e_aq_set_phy_register(struct i40e_hw *hw, 5333 u8 phy_select, u8 dev_addr, 5334 u32 reg_addr, u32 reg_val, 5335 struct i40e_asq_cmd_details *cmd_details) 5336 { 5337 struct i40e_aq_desc desc; 5338 struct i40e_aqc_phy_register_access *cmd = 5339 (struct i40e_aqc_phy_register_access *)&desc.params.raw; 5340 i40e_status status; 5341 5342 i40e_fill_default_direct_cmd_desc(&desc, 5343 i40e_aqc_opc_set_phy_register); 5344 5345 cmd->phy_interface = phy_select; 5346 cmd->dev_address = dev_addr; 5347 cmd->reg_address = cpu_to_le32(reg_addr); 5348 cmd->reg_value = cpu_to_le32(reg_val); 5349 5350 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 5351 5352 return status; 5353 } 5354 5355 /** 5356 * i40e_aq_get_phy_register 5357 * @hw: pointer to the hw struct 5358 * @phy_select: select which phy should be accessed 5359 * @dev_addr: PHY device address 5360 * @reg_addr: PHY register address 5361 * @reg_val: read register value 5362 * @cmd_details: pointer to command details structure or NULL 5363 * 5364 * Read the external PHY register. 
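 *
 * Usage sketch (illustrative only): read an external PHY register through
 * the firmware and write the same value back, reusing the LED provisioning
 * register address from the LED helpers above as the example target:
 *
 *	u32 val = 0;
 *	i40e_status err;
 *
 *	err = i40e_aq_get_phy_register(hw, I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
 *				       I40E_PHY_COM_REG_PAGE,
 *				       I40E_PHY_LED_PROV_REG_1, &val, NULL);
 *	if (!err)
 *		err = i40e_aq_set_phy_register(hw,
 *					       I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
 *					       I40E_PHY_COM_REG_PAGE,
 *					       I40E_PHY_LED_PROV_REG_1,
 *					       val, NULL);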
/**
 * i40e_aq_set_phy_register - write to an external PHY register
 * @hw: pointer to the hw struct
 * @phy_select: select which phy should be accessed
 * @dev_addr: PHY device address
 * @reg_addr: PHY register address
 * @reg_val: new register value
 * @cmd_details: pointer to command details structure or NULL
 *
 * Write the external PHY register.
 **/
i40e_status i40e_aq_set_phy_register(struct i40e_hw *hw,
				     u8 phy_select, u8 dev_addr,
				     u32 reg_addr, u32 reg_val,
				     struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_phy_register_access *cmd =
		(struct i40e_aqc_phy_register_access *)&desc.params.raw;
	i40e_status status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_set_phy_register);

	cmd->phy_interface = phy_select;
	cmd->dev_address = dev_addr;
	cmd->reg_address = cpu_to_le32(reg_addr);
	cmd->reg_value = cpu_to_le32(reg_val);

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	return status;
}

/**
 * i40e_aq_get_phy_register - read from an external PHY register
 * @hw: pointer to the hw struct
 * @phy_select: select which phy should be accessed
 * @dev_addr: PHY device address
 * @reg_addr: PHY register address
 * @reg_val: read register value
 * @cmd_details: pointer to command details structure or NULL
 *
 * Read the external PHY register.
 **/
i40e_status i40e_aq_get_phy_register(struct i40e_hw *hw,
				     u8 phy_select, u8 dev_addr,
				     u32 reg_addr, u32 *reg_val,
				     struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_phy_register_access *cmd =
		(struct i40e_aqc_phy_register_access *)&desc.params.raw;
	i40e_status status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_get_phy_register);

	cmd->phy_interface = phy_select;
	cmd->dev_address = dev_addr;
	cmd->reg_address = cpu_to_le32(reg_addr);

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
	if (!status)
		*reg_val = le32_to_cpu(cmd->reg_value);

	return status;
}

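/*
 * Illustrative sketch only: setting one bit in an external PHY register
 * through the AdminQ accessors above. phy_select and dev_addr are left as
 * parameters because their valid values come from the AdminQ command
 * definitions; the helper name is hypothetical.
 */
static inline i40e_status i40e_sketch_phy_reg_set_bit(struct i40e_hw *hw,
						      u8 phy_select,
						      u8 dev_addr,
						      u32 reg_addr, u32 bit)
{
	i40e_status status;
	u32 val = 0;

	status = i40e_aq_get_phy_register(hw, phy_select, dev_addr,
					  reg_addr, &val, NULL);
	if (status)
		return status;

	return i40e_aq_set_phy_register(hw, phy_select, dev_addr,
					reg_addr, val | bit, NULL);
}
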
/**
 * i40e_aq_write_ddp - Write dynamic device personalization (ddp)
 * @hw: pointer to the hw struct
 * @buff: command buffer (size in bytes = buff_size)
 * @buff_size: buffer size in bytes
 * @track_id: package tracking id
 * @error_offset: returns error offset
 * @error_info: returns error information
 * @cmd_details: pointer to command details structure or NULL
 **/
enum
i40e_status_code i40e_aq_write_ddp(struct i40e_hw *hw, void *buff,
				   u16 buff_size, u32 track_id,
				   u32 *error_offset, u32 *error_info,
				   struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_write_personalization_profile *cmd =
		(struct i40e_aqc_write_personalization_profile *)
		&desc.params.raw;
	struct i40e_aqc_write_ddp_resp *resp;
	i40e_status status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_write_personalization_profile);

	desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD);
	if (buff_size > I40E_AQ_LARGE_BUF)
		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);

	desc.datalen = cpu_to_le16(buff_size);

	cmd->profile_track_id = cpu_to_le32(track_id);

	status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
	if (!status) {
		resp = (struct i40e_aqc_write_ddp_resp *)&desc.params.raw;
		if (error_offset)
			*error_offset = le32_to_cpu(resp->error_offset);
		if (error_info)
			*error_info = le32_to_cpu(resp->error_info);
	}

	return status;
}

/**
 * i40e_aq_get_ddp_list - Read dynamic device personalization (ddp)
 * @hw: pointer to the hw struct
 * @buff: command buffer (size in bytes = buff_size)
 * @buff_size: buffer size in bytes
 * @flags: AdminQ command flags
 * @cmd_details: pointer to command details structure or NULL
 **/
enum
i40e_status_code i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
				      u16 buff_size, u8 flags,
				      struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_get_applied_profiles *cmd =
		(struct i40e_aqc_get_applied_profiles *)&desc.params.raw;
	i40e_status status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_get_personalization_profile_list);

	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
	if (buff_size > I40E_AQ_LARGE_BUF)
		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
	desc.datalen = cpu_to_le16(buff_size);

	cmd->flags = flags;

	status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);

	return status;
}

/**
 * i40e_find_segment_in_package - find a segment of a given type
 * @segment_type: the segment type to search for (i.e., SEGMENT_TYPE_I40E)
 * @pkg_hdr: pointer to the package header to be searched
 *
 * This function searches a package file for a particular segment type. On
 * success it returns a pointer to the segment header, otherwise it will
 * return NULL.
 **/
struct i40e_generic_seg_header *
i40e_find_segment_in_package(u32 segment_type,
			     struct i40e_package_header *pkg_hdr)
{
	struct i40e_generic_seg_header *segment;
	u32 i;

	/* Search all package segments for the requested segment type */
	for (i = 0; i < pkg_hdr->segment_count; i++) {
		segment =
			(struct i40e_generic_seg_header *)((u8 *)pkg_hdr +
			 pkg_hdr->segment_offset[i]);

		if (segment->type == segment_type)
			return segment;
	}

	return NULL;
}

/* Get section table in profile */
#define I40E_SECTION_TABLE(profile, sec_tbl)				\
	do {								\
		struct i40e_profile_segment *p = (profile);		\
		u32 count;						\
		u32 *nvm;						\
		count = p->device_table_count;				\
		nvm = (u32 *)&p->device_table[count];			\
		sec_tbl = (struct i40e_section_table *)&nvm[nvm[0] + 1]; \
	} while (0)

/* Get section header in profile */
#define I40E_SECTION_HEADER(profile, offset)				\
	(struct i40e_profile_section_header *)((u8 *)(profile) + (offset))

/**
 * i40e_find_section_in_profile - find a section of a given type
 * @section_type: the section type to search for (i.e., SECTION_TYPE_NOTE)
 * @profile: pointer to the i40e segment header to be searched
 *
 * This function searches the i40e segment for a particular section type. On
 * success it returns a pointer to the section header, otherwise it will
 * return NULL.
 **/
struct i40e_profile_section_header *
i40e_find_section_in_profile(u32 section_type,
			     struct i40e_profile_segment *profile)
{
	struct i40e_profile_section_header *sec;
	struct i40e_section_table *sec_tbl;
	u32 sec_off;
	u32 i;

	if (profile->header.type != SEGMENT_TYPE_I40E)
		return NULL;

	I40E_SECTION_TABLE(profile, sec_tbl);

	for (i = 0; i < sec_tbl->section_count; i++) {
		sec_off = sec_tbl->section_offset[i];
		sec = I40E_SECTION_HEADER(profile, sec_off);
		if (sec->section.type == section_type)
			return sec;
	}

	return NULL;
}

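/*
 * Illustrative sketch only: locating the i40e profile segment inside a DDP
 * package image and then a metadata section inside that segment, using the
 * helpers above. The pkg pointer is assumed to reference a package image
 * already read into memory and validated by the caller; the helper name is
 * hypothetical.
 */
static inline struct i40e_profile_section_header *
i40e_sketch_find_note_section(struct i40e_package_header *pkg)
{
	struct i40e_generic_seg_header *seg;

	/* the i40e-specific payload lives in the SEGMENT_TYPE_I40E segment */
	seg = i40e_find_segment_in_package(SEGMENT_TYPE_I40E, pkg);
	if (!seg)
		return NULL;

	/* within that segment, look up the SECTION_TYPE_NOTE section */
	return i40e_find_section_in_profile(SECTION_TYPE_NOTE,
					    (struct i40e_profile_segment *)seg);
}
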
/**
 * i40e_ddp_exec_aq_section - Execute generic AQ for DDP
 * @hw: pointer to the hw struct
 * @aq: command buffer containing all data to execute AQ
 **/
static enum
i40e_status_code i40e_ddp_exec_aq_section(struct i40e_hw *hw,
					  struct i40e_profile_aq_section *aq)
{
	i40e_status status;
	struct i40e_aq_desc desc;
	u8 *msg = NULL;
	u16 msglen;

	i40e_fill_default_direct_cmd_desc(&desc, aq->opcode);
	desc.flags |= cpu_to_le16(aq->flags);
	memcpy(desc.params.raw, aq->param, sizeof(desc.params.raw));

	msglen = aq->datalen;
	if (msglen) {
		desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF |
						I40E_AQ_FLAG_RD));
		if (msglen > I40E_AQ_LARGE_BUF)
			desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
		desc.datalen = cpu_to_le16(msglen);
		msg = &aq->data[0];
	}

	status = i40e_asq_send_command(hw, &desc, msg, msglen, NULL);

	if (status) {
		i40e_debug(hw, I40E_DEBUG_PACKAGE,
			   "unable to exec DDP AQ opcode %u, error %d\n",
			   aq->opcode, status);
		return status;
	}

	/* copy returned desc to aq_buf */
	memcpy(aq->param, desc.params.raw, sizeof(desc.params.raw));

	return 0;
}

/**
 * i40e_validate_profile - validate a DDP profile segment
 * @hw: pointer to the hardware structure
 * @profile: pointer to the profile segment of the package to be validated
 * @track_id: package tracking id
 * @rollback: flag if the profile is for rollback.
 *
 * Validates supported devices and profile's sections.
 */
static enum i40e_status_code
i40e_validate_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
		      u32 track_id, bool rollback)
{
	struct i40e_profile_section_header *sec = NULL;
	i40e_status status = 0;
	struct i40e_section_table *sec_tbl;
	u32 vendor_dev_id;
	u32 dev_cnt;
	u32 sec_off;
	u32 i;

	if (track_id == I40E_DDP_TRACKID_INVALID) {
		i40e_debug(hw, I40E_DEBUG_PACKAGE, "Invalid track_id\n");
		return I40E_NOT_SUPPORTED;
	}

	dev_cnt = profile->device_table_count;
	for (i = 0; i < dev_cnt; i++) {
		vendor_dev_id = profile->device_table[i].vendor_dev_id;
		if ((vendor_dev_id >> 16) == PCI_VENDOR_ID_INTEL &&
		    hw->device_id == (vendor_dev_id & 0xFFFF))
			break;
	}
	if (dev_cnt && i == dev_cnt) {
		i40e_debug(hw, I40E_DEBUG_PACKAGE,
			   "Device doesn't support DDP\n");
		return I40E_ERR_DEVICE_NOT_SUPPORTED;
	}

	I40E_SECTION_TABLE(profile, sec_tbl);

	/* Validate section types */
	for (i = 0; i < sec_tbl->section_count; i++) {
		sec_off = sec_tbl->section_offset[i];
		sec = I40E_SECTION_HEADER(profile, sec_off);
		if (rollback) {
			if (sec->section.type == SECTION_TYPE_MMIO ||
			    sec->section.type == SECTION_TYPE_AQ ||
			    sec->section.type == SECTION_TYPE_RB_AQ) {
				i40e_debug(hw, I40E_DEBUG_PACKAGE,
					   "Not a roll-back package\n");
				return I40E_NOT_SUPPORTED;
			}
		} else {
			if (sec->section.type == SECTION_TYPE_RB_AQ ||
			    sec->section.type == SECTION_TYPE_RB_MMIO) {
				i40e_debug(hw, I40E_DEBUG_PACKAGE,
					   "Not an original package\n");
				return I40E_NOT_SUPPORTED;
			}
		}
	}

	return status;
}

/**
 * i40e_write_profile - write a DDP profile to the device
 * @hw: pointer to the hardware structure
 * @profile: pointer to the profile segment of the package to be downloaded
 * @track_id: package tracking id
 *
 * Handles the download of a complete package.
 */
enum i40e_status_code
i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
		   u32 track_id)
{
	i40e_status status = 0;
	struct i40e_section_table *sec_tbl;
	struct i40e_profile_section_header *sec = NULL;
	struct i40e_profile_aq_section *ddp_aq;
	u32 section_size = 0;
	u32 offset = 0, info = 0;
	u32 sec_off;
	u32 i;

	status = i40e_validate_profile(hw, profile, track_id, false);
	if (status)
		return status;

	I40E_SECTION_TABLE(profile, sec_tbl);

	for (i = 0; i < sec_tbl->section_count; i++) {
		sec_off = sec_tbl->section_offset[i];
		sec = I40E_SECTION_HEADER(profile, sec_off);
		/* Process generic admin command */
		if (sec->section.type == SECTION_TYPE_AQ) {
			ddp_aq = (struct i40e_profile_aq_section *)&sec[1];
			status = i40e_ddp_exec_aq_section(hw, ddp_aq);
			if (status) {
				i40e_debug(hw, I40E_DEBUG_PACKAGE,
					   "Failed to execute aq: section %d, opcode %u\n",
					   i, ddp_aq->opcode);
				break;
			}
			sec->section.type = SECTION_TYPE_RB_AQ;
		}

		/* Skip any non-mmio sections */
		if (sec->section.type != SECTION_TYPE_MMIO)
			continue;

		section_size = sec->section.size +
			sizeof(struct i40e_profile_section_header);

		/* Write MMIO section */
		status = i40e_aq_write_ddp(hw, (void *)sec, (u16)section_size,
					   track_id, &offset, &info, NULL);
		if (status) {
			i40e_debug(hw, I40E_DEBUG_PACKAGE,
				   "Failed to write profile: section %d, offset %d, info %d\n",
				   i, offset, info);
			break;
		}
	}
	return status;
}

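/*
 * Illustrative sketch only: the overall DDP load sequence built from the
 * helpers in this file - locate the i40e segment, download it, then record
 * it in the applied-profile list. The caller supplies the package image and
 * a pinfo_sec buffer of at least sizeof(struct i40e_profile_section_header)
 * + sizeof(struct i40e_profile_info) bytes, as written by
 * i40e_add_pinfo_to_list() below. The helper name is hypothetical and error
 * unwinding is intentionally minimal.
 */
static inline enum i40e_status_code
i40e_sketch_load_ddp(struct i40e_hw *hw, struct i40e_package_header *pkg,
		     u32 track_id, u8 *pinfo_sec)
{
	struct i40e_profile_segment *profile;
	enum i40e_status_code status;

	profile = (struct i40e_profile_segment *)
		i40e_find_segment_in_package(SEGMENT_TYPE_I40E, pkg);
	if (!profile)
		return I40E_ERR_PARAM;

	/* push the AQ and MMIO sections of the profile to the device */
	status = i40e_write_profile(hw, profile, track_id);
	if (status)
		return status;

	/* register the profile so it shows up in the applied-profile list */
	return i40e_add_pinfo_to_list(hw, profile, pinfo_sec, track_id);
}
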
/**
 * i40e_rollback_profile - roll back a previously loaded DDP profile
 * @hw: pointer to the hardware structure
 * @profile: pointer to the profile segment of the package to be removed
 * @track_id: package tracking id
 *
 * Rolls back a previously loaded package.
 */
enum i40e_status_code
i40e_rollback_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
		      u32 track_id)
{
	struct i40e_profile_section_header *sec = NULL;
	i40e_status status = 0;
	struct i40e_section_table *sec_tbl;
	u32 offset = 0, info = 0;
	u32 section_size = 0;
	u32 sec_off;
	int i;

	status = i40e_validate_profile(hw, profile, track_id, true);
	if (status)
		return status;

	I40E_SECTION_TABLE(profile, sec_tbl);

	/* For rollback write sections in reverse */
	for (i = sec_tbl->section_count - 1; i >= 0; i--) {
		sec_off = sec_tbl->section_offset[i];
		sec = I40E_SECTION_HEADER(profile, sec_off);

		/* Skip any non-rollback sections */
		if (sec->section.type != SECTION_TYPE_RB_MMIO)
			continue;

		section_size = sec->section.size +
			sizeof(struct i40e_profile_section_header);

		/* Write roll-back MMIO section */
		status = i40e_aq_write_ddp(hw, (void *)sec, (u16)section_size,
					   track_id, &offset, &info, NULL);
		if (status) {
			i40e_debug(hw, I40E_DEBUG_PACKAGE,
				   "Failed to write profile: section %d, offset %d, info %d\n",
				   i, offset, info);
			break;
		}
	}
	return status;
}

/**
 * i40e_add_pinfo_to_list - add profile info to the applied-profile list
 * @hw: pointer to the hardware structure
 * @profile: pointer to the profile segment of the package
 * @profile_info_sec: buffer for information section
 * @track_id: package tracking id
 *
 * Register a profile to the list of loaded profiles.
 */
enum i40e_status_code
i40e_add_pinfo_to_list(struct i40e_hw *hw,
		       struct i40e_profile_segment *profile,
		       u8 *profile_info_sec, u32 track_id)
{
	i40e_status status = 0;
	struct i40e_profile_section_header *sec = NULL;
	struct i40e_profile_info *pinfo;
	u32 offset = 0, info = 0;

	sec = (struct i40e_profile_section_header *)profile_info_sec;
	sec->tbl_size = 1;
	sec->data_end = sizeof(struct i40e_profile_section_header) +
			sizeof(struct i40e_profile_info);
	sec->section.type = SECTION_TYPE_INFO;
	sec->section.offset = sizeof(struct i40e_profile_section_header);
	sec->section.size = sizeof(struct i40e_profile_info);
	pinfo = (struct i40e_profile_info *)(profile_info_sec +
					     sec->section.offset);
	pinfo->track_id = track_id;
	pinfo->version = profile->version;
	pinfo->op = I40E_DDP_ADD_TRACKID;
	memcpy(pinfo->name, profile->name, I40E_DDP_NAME_SIZE);

	status = i40e_aq_write_ddp(hw, (void *)sec, sec->data_end,
				   track_id, &offset, &info, NULL);

	return status;
}

/**
 * i40e_aq_add_cloud_filters
 * @hw: pointer to the hardware structure
 * @seid: VSI seid to add cloud filters to
 * @filters: Buffer which contains the filters to be added
 * @filter_count: number of filters contained in the buffer
 *
 * Set the cloud filters for a given VSI. The contents of the
 * i40e_aqc_cloud_filters_element_data are filled in by the caller
 * of the function.
 *
 **/
enum i40e_status_code
i40e_aq_add_cloud_filters(struct i40e_hw *hw, u16 seid,
			  struct i40e_aqc_cloud_filters_element_data *filters,
			  u8 filter_count)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_add_remove_cloud_filters *cmd =
		(struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
	enum i40e_status_code status;
	u16 buff_len;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_add_cloud_filters);

	buff_len = filter_count * sizeof(*filters);
	desc.datalen = cpu_to_le16(buff_len);
	desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
	cmd->num_filters = filter_count;
	cmd->seid = cpu_to_le16(seid);

	status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);

	return status;
}

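/*
 * Illustrative sketch only: adding and later removing a single cloud filter
 * on a VSI with the add helper above and the remove helper later in this
 * file. The filter match fields are left for the caller to populate as
 * required by the AdminQ command layout; the helper name and the on_add
 * flag are hypothetical.
 */
static inline enum i40e_status_code
i40e_sketch_one_cloud_filter(struct i40e_hw *hw, u16 vsi_seid,
			     struct i40e_aqc_cloud_filters_element_data *filter,
			     bool on_add)
{
	/* both commands take an array of elements; here it has one entry */
	if (on_add)
		return i40e_aq_add_cloud_filters(hw, vsi_seid, filter, 1);

	return i40e_aq_rem_cloud_filters(hw, vsi_seid, filter, 1);
}
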
/**
 * i40e_aq_add_cloud_filters_bb
 * @hw: pointer to the hardware structure
 * @seid: VSI seid to add cloud filters to
 * @filters: Buffer which contains the filters in big buffer to be added
 * @filter_count: number of filters contained in the buffer
 *
 * Set the big buffer cloud filters for a given VSI. The contents of the
 * i40e_aqc_cloud_filters_element_bb are filled in by the caller of the
 * function.
 *
 **/
enum i40e_status_code
i40e_aq_add_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
			     struct i40e_aqc_cloud_filters_element_bb *filters,
			     u8 filter_count)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_add_remove_cloud_filters *cmd =
		(struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
	i40e_status status;
	u16 buff_len;
	int i;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_add_cloud_filters);

	buff_len = filter_count * sizeof(*filters);
	desc.datalen = cpu_to_le16(buff_len);
	desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
	cmd->num_filters = filter_count;
	cmd->seid = cpu_to_le16(seid);
	cmd->big_buffer_flag = I40E_AQC_ADD_CLOUD_CMD_BB;

	for (i = 0; i < filter_count; i++) {
		u16 tnl_type;
		u32 ti;

		tnl_type = (le16_to_cpu(filters[i].element.flags) &
			    I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >>
			   I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT;

		/* Due to hardware eccentricities, the VNI for Geneve is shifted
		 * one more byte further than normally used for Tenant ID in
		 * other tunnel types.
		 */
		if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) {
			ti = le32_to_cpu(filters[i].element.tenant_id);
			filters[i].element.tenant_id = cpu_to_le32(ti << 8);
		}
	}

	status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);

	return status;
}

/**
 * i40e_aq_rem_cloud_filters
 * @hw: pointer to the hardware structure
 * @seid: VSI seid to remove cloud filters from
 * @filters: Buffer which contains the filters to be removed
 * @filter_count: number of filters contained in the buffer
 *
 * Remove the cloud filters for a given VSI. The contents of the
 * i40e_aqc_cloud_filters_element_data are filled in by the caller
 * of the function.
 *
 **/
enum i40e_status_code
i40e_aq_rem_cloud_filters(struct i40e_hw *hw, u16 seid,
			  struct i40e_aqc_cloud_filters_element_data *filters,
			  u8 filter_count)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_add_remove_cloud_filters *cmd =
		(struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
	enum i40e_status_code status;
	u16 buff_len;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_remove_cloud_filters);

	buff_len = filter_count * sizeof(*filters);
	desc.datalen = cpu_to_le16(buff_len);
	desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
	cmd->num_filters = filter_count;
	cmd->seid = cpu_to_le16(seid);

	status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);

	return status;
}

/**
 * i40e_aq_rem_cloud_filters_bb
 * @hw: pointer to the hardware structure
 * @seid: VSI seid to remove cloud filters from
 * @filters: Buffer which contains the filters in big buffer to be removed
 * @filter_count: number of filters contained in the buffer
 *
 * Remove the big buffer cloud filters for a given VSI. The contents of the
 * i40e_aqc_cloud_filters_element_bb are filled in by the caller of the
 * function.
 *
 **/
enum i40e_status_code
i40e_aq_rem_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
			     struct i40e_aqc_cloud_filters_element_bb *filters,
			     u8 filter_count)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_add_remove_cloud_filters *cmd =
		(struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
	i40e_status status;
	u16 buff_len;
	int i;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_remove_cloud_filters);

	buff_len = filter_count * sizeof(*filters);
	desc.datalen = cpu_to_le16(buff_len);
	desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
	cmd->num_filters = filter_count;
	cmd->seid = cpu_to_le16(seid);
	cmd->big_buffer_flag = I40E_AQC_ADD_CLOUD_CMD_BB;

	for (i = 0; i < filter_count; i++) {
		u16 tnl_type;
		u32 ti;

		tnl_type = (le16_to_cpu(filters[i].element.flags) &
			    I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >>
			   I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT;

		/* Due to hardware eccentricities, the VNI for Geneve is shifted
		 * one more byte further than normally used for Tenant ID in
		 * other tunnel types.
		 */
		if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) {
			ti = le32_to_cpu(filters[i].element.tenant_id);
			filters[i].element.tenant_id = cpu_to_le32(ti << 8);
		}
	}

	status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);

	return status;
}