1 // SPDX-License-Identifier: GPL-2.0 2 /* Copyright(c) 2013 - 2018 Intel Corporation. */ 3 4 #include "i40e_type.h" 5 #include "i40e_adminq.h" 6 #include "i40e_prototype.h" 7 #include <linux/avf/virtchnl.h> 8 9 /** 10 * i40e_set_mac_type - Sets MAC type 11 * @hw: pointer to the HW structure 12 * 13 * This function sets the mac type of the adapter based on the 14 * vendor ID and device ID stored in the hw structure. 15 **/ 16 static i40e_status i40e_set_mac_type(struct i40e_hw *hw) 17 { 18 i40e_status status = 0; 19 20 if (hw->vendor_id == PCI_VENDOR_ID_INTEL) { 21 switch (hw->device_id) { 22 case I40E_DEV_ID_SFP_XL710: 23 case I40E_DEV_ID_QEMU: 24 case I40E_DEV_ID_KX_B: 25 case I40E_DEV_ID_KX_C: 26 case I40E_DEV_ID_QSFP_A: 27 case I40E_DEV_ID_QSFP_B: 28 case I40E_DEV_ID_QSFP_C: 29 case I40E_DEV_ID_10G_BASE_T: 30 case I40E_DEV_ID_10G_BASE_T4: 31 case I40E_DEV_ID_10G_B: 32 case I40E_DEV_ID_10G_SFP: 33 case I40E_DEV_ID_20G_KR2: 34 case I40E_DEV_ID_20G_KR2_A: 35 case I40E_DEV_ID_25G_B: 36 case I40E_DEV_ID_25G_SFP28: 37 case I40E_DEV_ID_X710_N3000: 38 case I40E_DEV_ID_XXV710_N3000: 39 hw->mac.type = I40E_MAC_XL710; 40 break; 41 case I40E_DEV_ID_KX_X722: 42 case I40E_DEV_ID_QSFP_X722: 43 case I40E_DEV_ID_SFP_X722: 44 case I40E_DEV_ID_1G_BASE_T_X722: 45 case I40E_DEV_ID_10G_BASE_T_X722: 46 case I40E_DEV_ID_SFP_I_X722: 47 hw->mac.type = I40E_MAC_X722; 48 break; 49 default: 50 hw->mac.type = I40E_MAC_GENERIC; 51 break; 52 } 53 } else { 54 status = I40E_ERR_DEVICE_NOT_SUPPORTED; 55 } 56 57 hw_dbg(hw, "i40e_set_mac_type found mac: %d, returns: %d\n", 58 hw->mac.type, status); 59 return status; 60 } 61 62 /** 63 * i40e_aq_str - convert AQ err code to a string 64 * @hw: pointer to the HW structure 65 * @aq_err: the AQ error code to convert 66 **/ 67 const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err) 68 { 69 switch (aq_err) { 70 case I40E_AQ_RC_OK: 71 return "OK"; 72 case I40E_AQ_RC_EPERM: 73 return "I40E_AQ_RC_EPERM"; 74 case I40E_AQ_RC_ENOENT: 75 return "I40E_AQ_RC_ENOENT"; 76 case I40E_AQ_RC_ESRCH: 77 return "I40E_AQ_RC_ESRCH"; 78 case I40E_AQ_RC_EINTR: 79 return "I40E_AQ_RC_EINTR"; 80 case I40E_AQ_RC_EIO: 81 return "I40E_AQ_RC_EIO"; 82 case I40E_AQ_RC_ENXIO: 83 return "I40E_AQ_RC_ENXIO"; 84 case I40E_AQ_RC_E2BIG: 85 return "I40E_AQ_RC_E2BIG"; 86 case I40E_AQ_RC_EAGAIN: 87 return "I40E_AQ_RC_EAGAIN"; 88 case I40E_AQ_RC_ENOMEM: 89 return "I40E_AQ_RC_ENOMEM"; 90 case I40E_AQ_RC_EACCES: 91 return "I40E_AQ_RC_EACCES"; 92 case I40E_AQ_RC_EFAULT: 93 return "I40E_AQ_RC_EFAULT"; 94 case I40E_AQ_RC_EBUSY: 95 return "I40E_AQ_RC_EBUSY"; 96 case I40E_AQ_RC_EEXIST: 97 return "I40E_AQ_RC_EEXIST"; 98 case I40E_AQ_RC_EINVAL: 99 return "I40E_AQ_RC_EINVAL"; 100 case I40E_AQ_RC_ENOTTY: 101 return "I40E_AQ_RC_ENOTTY"; 102 case I40E_AQ_RC_ENOSPC: 103 return "I40E_AQ_RC_ENOSPC"; 104 case I40E_AQ_RC_ENOSYS: 105 return "I40E_AQ_RC_ENOSYS"; 106 case I40E_AQ_RC_ERANGE: 107 return "I40E_AQ_RC_ERANGE"; 108 case I40E_AQ_RC_EFLUSHED: 109 return "I40E_AQ_RC_EFLUSHED"; 110 case I40E_AQ_RC_BAD_ADDR: 111 return "I40E_AQ_RC_BAD_ADDR"; 112 case I40E_AQ_RC_EMODE: 113 return "I40E_AQ_RC_EMODE"; 114 case I40E_AQ_RC_EFBIG: 115 return "I40E_AQ_RC_EFBIG"; 116 } 117 118 snprintf(hw->err_str, sizeof(hw->err_str), "%d", aq_err); 119 return hw->err_str; 120 } 121 122 /** 123 * i40e_stat_str - convert status err code to a string 124 * @hw: pointer to the HW structure 125 * @stat_err: the status error code to convert 126 **/ 127 const char *i40e_stat_str(struct i40e_hw *hw, i40e_status stat_err) 128 { 129 switch 
(stat_err) { 130 case 0: 131 return "OK"; 132 case I40E_ERR_NVM: 133 return "I40E_ERR_NVM"; 134 case I40E_ERR_NVM_CHECKSUM: 135 return "I40E_ERR_NVM_CHECKSUM"; 136 case I40E_ERR_PHY: 137 return "I40E_ERR_PHY"; 138 case I40E_ERR_CONFIG: 139 return "I40E_ERR_CONFIG"; 140 case I40E_ERR_PARAM: 141 return "I40E_ERR_PARAM"; 142 case I40E_ERR_MAC_TYPE: 143 return "I40E_ERR_MAC_TYPE"; 144 case I40E_ERR_UNKNOWN_PHY: 145 return "I40E_ERR_UNKNOWN_PHY"; 146 case I40E_ERR_LINK_SETUP: 147 return "I40E_ERR_LINK_SETUP"; 148 case I40E_ERR_ADAPTER_STOPPED: 149 return "I40E_ERR_ADAPTER_STOPPED"; 150 case I40E_ERR_INVALID_MAC_ADDR: 151 return "I40E_ERR_INVALID_MAC_ADDR"; 152 case I40E_ERR_DEVICE_NOT_SUPPORTED: 153 return "I40E_ERR_DEVICE_NOT_SUPPORTED"; 154 case I40E_ERR_MASTER_REQUESTS_PENDING: 155 return "I40E_ERR_MASTER_REQUESTS_PENDING"; 156 case I40E_ERR_INVALID_LINK_SETTINGS: 157 return "I40E_ERR_INVALID_LINK_SETTINGS"; 158 case I40E_ERR_AUTONEG_NOT_COMPLETE: 159 return "I40E_ERR_AUTONEG_NOT_COMPLETE"; 160 case I40E_ERR_RESET_FAILED: 161 return "I40E_ERR_RESET_FAILED"; 162 case I40E_ERR_SWFW_SYNC: 163 return "I40E_ERR_SWFW_SYNC"; 164 case I40E_ERR_NO_AVAILABLE_VSI: 165 return "I40E_ERR_NO_AVAILABLE_VSI"; 166 case I40E_ERR_NO_MEMORY: 167 return "I40E_ERR_NO_MEMORY"; 168 case I40E_ERR_BAD_PTR: 169 return "I40E_ERR_BAD_PTR"; 170 case I40E_ERR_RING_FULL: 171 return "I40E_ERR_RING_FULL"; 172 case I40E_ERR_INVALID_PD_ID: 173 return "I40E_ERR_INVALID_PD_ID"; 174 case I40E_ERR_INVALID_QP_ID: 175 return "I40E_ERR_INVALID_QP_ID"; 176 case I40E_ERR_INVALID_CQ_ID: 177 return "I40E_ERR_INVALID_CQ_ID"; 178 case I40E_ERR_INVALID_CEQ_ID: 179 return "I40E_ERR_INVALID_CEQ_ID"; 180 case I40E_ERR_INVALID_AEQ_ID: 181 return "I40E_ERR_INVALID_AEQ_ID"; 182 case I40E_ERR_INVALID_SIZE: 183 return "I40E_ERR_INVALID_SIZE"; 184 case I40E_ERR_INVALID_ARP_INDEX: 185 return "I40E_ERR_INVALID_ARP_INDEX"; 186 case I40E_ERR_INVALID_FPM_FUNC_ID: 187 return "I40E_ERR_INVALID_FPM_FUNC_ID"; 188 case I40E_ERR_QP_INVALID_MSG_SIZE: 189 return "I40E_ERR_QP_INVALID_MSG_SIZE"; 190 case I40E_ERR_QP_TOOMANY_WRS_POSTED: 191 return "I40E_ERR_QP_TOOMANY_WRS_POSTED"; 192 case I40E_ERR_INVALID_FRAG_COUNT: 193 return "I40E_ERR_INVALID_FRAG_COUNT"; 194 case I40E_ERR_QUEUE_EMPTY: 195 return "I40E_ERR_QUEUE_EMPTY"; 196 case I40E_ERR_INVALID_ALIGNMENT: 197 return "I40E_ERR_INVALID_ALIGNMENT"; 198 case I40E_ERR_FLUSHED_QUEUE: 199 return "I40E_ERR_FLUSHED_QUEUE"; 200 case I40E_ERR_INVALID_PUSH_PAGE_INDEX: 201 return "I40E_ERR_INVALID_PUSH_PAGE_INDEX"; 202 case I40E_ERR_INVALID_IMM_DATA_SIZE: 203 return "I40E_ERR_INVALID_IMM_DATA_SIZE"; 204 case I40E_ERR_TIMEOUT: 205 return "I40E_ERR_TIMEOUT"; 206 case I40E_ERR_OPCODE_MISMATCH: 207 return "I40E_ERR_OPCODE_MISMATCH"; 208 case I40E_ERR_CQP_COMPL_ERROR: 209 return "I40E_ERR_CQP_COMPL_ERROR"; 210 case I40E_ERR_INVALID_VF_ID: 211 return "I40E_ERR_INVALID_VF_ID"; 212 case I40E_ERR_INVALID_HMCFN_ID: 213 return "I40E_ERR_INVALID_HMCFN_ID"; 214 case I40E_ERR_BACKING_PAGE_ERROR: 215 return "I40E_ERR_BACKING_PAGE_ERROR"; 216 case I40E_ERR_NO_PBLCHUNKS_AVAILABLE: 217 return "I40E_ERR_NO_PBLCHUNKS_AVAILABLE"; 218 case I40E_ERR_INVALID_PBLE_INDEX: 219 return "I40E_ERR_INVALID_PBLE_INDEX"; 220 case I40E_ERR_INVALID_SD_INDEX: 221 return "I40E_ERR_INVALID_SD_INDEX"; 222 case I40E_ERR_INVALID_PAGE_DESC_INDEX: 223 return "I40E_ERR_INVALID_PAGE_DESC_INDEX"; 224 case I40E_ERR_INVALID_SD_TYPE: 225 return "I40E_ERR_INVALID_SD_TYPE"; 226 case I40E_ERR_MEMCPY_FAILED: 227 return "I40E_ERR_MEMCPY_FAILED"; 228 case 
I40E_ERR_INVALID_HMC_OBJ_INDEX: 229 return "I40E_ERR_INVALID_HMC_OBJ_INDEX"; 230 case I40E_ERR_INVALID_HMC_OBJ_COUNT: 231 return "I40E_ERR_INVALID_HMC_OBJ_COUNT"; 232 case I40E_ERR_INVALID_SRQ_ARM_LIMIT: 233 return "I40E_ERR_INVALID_SRQ_ARM_LIMIT"; 234 case I40E_ERR_SRQ_ENABLED: 235 return "I40E_ERR_SRQ_ENABLED"; 236 case I40E_ERR_ADMIN_QUEUE_ERROR: 237 return "I40E_ERR_ADMIN_QUEUE_ERROR"; 238 case I40E_ERR_ADMIN_QUEUE_TIMEOUT: 239 return "I40E_ERR_ADMIN_QUEUE_TIMEOUT"; 240 case I40E_ERR_BUF_TOO_SHORT: 241 return "I40E_ERR_BUF_TOO_SHORT"; 242 case I40E_ERR_ADMIN_QUEUE_FULL: 243 return "I40E_ERR_ADMIN_QUEUE_FULL"; 244 case I40E_ERR_ADMIN_QUEUE_NO_WORK: 245 return "I40E_ERR_ADMIN_QUEUE_NO_WORK"; 246 case I40E_ERR_BAD_IWARP_CQE: 247 return "I40E_ERR_BAD_IWARP_CQE"; 248 case I40E_ERR_NVM_BLANK_MODE: 249 return "I40E_ERR_NVM_BLANK_MODE"; 250 case I40E_ERR_NOT_IMPLEMENTED: 251 return "I40E_ERR_NOT_IMPLEMENTED"; 252 case I40E_ERR_PE_DOORBELL_NOT_ENABLED: 253 return "I40E_ERR_PE_DOORBELL_NOT_ENABLED"; 254 case I40E_ERR_DIAG_TEST_FAILED: 255 return "I40E_ERR_DIAG_TEST_FAILED"; 256 case I40E_ERR_NOT_READY: 257 return "I40E_ERR_NOT_READY"; 258 case I40E_NOT_SUPPORTED: 259 return "I40E_NOT_SUPPORTED"; 260 case I40E_ERR_FIRMWARE_API_VERSION: 261 return "I40E_ERR_FIRMWARE_API_VERSION"; 262 case I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR: 263 return "I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR"; 264 } 265 266 snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err); 267 return hw->err_str; 268 } 269 270 /** 271 * i40e_debug_aq 272 * @hw: debug mask related to admin queue 273 * @mask: debug mask 274 * @desc: pointer to admin queue descriptor 275 * @buffer: pointer to command buffer 276 * @buf_len: max length of buffer 277 * 278 * Dumps debug log about adminq command with descriptor contents. 
279 **/ 280 void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc, 281 void *buffer, u16 buf_len) 282 { 283 struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc; 284 u32 effective_mask = hw->debug_mask & mask; 285 char prefix[27]; 286 u16 len; 287 u8 *buf = (u8 *)buffer; 288 289 if (!effective_mask || !desc) 290 return; 291 292 len = le16_to_cpu(aq_desc->datalen); 293 294 i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR, 295 "AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n", 296 le16_to_cpu(aq_desc->opcode), 297 le16_to_cpu(aq_desc->flags), 298 le16_to_cpu(aq_desc->datalen), 299 le16_to_cpu(aq_desc->retval)); 300 i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR, 301 "\tcookie (h,l) 0x%08X 0x%08X\n", 302 le32_to_cpu(aq_desc->cookie_high), 303 le32_to_cpu(aq_desc->cookie_low)); 304 i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR, 305 "\tparam (0,1) 0x%08X 0x%08X\n", 306 le32_to_cpu(aq_desc->params.internal.param0), 307 le32_to_cpu(aq_desc->params.internal.param1)); 308 i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR, 309 "\taddr (h,l) 0x%08X 0x%08X\n", 310 le32_to_cpu(aq_desc->params.external.addr_high), 311 le32_to_cpu(aq_desc->params.external.addr_low)); 312 313 if (buffer && buf_len != 0 && len != 0 && 314 (effective_mask & I40E_DEBUG_AQ_DESC_BUFFER)) { 315 i40e_debug(hw, mask, "AQ CMD Buffer:\n"); 316 if (buf_len < len) 317 len = buf_len; 318 319 snprintf(prefix, sizeof(prefix), 320 "i40e %02x:%02x.%x: \t0x", 321 hw->bus.bus_id, 322 hw->bus.device, 323 hw->bus.func); 324 325 print_hex_dump(KERN_INFO, prefix, DUMP_PREFIX_OFFSET, 326 16, 1, buf, len, false); 327 } 328 } 329 330 /** 331 * i40e_check_asq_alive 332 * @hw: pointer to the hw struct 333 * 334 * Returns true if Queue is enabled else false. 335 **/ 336 bool i40e_check_asq_alive(struct i40e_hw *hw) 337 { 338 if (hw->aq.asq.len) 339 return !!(rd32(hw, hw->aq.asq.len) & 340 I40E_PF_ATQLEN_ATQENABLE_MASK); 341 else 342 return false; 343 } 344 345 /** 346 * i40e_aq_queue_shutdown 347 * @hw: pointer to the hw struct 348 * @unloading: is the driver unloading itself 349 * 350 * Tell the Firmware that we're shutting down the AdminQ and whether 351 * or not the driver is unloading as well. 
352 **/ 353 i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw, 354 bool unloading) 355 { 356 struct i40e_aq_desc desc; 357 struct i40e_aqc_queue_shutdown *cmd = 358 (struct i40e_aqc_queue_shutdown *)&desc.params.raw; 359 i40e_status status; 360 361 i40e_fill_default_direct_cmd_desc(&desc, 362 i40e_aqc_opc_queue_shutdown); 363 364 if (unloading) 365 cmd->driver_unloading = cpu_to_le32(I40E_AQ_DRIVER_UNLOADING); 366 status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL); 367 368 return status; 369 } 370 371 /** 372 * i40e_aq_get_set_rss_lut 373 * @hw: pointer to the hardware structure 374 * @vsi_id: vsi fw index 375 * @pf_lut: for PF table set true, for VSI table set false 376 * @lut: pointer to the lut buffer provided by the caller 377 * @lut_size: size of the lut buffer 378 * @set: set true to set the table, false to get the table 379 * 380 * Internal function to get or set RSS look up table 381 **/ 382 static i40e_status i40e_aq_get_set_rss_lut(struct i40e_hw *hw, 383 u16 vsi_id, bool pf_lut, 384 u8 *lut, u16 lut_size, 385 bool set) 386 { 387 i40e_status status; 388 struct i40e_aq_desc desc; 389 struct i40e_aqc_get_set_rss_lut *cmd_resp = 390 (struct i40e_aqc_get_set_rss_lut *)&desc.params.raw; 391 392 if (set) 393 i40e_fill_default_direct_cmd_desc(&desc, 394 i40e_aqc_opc_set_rss_lut); 395 else 396 i40e_fill_default_direct_cmd_desc(&desc, 397 i40e_aqc_opc_get_rss_lut); 398 399 /* Indirect command */ 400 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 401 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD); 402 403 cmd_resp->vsi_id = 404 cpu_to_le16((u16)((vsi_id << 405 I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT) & 406 I40E_AQC_SET_RSS_LUT_VSI_ID_MASK)); 407 cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_LUT_VSI_VALID); 408 409 if (pf_lut) 410 cmd_resp->flags |= cpu_to_le16((u16) 411 ((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF << 412 I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) & 413 I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK)); 414 else 415 cmd_resp->flags |= cpu_to_le16((u16) 416 ((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI << 417 I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) & 418 I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK)); 419 420 status = i40e_asq_send_command(hw, &desc, lut, lut_size, NULL); 421 422 return status; 423 } 424 425 /** 426 * i40e_aq_get_rss_lut 427 * @hw: pointer to the hardware structure 428 * @vsi_id: vsi fw index 429 * @pf_lut: for PF table set true, for VSI table set false 430 * @lut: pointer to the lut buffer provided by the caller 431 * @lut_size: size of the lut buffer 432 * 433 * get the RSS lookup table, PF or VSI type 434 **/ 435 i40e_status i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 vsi_id, 436 bool pf_lut, u8 *lut, u16 lut_size) 437 { 438 return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, 439 false); 440 } 441 442 /** 443 * i40e_aq_set_rss_lut 444 * @hw: pointer to the hardware structure 445 * @vsi_id: vsi fw index 446 * @pf_lut: for PF table set true, for VSI table set false 447 * @lut: pointer to the lut buffer provided by the caller 448 * @lut_size: size of the lut buffer 449 * 450 * set the RSS lookup table, PF or VSI type 451 **/ 452 i40e_status i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 vsi_id, 453 bool pf_lut, u8 *lut, u16 lut_size) 454 { 455 return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, true); 456 } 457 458 /** 459 * i40e_aq_get_set_rss_key 460 * @hw: pointer to the hw struct 461 * @vsi_id: vsi fw index 462 * @key: pointer to key info struct 463 * @set: set true to set the key, false to get the key 464 * 465 * get the RSS key per VSI 466 **/ 467 
static i40e_status i40e_aq_get_set_rss_key(struct i40e_hw *hw, 468 u16 vsi_id, 469 struct i40e_aqc_get_set_rss_key_data *key, 470 bool set) 471 { 472 i40e_status status; 473 struct i40e_aq_desc desc; 474 struct i40e_aqc_get_set_rss_key *cmd_resp = 475 (struct i40e_aqc_get_set_rss_key *)&desc.params.raw; 476 u16 key_size = sizeof(struct i40e_aqc_get_set_rss_key_data); 477 478 if (set) 479 i40e_fill_default_direct_cmd_desc(&desc, 480 i40e_aqc_opc_set_rss_key); 481 else 482 i40e_fill_default_direct_cmd_desc(&desc, 483 i40e_aqc_opc_get_rss_key); 484 485 /* Indirect command */ 486 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 487 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD); 488 489 cmd_resp->vsi_id = 490 cpu_to_le16((u16)((vsi_id << 491 I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) & 492 I40E_AQC_SET_RSS_KEY_VSI_ID_MASK)); 493 cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_KEY_VSI_VALID); 494 495 status = i40e_asq_send_command(hw, &desc, key, key_size, NULL); 496 497 return status; 498 } 499 500 /** 501 * i40e_aq_get_rss_key 502 * @hw: pointer to the hw struct 503 * @vsi_id: vsi fw index 504 * @key: pointer to key info struct 505 * 506 **/ 507 i40e_status i40e_aq_get_rss_key(struct i40e_hw *hw, 508 u16 vsi_id, 509 struct i40e_aqc_get_set_rss_key_data *key) 510 { 511 return i40e_aq_get_set_rss_key(hw, vsi_id, key, false); 512 } 513 514 /** 515 * i40e_aq_set_rss_key 516 * @hw: pointer to the hw struct 517 * @vsi_id: vsi fw index 518 * @key: pointer to key info struct 519 * 520 * set the RSS key per VSI 521 **/ 522 i40e_status i40e_aq_set_rss_key(struct i40e_hw *hw, 523 u16 vsi_id, 524 struct i40e_aqc_get_set_rss_key_data *key) 525 { 526 return i40e_aq_get_set_rss_key(hw, vsi_id, key, true); 527 } 528 529 /* The i40e_ptype_lookup table is used to convert from the 8-bit ptype in the 530 * hardware to a bit-field that can be used by SW to more easily determine the 531 * packet type. 532 * 533 * Macros are used to shorten the table lines and make this table human 534 * readable. 535 * 536 * We store the PTYPE in the top byte of the bit field - this is just so that 537 * we can check that the table doesn't have a row missing, as the index into 538 * the table should be the PTYPE. 
539 * 540 * Typical work flow: 541 * 542 * IF NOT i40e_ptype_lookup[ptype].known 543 * THEN 544 * Packet is unknown 545 * ELSE IF i40e_ptype_lookup[ptype].outer_ip == I40E_RX_PTYPE_OUTER_IP 546 * Use the rest of the fields to look at the tunnels, inner protocols, etc 547 * ELSE 548 * Use the enum i40e_rx_l2_ptype to decode the packet type 549 * ENDIF 550 */ 551 552 /* macro to make the table lines short */ 553 #define I40E_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\ 554 { PTYPE, \ 555 1, \ 556 I40E_RX_PTYPE_OUTER_##OUTER_IP, \ 557 I40E_RX_PTYPE_OUTER_##OUTER_IP_VER, \ 558 I40E_RX_PTYPE_##OUTER_FRAG, \ 559 I40E_RX_PTYPE_TUNNEL_##T, \ 560 I40E_RX_PTYPE_TUNNEL_END_##TE, \ 561 I40E_RX_PTYPE_##TEF, \ 562 I40E_RX_PTYPE_INNER_PROT_##I, \ 563 I40E_RX_PTYPE_PAYLOAD_LAYER_##PL } 564 565 #define I40E_PTT_UNUSED_ENTRY(PTYPE) \ 566 { PTYPE, 0, 0, 0, 0, 0, 0, 0, 0, 0 } 567 568 /* shorter macros makes the table fit but are terse */ 569 #define I40E_RX_PTYPE_NOF I40E_RX_PTYPE_NOT_FRAG 570 #define I40E_RX_PTYPE_FRG I40E_RX_PTYPE_FRAG 571 #define I40E_RX_PTYPE_INNER_PROT_TS I40E_RX_PTYPE_INNER_PROT_TIMESYNC 572 573 /* Lookup table mapping the HW PTYPE to the bit field for decoding */ 574 struct i40e_rx_ptype_decoded i40e_ptype_lookup[] = { 575 /* L2 Packet types */ 576 I40E_PTT_UNUSED_ENTRY(0), 577 I40E_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), 578 I40E_PTT(2, L2, NONE, NOF, NONE, NONE, NOF, TS, PAY2), 579 I40E_PTT(3, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), 580 I40E_PTT_UNUSED_ENTRY(4), 581 I40E_PTT_UNUSED_ENTRY(5), 582 I40E_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), 583 I40E_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), 584 I40E_PTT_UNUSED_ENTRY(8), 585 I40E_PTT_UNUSED_ENTRY(9), 586 I40E_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), 587 I40E_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), 588 I40E_PTT(12, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), 589 I40E_PTT(13, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), 590 I40E_PTT(14, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), 591 I40E_PTT(15, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), 592 I40E_PTT(16, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), 593 I40E_PTT(17, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), 594 I40E_PTT(18, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), 595 I40E_PTT(19, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), 596 I40E_PTT(20, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), 597 I40E_PTT(21, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), 598 599 /* Non Tunneled IPv4 */ 600 I40E_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3), 601 I40E_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3), 602 I40E_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4), 603 I40E_PTT_UNUSED_ENTRY(25), 604 I40E_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4), 605 I40E_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4), 606 I40E_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4), 607 608 /* IPv4 --> IPv4 */ 609 I40E_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3), 610 I40E_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3), 611 I40E_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4), 612 I40E_PTT_UNUSED_ENTRY(32), 613 I40E_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4), 614 I40E_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4), 615 I40E_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4), 616 617 /* IPv4 --> IPv6 */ 618 I40E_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3), 619 I40E_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3), 620 I40E_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, 
NOF, UDP, PAY4), 621 I40E_PTT_UNUSED_ENTRY(39), 622 I40E_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4), 623 I40E_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4), 624 I40E_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4), 625 626 /* IPv4 --> GRE/NAT */ 627 I40E_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3), 628 629 /* IPv4 --> GRE/NAT --> IPv4 */ 630 I40E_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3), 631 I40E_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3), 632 I40E_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4), 633 I40E_PTT_UNUSED_ENTRY(47), 634 I40E_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4), 635 I40E_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4), 636 I40E_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4), 637 638 /* IPv4 --> GRE/NAT --> IPv6 */ 639 I40E_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3), 640 I40E_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3), 641 I40E_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4), 642 I40E_PTT_UNUSED_ENTRY(54), 643 I40E_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4), 644 I40E_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4), 645 I40E_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4), 646 647 /* IPv4 --> GRE/NAT --> MAC */ 648 I40E_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3), 649 650 /* IPv4 --> GRE/NAT --> MAC --> IPv4 */ 651 I40E_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3), 652 I40E_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3), 653 I40E_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4), 654 I40E_PTT_UNUSED_ENTRY(62), 655 I40E_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4), 656 I40E_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4), 657 I40E_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4), 658 659 /* IPv4 --> GRE/NAT -> MAC --> IPv6 */ 660 I40E_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3), 661 I40E_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3), 662 I40E_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4), 663 I40E_PTT_UNUSED_ENTRY(69), 664 I40E_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4), 665 I40E_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4), 666 I40E_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4), 667 668 /* IPv4 --> GRE/NAT --> MAC/VLAN */ 669 I40E_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3), 670 671 /* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */ 672 I40E_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3), 673 I40E_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3), 674 I40E_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4), 675 I40E_PTT_UNUSED_ENTRY(77), 676 I40E_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4), 677 I40E_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4), 678 I40E_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4), 679 680 /* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */ 681 I40E_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3), 682 I40E_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3), 683 I40E_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4), 684 I40E_PTT_UNUSED_ENTRY(84), 685 I40E_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4), 686 I40E_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4), 687 I40E_PTT(87, IP, IPV4, NOF, 
IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4), 688 689 /* Non Tunneled IPv6 */ 690 I40E_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3), 691 I40E_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3), 692 I40E_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4), 693 I40E_PTT_UNUSED_ENTRY(91), 694 I40E_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4), 695 I40E_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4), 696 I40E_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4), 697 698 /* IPv6 --> IPv4 */ 699 I40E_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3), 700 I40E_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3), 701 I40E_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4), 702 I40E_PTT_UNUSED_ENTRY(98), 703 I40E_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4), 704 I40E_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4), 705 I40E_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4), 706 707 /* IPv6 --> IPv6 */ 708 I40E_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3), 709 I40E_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3), 710 I40E_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4), 711 I40E_PTT_UNUSED_ENTRY(105), 712 I40E_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4), 713 I40E_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4), 714 I40E_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4), 715 716 /* IPv6 --> GRE/NAT */ 717 I40E_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3), 718 719 /* IPv6 --> GRE/NAT -> IPv4 */ 720 I40E_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3), 721 I40E_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3), 722 I40E_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4), 723 I40E_PTT_UNUSED_ENTRY(113), 724 I40E_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4), 725 I40E_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4), 726 I40E_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4), 727 728 /* IPv6 --> GRE/NAT -> IPv6 */ 729 I40E_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3), 730 I40E_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3), 731 I40E_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4), 732 I40E_PTT_UNUSED_ENTRY(120), 733 I40E_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4), 734 I40E_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4), 735 I40E_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4), 736 737 /* IPv6 --> GRE/NAT -> MAC */ 738 I40E_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3), 739 740 /* IPv6 --> GRE/NAT -> MAC -> IPv4 */ 741 I40E_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3), 742 I40E_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3), 743 I40E_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4), 744 I40E_PTT_UNUSED_ENTRY(128), 745 I40E_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4), 746 I40E_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4), 747 I40E_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4), 748 749 /* IPv6 --> GRE/NAT -> MAC -> IPv6 */ 750 I40E_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3), 751 I40E_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3), 752 I40E_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4), 753 I40E_PTT_UNUSED_ENTRY(135), 754 I40E_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4), 755 I40E_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4), 756 I40E_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4), 757 758 /* IPv6 --> 
GRE/NAT -> MAC/VLAN */ 759 I40E_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3), 760 761 /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */ 762 I40E_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3), 763 I40E_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3), 764 I40E_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4), 765 I40E_PTT_UNUSED_ENTRY(143), 766 I40E_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4), 767 I40E_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4), 768 I40E_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4), 769 770 /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */ 771 I40E_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3), 772 I40E_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3), 773 I40E_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4), 774 I40E_PTT_UNUSED_ENTRY(150), 775 I40E_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4), 776 I40E_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4), 777 I40E_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4), 778 779 /* unused entries */ 780 I40E_PTT_UNUSED_ENTRY(154), 781 I40E_PTT_UNUSED_ENTRY(155), 782 I40E_PTT_UNUSED_ENTRY(156), 783 I40E_PTT_UNUSED_ENTRY(157), 784 I40E_PTT_UNUSED_ENTRY(158), 785 I40E_PTT_UNUSED_ENTRY(159), 786 787 I40E_PTT_UNUSED_ENTRY(160), 788 I40E_PTT_UNUSED_ENTRY(161), 789 I40E_PTT_UNUSED_ENTRY(162), 790 I40E_PTT_UNUSED_ENTRY(163), 791 I40E_PTT_UNUSED_ENTRY(164), 792 I40E_PTT_UNUSED_ENTRY(165), 793 I40E_PTT_UNUSED_ENTRY(166), 794 I40E_PTT_UNUSED_ENTRY(167), 795 I40E_PTT_UNUSED_ENTRY(168), 796 I40E_PTT_UNUSED_ENTRY(169), 797 798 I40E_PTT_UNUSED_ENTRY(170), 799 I40E_PTT_UNUSED_ENTRY(171), 800 I40E_PTT_UNUSED_ENTRY(172), 801 I40E_PTT_UNUSED_ENTRY(173), 802 I40E_PTT_UNUSED_ENTRY(174), 803 I40E_PTT_UNUSED_ENTRY(175), 804 I40E_PTT_UNUSED_ENTRY(176), 805 I40E_PTT_UNUSED_ENTRY(177), 806 I40E_PTT_UNUSED_ENTRY(178), 807 I40E_PTT_UNUSED_ENTRY(179), 808 809 I40E_PTT_UNUSED_ENTRY(180), 810 I40E_PTT_UNUSED_ENTRY(181), 811 I40E_PTT_UNUSED_ENTRY(182), 812 I40E_PTT_UNUSED_ENTRY(183), 813 I40E_PTT_UNUSED_ENTRY(184), 814 I40E_PTT_UNUSED_ENTRY(185), 815 I40E_PTT_UNUSED_ENTRY(186), 816 I40E_PTT_UNUSED_ENTRY(187), 817 I40E_PTT_UNUSED_ENTRY(188), 818 I40E_PTT_UNUSED_ENTRY(189), 819 820 I40E_PTT_UNUSED_ENTRY(190), 821 I40E_PTT_UNUSED_ENTRY(191), 822 I40E_PTT_UNUSED_ENTRY(192), 823 I40E_PTT_UNUSED_ENTRY(193), 824 I40E_PTT_UNUSED_ENTRY(194), 825 I40E_PTT_UNUSED_ENTRY(195), 826 I40E_PTT_UNUSED_ENTRY(196), 827 I40E_PTT_UNUSED_ENTRY(197), 828 I40E_PTT_UNUSED_ENTRY(198), 829 I40E_PTT_UNUSED_ENTRY(199), 830 831 I40E_PTT_UNUSED_ENTRY(200), 832 I40E_PTT_UNUSED_ENTRY(201), 833 I40E_PTT_UNUSED_ENTRY(202), 834 I40E_PTT_UNUSED_ENTRY(203), 835 I40E_PTT_UNUSED_ENTRY(204), 836 I40E_PTT_UNUSED_ENTRY(205), 837 I40E_PTT_UNUSED_ENTRY(206), 838 I40E_PTT_UNUSED_ENTRY(207), 839 I40E_PTT_UNUSED_ENTRY(208), 840 I40E_PTT_UNUSED_ENTRY(209), 841 842 I40E_PTT_UNUSED_ENTRY(210), 843 I40E_PTT_UNUSED_ENTRY(211), 844 I40E_PTT_UNUSED_ENTRY(212), 845 I40E_PTT_UNUSED_ENTRY(213), 846 I40E_PTT_UNUSED_ENTRY(214), 847 I40E_PTT_UNUSED_ENTRY(215), 848 I40E_PTT_UNUSED_ENTRY(216), 849 I40E_PTT_UNUSED_ENTRY(217), 850 I40E_PTT_UNUSED_ENTRY(218), 851 I40E_PTT_UNUSED_ENTRY(219), 852 853 I40E_PTT_UNUSED_ENTRY(220), 854 I40E_PTT_UNUSED_ENTRY(221), 855 I40E_PTT_UNUSED_ENTRY(222), 856 I40E_PTT_UNUSED_ENTRY(223), 857 I40E_PTT_UNUSED_ENTRY(224), 858 I40E_PTT_UNUSED_ENTRY(225), 859 
I40E_PTT_UNUSED_ENTRY(226), 860 I40E_PTT_UNUSED_ENTRY(227), 861 I40E_PTT_UNUSED_ENTRY(228), 862 I40E_PTT_UNUSED_ENTRY(229), 863 864 I40E_PTT_UNUSED_ENTRY(230), 865 I40E_PTT_UNUSED_ENTRY(231), 866 I40E_PTT_UNUSED_ENTRY(232), 867 I40E_PTT_UNUSED_ENTRY(233), 868 I40E_PTT_UNUSED_ENTRY(234), 869 I40E_PTT_UNUSED_ENTRY(235), 870 I40E_PTT_UNUSED_ENTRY(236), 871 I40E_PTT_UNUSED_ENTRY(237), 872 I40E_PTT_UNUSED_ENTRY(238), 873 I40E_PTT_UNUSED_ENTRY(239), 874 875 I40E_PTT_UNUSED_ENTRY(240), 876 I40E_PTT_UNUSED_ENTRY(241), 877 I40E_PTT_UNUSED_ENTRY(242), 878 I40E_PTT_UNUSED_ENTRY(243), 879 I40E_PTT_UNUSED_ENTRY(244), 880 I40E_PTT_UNUSED_ENTRY(245), 881 I40E_PTT_UNUSED_ENTRY(246), 882 I40E_PTT_UNUSED_ENTRY(247), 883 I40E_PTT_UNUSED_ENTRY(248), 884 I40E_PTT_UNUSED_ENTRY(249), 885 886 I40E_PTT_UNUSED_ENTRY(250), 887 I40E_PTT_UNUSED_ENTRY(251), 888 I40E_PTT_UNUSED_ENTRY(252), 889 I40E_PTT_UNUSED_ENTRY(253), 890 I40E_PTT_UNUSED_ENTRY(254), 891 I40E_PTT_UNUSED_ENTRY(255) 892 }; 893 894 /** 895 * i40e_init_shared_code - Initialize the shared code 896 * @hw: pointer to hardware structure 897 * 898 * This assigns the MAC type and PHY code and inits the NVM. 899 * Does not touch the hardware. This function must be called prior to any 900 * other function in the shared code. The i40e_hw structure should be 901 * memset to 0 prior to calling this function. The following fields in 902 * hw structure should be filled in prior to calling this function: 903 * hw_addr, back, device_id, vendor_id, subsystem_device_id, 904 * subsystem_vendor_id, and revision_id 905 **/ 906 i40e_status i40e_init_shared_code(struct i40e_hw *hw) 907 { 908 i40e_status status = 0; 909 u32 port, ari, func_rid; 910 911 i40e_set_mac_type(hw); 912 913 switch (hw->mac.type) { 914 case I40E_MAC_XL710: 915 case I40E_MAC_X722: 916 break; 917 default: 918 return I40E_ERR_DEVICE_NOT_SUPPORTED; 919 } 920 921 hw->phy.get_link_info = true; 922 923 /* Determine port number and PF number*/ 924 port = (rd32(hw, I40E_PFGEN_PORTNUM) & I40E_PFGEN_PORTNUM_PORT_NUM_MASK) 925 >> I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT; 926 hw->port = (u8)port; 927 ari = (rd32(hw, I40E_GLPCI_CAPSUP) & I40E_GLPCI_CAPSUP_ARI_EN_MASK) >> 928 I40E_GLPCI_CAPSUP_ARI_EN_SHIFT; 929 func_rid = rd32(hw, I40E_PF_FUNC_RID); 930 if (ari) 931 hw->pf_id = (u8)(func_rid & 0xff); 932 else 933 hw->pf_id = (u8)(func_rid & 0x7); 934 935 if (hw->mac.type == I40E_MAC_X722) 936 hw->flags |= I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE | 937 I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK; 938 939 status = i40e_init_nvm(hw); 940 return status; 941 } 942 943 /** 944 * i40e_aq_mac_address_read - Retrieve the MAC addresses 945 * @hw: pointer to the hw struct 946 * @flags: a return indicator of what addresses were added to the addr store 947 * @addrs: the requestor's mac addr store 948 * @cmd_details: pointer to command details structure or NULL 949 **/ 950 static i40e_status i40e_aq_mac_address_read(struct i40e_hw *hw, 951 u16 *flags, 952 struct i40e_aqc_mac_address_read_data *addrs, 953 struct i40e_asq_cmd_details *cmd_details) 954 { 955 struct i40e_aq_desc desc; 956 struct i40e_aqc_mac_address_read *cmd_data = 957 (struct i40e_aqc_mac_address_read *)&desc.params.raw; 958 i40e_status status; 959 960 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_mac_address_read); 961 desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF); 962 963 status = i40e_asq_send_command(hw, &desc, addrs, 964 sizeof(*addrs), cmd_details); 965 *flags = le16_to_cpu(cmd_data->command_flags); 966 967 return status; 968 } 969 970 /** 971 * i40e_aq_mac_address_write - 
Change the MAC addresses 972 * @hw: pointer to the hw struct 973 * @flags: indicates which MAC to be written 974 * @mac_addr: address to write 975 * @cmd_details: pointer to command details structure or NULL 976 **/ 977 i40e_status i40e_aq_mac_address_write(struct i40e_hw *hw, 978 u16 flags, u8 *mac_addr, 979 struct i40e_asq_cmd_details *cmd_details) 980 { 981 struct i40e_aq_desc desc; 982 struct i40e_aqc_mac_address_write *cmd_data = 983 (struct i40e_aqc_mac_address_write *)&desc.params.raw; 984 i40e_status status; 985 986 i40e_fill_default_direct_cmd_desc(&desc, 987 i40e_aqc_opc_mac_address_write); 988 cmd_data->command_flags = cpu_to_le16(flags); 989 cmd_data->mac_sah = cpu_to_le16((u16)mac_addr[0] << 8 | mac_addr[1]); 990 cmd_data->mac_sal = cpu_to_le32(((u32)mac_addr[2] << 24) | 991 ((u32)mac_addr[3] << 16) | 992 ((u32)mac_addr[4] << 8) | 993 mac_addr[5]); 994 995 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 996 997 return status; 998 } 999 1000 /** 1001 * i40e_get_mac_addr - get MAC address 1002 * @hw: pointer to the HW structure 1003 * @mac_addr: pointer to MAC address 1004 * 1005 * Reads the adapter's MAC address from register 1006 **/ 1007 i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr) 1008 { 1009 struct i40e_aqc_mac_address_read_data addrs; 1010 i40e_status status; 1011 u16 flags = 0; 1012 1013 status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL); 1014 1015 if (flags & I40E_AQC_LAN_ADDR_VALID) 1016 ether_addr_copy(mac_addr, addrs.pf_lan_mac); 1017 1018 return status; 1019 } 1020 1021 /** 1022 * i40e_get_port_mac_addr - get Port MAC address 1023 * @hw: pointer to the HW structure 1024 * @mac_addr: pointer to Port MAC address 1025 * 1026 * Reads the adapter's Port MAC address 1027 **/ 1028 i40e_status i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr) 1029 { 1030 struct i40e_aqc_mac_address_read_data addrs; 1031 i40e_status status; 1032 u16 flags = 0; 1033 1034 status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL); 1035 if (status) 1036 return status; 1037 1038 if (flags & I40E_AQC_PORT_ADDR_VALID) 1039 ether_addr_copy(mac_addr, addrs.port_mac); 1040 else 1041 status = I40E_ERR_INVALID_MAC_ADDR; 1042 1043 return status; 1044 } 1045 1046 /** 1047 * i40e_pre_tx_queue_cfg - pre tx queue configure 1048 * @hw: pointer to the HW structure 1049 * @queue: target PF queue index 1050 * @enable: state change request 1051 * 1052 * Handles hw requirement to indicate intention to enable 1053 * or disable target queue. 1054 **/ 1055 void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable) 1056 { 1057 u32 abs_queue_idx = hw->func_caps.base_queue + queue; 1058 u32 reg_block = 0; 1059 u32 reg_val; 1060 1061 if (abs_queue_idx >= 128) { 1062 reg_block = abs_queue_idx / 128; 1063 abs_queue_idx %= 128; 1064 } 1065 1066 reg_val = rd32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block)); 1067 reg_val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK; 1068 reg_val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT); 1069 1070 if (enable) 1071 reg_val |= I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK; 1072 else 1073 reg_val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK; 1074 1075 wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), reg_val); 1076 } 1077 1078 /** 1079 * i40e_read_pba_string - Reads part number string from EEPROM 1080 * @hw: pointer to hardware structure 1081 * @pba_num: stores the part number string from the EEPROM 1082 * @pba_num_size: part number string buffer length 1083 * 1084 * Reads the part number string from the EEPROM. 
1085 **/ 1086 i40e_status i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num, 1087 u32 pba_num_size) 1088 { 1089 i40e_status status = 0; 1090 u16 pba_word = 0; 1091 u16 pba_size = 0; 1092 u16 pba_ptr = 0; 1093 u16 i = 0; 1094 1095 status = i40e_read_nvm_word(hw, I40E_SR_PBA_FLAGS, &pba_word); 1096 if (status || (pba_word != 0xFAFA)) { 1097 hw_dbg(hw, "Failed to read PBA flags or flag is invalid.\n"); 1098 return status; 1099 } 1100 1101 status = i40e_read_nvm_word(hw, I40E_SR_PBA_BLOCK_PTR, &pba_ptr); 1102 if (status) { 1103 hw_dbg(hw, "Failed to read PBA Block pointer.\n"); 1104 return status; 1105 } 1106 1107 status = i40e_read_nvm_word(hw, pba_ptr, &pba_size); 1108 if (status) { 1109 hw_dbg(hw, "Failed to read PBA Block size.\n"); 1110 return status; 1111 } 1112 1113 /* Subtract one to get PBA word count (PBA Size word is included in 1114 * total size) 1115 */ 1116 pba_size--; 1117 if (pba_num_size < (((u32)pba_size * 2) + 1)) { 1118 hw_dbg(hw, "Buffer to small for PBA data.\n"); 1119 return I40E_ERR_PARAM; 1120 } 1121 1122 for (i = 0; i < pba_size; i++) { 1123 status = i40e_read_nvm_word(hw, (pba_ptr + 1) + i, &pba_word); 1124 if (status) { 1125 hw_dbg(hw, "Failed to read PBA Block word %d.\n", i); 1126 return status; 1127 } 1128 1129 pba_num[(i * 2)] = (pba_word >> 8) & 0xFF; 1130 pba_num[(i * 2) + 1] = pba_word & 0xFF; 1131 } 1132 pba_num[(pba_size * 2)] = '\0'; 1133 1134 return status; 1135 } 1136 1137 /** 1138 * i40e_get_media_type - Gets media type 1139 * @hw: pointer to the hardware structure 1140 **/ 1141 static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw) 1142 { 1143 enum i40e_media_type media; 1144 1145 switch (hw->phy.link_info.phy_type) { 1146 case I40E_PHY_TYPE_10GBASE_SR: 1147 case I40E_PHY_TYPE_10GBASE_LR: 1148 case I40E_PHY_TYPE_1000BASE_SX: 1149 case I40E_PHY_TYPE_1000BASE_LX: 1150 case I40E_PHY_TYPE_40GBASE_SR4: 1151 case I40E_PHY_TYPE_40GBASE_LR4: 1152 case I40E_PHY_TYPE_25GBASE_LR: 1153 case I40E_PHY_TYPE_25GBASE_SR: 1154 media = I40E_MEDIA_TYPE_FIBER; 1155 break; 1156 case I40E_PHY_TYPE_100BASE_TX: 1157 case I40E_PHY_TYPE_1000BASE_T: 1158 case I40E_PHY_TYPE_2_5GBASE_T: 1159 case I40E_PHY_TYPE_5GBASE_T: 1160 case I40E_PHY_TYPE_10GBASE_T: 1161 media = I40E_MEDIA_TYPE_BASET; 1162 break; 1163 case I40E_PHY_TYPE_10GBASE_CR1_CU: 1164 case I40E_PHY_TYPE_40GBASE_CR4_CU: 1165 case I40E_PHY_TYPE_10GBASE_CR1: 1166 case I40E_PHY_TYPE_40GBASE_CR4: 1167 case I40E_PHY_TYPE_10GBASE_SFPP_CU: 1168 case I40E_PHY_TYPE_40GBASE_AOC: 1169 case I40E_PHY_TYPE_10GBASE_AOC: 1170 case I40E_PHY_TYPE_25GBASE_CR: 1171 case I40E_PHY_TYPE_25GBASE_AOC: 1172 case I40E_PHY_TYPE_25GBASE_ACC: 1173 media = I40E_MEDIA_TYPE_DA; 1174 break; 1175 case I40E_PHY_TYPE_1000BASE_KX: 1176 case I40E_PHY_TYPE_10GBASE_KX4: 1177 case I40E_PHY_TYPE_10GBASE_KR: 1178 case I40E_PHY_TYPE_40GBASE_KR4: 1179 case I40E_PHY_TYPE_20GBASE_KR2: 1180 case I40E_PHY_TYPE_25GBASE_KR: 1181 media = I40E_MEDIA_TYPE_BACKPLANE; 1182 break; 1183 case I40E_PHY_TYPE_SGMII: 1184 case I40E_PHY_TYPE_XAUI: 1185 case I40E_PHY_TYPE_XFI: 1186 case I40E_PHY_TYPE_XLAUI: 1187 case I40E_PHY_TYPE_XLPPI: 1188 default: 1189 media = I40E_MEDIA_TYPE_UNKNOWN; 1190 break; 1191 } 1192 1193 return media; 1194 } 1195 1196 /** 1197 * i40e_poll_globr - Poll for Global Reset completion 1198 * @hw: pointer to the hardware structure 1199 * @retry_limit: how many times to retry before failure 1200 **/ 1201 static i40e_status i40e_poll_globr(struct i40e_hw *hw, 1202 u32 retry_limit) 1203 { 1204 u32 cnt, reg = 0; 1205 1206 for (cnt = 0; cnt < 
retry_limit; cnt++) { 1207 reg = rd32(hw, I40E_GLGEN_RSTAT); 1208 if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK)) 1209 return 0; 1210 msleep(100); 1211 } 1212 1213 hw_dbg(hw, "Global reset failed.\n"); 1214 hw_dbg(hw, "I40E_GLGEN_RSTAT = 0x%x\n", reg); 1215 1216 return I40E_ERR_RESET_FAILED; 1217 } 1218 1219 #define I40E_PF_RESET_WAIT_COUNT_A0 200 1220 #define I40E_PF_RESET_WAIT_COUNT 200 1221 /** 1222 * i40e_pf_reset - Reset the PF 1223 * @hw: pointer to the hardware structure 1224 * 1225 * Assuming someone else has triggered a global reset, 1226 * assure the global reset is complete and then reset the PF 1227 **/ 1228 i40e_status i40e_pf_reset(struct i40e_hw *hw) 1229 { 1230 u32 cnt = 0; 1231 u32 cnt1 = 0; 1232 u32 reg = 0; 1233 u32 grst_del; 1234 1235 /* Poll for Global Reset steady state in case of recent GRST. 1236 * The grst delay value is in 100ms units, and we'll wait a 1237 * couple counts longer to be sure we don't just miss the end. 1238 */ 1239 grst_del = (rd32(hw, I40E_GLGEN_RSTCTL) & 1240 I40E_GLGEN_RSTCTL_GRSTDEL_MASK) >> 1241 I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT; 1242 1243 /* It can take upto 15 secs for GRST steady state. 1244 * Bump it to 16 secs max to be safe. 1245 */ 1246 grst_del = grst_del * 20; 1247 1248 for (cnt = 0; cnt < grst_del; cnt++) { 1249 reg = rd32(hw, I40E_GLGEN_RSTAT); 1250 if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK)) 1251 break; 1252 msleep(100); 1253 } 1254 if (reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK) { 1255 hw_dbg(hw, "Global reset polling failed to complete.\n"); 1256 return I40E_ERR_RESET_FAILED; 1257 } 1258 1259 /* Now Wait for the FW to be ready */ 1260 for (cnt1 = 0; cnt1 < I40E_PF_RESET_WAIT_COUNT; cnt1++) { 1261 reg = rd32(hw, I40E_GLNVM_ULD); 1262 reg &= (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK | 1263 I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK); 1264 if (reg == (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK | 1265 I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK)) { 1266 hw_dbg(hw, "Core and Global modules ready %d\n", cnt1); 1267 break; 1268 } 1269 usleep_range(10000, 20000); 1270 } 1271 if (!(reg & (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK | 1272 I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))) { 1273 hw_dbg(hw, "wait for FW Reset complete timedout\n"); 1274 hw_dbg(hw, "I40E_GLNVM_ULD = 0x%x\n", reg); 1275 return I40E_ERR_RESET_FAILED; 1276 } 1277 1278 /* If there was a Global Reset in progress when we got here, 1279 * we don't need to do the PF Reset 1280 */ 1281 if (!cnt) { 1282 u32 reg2 = 0; 1283 if (hw->revision_id == 0) 1284 cnt = I40E_PF_RESET_WAIT_COUNT_A0; 1285 else 1286 cnt = I40E_PF_RESET_WAIT_COUNT; 1287 reg = rd32(hw, I40E_PFGEN_CTRL); 1288 wr32(hw, I40E_PFGEN_CTRL, 1289 (reg | I40E_PFGEN_CTRL_PFSWR_MASK)); 1290 for (; cnt; cnt--) { 1291 reg = rd32(hw, I40E_PFGEN_CTRL); 1292 if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK)) 1293 break; 1294 reg2 = rd32(hw, I40E_GLGEN_RSTAT); 1295 if (reg2 & I40E_GLGEN_RSTAT_DEVSTATE_MASK) 1296 break; 1297 usleep_range(1000, 2000); 1298 } 1299 if (reg2 & I40E_GLGEN_RSTAT_DEVSTATE_MASK) { 1300 if (i40e_poll_globr(hw, grst_del)) 1301 return I40E_ERR_RESET_FAILED; 1302 } else if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) { 1303 hw_dbg(hw, "PF reset polling failed to complete.\n"); 1304 return I40E_ERR_RESET_FAILED; 1305 } 1306 } 1307 1308 i40e_clear_pxe_mode(hw); 1309 1310 return 0; 1311 } 1312 1313 /** 1314 * i40e_clear_hw - clear out any left over hw state 1315 * @hw: pointer to the hw struct 1316 * 1317 * Clear queues and interrupts, typically called at init time, 1318 * but after the capabilities have been found so we know how many 1319 * queues and msix vectors have been 
allocated. 1320 **/ 1321 void i40e_clear_hw(struct i40e_hw *hw) 1322 { 1323 u32 num_queues, base_queue; 1324 u32 num_pf_int; 1325 u32 num_vf_int; 1326 u32 num_vfs; 1327 u32 i, j; 1328 u32 val; 1329 u32 eol = 0x7ff; 1330 1331 /* get number of interrupts, queues, and VFs */ 1332 val = rd32(hw, I40E_GLPCI_CNF2); 1333 num_pf_int = (val & I40E_GLPCI_CNF2_MSI_X_PF_N_MASK) >> 1334 I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT; 1335 num_vf_int = (val & I40E_GLPCI_CNF2_MSI_X_VF_N_MASK) >> 1336 I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT; 1337 1338 val = rd32(hw, I40E_PFLAN_QALLOC); 1339 base_queue = (val & I40E_PFLAN_QALLOC_FIRSTQ_MASK) >> 1340 I40E_PFLAN_QALLOC_FIRSTQ_SHIFT; 1341 j = (val & I40E_PFLAN_QALLOC_LASTQ_MASK) >> 1342 I40E_PFLAN_QALLOC_LASTQ_SHIFT; 1343 if (val & I40E_PFLAN_QALLOC_VALID_MASK) 1344 num_queues = (j - base_queue) + 1; 1345 else 1346 num_queues = 0; 1347 1348 val = rd32(hw, I40E_PF_VT_PFALLOC); 1349 i = (val & I40E_PF_VT_PFALLOC_FIRSTVF_MASK) >> 1350 I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT; 1351 j = (val & I40E_PF_VT_PFALLOC_LASTVF_MASK) >> 1352 I40E_PF_VT_PFALLOC_LASTVF_SHIFT; 1353 if (val & I40E_PF_VT_PFALLOC_VALID_MASK) 1354 num_vfs = (j - i) + 1; 1355 else 1356 num_vfs = 0; 1357 1358 /* stop all the interrupts */ 1359 wr32(hw, I40E_PFINT_ICR0_ENA, 0); 1360 val = 0x3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT; 1361 for (i = 0; i < num_pf_int - 2; i++) 1362 wr32(hw, I40E_PFINT_DYN_CTLN(i), val); 1363 1364 /* Set the FIRSTQ_INDX field to 0x7FF in PFINT_LNKLSTx */ 1365 val = eol << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT; 1366 wr32(hw, I40E_PFINT_LNKLST0, val); 1367 for (i = 0; i < num_pf_int - 2; i++) 1368 wr32(hw, I40E_PFINT_LNKLSTN(i), val); 1369 val = eol << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT; 1370 for (i = 0; i < num_vfs; i++) 1371 wr32(hw, I40E_VPINT_LNKLST0(i), val); 1372 for (i = 0; i < num_vf_int - 2; i++) 1373 wr32(hw, I40E_VPINT_LNKLSTN(i), val); 1374 1375 /* warn the HW of the coming Tx disables */ 1376 for (i = 0; i < num_queues; i++) { 1377 u32 abs_queue_idx = base_queue + i; 1378 u32 reg_block = 0; 1379 1380 if (abs_queue_idx >= 128) { 1381 reg_block = abs_queue_idx / 128; 1382 abs_queue_idx %= 128; 1383 } 1384 1385 val = rd32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block)); 1386 val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK; 1387 val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT); 1388 val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK; 1389 1390 wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), val); 1391 } 1392 udelay(400); 1393 1394 /* stop all the queues */ 1395 for (i = 0; i < num_queues; i++) { 1396 wr32(hw, I40E_QINT_TQCTL(i), 0); 1397 wr32(hw, I40E_QTX_ENA(i), 0); 1398 wr32(hw, I40E_QINT_RQCTL(i), 0); 1399 wr32(hw, I40E_QRX_ENA(i), 0); 1400 } 1401 1402 /* short wait for all queue disables to settle */ 1403 udelay(50); 1404 } 1405 1406 /** 1407 * i40e_clear_pxe_mode - clear pxe operations mode 1408 * @hw: pointer to the hw struct 1409 * 1410 * Make sure all PXE mode settings are cleared, including things 1411 * like descriptor fetch/write-back mode. 
1412 **/ 1413 void i40e_clear_pxe_mode(struct i40e_hw *hw) 1414 { 1415 u32 reg; 1416 1417 if (i40e_check_asq_alive(hw)) 1418 i40e_aq_clear_pxe_mode(hw, NULL); 1419 1420 /* Clear single descriptor fetch/write-back mode */ 1421 reg = rd32(hw, I40E_GLLAN_RCTL_0); 1422 1423 if (hw->revision_id == 0) { 1424 /* As a work around clear PXE_MODE instead of setting it */ 1425 wr32(hw, I40E_GLLAN_RCTL_0, (reg & (~I40E_GLLAN_RCTL_0_PXE_MODE_MASK))); 1426 } else { 1427 wr32(hw, I40E_GLLAN_RCTL_0, (reg | I40E_GLLAN_RCTL_0_PXE_MODE_MASK)); 1428 } 1429 } 1430 1431 /** 1432 * i40e_led_is_mine - helper to find matching led 1433 * @hw: pointer to the hw struct 1434 * @idx: index into GPIO registers 1435 * 1436 * returns: 0 if no match, otherwise the value of the GPIO_CTL register 1437 */ 1438 static u32 i40e_led_is_mine(struct i40e_hw *hw, int idx) 1439 { 1440 u32 gpio_val = 0; 1441 u32 port; 1442 1443 if (!hw->func_caps.led[idx]) 1444 return 0; 1445 1446 gpio_val = rd32(hw, I40E_GLGEN_GPIO_CTL(idx)); 1447 port = (gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK) >> 1448 I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT; 1449 1450 /* if PRT_NUM_NA is 1 then this LED is not port specific, OR 1451 * if it is not our port then ignore 1452 */ 1453 if ((gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_MASK) || 1454 (port != hw->port)) 1455 return 0; 1456 1457 return gpio_val; 1458 } 1459 1460 #define I40E_COMBINED_ACTIVITY 0xA 1461 #define I40E_FILTER_ACTIVITY 0xE 1462 #define I40E_LINK_ACTIVITY 0xC 1463 #define I40E_MAC_ACTIVITY 0xD 1464 #define I40E_LED0 22 1465 1466 /** 1467 * i40e_led_get - return current on/off mode 1468 * @hw: pointer to the hw struct 1469 * 1470 * The value returned is the 'mode' field as defined in the 1471 * GPIO register definitions: 0x0 = off, 0xf = on, and other 1472 * values are variations of possible behaviors relating to 1473 * blink, link, and wire. 1474 **/ 1475 u32 i40e_led_get(struct i40e_hw *hw) 1476 { 1477 u32 mode = 0; 1478 int i; 1479 1480 /* as per the documentation GPIO 22-29 are the LED 1481 * GPIO pins named LED0..LED7 1482 */ 1483 for (i = I40E_LED0; i <= I40E_GLGEN_GPIO_CTL_MAX_INDEX; i++) { 1484 u32 gpio_val = i40e_led_is_mine(hw, i); 1485 1486 if (!gpio_val) 1487 continue; 1488 1489 mode = (gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK) >> 1490 I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT; 1491 break; 1492 } 1493 1494 return mode; 1495 } 1496 1497 /** 1498 * i40e_led_set - set new on/off mode 1499 * @hw: pointer to the hw struct 1500 * @mode: 0=off, 0xf=on (else see manual for mode details) 1501 * @blink: true if the LED should blink when on, false if steady 1502 * 1503 * if this function is used to turn on the blink it should 1504 * be used to disable the blink when restoring the original state. 
1505 **/ 1506 void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink) 1507 { 1508 int i; 1509 1510 if (mode & 0xfffffff0) 1511 hw_dbg(hw, "invalid mode passed in %X\n", mode); 1512 1513 /* as per the documentation GPIO 22-29 are the LED 1514 * GPIO pins named LED0..LED7 1515 */ 1516 for (i = I40E_LED0; i <= I40E_GLGEN_GPIO_CTL_MAX_INDEX; i++) { 1517 u32 gpio_val = i40e_led_is_mine(hw, i); 1518 1519 if (!gpio_val) 1520 continue; 1521 gpio_val &= ~I40E_GLGEN_GPIO_CTL_LED_MODE_MASK; 1522 /* this & is a bit of paranoia, but serves as a range check */ 1523 gpio_val |= ((mode << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) & 1524 I40E_GLGEN_GPIO_CTL_LED_MODE_MASK); 1525 1526 if (blink) 1527 gpio_val |= BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT); 1528 else 1529 gpio_val &= ~BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT); 1530 1531 wr32(hw, I40E_GLGEN_GPIO_CTL(i), gpio_val); 1532 break; 1533 } 1534 } 1535 1536 /* Admin command wrappers */ 1537 1538 /** 1539 * i40e_aq_get_phy_capabilities 1540 * @hw: pointer to the hw struct 1541 * @abilities: structure for PHY capabilities to be filled 1542 * @qualified_modules: report Qualified Modules 1543 * @report_init: report init capabilities (active are default) 1544 * @cmd_details: pointer to command details structure or NULL 1545 * 1546 * Returns the various PHY abilities supported on the Port. 1547 **/ 1548 i40e_status i40e_aq_get_phy_capabilities(struct i40e_hw *hw, 1549 bool qualified_modules, bool report_init, 1550 struct i40e_aq_get_phy_abilities_resp *abilities, 1551 struct i40e_asq_cmd_details *cmd_details) 1552 { 1553 struct i40e_aq_desc desc; 1554 i40e_status status; 1555 u16 abilities_size = sizeof(struct i40e_aq_get_phy_abilities_resp); 1556 u16 max_delay = I40E_MAX_PHY_TIMEOUT, total_delay = 0; 1557 1558 if (!abilities) 1559 return I40E_ERR_PARAM; 1560 1561 do { 1562 i40e_fill_default_direct_cmd_desc(&desc, 1563 i40e_aqc_opc_get_phy_abilities); 1564 1565 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 1566 if (abilities_size > I40E_AQ_LARGE_BUF) 1567 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 1568 1569 if (qualified_modules) 1570 desc.params.external.param0 |= 1571 cpu_to_le32(I40E_AQ_PHY_REPORT_QUALIFIED_MODULES); 1572 1573 if (report_init) 1574 desc.params.external.param0 |= 1575 cpu_to_le32(I40E_AQ_PHY_REPORT_INITIAL_VALUES); 1576 1577 status = i40e_asq_send_command(hw, &desc, abilities, 1578 abilities_size, cmd_details); 1579 1580 if (status) 1581 break; 1582 1583 if (hw->aq.asq_last_status == I40E_AQ_RC_EIO) { 1584 status = I40E_ERR_UNKNOWN_PHY; 1585 break; 1586 } else if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN) { 1587 usleep_range(1000, 2000); 1588 total_delay++; 1589 status = I40E_ERR_TIMEOUT; 1590 } 1591 } while ((hw->aq.asq_last_status != I40E_AQ_RC_OK) && 1592 (total_delay < max_delay)); 1593 1594 if (status) 1595 return status; 1596 1597 if (report_init) { 1598 if (hw->mac.type == I40E_MAC_XL710 && 1599 hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR && 1600 hw->aq.api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710) { 1601 status = i40e_aq_get_link_info(hw, true, NULL, NULL); 1602 } else { 1603 hw->phy.phy_types = le32_to_cpu(abilities->phy_type); 1604 hw->phy.phy_types |= 1605 ((u64)abilities->phy_type_ext << 32); 1606 } 1607 } 1608 1609 return status; 1610 } 1611 1612 /** 1613 * i40e_aq_set_phy_config 1614 * @hw: pointer to the hw struct 1615 * @config: structure with PHY configuration to be set 1616 * @cmd_details: pointer to command details structure or NULL 1617 * 1618 * Set the various PHY configuration parameters 1619 * 
supported on the Port.One or more of the Set PHY config parameters may be 1620 * ignored in an MFP mode as the PF may not have the privilege to set some 1621 * of the PHY Config parameters. This status will be indicated by the 1622 * command response. 1623 **/ 1624 enum i40e_status_code i40e_aq_set_phy_config(struct i40e_hw *hw, 1625 struct i40e_aq_set_phy_config *config, 1626 struct i40e_asq_cmd_details *cmd_details) 1627 { 1628 struct i40e_aq_desc desc; 1629 struct i40e_aq_set_phy_config *cmd = 1630 (struct i40e_aq_set_phy_config *)&desc.params.raw; 1631 enum i40e_status_code status; 1632 1633 if (!config) 1634 return I40E_ERR_PARAM; 1635 1636 i40e_fill_default_direct_cmd_desc(&desc, 1637 i40e_aqc_opc_set_phy_config); 1638 1639 *cmd = *config; 1640 1641 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 1642 1643 return status; 1644 } 1645 1646 /** 1647 * i40e_set_fc 1648 * @hw: pointer to the hw struct 1649 * @aq_failures: buffer to return AdminQ failure information 1650 * @atomic_restart: whether to enable atomic link restart 1651 * 1652 * Set the requested flow control mode using set_phy_config. 1653 **/ 1654 enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures, 1655 bool atomic_restart) 1656 { 1657 enum i40e_fc_mode fc_mode = hw->fc.requested_mode; 1658 struct i40e_aq_get_phy_abilities_resp abilities; 1659 struct i40e_aq_set_phy_config config; 1660 enum i40e_status_code status; 1661 u8 pause_mask = 0x0; 1662 1663 *aq_failures = 0x0; 1664 1665 switch (fc_mode) { 1666 case I40E_FC_FULL: 1667 pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_TX; 1668 pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_RX; 1669 break; 1670 case I40E_FC_RX_PAUSE: 1671 pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_RX; 1672 break; 1673 case I40E_FC_TX_PAUSE: 1674 pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_TX; 1675 break; 1676 default: 1677 break; 1678 } 1679 1680 /* Get the current phy config */ 1681 status = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, 1682 NULL); 1683 if (status) { 1684 *aq_failures |= I40E_SET_FC_AQ_FAIL_GET; 1685 return status; 1686 } 1687 1688 memset(&config, 0, sizeof(struct i40e_aq_set_phy_config)); 1689 /* clear the old pause settings */ 1690 config.abilities = abilities.abilities & ~(I40E_AQ_PHY_FLAG_PAUSE_TX) & 1691 ~(I40E_AQ_PHY_FLAG_PAUSE_RX); 1692 /* set the new abilities */ 1693 config.abilities |= pause_mask; 1694 /* If the abilities have changed, then set the new config */ 1695 if (config.abilities != abilities.abilities) { 1696 /* Auto restart link so settings take effect */ 1697 if (atomic_restart) 1698 config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK; 1699 /* Copy over all the old settings */ 1700 config.phy_type = abilities.phy_type; 1701 config.phy_type_ext = abilities.phy_type_ext; 1702 config.link_speed = abilities.link_speed; 1703 config.eee_capability = abilities.eee_capability; 1704 config.eeer = abilities.eeer_val; 1705 config.low_power_ctrl = abilities.d3_lpan; 1706 config.fec_config = abilities.fec_cfg_curr_mod_ext_info & 1707 I40E_AQ_PHY_FEC_CONFIG_MASK; 1708 status = i40e_aq_set_phy_config(hw, &config, NULL); 1709 1710 if (status) 1711 *aq_failures |= I40E_SET_FC_AQ_FAIL_SET; 1712 } 1713 /* Update the link info */ 1714 status = i40e_update_link_info(hw); 1715 if (status) { 1716 /* Wait a little bit (on 40G cards it sometimes takes a really 1717 * long time for link to come back from the atomic reset) 1718 * and try once more 1719 */ 1720 msleep(1000); 1721 status = i40e_update_link_info(hw); 1722 } 1723 if (status) 1724 *aq_failures |= 
I40E_SET_FC_AQ_FAIL_UPDATE; 1725 1726 return status; 1727 } 1728 1729 /** 1730 * i40e_aq_clear_pxe_mode 1731 * @hw: pointer to the hw struct 1732 * @cmd_details: pointer to command details structure or NULL 1733 * 1734 * Tell the firmware that the driver is taking over from PXE 1735 **/ 1736 i40e_status i40e_aq_clear_pxe_mode(struct i40e_hw *hw, 1737 struct i40e_asq_cmd_details *cmd_details) 1738 { 1739 i40e_status status; 1740 struct i40e_aq_desc desc; 1741 struct i40e_aqc_clear_pxe *cmd = 1742 (struct i40e_aqc_clear_pxe *)&desc.params.raw; 1743 1744 i40e_fill_default_direct_cmd_desc(&desc, 1745 i40e_aqc_opc_clear_pxe_mode); 1746 1747 cmd->rx_cnt = 0x2; 1748 1749 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 1750 1751 wr32(hw, I40E_GLLAN_RCTL_0, 0x1); 1752 1753 return status; 1754 } 1755 1756 /** 1757 * i40e_aq_set_link_restart_an 1758 * @hw: pointer to the hw struct 1759 * @enable_link: if true: enable link, if false: disable link 1760 * @cmd_details: pointer to command details structure or NULL 1761 * 1762 * Sets up the link and restarts the Auto-Negotiation over the link. 1763 **/ 1764 i40e_status i40e_aq_set_link_restart_an(struct i40e_hw *hw, 1765 bool enable_link, 1766 struct i40e_asq_cmd_details *cmd_details) 1767 { 1768 struct i40e_aq_desc desc; 1769 struct i40e_aqc_set_link_restart_an *cmd = 1770 (struct i40e_aqc_set_link_restart_an *)&desc.params.raw; 1771 i40e_status status; 1772 1773 i40e_fill_default_direct_cmd_desc(&desc, 1774 i40e_aqc_opc_set_link_restart_an); 1775 1776 cmd->command = I40E_AQ_PHY_RESTART_AN; 1777 if (enable_link) 1778 cmd->command |= I40E_AQ_PHY_LINK_ENABLE; 1779 else 1780 cmd->command &= ~I40E_AQ_PHY_LINK_ENABLE; 1781 1782 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 1783 1784 return status; 1785 } 1786 1787 /** 1788 * i40e_aq_get_link_info 1789 * @hw: pointer to the hw struct 1790 * @enable_lse: enable/disable LinkStatusEvent reporting 1791 * @link: pointer to link status structure - optional 1792 * @cmd_details: pointer to command details structure or NULL 1793 * 1794 * Returns the link status of the adapter. 
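 *
 * Illustrative usage sketch (the calling context shown here is an
 * assumption, not taken from this file):
 *
 *	struct i40e_link_status link;
 *	i40e_status ret;
 *
 *	ret = i40e_aq_get_link_info(hw, true, &link, NULL);
 *	if (!ret && (link.link_info & I40E_AQ_LINK_UP))
 *		hw_dbg(hw, "link is up, speed 0x%x\n", link.link_speed);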
1795 **/ 1796 i40e_status i40e_aq_get_link_info(struct i40e_hw *hw, 1797 bool enable_lse, struct i40e_link_status *link, 1798 struct i40e_asq_cmd_details *cmd_details) 1799 { 1800 struct i40e_aq_desc desc; 1801 struct i40e_aqc_get_link_status *resp = 1802 (struct i40e_aqc_get_link_status *)&desc.params.raw; 1803 struct i40e_link_status *hw_link_info = &hw->phy.link_info; 1804 i40e_status status; 1805 bool tx_pause, rx_pause; 1806 u16 command_flags; 1807 1808 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status); 1809 1810 if (enable_lse) 1811 command_flags = I40E_AQ_LSE_ENABLE; 1812 else 1813 command_flags = I40E_AQ_LSE_DISABLE; 1814 resp->command_flags = cpu_to_le16(command_flags); 1815 1816 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 1817 1818 if (status) 1819 goto aq_get_link_info_exit; 1820 1821 /* save off old link status information */ 1822 hw->phy.link_info_old = *hw_link_info; 1823 1824 /* update link status */ 1825 hw_link_info->phy_type = (enum i40e_aq_phy_type)resp->phy_type; 1826 hw->phy.media_type = i40e_get_media_type(hw); 1827 hw_link_info->link_speed = (enum i40e_aq_link_speed)resp->link_speed; 1828 hw_link_info->link_info = resp->link_info; 1829 hw_link_info->an_info = resp->an_info; 1830 hw_link_info->fec_info = resp->config & (I40E_AQ_CONFIG_FEC_KR_ENA | 1831 I40E_AQ_CONFIG_FEC_RS_ENA); 1832 hw_link_info->ext_info = resp->ext_info; 1833 hw_link_info->loopback = resp->loopback & I40E_AQ_LOOPBACK_MASK; 1834 hw_link_info->max_frame_size = le16_to_cpu(resp->max_frame_size); 1835 hw_link_info->pacing = resp->config & I40E_AQ_CONFIG_PACING_MASK; 1836 1837 /* update fc info */ 1838 tx_pause = !!(resp->an_info & I40E_AQ_LINK_PAUSE_TX); 1839 rx_pause = !!(resp->an_info & I40E_AQ_LINK_PAUSE_RX); 1840 if (tx_pause & rx_pause) 1841 hw->fc.current_mode = I40E_FC_FULL; 1842 else if (tx_pause) 1843 hw->fc.current_mode = I40E_FC_TX_PAUSE; 1844 else if (rx_pause) 1845 hw->fc.current_mode = I40E_FC_RX_PAUSE; 1846 else 1847 hw->fc.current_mode = I40E_FC_NONE; 1848 1849 if (resp->config & I40E_AQ_CONFIG_CRC_ENA) 1850 hw_link_info->crc_enable = true; 1851 else 1852 hw_link_info->crc_enable = false; 1853 1854 if (resp->command_flags & cpu_to_le16(I40E_AQ_LSE_IS_ENABLED)) 1855 hw_link_info->lse_enable = true; 1856 else 1857 hw_link_info->lse_enable = false; 1858 1859 if ((hw->mac.type == I40E_MAC_XL710) && 1860 (hw->aq.fw_maj_ver < 4 || (hw->aq.fw_maj_ver == 4 && 1861 hw->aq.fw_min_ver < 40)) && hw_link_info->phy_type == 0xE) 1862 hw_link_info->phy_type = I40E_PHY_TYPE_10GBASE_SFPP_CU; 1863 1864 if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) { 1865 __le32 tmp; 1866 1867 memcpy(&tmp, resp->link_type, sizeof(tmp)); 1868 hw->phy.phy_types = le32_to_cpu(tmp); 1869 hw->phy.phy_types |= ((u64)resp->link_type_ext << 32); 1870 } 1871 1872 /* save link status information */ 1873 if (link) 1874 *link = *hw_link_info; 1875 1876 /* flag cleared so helper functions don't call AQ again */ 1877 hw->phy.get_link_info = false; 1878 1879 aq_get_link_info_exit: 1880 return status; 1881 } 1882 1883 /** 1884 * i40e_aq_set_phy_int_mask 1885 * @hw: pointer to the hw struct 1886 * @mask: interrupt mask to be set 1887 * @cmd_details: pointer to command details structure or NULL 1888 * 1889 * Set link interrupt mask. 
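 *
 * One common caller pattern is to mask out every event except link,
 * media and module-qualification changes, for example (illustrative
 * sketch; the I40E_AQ_EVENT_* bits are assumed to come from the admin
 * queue command definitions):
 *
 *	i40e_aq_set_phy_int_mask(hw,
 *				 ~(I40E_AQ_EVENT_LINK_UPDOWN |
 *				   I40E_AQ_EVENT_MEDIA_NA |
 *				   I40E_AQ_EVENT_MODULE_QUAL_FAIL),
 *				 NULL);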
1890 **/ 1891 i40e_status i40e_aq_set_phy_int_mask(struct i40e_hw *hw, 1892 u16 mask, 1893 struct i40e_asq_cmd_details *cmd_details) 1894 { 1895 struct i40e_aq_desc desc; 1896 struct i40e_aqc_set_phy_int_mask *cmd = 1897 (struct i40e_aqc_set_phy_int_mask *)&desc.params.raw; 1898 i40e_status status; 1899 1900 i40e_fill_default_direct_cmd_desc(&desc, 1901 i40e_aqc_opc_set_phy_int_mask); 1902 1903 cmd->event_mask = cpu_to_le16(mask); 1904 1905 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 1906 1907 return status; 1908 } 1909 1910 /** 1911 * i40e_aq_set_phy_debug 1912 * @hw: pointer to the hw struct 1913 * @cmd_flags: debug command flags 1914 * @cmd_details: pointer to command details structure or NULL 1915 * 1916 * Reset the external PHY. 1917 **/ 1918 i40e_status i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags, 1919 struct i40e_asq_cmd_details *cmd_details) 1920 { 1921 struct i40e_aq_desc desc; 1922 struct i40e_aqc_set_phy_debug *cmd = 1923 (struct i40e_aqc_set_phy_debug *)&desc.params.raw; 1924 i40e_status status; 1925 1926 i40e_fill_default_direct_cmd_desc(&desc, 1927 i40e_aqc_opc_set_phy_debug); 1928 1929 cmd->command_flags = cmd_flags; 1930 1931 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 1932 1933 return status; 1934 } 1935 1936 /** 1937 * i40e_aq_add_vsi 1938 * @hw: pointer to the hw struct 1939 * @vsi_ctx: pointer to a vsi context struct 1940 * @cmd_details: pointer to command details structure or NULL 1941 * 1942 * Add a VSI context to the hardware. 1943 **/ 1944 i40e_status i40e_aq_add_vsi(struct i40e_hw *hw, 1945 struct i40e_vsi_context *vsi_ctx, 1946 struct i40e_asq_cmd_details *cmd_details) 1947 { 1948 struct i40e_aq_desc desc; 1949 struct i40e_aqc_add_get_update_vsi *cmd = 1950 (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw; 1951 struct i40e_aqc_add_get_update_vsi_completion *resp = 1952 (struct i40e_aqc_add_get_update_vsi_completion *) 1953 &desc.params.raw; 1954 i40e_status status; 1955 1956 i40e_fill_default_direct_cmd_desc(&desc, 1957 i40e_aqc_opc_add_vsi); 1958 1959 cmd->uplink_seid = cpu_to_le16(vsi_ctx->uplink_seid); 1960 cmd->connection_type = vsi_ctx->connection_type; 1961 cmd->vf_id = vsi_ctx->vf_num; 1962 cmd->vsi_flags = cpu_to_le16(vsi_ctx->flags); 1963 1964 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); 1965 1966 status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info, 1967 sizeof(vsi_ctx->info), cmd_details); 1968 1969 if (status) 1970 goto aq_add_vsi_exit; 1971 1972 vsi_ctx->seid = le16_to_cpu(resp->seid); 1973 vsi_ctx->vsi_number = le16_to_cpu(resp->vsi_number); 1974 vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used); 1975 vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free); 1976 1977 aq_add_vsi_exit: 1978 return status; 1979 } 1980 1981 /** 1982 * i40e_aq_set_default_vsi 1983 * @hw: pointer to the hw struct 1984 * @seid: vsi number 1985 * @cmd_details: pointer to command details structure or NULL 1986 **/ 1987 i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw, 1988 u16 seid, 1989 struct i40e_asq_cmd_details *cmd_details) 1990 { 1991 struct i40e_aq_desc desc; 1992 struct i40e_aqc_set_vsi_promiscuous_modes *cmd = 1993 (struct i40e_aqc_set_vsi_promiscuous_modes *) 1994 &desc.params.raw; 1995 i40e_status status; 1996 1997 i40e_fill_default_direct_cmd_desc(&desc, 1998 i40e_aqc_opc_set_vsi_promiscuous_modes); 1999 2000 cmd->promiscuous_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT); 2001 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT); 2002 cmd->seid = 
cpu_to_le16(seid); 2003 2004 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2005 2006 return status; 2007 } 2008 2009 /** 2010 * i40e_aq_clear_default_vsi 2011 * @hw: pointer to the hw struct 2012 * @seid: vsi number 2013 * @cmd_details: pointer to command details structure or NULL 2014 **/ 2015 i40e_status i40e_aq_clear_default_vsi(struct i40e_hw *hw, 2016 u16 seid, 2017 struct i40e_asq_cmd_details *cmd_details) 2018 { 2019 struct i40e_aq_desc desc; 2020 struct i40e_aqc_set_vsi_promiscuous_modes *cmd = 2021 (struct i40e_aqc_set_vsi_promiscuous_modes *) 2022 &desc.params.raw; 2023 i40e_status status; 2024 2025 i40e_fill_default_direct_cmd_desc(&desc, 2026 i40e_aqc_opc_set_vsi_promiscuous_modes); 2027 2028 cmd->promiscuous_flags = cpu_to_le16(0); 2029 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT); 2030 cmd->seid = cpu_to_le16(seid); 2031 2032 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2033 2034 return status; 2035 } 2036 2037 /** 2038 * i40e_aq_set_vsi_unicast_promiscuous 2039 * @hw: pointer to the hw struct 2040 * @seid: vsi number 2041 * @set: set unicast promiscuous enable/disable 2042 * @cmd_details: pointer to command details structure or NULL 2043 * @rx_only_promisc: flag to decide if egress traffic gets mirrored in promisc 2044 **/ 2045 i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw, 2046 u16 seid, bool set, 2047 struct i40e_asq_cmd_details *cmd_details, 2048 bool rx_only_promisc) 2049 { 2050 struct i40e_aq_desc desc; 2051 struct i40e_aqc_set_vsi_promiscuous_modes *cmd = 2052 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; 2053 i40e_status status; 2054 u16 flags = 0; 2055 2056 i40e_fill_default_direct_cmd_desc(&desc, 2057 i40e_aqc_opc_set_vsi_promiscuous_modes); 2058 2059 if (set) { 2060 flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST; 2061 if (rx_only_promisc && 2062 (((hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver >= 5)) || 2063 (hw->aq.api_maj_ver > 1))) 2064 flags |= I40E_AQC_SET_VSI_PROMISC_TX; 2065 } 2066 2067 cmd->promiscuous_flags = cpu_to_le16(flags); 2068 2069 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST); 2070 if (((hw->aq.api_maj_ver >= 1) && (hw->aq.api_min_ver >= 5)) || 2071 (hw->aq.api_maj_ver > 1)) 2072 cmd->valid_flags |= cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_TX); 2073 2074 cmd->seid = cpu_to_le16(seid); 2075 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2076 2077 return status; 2078 } 2079 2080 /** 2081 * i40e_aq_set_vsi_multicast_promiscuous 2082 * @hw: pointer to the hw struct 2083 * @seid: vsi number 2084 * @set: set multicast promiscuous enable/disable 2085 * @cmd_details: pointer to command details structure or NULL 2086 **/ 2087 i40e_status i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw, 2088 u16 seid, bool set, struct i40e_asq_cmd_details *cmd_details) 2089 { 2090 struct i40e_aq_desc desc; 2091 struct i40e_aqc_set_vsi_promiscuous_modes *cmd = 2092 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; 2093 i40e_status status; 2094 u16 flags = 0; 2095 2096 i40e_fill_default_direct_cmd_desc(&desc, 2097 i40e_aqc_opc_set_vsi_promiscuous_modes); 2098 2099 if (set) 2100 flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST; 2101 2102 cmd->promiscuous_flags = cpu_to_le16(flags); 2103 2104 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_MULTICAST); 2105 2106 cmd->seid = cpu_to_le16(seid); 2107 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2108 2109 return status; 2110 } 2111 2112 /** 2113 * 
i40e_aq_set_vsi_mc_promisc_on_vlan
 * @hw: pointer to the hw struct
 * @seid: vsi number
 * @enable: set MAC L2 layer multicast promiscuous enable/disable for a given VLAN
 * @vid: The VLAN tag filter - capture any multicast packet with this VLAN tag
 * @cmd_details: pointer to command details structure or NULL
 **/
enum i40e_status_code i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
							 u16 seid, bool enable,
							 u16 vid,
				struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
		(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
	enum i40e_status_code status;
	u16 flags = 0;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_set_vsi_promiscuous_modes);

	if (enable)
		flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST;

	cmd->promiscuous_flags = cpu_to_le16(flags);
	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_MULTICAST);
	cmd->seid = cpu_to_le16(seid);
	cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID);

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	return status;
}

/**
 * i40e_aq_set_vsi_uc_promisc_on_vlan
 * @hw: pointer to the hw struct
 * @seid: vsi number
 * @enable: set MAC L2 layer unicast promiscuous enable/disable for a given VLAN
 * @vid: The VLAN tag filter - capture any unicast packet with this VLAN tag
 * @cmd_details: pointer to command details structure or NULL
 **/
enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
							 u16 seid, bool enable,
							 u16 vid,
				struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
		(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
	enum i40e_status_code status;
	u16 flags = 0;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_set_vsi_promiscuous_modes);

	if (enable)
		flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;

	cmd->promiscuous_flags = cpu_to_le16(flags);
	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
	cmd->seid = cpu_to_le16(seid);
	cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID);

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	return status;
}

/**
 * i40e_aq_set_vsi_bc_promisc_on_vlan
 * @hw: pointer to the hw struct
 * @seid: vsi number
 * @enable: set broadcast promiscuous enable/disable for a given VLAN
 * @vid: The VLAN tag filter - capture any broadcast packet with this VLAN tag
 * @cmd_details: pointer to command details structure or NULL
 **/
i40e_status i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw,
				u16 seid, bool enable, u16 vid,
				struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
		(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
	i40e_status status;
	u16 flags = 0;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_set_vsi_promiscuous_modes);

	if (enable)
		flags |= I40E_AQC_SET_VSI_PROMISC_BROADCAST;

	cmd->promiscuous_flags = cpu_to_le16(flags);
	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
	cmd->seid = cpu_to_le16(seid);
	cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID);

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	return status;
}

/**
 * i40e_aq_set_vsi_broadcast
 * @hw: pointer to the hw struct
 * @seid: vsi number
 * @set_filter: true to set filter, false to clear filter
 * @cmd_details: pointer to command details structure or NULL
 *
 * Set or clear the broadcast promiscuous flag (filter) for a given VSI.
 **/
i40e_status i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
				u16 seid, bool set_filter,
				struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
		(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
	i40e_status status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_set_vsi_promiscuous_modes);

	if (set_filter)
		cmd->promiscuous_flags
			|= cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
	else
		cmd->promiscuous_flags
			&= cpu_to_le16(~I40E_AQC_SET_VSI_PROMISC_BROADCAST);

	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
	cmd->seid = cpu_to_le16(seid);
	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	return status;
}

/**
 * i40e_aq_set_vsi_vlan_promisc - control the VLAN promiscuous setting
 * @hw: pointer to the hw struct
 * @seid: vsi number
 * @enable: set VLAN promiscuous enable/disable
 * @cmd_details: pointer to command details structure or NULL
 **/
i40e_status i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw,
				u16 seid, bool enable,
				struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
		(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
	i40e_status status;
	u16 flags = 0;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_set_vsi_promiscuous_modes);
	if (enable)
		flags |= I40E_AQC_SET_VSI_PROMISC_VLAN;

	cmd->promiscuous_flags = cpu_to_le16(flags);
	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_VLAN);
	cmd->seid = cpu_to_le16(seid);

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	return status;
}

/**
 * i40e_aq_get_vsi_params - get VSI configuration info
 * @hw: pointer to the hw struct
 * @vsi_ctx: pointer to a vsi context struct
 * @cmd_details: pointer to command details structure or NULL
 **/
i40e_status i40e_aq_get_vsi_params(struct i40e_hw *hw,
				struct i40e_vsi_context *vsi_ctx,
				struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_add_get_update_vsi *cmd =
		(struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
	struct i40e_aqc_add_get_update_vsi_completion *resp =
		(struct i40e_aqc_add_get_update_vsi_completion *)
		&desc.params.raw;
	i40e_status status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_get_vsi_parameters);

	cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid);

	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);

	status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
				       sizeof(vsi_ctx->info), NULL);

	if (status)
		goto aq_get_vsi_params_exit;

	vsi_ctx->seid = le16_to_cpu(resp->seid);
	vsi_ctx->vsi_number = le16_to_cpu(resp->vsi_number);
	vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used);
	vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);

aq_get_vsi_params_exit:
	return status;
}

/**
 * i40e_aq_update_vsi_params
 * @hw: pointer to the hw struct
 * @vsi_ctx: pointer to a vsi context struct
 * @cmd_details: pointer to command details structure or NULL
 *
 * Update a VSI context.
 **/
i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw,
				struct i40e_vsi_context *vsi_ctx,
				struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_add_get_update_vsi *cmd =
		(struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
	struct i40e_aqc_add_get_update_vsi_completion *resp =
		(struct i40e_aqc_add_get_update_vsi_completion *)
		&desc.params.raw;
	i40e_status status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_update_vsi_parameters);
	cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid);

	desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));

	status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
				       sizeof(vsi_ctx->info), cmd_details);

	vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used);
	vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);

	return status;
}

/**
 * i40e_aq_get_switch_config
 * @hw: pointer to the hardware structure
 * @buf: pointer to the result buffer
 * @buf_size: length of input buffer
 * @start_seid: seid to start for the report, 0 == beginning
 * @cmd_details: pointer to command details structure or NULL
 *
 * Fill the buf with switch configuration returned from AdminQ command
 **/
i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw,
				struct i40e_aqc_get_switch_config_resp *buf,
				u16 buf_size, u16 *start_seid,
				struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_switch_seid *scfg =
		(struct i40e_aqc_switch_seid *)&desc.params.raw;
	i40e_status status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_get_switch_config);
	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
	if (buf_size > I40E_AQ_LARGE_BUF)
		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
	scfg->seid = cpu_to_le16(*start_seid);

	status = i40e_asq_send_command(hw, &desc, buf, buf_size, cmd_details);
	*start_seid = le16_to_cpu(scfg->seid);

	return status;
}

/**
 * i40e_aq_set_switch_config
 * @hw: pointer to the hardware structure
 * @flags: bit flag values to set
 * @valid_flags: which bit flags to set
 * @mode: cloud filter mode
 * @cmd_details: pointer to command details structure or NULL
 *
 * Set switch configuration bits
 **/
enum i40e_status_code i40e_aq_set_switch_config(struct i40e_hw *hw,
						u16 flags,
						u16 valid_flags, u8 mode,
				struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_set_switch_config *scfg =
		(struct i40e_aqc_set_switch_config *)&desc.params.raw;
	enum i40e_status_code status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_set_switch_config);
	scfg->flags = cpu_to_le16(flags);
	scfg->valid_flags = cpu_to_le16(valid_flags);
	scfg->mode = mode;
	if (hw->flags &
I40E_HW_FLAG_802_1AD_CAPABLE) { 2417 scfg->switch_tag = cpu_to_le16(hw->switch_tag); 2418 scfg->first_tag = cpu_to_le16(hw->first_tag); 2419 scfg->second_tag = cpu_to_le16(hw->second_tag); 2420 } 2421 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2422 2423 return status; 2424 } 2425 2426 /** 2427 * i40e_aq_get_firmware_version 2428 * @hw: pointer to the hw struct 2429 * @fw_major_version: firmware major version 2430 * @fw_minor_version: firmware minor version 2431 * @fw_build: firmware build number 2432 * @api_major_version: major queue version 2433 * @api_minor_version: minor queue version 2434 * @cmd_details: pointer to command details structure or NULL 2435 * 2436 * Get the firmware version from the admin queue commands 2437 **/ 2438 i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw, 2439 u16 *fw_major_version, u16 *fw_minor_version, 2440 u32 *fw_build, 2441 u16 *api_major_version, u16 *api_minor_version, 2442 struct i40e_asq_cmd_details *cmd_details) 2443 { 2444 struct i40e_aq_desc desc; 2445 struct i40e_aqc_get_version *resp = 2446 (struct i40e_aqc_get_version *)&desc.params.raw; 2447 i40e_status status; 2448 2449 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_version); 2450 2451 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2452 2453 if (!status) { 2454 if (fw_major_version) 2455 *fw_major_version = le16_to_cpu(resp->fw_major); 2456 if (fw_minor_version) 2457 *fw_minor_version = le16_to_cpu(resp->fw_minor); 2458 if (fw_build) 2459 *fw_build = le32_to_cpu(resp->fw_build); 2460 if (api_major_version) 2461 *api_major_version = le16_to_cpu(resp->api_major); 2462 if (api_minor_version) 2463 *api_minor_version = le16_to_cpu(resp->api_minor); 2464 } 2465 2466 return status; 2467 } 2468 2469 /** 2470 * i40e_aq_send_driver_version 2471 * @hw: pointer to the hw struct 2472 * @dv: driver's major, minor version 2473 * @cmd_details: pointer to command details structure or NULL 2474 * 2475 * Send the driver version to the firmware 2476 **/ 2477 i40e_status i40e_aq_send_driver_version(struct i40e_hw *hw, 2478 struct i40e_driver_version *dv, 2479 struct i40e_asq_cmd_details *cmd_details) 2480 { 2481 struct i40e_aq_desc desc; 2482 struct i40e_aqc_driver_version *cmd = 2483 (struct i40e_aqc_driver_version *)&desc.params.raw; 2484 i40e_status status; 2485 u16 len; 2486 2487 if (dv == NULL) 2488 return I40E_ERR_PARAM; 2489 2490 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_driver_version); 2491 2492 desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD); 2493 cmd->driver_major_ver = dv->major_version; 2494 cmd->driver_minor_ver = dv->minor_version; 2495 cmd->driver_build_ver = dv->build_version; 2496 cmd->driver_subbuild_ver = dv->subbuild_version; 2497 2498 len = 0; 2499 while (len < sizeof(dv->driver_string) && 2500 (dv->driver_string[len] < 0x80) && 2501 dv->driver_string[len]) 2502 len++; 2503 status = i40e_asq_send_command(hw, &desc, dv->driver_string, 2504 len, cmd_details); 2505 2506 return status; 2507 } 2508 2509 /** 2510 * i40e_get_link_status - get status of the HW network link 2511 * @hw: pointer to the hw struct 2512 * @link_up: pointer to bool (true/false = linkup/linkdown) 2513 * 2514 * Variable link_up true if link is up, false if link is down. 
 * The variable link_up is invalid if returned value of status != 0
 *
 * Side effect: LinkStatusEvent reporting becomes enabled
 **/
i40e_status i40e_get_link_status(struct i40e_hw *hw, bool *link_up)
{
	i40e_status status = 0;

	if (hw->phy.get_link_info) {
		status = i40e_update_link_info(hw);

		if (status)
			i40e_debug(hw, I40E_DEBUG_LINK, "get link failed: status %d\n",
				   status);
	}

	*link_up = hw->phy.link_info.link_info & I40E_AQ_LINK_UP;

	return status;
}

/**
 * i40e_update_link_info - update status of the HW network link
 * @hw: pointer to the hw struct
 **/
i40e_status i40e_update_link_info(struct i40e_hw *hw)
{
	struct i40e_aq_get_phy_abilities_resp abilities;
	i40e_status status = 0;

	status = i40e_aq_get_link_info(hw, true, NULL, NULL);
	if (status)
		return status;

	/* extra checking needed to ensure link info to user is timely */
	if ((hw->phy.link_info.link_info & I40E_AQ_MEDIA_AVAILABLE) &&
	    ((hw->phy.link_info.link_info & I40E_AQ_LINK_UP) ||
	     !(hw->phy.link_info_old.link_info & I40E_AQ_LINK_UP))) {
		status = i40e_aq_get_phy_capabilities(hw, false, false,
						      &abilities, NULL);
		if (status)
			return status;

		hw->phy.link_info.req_fec_info =
			abilities.fec_cfg_curr_mod_ext_info &
			(I40E_AQ_REQUEST_FEC_KR | I40E_AQ_REQUEST_FEC_RS);

		memcpy(hw->phy.link_info.module_type, &abilities.module_type,
		       sizeof(hw->phy.link_info.module_type));
	}

	return status;
}

/**
 * i40e_aq_add_veb - Insert a VEB between the VSI and the MAC
 * @hw: pointer to the hw struct
 * @uplink_seid: the MAC or other gizmo SEID
 * @downlink_seid: the VSI SEID
 * @enabled_tc: bitmap of TCs to be enabled
 * @default_port: true for default port VSI, false for control port
 * @veb_seid: pointer to where to put the resulting VEB SEID
 * @enable_stats: true to turn on VEB stats
 * @cmd_details: pointer to command details structure or NULL
 *
 * This asks the FW to add a VEB between the uplink and downlink
 * elements. If the uplink SEID is 0, this will be a floating VEB.
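 *
 * Illustrative call (the surrounding caller and its variables are
 * assumptions made for this sketch):
 *
 *	u16 veb_seid = 0;
 *	i40e_status ret;
 *
 *	ret = i40e_aq_add_veb(hw, mac_seid, vsi_seid, enabled_tc,
 *			      true, &veb_seid, true, NULL);
 *	if (!ret)
 *		hw_dbg(hw, "new VEB seid %d\n", veb_seid);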
2582 **/ 2583 i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid, 2584 u16 downlink_seid, u8 enabled_tc, 2585 bool default_port, u16 *veb_seid, 2586 bool enable_stats, 2587 struct i40e_asq_cmd_details *cmd_details) 2588 { 2589 struct i40e_aq_desc desc; 2590 struct i40e_aqc_add_veb *cmd = 2591 (struct i40e_aqc_add_veb *)&desc.params.raw; 2592 struct i40e_aqc_add_veb_completion *resp = 2593 (struct i40e_aqc_add_veb_completion *)&desc.params.raw; 2594 i40e_status status; 2595 u16 veb_flags = 0; 2596 2597 /* SEIDs need to either both be set or both be 0 for floating VEB */ 2598 if (!!uplink_seid != !!downlink_seid) 2599 return I40E_ERR_PARAM; 2600 2601 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_veb); 2602 2603 cmd->uplink_seid = cpu_to_le16(uplink_seid); 2604 cmd->downlink_seid = cpu_to_le16(downlink_seid); 2605 cmd->enable_tcs = enabled_tc; 2606 if (!uplink_seid) 2607 veb_flags |= I40E_AQC_ADD_VEB_FLOATING; 2608 if (default_port) 2609 veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT; 2610 else 2611 veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DATA; 2612 2613 /* reverse logic here: set the bitflag to disable the stats */ 2614 if (!enable_stats) 2615 veb_flags |= I40E_AQC_ADD_VEB_ENABLE_DISABLE_STATS; 2616 2617 cmd->veb_flags = cpu_to_le16(veb_flags); 2618 2619 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2620 2621 if (!status && veb_seid) 2622 *veb_seid = le16_to_cpu(resp->veb_seid); 2623 2624 return status; 2625 } 2626 2627 /** 2628 * i40e_aq_get_veb_parameters - Retrieve VEB parameters 2629 * @hw: pointer to the hw struct 2630 * @veb_seid: the SEID of the VEB to query 2631 * @switch_id: the uplink switch id 2632 * @floating: set to true if the VEB is floating 2633 * @statistic_index: index of the stats counter block for this VEB 2634 * @vebs_used: number of VEB's used by function 2635 * @vebs_free: total VEB's not reserved by any function 2636 * @cmd_details: pointer to command details structure or NULL 2637 * 2638 * This retrieves the parameters for a particular VEB, specified by 2639 * uplink_seid, and returns them to the caller. 
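 *
 * Illustrative query (sketch only; the local variables are assumed):
 *
 *	u16 switch_id, stats_idx, vebs_used, vebs_free;
 *	bool floating;
 *
 *	if (!i40e_aq_get_veb_parameters(hw, veb_seid, &switch_id, &floating,
 *					&stats_idx, &vebs_used, &vebs_free,
 *					NULL))
 *		hw_dbg(hw, "VEB stats block %d\n", stats_idx);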
2640 **/ 2641 i40e_status i40e_aq_get_veb_parameters(struct i40e_hw *hw, 2642 u16 veb_seid, u16 *switch_id, 2643 bool *floating, u16 *statistic_index, 2644 u16 *vebs_used, u16 *vebs_free, 2645 struct i40e_asq_cmd_details *cmd_details) 2646 { 2647 struct i40e_aq_desc desc; 2648 struct i40e_aqc_get_veb_parameters_completion *cmd_resp = 2649 (struct i40e_aqc_get_veb_parameters_completion *) 2650 &desc.params.raw; 2651 i40e_status status; 2652 2653 if (veb_seid == 0) 2654 return I40E_ERR_PARAM; 2655 2656 i40e_fill_default_direct_cmd_desc(&desc, 2657 i40e_aqc_opc_get_veb_parameters); 2658 cmd_resp->seid = cpu_to_le16(veb_seid); 2659 2660 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2661 if (status) 2662 goto get_veb_exit; 2663 2664 if (switch_id) 2665 *switch_id = le16_to_cpu(cmd_resp->switch_id); 2666 if (statistic_index) 2667 *statistic_index = le16_to_cpu(cmd_resp->statistic_index); 2668 if (vebs_used) 2669 *vebs_used = le16_to_cpu(cmd_resp->vebs_used); 2670 if (vebs_free) 2671 *vebs_free = le16_to_cpu(cmd_resp->vebs_free); 2672 if (floating) { 2673 u16 flags = le16_to_cpu(cmd_resp->veb_flags); 2674 2675 if (flags & I40E_AQC_ADD_VEB_FLOATING) 2676 *floating = true; 2677 else 2678 *floating = false; 2679 } 2680 2681 get_veb_exit: 2682 return status; 2683 } 2684 2685 /** 2686 * i40e_aq_add_macvlan 2687 * @hw: pointer to the hw struct 2688 * @seid: VSI for the mac address 2689 * @mv_list: list of macvlans to be added 2690 * @count: length of the list 2691 * @cmd_details: pointer to command details structure or NULL 2692 * 2693 * Add MAC/VLAN addresses to the HW filtering 2694 **/ 2695 i40e_status i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid, 2696 struct i40e_aqc_add_macvlan_element_data *mv_list, 2697 u16 count, struct i40e_asq_cmd_details *cmd_details) 2698 { 2699 struct i40e_aq_desc desc; 2700 struct i40e_aqc_macvlan *cmd = 2701 (struct i40e_aqc_macvlan *)&desc.params.raw; 2702 i40e_status status; 2703 u16 buf_size; 2704 int i; 2705 2706 if (count == 0 || !mv_list || !hw) 2707 return I40E_ERR_PARAM; 2708 2709 buf_size = count * sizeof(*mv_list); 2710 2711 /* prep the rest of the request */ 2712 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_macvlan); 2713 cmd->num_addresses = cpu_to_le16(count); 2714 cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid); 2715 cmd->seid[1] = 0; 2716 cmd->seid[2] = 0; 2717 2718 for (i = 0; i < count; i++) 2719 if (is_multicast_ether_addr(mv_list[i].mac_addr)) 2720 mv_list[i].flags |= 2721 cpu_to_le16(I40E_AQC_MACVLAN_ADD_USE_SHARED_MAC); 2722 2723 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); 2724 if (buf_size > I40E_AQ_LARGE_BUF) 2725 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 2726 2727 status = i40e_asq_send_command(hw, &desc, mv_list, buf_size, 2728 cmd_details); 2729 2730 return status; 2731 } 2732 2733 /** 2734 * i40e_aq_remove_macvlan 2735 * @hw: pointer to the hw struct 2736 * @seid: VSI for the mac address 2737 * @mv_list: list of macvlans to be removed 2738 * @count: length of the list 2739 * @cmd_details: pointer to command details structure or NULL 2740 * 2741 * Remove MAC/VLAN addresses from the HW filtering 2742 **/ 2743 i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid, 2744 struct i40e_aqc_remove_macvlan_element_data *mv_list, 2745 u16 count, struct i40e_asq_cmd_details *cmd_details) 2746 { 2747 struct i40e_aq_desc desc; 2748 struct i40e_aqc_macvlan *cmd = 2749 (struct i40e_aqc_macvlan *)&desc.params.raw; 2750 i40e_status status; 2751 u16 buf_size; 2752 
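	/* Caller-side sketch (illustrative only, not part of this helper):
	 * a single-entry list for the add/remove MAC-VLAN commands is
	 * usually built along these lines, where mac, vlan_id and vsi_seid
	 * are assumed to come from the caller and the *_PERFECT_MATCH flag
	 * is assumed from the admin queue definitions:
	 *
	 *	struct i40e_aqc_add_macvlan_element_data e = {};
	 *
	 *	ether_addr_copy(e.mac_addr, mac);
	 *	e.vlan_tag = cpu_to_le16(vlan_id);
	 *	e.flags = cpu_to_le16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH);
	 *	ret = i40e_aq_add_macvlan(hw, vsi_seid, &e, 1, NULL);
	 */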
2753 if (count == 0 || !mv_list || !hw) 2754 return I40E_ERR_PARAM; 2755 2756 buf_size = count * sizeof(*mv_list); 2757 2758 /* prep the rest of the request */ 2759 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_macvlan); 2760 cmd->num_addresses = cpu_to_le16(count); 2761 cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid); 2762 cmd->seid[1] = 0; 2763 cmd->seid[2] = 0; 2764 2765 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); 2766 if (buf_size > I40E_AQ_LARGE_BUF) 2767 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 2768 2769 status = i40e_asq_send_command(hw, &desc, mv_list, buf_size, 2770 cmd_details); 2771 2772 return status; 2773 } 2774 2775 /** 2776 * i40e_mirrorrule_op - Internal helper function to add/delete mirror rule 2777 * @hw: pointer to the hw struct 2778 * @opcode: AQ opcode for add or delete mirror rule 2779 * @sw_seid: Switch SEID (to which rule refers) 2780 * @rule_type: Rule Type (ingress/egress/VLAN) 2781 * @id: Destination VSI SEID or Rule ID 2782 * @count: length of the list 2783 * @mr_list: list of mirrored VSI SEIDs or VLAN IDs 2784 * @cmd_details: pointer to command details structure or NULL 2785 * @rule_id: Rule ID returned from FW 2786 * @rules_used: Number of rules used in internal switch 2787 * @rules_free: Number of rules free in internal switch 2788 * 2789 * Add/Delete a mirror rule to a specific switch. Mirror rules are supported for 2790 * VEBs/VEPA elements only 2791 **/ 2792 static i40e_status i40e_mirrorrule_op(struct i40e_hw *hw, 2793 u16 opcode, u16 sw_seid, u16 rule_type, u16 id, 2794 u16 count, __le16 *mr_list, 2795 struct i40e_asq_cmd_details *cmd_details, 2796 u16 *rule_id, u16 *rules_used, u16 *rules_free) 2797 { 2798 struct i40e_aq_desc desc; 2799 struct i40e_aqc_add_delete_mirror_rule *cmd = 2800 (struct i40e_aqc_add_delete_mirror_rule *)&desc.params.raw; 2801 struct i40e_aqc_add_delete_mirror_rule_completion *resp = 2802 (struct i40e_aqc_add_delete_mirror_rule_completion *)&desc.params.raw; 2803 i40e_status status; 2804 u16 buf_size; 2805 2806 buf_size = count * sizeof(*mr_list); 2807 2808 /* prep the rest of the request */ 2809 i40e_fill_default_direct_cmd_desc(&desc, opcode); 2810 cmd->seid = cpu_to_le16(sw_seid); 2811 cmd->rule_type = cpu_to_le16(rule_type & 2812 I40E_AQC_MIRROR_RULE_TYPE_MASK); 2813 cmd->num_entries = cpu_to_le16(count); 2814 /* Dest VSI for add, rule_id for delete */ 2815 cmd->destination = cpu_to_le16(id); 2816 if (mr_list) { 2817 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | 2818 I40E_AQ_FLAG_RD)); 2819 if (buf_size > I40E_AQ_LARGE_BUF) 2820 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 2821 } 2822 2823 status = i40e_asq_send_command(hw, &desc, mr_list, buf_size, 2824 cmd_details); 2825 if (!status || 2826 hw->aq.asq_last_status == I40E_AQ_RC_ENOSPC) { 2827 if (rule_id) 2828 *rule_id = le16_to_cpu(resp->rule_id); 2829 if (rules_used) 2830 *rules_used = le16_to_cpu(resp->mirror_rules_used); 2831 if (rules_free) 2832 *rules_free = le16_to_cpu(resp->mirror_rules_free); 2833 } 2834 return status; 2835 } 2836 2837 /** 2838 * i40e_aq_add_mirrorrule - add a mirror rule 2839 * @hw: pointer to the hw struct 2840 * @sw_seid: Switch SEID (to which rule refers) 2841 * @rule_type: Rule Type (ingress/egress/VLAN) 2842 * @dest_vsi: SEID of VSI to which packets will be mirrored 2843 * @count: length of the list 2844 * @mr_list: list of mirrored VSI SEIDs or VLAN IDs 2845 * @cmd_details: pointer to command details structure or NULL 2846 * @rule_id: Rule ID returned from FW 2847 * 
@rules_used: Number of rules used in internal switch 2848 * @rules_free: Number of rules free in internal switch 2849 * 2850 * Add mirror rule. Mirror rules are supported for VEBs or VEPA elements only 2851 **/ 2852 i40e_status i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid, 2853 u16 rule_type, u16 dest_vsi, u16 count, __le16 *mr_list, 2854 struct i40e_asq_cmd_details *cmd_details, 2855 u16 *rule_id, u16 *rules_used, u16 *rules_free) 2856 { 2857 if (!(rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS || 2858 rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS)) { 2859 if (count == 0 || !mr_list) 2860 return I40E_ERR_PARAM; 2861 } 2862 2863 return i40e_mirrorrule_op(hw, i40e_aqc_opc_add_mirror_rule, sw_seid, 2864 rule_type, dest_vsi, count, mr_list, 2865 cmd_details, rule_id, rules_used, rules_free); 2866 } 2867 2868 /** 2869 * i40e_aq_delete_mirrorrule - delete a mirror rule 2870 * @hw: pointer to the hw struct 2871 * @sw_seid: Switch SEID (to which rule refers) 2872 * @rule_type: Rule Type (ingress/egress/VLAN) 2873 * @count: length of the list 2874 * @rule_id: Rule ID that is returned in the receive desc as part of 2875 * add_mirrorrule. 2876 * @mr_list: list of mirrored VLAN IDs to be removed 2877 * @cmd_details: pointer to command details structure or NULL 2878 * @rules_used: Number of rules used in internal switch 2879 * @rules_free: Number of rules free in internal switch 2880 * 2881 * Delete a mirror rule. Mirror rules are supported for VEBs/VEPA elements only 2882 **/ 2883 i40e_status i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid, 2884 u16 rule_type, u16 rule_id, u16 count, __le16 *mr_list, 2885 struct i40e_asq_cmd_details *cmd_details, 2886 u16 *rules_used, u16 *rules_free) 2887 { 2888 /* Rule ID has to be valid except rule_type: INGRESS VLAN mirroring */ 2889 if (rule_type == I40E_AQC_MIRROR_RULE_TYPE_VLAN) { 2890 /* count and mr_list shall be valid for rule_type INGRESS VLAN 2891 * mirroring. For other rule_type, count and rule_type should 2892 * not matter. 
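	 *
	 * Illustrative VLAN-rule delete (sketch only; vid, sw_seid and
	 * rule_id are assumed to come from the caller):
	 *
	 *	__le16 mr_vid = cpu_to_le16(vid);
	 *	u16 used, free;
	 *
	 *	i40e_aq_delete_mirrorrule(hw, sw_seid,
	 *				  I40E_AQC_MIRROR_RULE_TYPE_VLAN,
	 *				  rule_id, 1, &mr_vid, NULL,
	 *				  &used, &free);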
2893 */ 2894 if (count == 0 || !mr_list) 2895 return I40E_ERR_PARAM; 2896 } 2897 2898 return i40e_mirrorrule_op(hw, i40e_aqc_opc_delete_mirror_rule, sw_seid, 2899 rule_type, rule_id, count, mr_list, 2900 cmd_details, NULL, rules_used, rules_free); 2901 } 2902 2903 /** 2904 * i40e_aq_send_msg_to_vf 2905 * @hw: pointer to the hardware structure 2906 * @vfid: VF id to send msg 2907 * @v_opcode: opcodes for VF-PF communication 2908 * @v_retval: return error code 2909 * @msg: pointer to the msg buffer 2910 * @msglen: msg length 2911 * @cmd_details: pointer to command details 2912 * 2913 * send msg to vf 2914 **/ 2915 i40e_status i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid, 2916 u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen, 2917 struct i40e_asq_cmd_details *cmd_details) 2918 { 2919 struct i40e_aq_desc desc; 2920 struct i40e_aqc_pf_vf_message *cmd = 2921 (struct i40e_aqc_pf_vf_message *)&desc.params.raw; 2922 i40e_status status; 2923 2924 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_send_msg_to_vf); 2925 cmd->id = cpu_to_le32(vfid); 2926 desc.cookie_high = cpu_to_le32(v_opcode); 2927 desc.cookie_low = cpu_to_le32(v_retval); 2928 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_SI); 2929 if (msglen) { 2930 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | 2931 I40E_AQ_FLAG_RD)); 2932 if (msglen > I40E_AQ_LARGE_BUF) 2933 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 2934 desc.datalen = cpu_to_le16(msglen); 2935 } 2936 status = i40e_asq_send_command(hw, &desc, msg, msglen, cmd_details); 2937 2938 return status; 2939 } 2940 2941 /** 2942 * i40e_aq_debug_read_register 2943 * @hw: pointer to the hw struct 2944 * @reg_addr: register address 2945 * @reg_val: register value 2946 * @cmd_details: pointer to command details structure or NULL 2947 * 2948 * Read the register using the admin queue commands 2949 **/ 2950 i40e_status i40e_aq_debug_read_register(struct i40e_hw *hw, 2951 u32 reg_addr, u64 *reg_val, 2952 struct i40e_asq_cmd_details *cmd_details) 2953 { 2954 struct i40e_aq_desc desc; 2955 struct i40e_aqc_debug_reg_read_write *cmd_resp = 2956 (struct i40e_aqc_debug_reg_read_write *)&desc.params.raw; 2957 i40e_status status; 2958 2959 if (reg_val == NULL) 2960 return I40E_ERR_PARAM; 2961 2962 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_read_reg); 2963 2964 cmd_resp->address = cpu_to_le32(reg_addr); 2965 2966 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2967 2968 if (!status) { 2969 *reg_val = ((u64)le32_to_cpu(cmd_resp->value_high) << 32) | 2970 (u64)le32_to_cpu(cmd_resp->value_low); 2971 } 2972 2973 return status; 2974 } 2975 2976 /** 2977 * i40e_aq_debug_write_register 2978 * @hw: pointer to the hw struct 2979 * @reg_addr: register address 2980 * @reg_val: register value 2981 * @cmd_details: pointer to command details structure or NULL 2982 * 2983 * Write to a register using the admin queue commands 2984 **/ 2985 i40e_status i40e_aq_debug_write_register(struct i40e_hw *hw, 2986 u32 reg_addr, u64 reg_val, 2987 struct i40e_asq_cmd_details *cmd_details) 2988 { 2989 struct i40e_aq_desc desc; 2990 struct i40e_aqc_debug_reg_read_write *cmd = 2991 (struct i40e_aqc_debug_reg_read_write *)&desc.params.raw; 2992 i40e_status status; 2993 2994 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_write_reg); 2995 2996 cmd->address = cpu_to_le32(reg_addr); 2997 cmd->value_high = cpu_to_le32((u32)(reg_val >> 32)); 2998 cmd->value_low = cpu_to_le32((u32)(reg_val & 0xFFFFFFFF)); 2999 3000 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 
3001 3002 return status; 3003 } 3004 3005 /** 3006 * i40e_aq_request_resource 3007 * @hw: pointer to the hw struct 3008 * @resource: resource id 3009 * @access: access type 3010 * @sdp_number: resource number 3011 * @timeout: the maximum time in ms that the driver may hold the resource 3012 * @cmd_details: pointer to command details structure or NULL 3013 * 3014 * requests common resource using the admin queue commands 3015 **/ 3016 i40e_status i40e_aq_request_resource(struct i40e_hw *hw, 3017 enum i40e_aq_resources_ids resource, 3018 enum i40e_aq_resource_access_type access, 3019 u8 sdp_number, u64 *timeout, 3020 struct i40e_asq_cmd_details *cmd_details) 3021 { 3022 struct i40e_aq_desc desc; 3023 struct i40e_aqc_request_resource *cmd_resp = 3024 (struct i40e_aqc_request_resource *)&desc.params.raw; 3025 i40e_status status; 3026 3027 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_request_resource); 3028 3029 cmd_resp->resource_id = cpu_to_le16(resource); 3030 cmd_resp->access_type = cpu_to_le16(access); 3031 cmd_resp->resource_number = cpu_to_le32(sdp_number); 3032 3033 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3034 /* The completion specifies the maximum time in ms that the driver 3035 * may hold the resource in the Timeout field. 3036 * If the resource is held by someone else, the command completes with 3037 * busy return value and the timeout field indicates the maximum time 3038 * the current owner of the resource has to free it. 3039 */ 3040 if (!status || hw->aq.asq_last_status == I40E_AQ_RC_EBUSY) 3041 *timeout = le32_to_cpu(cmd_resp->timeout); 3042 3043 return status; 3044 } 3045 3046 /** 3047 * i40e_aq_release_resource 3048 * @hw: pointer to the hw struct 3049 * @resource: resource id 3050 * @sdp_number: resource number 3051 * @cmd_details: pointer to command details structure or NULL 3052 * 3053 * release common resource using the admin queue commands 3054 **/ 3055 i40e_status i40e_aq_release_resource(struct i40e_hw *hw, 3056 enum i40e_aq_resources_ids resource, 3057 u8 sdp_number, 3058 struct i40e_asq_cmd_details *cmd_details) 3059 { 3060 struct i40e_aq_desc desc; 3061 struct i40e_aqc_request_resource *cmd = 3062 (struct i40e_aqc_request_resource *)&desc.params.raw; 3063 i40e_status status; 3064 3065 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_release_resource); 3066 3067 cmd->resource_id = cpu_to_le16(resource); 3068 cmd->resource_number = cpu_to_le32(sdp_number); 3069 3070 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3071 3072 return status; 3073 } 3074 3075 /** 3076 * i40e_aq_read_nvm 3077 * @hw: pointer to the hw struct 3078 * @module_pointer: module pointer location in words from the NVM beginning 3079 * @offset: byte offset from the module beginning 3080 * @length: length of the section to be read (in bytes from the offset) 3081 * @data: command buffer (size [bytes] = length) 3082 * @last_command: tells if this is the last command in a series 3083 * @cmd_details: pointer to command details structure or NULL 3084 * 3085 * Read the NVM using the admin queue commands 3086 **/ 3087 i40e_status i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer, 3088 u32 offset, u16 length, void *data, 3089 bool last_command, 3090 struct i40e_asq_cmd_details *cmd_details) 3091 { 3092 struct i40e_aq_desc desc; 3093 struct i40e_aqc_nvm_update *cmd = 3094 (struct i40e_aqc_nvm_update *)&desc.params.raw; 3095 i40e_status status; 3096 3097 /* In offset the highest byte must be zeroed. 
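	 * (i.e. the offset must stay below 16 MB). An illustrative read,
	 * with the caller assumed to hold the NVM resource via
	 * i40e_acquire_nvm()/i40e_release_nvm() and to supply buf/len:
	 *
	 *	ret = i40e_aq_read_nvm(hw, 0, byte_offset, len, buf,
	 *			       true, NULL);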
*/ 3098 if (offset & 0xFF000000) { 3099 status = I40E_ERR_PARAM; 3100 goto i40e_aq_read_nvm_exit; 3101 } 3102 3103 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_read); 3104 3105 /* If this is the last command in a series, set the proper flag. */ 3106 if (last_command) 3107 cmd->command_flags |= I40E_AQ_NVM_LAST_CMD; 3108 cmd->module_pointer = module_pointer; 3109 cmd->offset = cpu_to_le32(offset); 3110 cmd->length = cpu_to_le16(length); 3111 3112 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 3113 if (length > I40E_AQ_LARGE_BUF) 3114 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 3115 3116 status = i40e_asq_send_command(hw, &desc, data, length, cmd_details); 3117 3118 i40e_aq_read_nvm_exit: 3119 return status; 3120 } 3121 3122 /** 3123 * i40e_aq_erase_nvm 3124 * @hw: pointer to the hw struct 3125 * @module_pointer: module pointer location in words from the NVM beginning 3126 * @offset: offset in the module (expressed in 4 KB from module's beginning) 3127 * @length: length of the section to be erased (expressed in 4 KB) 3128 * @last_command: tells if this is the last command in a series 3129 * @cmd_details: pointer to command details structure or NULL 3130 * 3131 * Erase the NVM sector using the admin queue commands 3132 **/ 3133 i40e_status i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer, 3134 u32 offset, u16 length, bool last_command, 3135 struct i40e_asq_cmd_details *cmd_details) 3136 { 3137 struct i40e_aq_desc desc; 3138 struct i40e_aqc_nvm_update *cmd = 3139 (struct i40e_aqc_nvm_update *)&desc.params.raw; 3140 i40e_status status; 3141 3142 /* In offset the highest byte must be zeroed. */ 3143 if (offset & 0xFF000000) { 3144 status = I40E_ERR_PARAM; 3145 goto i40e_aq_erase_nvm_exit; 3146 } 3147 3148 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_erase); 3149 3150 /* If this is the last command in a series, set the proper flag. */ 3151 if (last_command) 3152 cmd->command_flags |= I40E_AQ_NVM_LAST_CMD; 3153 cmd->module_pointer = module_pointer; 3154 cmd->offset = cpu_to_le32(offset); 3155 cmd->length = cpu_to_le16(length); 3156 3157 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3158 3159 i40e_aq_erase_nvm_exit: 3160 return status; 3161 } 3162 3163 /** 3164 * i40e_parse_discover_capabilities 3165 * @hw: pointer to the hw struct 3166 * @buff: pointer to a buffer containing device/function capability records 3167 * @cap_count: number of capability records in the list 3168 * @list_type_opc: type of capabilities list to parse 3169 * 3170 * Parse the device/function capabilities list. 
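 *
 * The list parsed here is normally obtained through
 * i40e_aq_discover_capabilities(); a rough sketch of the usual
 * retry-on-ENOMEM pattern in the caller (assumed code, not from this
 * file):
 *
 *	do {
 *		buff = kzalloc(buff_size, GFP_KERNEL);
 *		if (!buff)
 *			return -ENOMEM;
 *		err = i40e_aq_discover_capabilities(hw, buff, buff_size,
 *						    &needed, list_type_opc,
 *						    NULL);
 *		kfree(buff);
 *		if (hw->aq.asq_last_status == I40E_AQ_RC_ENOMEM)
 *			buff_size = needed;
 *		else if (err)
 *			break;
 *	} while (err);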
3171 **/ 3172 static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff, 3173 u32 cap_count, 3174 enum i40e_admin_queue_opc list_type_opc) 3175 { 3176 struct i40e_aqc_list_capabilities_element_resp *cap; 3177 u32 valid_functions, num_functions; 3178 u32 number, logical_id, phys_id; 3179 struct i40e_hw_capabilities *p; 3180 u16 id, ocp_cfg_word0; 3181 i40e_status status; 3182 u8 major_rev; 3183 u32 i = 0; 3184 3185 cap = (struct i40e_aqc_list_capabilities_element_resp *) buff; 3186 3187 if (list_type_opc == i40e_aqc_opc_list_dev_capabilities) 3188 p = &hw->dev_caps; 3189 else if (list_type_opc == i40e_aqc_opc_list_func_capabilities) 3190 p = &hw->func_caps; 3191 else 3192 return; 3193 3194 for (i = 0; i < cap_count; i++, cap++) { 3195 id = le16_to_cpu(cap->id); 3196 number = le32_to_cpu(cap->number); 3197 logical_id = le32_to_cpu(cap->logical_id); 3198 phys_id = le32_to_cpu(cap->phys_id); 3199 major_rev = cap->major_rev; 3200 3201 switch (id) { 3202 case I40E_AQ_CAP_ID_SWITCH_MODE: 3203 p->switch_mode = number; 3204 break; 3205 case I40E_AQ_CAP_ID_MNG_MODE: 3206 p->management_mode = number; 3207 if (major_rev > 1) { 3208 p->mng_protocols_over_mctp = logical_id; 3209 i40e_debug(hw, I40E_DEBUG_INIT, 3210 "HW Capability: Protocols over MCTP = %d\n", 3211 p->mng_protocols_over_mctp); 3212 } else { 3213 p->mng_protocols_over_mctp = 0; 3214 } 3215 break; 3216 case I40E_AQ_CAP_ID_NPAR_ACTIVE: 3217 p->npar_enable = number; 3218 break; 3219 case I40E_AQ_CAP_ID_OS2BMC_CAP: 3220 p->os2bmc = number; 3221 break; 3222 case I40E_AQ_CAP_ID_FUNCTIONS_VALID: 3223 p->valid_functions = number; 3224 break; 3225 case I40E_AQ_CAP_ID_SRIOV: 3226 if (number == 1) 3227 p->sr_iov_1_1 = true; 3228 break; 3229 case I40E_AQ_CAP_ID_VF: 3230 p->num_vfs = number; 3231 p->vf_base_id = logical_id; 3232 break; 3233 case I40E_AQ_CAP_ID_VMDQ: 3234 if (number == 1) 3235 p->vmdq = true; 3236 break; 3237 case I40E_AQ_CAP_ID_8021QBG: 3238 if (number == 1) 3239 p->evb_802_1_qbg = true; 3240 break; 3241 case I40E_AQ_CAP_ID_8021QBR: 3242 if (number == 1) 3243 p->evb_802_1_qbh = true; 3244 break; 3245 case I40E_AQ_CAP_ID_VSI: 3246 p->num_vsis = number; 3247 break; 3248 case I40E_AQ_CAP_ID_DCB: 3249 if (number == 1) { 3250 p->dcb = true; 3251 p->enabled_tcmap = logical_id; 3252 p->maxtc = phys_id; 3253 } 3254 break; 3255 case I40E_AQ_CAP_ID_FCOE: 3256 if (number == 1) 3257 p->fcoe = true; 3258 break; 3259 case I40E_AQ_CAP_ID_ISCSI: 3260 if (number == 1) 3261 p->iscsi = true; 3262 break; 3263 case I40E_AQ_CAP_ID_RSS: 3264 p->rss = true; 3265 p->rss_table_size = number; 3266 p->rss_table_entry_width = logical_id; 3267 break; 3268 case I40E_AQ_CAP_ID_RXQ: 3269 p->num_rx_qp = number; 3270 p->base_queue = phys_id; 3271 break; 3272 case I40E_AQ_CAP_ID_TXQ: 3273 p->num_tx_qp = number; 3274 p->base_queue = phys_id; 3275 break; 3276 case I40E_AQ_CAP_ID_MSIX: 3277 p->num_msix_vectors = number; 3278 i40e_debug(hw, I40E_DEBUG_INIT, 3279 "HW Capability: MSIX vector count = %d\n", 3280 p->num_msix_vectors); 3281 break; 3282 case I40E_AQ_CAP_ID_VF_MSIX: 3283 p->num_msix_vectors_vf = number; 3284 break; 3285 case I40E_AQ_CAP_ID_FLEX10: 3286 if (major_rev == 1) { 3287 if (number == 1) { 3288 p->flex10_enable = true; 3289 p->flex10_capable = true; 3290 } 3291 } else { 3292 /* Capability revision >= 2 */ 3293 if (number & 1) 3294 p->flex10_enable = true; 3295 if (number & 2) 3296 p->flex10_capable = true; 3297 } 3298 p->flex10_mode = logical_id; 3299 p->flex10_status = phys_id; 3300 break; 3301 case I40E_AQ_CAP_ID_CEM: 3302 if (number == 
1) 3303 p->mgmt_cem = true; 3304 break; 3305 case I40E_AQ_CAP_ID_IWARP: 3306 if (number == 1) 3307 p->iwarp = true; 3308 break; 3309 case I40E_AQ_CAP_ID_LED: 3310 if (phys_id < I40E_HW_CAP_MAX_GPIO) 3311 p->led[phys_id] = true; 3312 break; 3313 case I40E_AQ_CAP_ID_SDP: 3314 if (phys_id < I40E_HW_CAP_MAX_GPIO) 3315 p->sdp[phys_id] = true; 3316 break; 3317 case I40E_AQ_CAP_ID_MDIO: 3318 if (number == 1) { 3319 p->mdio_port_num = phys_id; 3320 p->mdio_port_mode = logical_id; 3321 } 3322 break; 3323 case I40E_AQ_CAP_ID_1588: 3324 if (number == 1) 3325 p->ieee_1588 = true; 3326 break; 3327 case I40E_AQ_CAP_ID_FLOW_DIRECTOR: 3328 p->fd = true; 3329 p->fd_filters_guaranteed = number; 3330 p->fd_filters_best_effort = logical_id; 3331 break; 3332 case I40E_AQ_CAP_ID_WSR_PROT: 3333 p->wr_csr_prot = (u64)number; 3334 p->wr_csr_prot |= (u64)logical_id << 32; 3335 break; 3336 case I40E_AQ_CAP_ID_NVM_MGMT: 3337 if (number & I40E_NVM_MGMT_SEC_REV_DISABLED) 3338 p->sec_rev_disabled = true; 3339 if (number & I40E_NVM_MGMT_UPDATE_DISABLED) 3340 p->update_disabled = true; 3341 break; 3342 default: 3343 break; 3344 } 3345 } 3346 3347 if (p->fcoe) 3348 i40e_debug(hw, I40E_DEBUG_ALL, "device is FCoE capable\n"); 3349 3350 /* Software override ensuring FCoE is disabled if npar or mfp 3351 * mode because it is not supported in these modes. 3352 */ 3353 if (p->npar_enable || p->flex10_enable) 3354 p->fcoe = false; 3355 3356 /* count the enabled ports (aka the "not disabled" ports) */ 3357 hw->num_ports = 0; 3358 for (i = 0; i < 4; i++) { 3359 u32 port_cfg_reg = I40E_PRTGEN_CNF + (4 * i); 3360 u64 port_cfg = 0; 3361 3362 /* use AQ read to get the physical register offset instead 3363 * of the port relative offset 3364 */ 3365 i40e_aq_debug_read_register(hw, port_cfg_reg, &port_cfg, NULL); 3366 if (!(port_cfg & I40E_PRTGEN_CNF_PORT_DIS_MASK)) 3367 hw->num_ports++; 3368 } 3369 3370 /* OCP cards case: if a mezz is removed the Ethernet port is at 3371 * disabled state in PRTGEN_CNF register. Additional NVM read is 3372 * needed in order to check if we are dealing with OCP card. 3373 * Those cards have 4 PFs at minimum, so using PRTGEN_CNF for counting 3374 * physical ports results in wrong partition id calculation and thus 3375 * not supporting WoL. 
3376 */ 3377 if (hw->mac.type == I40E_MAC_X722) { 3378 if (!i40e_acquire_nvm(hw, I40E_RESOURCE_READ)) { 3379 status = i40e_aq_read_nvm(hw, I40E_SR_EMP_MODULE_PTR, 3380 2 * I40E_SR_OCP_CFG_WORD0, 3381 sizeof(ocp_cfg_word0), 3382 &ocp_cfg_word0, true, NULL); 3383 if (!status && 3384 (ocp_cfg_word0 & I40E_SR_OCP_ENABLED)) 3385 hw->num_ports = 4; 3386 i40e_release_nvm(hw); 3387 } 3388 } 3389 3390 valid_functions = p->valid_functions; 3391 num_functions = 0; 3392 while (valid_functions) { 3393 if (valid_functions & 1) 3394 num_functions++; 3395 valid_functions >>= 1; 3396 } 3397 3398 /* partition id is 1-based, and functions are evenly spread 3399 * across the ports as partitions 3400 */ 3401 if (hw->num_ports != 0) { 3402 hw->partition_id = (hw->pf_id / hw->num_ports) + 1; 3403 hw->num_partitions = num_functions / hw->num_ports; 3404 } 3405 3406 /* additional HW specific goodies that might 3407 * someday be HW version specific 3408 */ 3409 p->rx_buf_chain_len = I40E_MAX_CHAINED_RX_BUFFERS; 3410 } 3411 3412 /** 3413 * i40e_aq_discover_capabilities 3414 * @hw: pointer to the hw struct 3415 * @buff: a virtual buffer to hold the capabilities 3416 * @buff_size: Size of the virtual buffer 3417 * @data_size: Size of the returned data, or buff size needed if AQ err==ENOMEM 3418 * @list_type_opc: capabilities type to discover - pass in the command opcode 3419 * @cmd_details: pointer to command details structure or NULL 3420 * 3421 * Get the device capabilities descriptions from the firmware 3422 **/ 3423 i40e_status i40e_aq_discover_capabilities(struct i40e_hw *hw, 3424 void *buff, u16 buff_size, u16 *data_size, 3425 enum i40e_admin_queue_opc list_type_opc, 3426 struct i40e_asq_cmd_details *cmd_details) 3427 { 3428 struct i40e_aqc_list_capabilites *cmd; 3429 struct i40e_aq_desc desc; 3430 i40e_status status = 0; 3431 3432 cmd = (struct i40e_aqc_list_capabilites *)&desc.params.raw; 3433 3434 if (list_type_opc != i40e_aqc_opc_list_func_capabilities && 3435 list_type_opc != i40e_aqc_opc_list_dev_capabilities) { 3436 status = I40E_ERR_PARAM; 3437 goto exit; 3438 } 3439 3440 i40e_fill_default_direct_cmd_desc(&desc, list_type_opc); 3441 3442 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 3443 if (buff_size > I40E_AQ_LARGE_BUF) 3444 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 3445 3446 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); 3447 *data_size = le16_to_cpu(desc.datalen); 3448 3449 if (status) 3450 goto exit; 3451 3452 i40e_parse_discover_capabilities(hw, buff, le32_to_cpu(cmd->count), 3453 list_type_opc); 3454 3455 exit: 3456 return status; 3457 } 3458 3459 /** 3460 * i40e_aq_update_nvm 3461 * @hw: pointer to the hw struct 3462 * @module_pointer: module pointer location in words from the NVM beginning 3463 * @offset: byte offset from the module beginning 3464 * @length: length of the section to be written (in bytes from the offset) 3465 * @data: command buffer (size [bytes] = length) 3466 * @last_command: tells if this is the last command in a series 3467 * @preservation_flags: Preservation mode flags 3468 * @cmd_details: pointer to command details structure or NULL 3469 * 3470 * Update the NVM using the admin queue commands 3471 **/ 3472 i40e_status i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer, 3473 u32 offset, u16 length, void *data, 3474 bool last_command, u8 preservation_flags, 3475 struct i40e_asq_cmd_details *cmd_details) 3476 { 3477 struct i40e_aq_desc desc; 3478 struct i40e_aqc_nvm_update *cmd = 3479 (struct i40e_aqc_nvm_update *)&desc.params.raw; 
3480 i40e_status status; 3481 3482 /* In offset the highest byte must be zeroed. */ 3483 if (offset & 0xFF000000) { 3484 status = I40E_ERR_PARAM; 3485 goto i40e_aq_update_nvm_exit; 3486 } 3487 3488 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_update); 3489 3490 /* If this is the last command in a series, set the proper flag. */ 3491 if (last_command) 3492 cmd->command_flags |= I40E_AQ_NVM_LAST_CMD; 3493 if (hw->mac.type == I40E_MAC_X722) { 3494 if (preservation_flags == I40E_NVM_PRESERVATION_FLAGS_SELECTED) 3495 cmd->command_flags |= 3496 (I40E_AQ_NVM_PRESERVATION_FLAGS_SELECTED << 3497 I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT); 3498 else if (preservation_flags == I40E_NVM_PRESERVATION_FLAGS_ALL) 3499 cmd->command_flags |= 3500 (I40E_AQ_NVM_PRESERVATION_FLAGS_ALL << 3501 I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT); 3502 } 3503 cmd->module_pointer = module_pointer; 3504 cmd->offset = cpu_to_le32(offset); 3505 cmd->length = cpu_to_le16(length); 3506 3507 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); 3508 if (length > I40E_AQ_LARGE_BUF) 3509 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 3510 3511 status = i40e_asq_send_command(hw, &desc, data, length, cmd_details); 3512 3513 i40e_aq_update_nvm_exit: 3514 return status; 3515 } 3516 3517 /** 3518 * i40e_aq_rearrange_nvm 3519 * @hw: pointer to the hw struct 3520 * @rearrange_nvm: defines direction of rearrangement 3521 * @cmd_details: pointer to command details structure or NULL 3522 * 3523 * Rearrange NVM structure, available only for transition FW 3524 **/ 3525 i40e_status i40e_aq_rearrange_nvm(struct i40e_hw *hw, 3526 u8 rearrange_nvm, 3527 struct i40e_asq_cmd_details *cmd_details) 3528 { 3529 struct i40e_aqc_nvm_update *cmd; 3530 i40e_status status; 3531 struct i40e_aq_desc desc; 3532 3533 cmd = (struct i40e_aqc_nvm_update *)&desc.params.raw; 3534 3535 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_update); 3536 3537 rearrange_nvm &= (I40E_AQ_NVM_REARRANGE_TO_FLAT | 3538 I40E_AQ_NVM_REARRANGE_TO_STRUCT); 3539 3540 if (!rearrange_nvm) { 3541 status = I40E_ERR_PARAM; 3542 goto i40e_aq_rearrange_nvm_exit; 3543 } 3544 3545 cmd->command_flags |= rearrange_nvm; 3546 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3547 3548 i40e_aq_rearrange_nvm_exit: 3549 return status; 3550 } 3551 3552 /** 3553 * i40e_aq_get_lldp_mib 3554 * @hw: pointer to the hw struct 3555 * @bridge_type: type of bridge requested 3556 * @mib_type: Local, Remote or both Local and Remote MIBs 3557 * @buff: pointer to a user supplied buffer to store the MIB block 3558 * @buff_size: size of the buffer (in bytes) 3559 * @local_len : length of the returned Local LLDP MIB 3560 * @remote_len: length of the returned Remote LLDP MIB 3561 * @cmd_details: pointer to command details structure or NULL 3562 * 3563 * Requests the complete LLDP MIB (entire packet). 
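 *
 * A minimal usage sketch (hypothetical caller; the buffer size and the
 * mib_type value are illustrative, error handling omitted):
 *
 *	u8 mib[512];	/* size chosen for illustration only */
 *	u16 local_len = 0, remote_len = 0;
 *
 *	status = i40e_aq_get_lldp_mib(hw, 0, mib_type, mib, sizeof(mib),
 *				      &local_len, &remote_len, NULL);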
3564 **/ 3565 i40e_status i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type, 3566 u8 mib_type, void *buff, u16 buff_size, 3567 u16 *local_len, u16 *remote_len, 3568 struct i40e_asq_cmd_details *cmd_details) 3569 { 3570 struct i40e_aq_desc desc; 3571 struct i40e_aqc_lldp_get_mib *cmd = 3572 (struct i40e_aqc_lldp_get_mib *)&desc.params.raw; 3573 struct i40e_aqc_lldp_get_mib *resp = 3574 (struct i40e_aqc_lldp_get_mib *)&desc.params.raw; 3575 i40e_status status; 3576 3577 if (buff_size == 0 || !buff) 3578 return I40E_ERR_PARAM; 3579 3580 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_get_mib); 3581 /* Indirect Command */ 3582 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 3583 3584 cmd->type = mib_type & I40E_AQ_LLDP_MIB_TYPE_MASK; 3585 cmd->type |= ((bridge_type << I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) & 3586 I40E_AQ_LLDP_BRIDGE_TYPE_MASK); 3587 3588 desc.datalen = cpu_to_le16(buff_size); 3589 3590 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 3591 if (buff_size > I40E_AQ_LARGE_BUF) 3592 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 3593 3594 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); 3595 if (!status) { 3596 if (local_len != NULL) 3597 *local_len = le16_to_cpu(resp->local_len); 3598 if (remote_len != NULL) 3599 *remote_len = le16_to_cpu(resp->remote_len); 3600 } 3601 3602 return status; 3603 } 3604 3605 /** 3606 * i40e_aq_cfg_lldp_mib_change_event 3607 * @hw: pointer to the hw struct 3608 * @enable_update: Enable or Disable event posting 3609 * @cmd_details: pointer to command details structure or NULL 3610 * 3611 * Enable or Disable posting of an event on ARQ when LLDP MIB 3612 * associated with the interface changes 3613 **/ 3614 i40e_status i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw, 3615 bool enable_update, 3616 struct i40e_asq_cmd_details *cmd_details) 3617 { 3618 struct i40e_aq_desc desc; 3619 struct i40e_aqc_lldp_update_mib *cmd = 3620 (struct i40e_aqc_lldp_update_mib *)&desc.params.raw; 3621 i40e_status status; 3622 3623 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_update_mib); 3624 3625 if (!enable_update) 3626 cmd->command |= I40E_AQ_LLDP_MIB_UPDATE_DISABLE; 3627 3628 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3629 3630 return status; 3631 } 3632 3633 /** 3634 * i40e_aq_restore_lldp 3635 * @hw: pointer to the hw struct 3636 * @setting: pointer to factory setting variable or NULL 3637 * @restore: True if factory settings should be restored 3638 * @cmd_details: pointer to command details structure or NULL 3639 * 3640 * Restore LLDP Agent factory settings if @restore set to True. In other case 3641 * only returns factory setting in AQ response. 
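 *
 * For example, a caller that only wants to query the factory default
 * without restoring it might do (illustrative sketch, error handling
 * omitted):
 *
 *	u8 factory_setting;
 *
 *	status = i40e_aq_restore_lldp(hw, &factory_setting, false, NULL);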
3642 **/ 3643 enum i40e_status_code 3644 i40e_aq_restore_lldp(struct i40e_hw *hw, u8 *setting, bool restore, 3645 struct i40e_asq_cmd_details *cmd_details) 3646 { 3647 struct i40e_aq_desc desc; 3648 struct i40e_aqc_lldp_restore *cmd = 3649 (struct i40e_aqc_lldp_restore *)&desc.params.raw; 3650 i40e_status status; 3651 3652 if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT)) { 3653 i40e_debug(hw, I40E_DEBUG_ALL, 3654 "Restore LLDP not supported by current FW version.\n"); 3655 return I40E_ERR_DEVICE_NOT_SUPPORTED; 3656 } 3657 3658 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_restore); 3659 3660 if (restore) 3661 cmd->command |= I40E_AQ_LLDP_AGENT_RESTORE; 3662 3663 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3664 3665 if (setting) 3666 *setting = cmd->command & 1; 3667 3668 return status; 3669 } 3670 3671 /** 3672 * i40e_aq_stop_lldp 3673 * @hw: pointer to the hw struct 3674 * @shutdown_agent: True if LLDP Agent needs to be Shutdown 3675 * @persist: True if stop of LLDP should be persistent across power cycles 3676 * @cmd_details: pointer to command details structure or NULL 3677 * 3678 * Stop or Shutdown the embedded LLDP Agent 3679 **/ 3680 i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent, 3681 bool persist, 3682 struct i40e_asq_cmd_details *cmd_details) 3683 { 3684 struct i40e_aq_desc desc; 3685 struct i40e_aqc_lldp_stop *cmd = 3686 (struct i40e_aqc_lldp_stop *)&desc.params.raw; 3687 i40e_status status; 3688 3689 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_stop); 3690 3691 if (shutdown_agent) 3692 cmd->command |= I40E_AQ_LLDP_AGENT_SHUTDOWN; 3693 3694 if (persist) { 3695 if (hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT) 3696 cmd->command |= I40E_AQ_LLDP_AGENT_STOP_PERSIST; 3697 else 3698 i40e_debug(hw, I40E_DEBUG_ALL, 3699 "Persistent Stop LLDP not supported by current FW version.\n"); 3700 } 3701 3702 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3703 3704 return status; 3705 } 3706 3707 /** 3708 * i40e_aq_start_lldp 3709 * @hw: pointer to the hw struct 3711 * @persist: True if start of LLDP should be persistent across power cycles 3713 * @cmd_details: pointer to command details structure or NULL 3714 * 3715 * Start the embedded LLDP Agent on all ports.
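 *
 * A typical stop/start sequence for the FW agent might look like this
 * (illustrative sketch only):
 *
 *	i40e_aq_stop_lldp(hw, true, false, NULL);
 *	/* ... reconfigure ... */
 *	i40e_aq_start_lldp(hw, false, NULL);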
3716 **/ 3717 i40e_status i40e_aq_start_lldp(struct i40e_hw *hw, bool persist, 3718 struct i40e_asq_cmd_details *cmd_details) 3719 { 3720 struct i40e_aq_desc desc; 3721 struct i40e_aqc_lldp_start *cmd = 3722 (struct i40e_aqc_lldp_start *)&desc.params.raw; 3723 i40e_status status; 3724 3725 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_start); 3726 3727 cmd->command = I40E_AQ_LLDP_AGENT_START; 3728 3729 if (persist) { 3730 if (hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT) 3731 cmd->command |= I40E_AQ_LLDP_AGENT_START_PERSIST; 3732 else 3733 i40e_debug(hw, I40E_DEBUG_ALL, 3734 "Persistent Start LLDP not supported by current FW version.\n"); 3735 } 3736 3737 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3738 3739 return status; 3740 } 3741 3742 /** 3743 * i40e_aq_set_dcb_parameters 3744 * @hw: pointer to the hw struct 3745 * @cmd_details: pointer to command details structure or NULL 3746 * @dcb_enable: True if DCB configuration needs to be applied 3747 * 3748 **/ 3749 enum i40e_status_code 3750 i40e_aq_set_dcb_parameters(struct i40e_hw *hw, bool dcb_enable, 3751 struct i40e_asq_cmd_details *cmd_details) 3752 { 3753 struct i40e_aq_desc desc; 3754 struct i40e_aqc_set_dcb_parameters *cmd = 3755 (struct i40e_aqc_set_dcb_parameters *)&desc.params.raw; 3756 i40e_status status; 3757 3758 if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE)) 3759 return I40E_ERR_DEVICE_NOT_SUPPORTED; 3760 3761 i40e_fill_default_direct_cmd_desc(&desc, 3762 i40e_aqc_opc_set_dcb_parameters); 3763 3764 if (dcb_enable) { 3765 cmd->valid_flags = I40E_DCB_VALID; 3766 cmd->command = I40E_AQ_DCB_SET_AGENT; 3767 } 3768 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3769 3770 return status; 3771 } 3772 3773 /** 3774 * i40e_aq_get_cee_dcb_config 3775 * @hw: pointer to the hw struct 3776 * @buff: response buffer that stores CEE operational configuration 3777 * @buff_size: size of the buffer passed 3778 * @cmd_details: pointer to command details structure or NULL 3779 * 3780 * Get CEE DCBX mode operational configuration from firmware 3781 **/ 3782 i40e_status i40e_aq_get_cee_dcb_config(struct i40e_hw *hw, 3783 void *buff, u16 buff_size, 3784 struct i40e_asq_cmd_details *cmd_details) 3785 { 3786 struct i40e_aq_desc desc; 3787 i40e_status status; 3788 3789 if (buff_size == 0 || !buff) 3790 return I40E_ERR_PARAM; 3791 3792 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_cee_dcb_cfg); 3793 3794 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 3795 status = i40e_asq_send_command(hw, &desc, (void *)buff, buff_size, 3796 cmd_details); 3797 3798 return status; 3799 } 3800 3801 /** 3802 * i40e_aq_add_udp_tunnel 3803 * @hw: pointer to the hw struct 3804 * @udp_port: the UDP port to add in Host byte order 3805 * @protocol_index: protocol index type 3806 * @filter_index: pointer to filter index 3807 * @cmd_details: pointer to command details structure or NULL 3808 * 3809 * Note: Firmware expects the udp_port value to be in Little Endian format, 3810 * and this function will call cpu_to_le16 to convert from Host byte order to 3811 * Little Endian order. 
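 *
 * For example, offloading the IANA-assigned VXLAN port might look like
 * this (sketch; protocol_index is assumed to already hold the VXLAN
 * tunnel type index):
 *
 *	u8 filter_index;
 *
 *	status = i40e_aq_add_udp_tunnel(hw, 4789, protocol_index,
 *					&filter_index, NULL);
 *
 * The port is passed in host byte order; the conversion described above
 * happens inside this function.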
3812 **/ 3813 i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw, 3814 u16 udp_port, u8 protocol_index, 3815 u8 *filter_index, 3816 struct i40e_asq_cmd_details *cmd_details) 3817 { 3818 struct i40e_aq_desc desc; 3819 struct i40e_aqc_add_udp_tunnel *cmd = 3820 (struct i40e_aqc_add_udp_tunnel *)&desc.params.raw; 3821 struct i40e_aqc_del_udp_tunnel_completion *resp = 3822 (struct i40e_aqc_del_udp_tunnel_completion *)&desc.params.raw; 3823 i40e_status status; 3824 3825 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_udp_tunnel); 3826 3827 cmd->udp_port = cpu_to_le16(udp_port); 3828 cmd->protocol_type = protocol_index; 3829 3830 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3831 3832 if (!status && filter_index) 3833 *filter_index = resp->index; 3834 3835 return status; 3836 } 3837 3838 /** 3839 * i40e_aq_del_udp_tunnel 3840 * @hw: pointer to the hw struct 3841 * @index: filter index 3842 * @cmd_details: pointer to command details structure or NULL 3843 **/ 3844 i40e_status i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index, 3845 struct i40e_asq_cmd_details *cmd_details) 3846 { 3847 struct i40e_aq_desc desc; 3848 struct i40e_aqc_remove_udp_tunnel *cmd = 3849 (struct i40e_aqc_remove_udp_tunnel *)&desc.params.raw; 3850 i40e_status status; 3851 3852 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_del_udp_tunnel); 3853 3854 cmd->index = index; 3855 3856 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3857 3858 return status; 3859 } 3860 3861 /** 3862 * i40e_aq_delete_element - Delete switch element 3863 * @hw: pointer to the hw struct 3864 * @seid: the SEID to delete from the switch 3865 * @cmd_details: pointer to command details structure or NULL 3866 * 3867 * This deletes a switch element from the switch. 3868 **/ 3869 i40e_status i40e_aq_delete_element(struct i40e_hw *hw, u16 seid, 3870 struct i40e_asq_cmd_details *cmd_details) 3871 { 3872 struct i40e_aq_desc desc; 3873 struct i40e_aqc_switch_seid *cmd = 3874 (struct i40e_aqc_switch_seid *)&desc.params.raw; 3875 i40e_status status; 3876 3877 if (seid == 0) 3878 return I40E_ERR_PARAM; 3879 3880 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_delete_element); 3881 3882 cmd->seid = cpu_to_le16(seid); 3883 3884 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3885 3886 return status; 3887 } 3888 3889 /** 3890 * i40e_aq_dcb_updated - DCB Updated Command 3891 * @hw: pointer to the hw struct 3892 * @cmd_details: pointer to command details structure or NULL 3893 * 3894 * EMP will return when the shared RPB settings have been 3895 * recomputed and modified. The retval field in the descriptor 3896 * will be set to 0 when RPB is modified. 
3897 **/ 3898 i40e_status i40e_aq_dcb_updated(struct i40e_hw *hw, 3899 struct i40e_asq_cmd_details *cmd_details) 3900 { 3901 struct i40e_aq_desc desc; 3902 i40e_status status; 3903 3904 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_dcb_updated); 3905 3906 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3907 3908 return status; 3909 } 3910 3911 /** 3912 * i40e_aq_tx_sched_cmd - generic Tx scheduler AQ command handler 3913 * @hw: pointer to the hw struct 3914 * @seid: seid for the physical port/switching component/vsi 3915 * @buff: Indirect buffer to hold data parameters and response 3916 * @buff_size: Indirect buffer size 3917 * @opcode: Tx scheduler AQ command opcode 3918 * @cmd_details: pointer to command details structure or NULL 3919 * 3920 * Generic command handler for Tx scheduler AQ commands 3921 **/ 3922 static i40e_status i40e_aq_tx_sched_cmd(struct i40e_hw *hw, u16 seid, 3923 void *buff, u16 buff_size, 3924 enum i40e_admin_queue_opc opcode, 3925 struct i40e_asq_cmd_details *cmd_details) 3926 { 3927 struct i40e_aq_desc desc; 3928 struct i40e_aqc_tx_sched_ind *cmd = 3929 (struct i40e_aqc_tx_sched_ind *)&desc.params.raw; 3930 i40e_status status; 3931 bool cmd_param_flag = false; 3932 3933 switch (opcode) { 3934 case i40e_aqc_opc_configure_vsi_ets_sla_bw_limit: 3935 case i40e_aqc_opc_configure_vsi_tc_bw: 3936 case i40e_aqc_opc_enable_switching_comp_ets: 3937 case i40e_aqc_opc_modify_switching_comp_ets: 3938 case i40e_aqc_opc_disable_switching_comp_ets: 3939 case i40e_aqc_opc_configure_switching_comp_ets_bw_limit: 3940 case i40e_aqc_opc_configure_switching_comp_bw_config: 3941 cmd_param_flag = true; 3942 break; 3943 case i40e_aqc_opc_query_vsi_bw_config: 3944 case i40e_aqc_opc_query_vsi_ets_sla_config: 3945 case i40e_aqc_opc_query_switching_comp_ets_config: 3946 case i40e_aqc_opc_query_port_ets_config: 3947 case i40e_aqc_opc_query_switching_comp_bw_config: 3948 cmd_param_flag = false; 3949 break; 3950 default: 3951 return I40E_ERR_PARAM; 3952 } 3953 3954 i40e_fill_default_direct_cmd_desc(&desc, opcode); 3955 3956 /* Indirect command */ 3957 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 3958 if (cmd_param_flag) 3959 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD); 3960 if (buff_size > I40E_AQ_LARGE_BUF) 3961 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 3962 3963 desc.datalen = cpu_to_le16(buff_size); 3964 3965 cmd->vsi_seid = cpu_to_le16(seid); 3966 3967 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); 3968 3969 return status; 3970 } 3971 3972 /** 3973 * i40e_aq_config_vsi_bw_limit - Configure VSI BW Limit 3974 * @hw: pointer to the hw struct 3975 * @seid: VSI seid 3976 * @credit: BW limit credits (0 = disabled) 3977 * @max_credit: Max BW limit credits 3978 * @cmd_details: pointer to command details structure or NULL 3979 **/ 3980 i40e_status i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw, 3981 u16 seid, u16 credit, u8 max_credit, 3982 struct i40e_asq_cmd_details *cmd_details) 3983 { 3984 struct i40e_aq_desc desc; 3985 struct i40e_aqc_configure_vsi_bw_limit *cmd = 3986 (struct i40e_aqc_configure_vsi_bw_limit *)&desc.params.raw; 3987 i40e_status status; 3988 3989 i40e_fill_default_direct_cmd_desc(&desc, 3990 i40e_aqc_opc_configure_vsi_bw_limit); 3991 3992 cmd->vsi_seid = cpu_to_le16(seid); 3993 cmd->credit = cpu_to_le16(credit); 3994 cmd->max_credit = max_credit; 3995 3996 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3997 3998 return status; 3999 } 4000 4001 /** 4002 * i40e_aq_config_vsi_tc_bw - Config VSI BW 
Allocation per TC 4003 * @hw: pointer to the hw struct 4004 * @seid: VSI seid 4005 * @bw_data: Buffer holding enabled TCs, relative TC BW limit/credits 4006 * @cmd_details: pointer to command details structure or NULL 4007 **/ 4008 i40e_status i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw, 4009 u16 seid, 4010 struct i40e_aqc_configure_vsi_tc_bw_data *bw_data, 4011 struct i40e_asq_cmd_details *cmd_details) 4012 { 4013 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), 4014 i40e_aqc_opc_configure_vsi_tc_bw, 4015 cmd_details); 4016 } 4017 4018 /** 4019 * i40e_aq_config_switch_comp_ets - Enable/Disable/Modify ETS on the port 4020 * @hw: pointer to the hw struct 4021 * @seid: seid of the switching component connected to Physical Port 4022 * @ets_data: Buffer holding ETS parameters 4023 * @opcode: Tx scheduler AQ command opcode 4024 * @cmd_details: pointer to command details structure or NULL 4025 **/ 4026 i40e_status i40e_aq_config_switch_comp_ets(struct i40e_hw *hw, 4027 u16 seid, 4028 struct i40e_aqc_configure_switching_comp_ets_data *ets_data, 4029 enum i40e_admin_queue_opc opcode, 4030 struct i40e_asq_cmd_details *cmd_details) 4031 { 4032 return i40e_aq_tx_sched_cmd(hw, seid, (void *)ets_data, 4033 sizeof(*ets_data), opcode, cmd_details); 4034 } 4035 4036 /** 4037 * i40e_aq_config_switch_comp_bw_config - Config Switch comp BW Alloc per TC 4038 * @hw: pointer to the hw struct 4039 * @seid: seid of the switching component 4040 * @bw_data: Buffer holding enabled TCs, relative/absolute TC BW limit/credits 4041 * @cmd_details: pointer to command details structure or NULL 4042 **/ 4043 i40e_status i40e_aq_config_switch_comp_bw_config(struct i40e_hw *hw, 4044 u16 seid, 4045 struct i40e_aqc_configure_switching_comp_bw_config_data *bw_data, 4046 struct i40e_asq_cmd_details *cmd_details) 4047 { 4048 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), 4049 i40e_aqc_opc_configure_switching_comp_bw_config, 4050 cmd_details); 4051 } 4052 4053 /** 4054 * i40e_aq_query_vsi_bw_config - Query VSI BW configuration 4055 * @hw: pointer to the hw struct 4056 * @seid: seid of the VSI 4057 * @bw_data: Buffer to hold VSI BW configuration 4058 * @cmd_details: pointer to command details structure or NULL 4059 **/ 4060 i40e_status i40e_aq_query_vsi_bw_config(struct i40e_hw *hw, 4061 u16 seid, 4062 struct i40e_aqc_query_vsi_bw_config_resp *bw_data, 4063 struct i40e_asq_cmd_details *cmd_details) 4064 { 4065 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), 4066 i40e_aqc_opc_query_vsi_bw_config, 4067 cmd_details); 4068 } 4069 4070 /** 4071 * i40e_aq_query_vsi_ets_sla_config - Query VSI BW configuration per TC 4072 * @hw: pointer to the hw struct 4073 * @seid: seid of the VSI 4074 * @bw_data: Buffer to hold VSI BW configuration per TC 4075 * @cmd_details: pointer to command details structure or NULL 4076 **/ 4077 i40e_status i40e_aq_query_vsi_ets_sla_config(struct i40e_hw *hw, 4078 u16 seid, 4079 struct i40e_aqc_query_vsi_ets_sla_config_resp *bw_data, 4080 struct i40e_asq_cmd_details *cmd_details) 4081 { 4082 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), 4083 i40e_aqc_opc_query_vsi_ets_sla_config, 4084 cmd_details); 4085 } 4086 4087 /** 4088 * i40e_aq_query_switch_comp_ets_config - Query Switch comp BW config per TC 4089 * @hw: pointer to the hw struct 4090 * @seid: seid of the switching component 4091 * @bw_data: Buffer to hold switching component's per TC BW config 4092 * @cmd_details: pointer to command details structure or NULL 
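 *
 * As with the other query wrappers built on i40e_aq_tx_sched_cmd(), the
 * response lands directly in the caller-supplied structure, e.g. (sketch,
 * error handling omitted):
 *
 *	struct i40e_aqc_query_switching_comp_ets_config_resp ets_cfg = {};
 *
 *	status = i40e_aq_query_switch_comp_ets_config(hw, seid, &ets_cfg,
 *						       NULL);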
4093 **/ 4094 i40e_status i40e_aq_query_switch_comp_ets_config(struct i40e_hw *hw, 4095 u16 seid, 4096 struct i40e_aqc_query_switching_comp_ets_config_resp *bw_data, 4097 struct i40e_asq_cmd_details *cmd_details) 4098 { 4099 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), 4100 i40e_aqc_opc_query_switching_comp_ets_config, 4101 cmd_details); 4102 } 4103 4104 /** 4105 * i40e_aq_query_port_ets_config - Query Physical Port ETS configuration 4106 * @hw: pointer to the hw struct 4107 * @seid: seid of the VSI or switching component connected to Physical Port 4108 * @bw_data: Buffer to hold current ETS configuration for the Physical Port 4109 * @cmd_details: pointer to command details structure or NULL 4110 **/ 4111 i40e_status i40e_aq_query_port_ets_config(struct i40e_hw *hw, 4112 u16 seid, 4113 struct i40e_aqc_query_port_ets_config_resp *bw_data, 4114 struct i40e_asq_cmd_details *cmd_details) 4115 { 4116 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), 4117 i40e_aqc_opc_query_port_ets_config, 4118 cmd_details); 4119 } 4120 4121 /** 4122 * i40e_aq_query_switch_comp_bw_config - Query Switch comp BW configuration 4123 * @hw: pointer to the hw struct 4124 * @seid: seid of the switching component 4125 * @bw_data: Buffer to hold switching component's BW configuration 4126 * @cmd_details: pointer to command details structure or NULL 4127 **/ 4128 i40e_status i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw, 4129 u16 seid, 4130 struct i40e_aqc_query_switching_comp_bw_config_resp *bw_data, 4131 struct i40e_asq_cmd_details *cmd_details) 4132 { 4133 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), 4134 i40e_aqc_opc_query_switching_comp_bw_config, 4135 cmd_details); 4136 } 4137 4138 /** 4139 * i40e_validate_filter_settings 4140 * @hw: pointer to the hardware structure 4141 * @settings: Filter control settings 4142 * 4143 * Check and validate the filter control settings passed. 4144 * The function checks for the valid filter/context sizes being 4145 * passed for FCoE and PE. 4146 * 4147 * Returns 0 if the values passed are valid and within 4148 * range else returns an error. 
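 *
 * For reference, the sizes checked below are derived from the enum values
 * roughly as (a summary of the code, not an additional check):
 *
 *	filt_size = I40E_HASH_FILTER_BASE_SIZE << settings->fcoe_filt_num;
 *	cntx_size = I40E_DMA_CNTX_BASE_SIZE << settings->fcoe_cntx_num;
 *
 * and the resulting FCoE pair must also fit within the PMFCOEFMAX field
 * of the I40E_GLHMC_FCOEFMAX register.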
4149 **/ 4150 static i40e_status i40e_validate_filter_settings(struct i40e_hw *hw, 4151 struct i40e_filter_control_settings *settings) 4152 { 4153 u32 fcoe_cntx_size, fcoe_filt_size; 4154 u32 pe_cntx_size, pe_filt_size; 4155 u32 fcoe_fmax; 4156 u32 val; 4157 4158 /* Validate FCoE settings passed */ 4159 switch (settings->fcoe_filt_num) { 4160 case I40E_HASH_FILTER_SIZE_1K: 4161 case I40E_HASH_FILTER_SIZE_2K: 4162 case I40E_HASH_FILTER_SIZE_4K: 4163 case I40E_HASH_FILTER_SIZE_8K: 4164 case I40E_HASH_FILTER_SIZE_16K: 4165 case I40E_HASH_FILTER_SIZE_32K: 4166 fcoe_filt_size = I40E_HASH_FILTER_BASE_SIZE; 4167 fcoe_filt_size <<= (u32)settings->fcoe_filt_num; 4168 break; 4169 default: 4170 return I40E_ERR_PARAM; 4171 } 4172 4173 switch (settings->fcoe_cntx_num) { 4174 case I40E_DMA_CNTX_SIZE_512: 4175 case I40E_DMA_CNTX_SIZE_1K: 4176 case I40E_DMA_CNTX_SIZE_2K: 4177 case I40E_DMA_CNTX_SIZE_4K: 4178 fcoe_cntx_size = I40E_DMA_CNTX_BASE_SIZE; 4179 fcoe_cntx_size <<= (u32)settings->fcoe_cntx_num; 4180 break; 4181 default: 4182 return I40E_ERR_PARAM; 4183 } 4184 4185 /* Validate PE settings passed */ 4186 switch (settings->pe_filt_num) { 4187 case I40E_HASH_FILTER_SIZE_1K: 4188 case I40E_HASH_FILTER_SIZE_2K: 4189 case I40E_HASH_FILTER_SIZE_4K: 4190 case I40E_HASH_FILTER_SIZE_8K: 4191 case I40E_HASH_FILTER_SIZE_16K: 4192 case I40E_HASH_FILTER_SIZE_32K: 4193 case I40E_HASH_FILTER_SIZE_64K: 4194 case I40E_HASH_FILTER_SIZE_128K: 4195 case I40E_HASH_FILTER_SIZE_256K: 4196 case I40E_HASH_FILTER_SIZE_512K: 4197 case I40E_HASH_FILTER_SIZE_1M: 4198 pe_filt_size = I40E_HASH_FILTER_BASE_SIZE; 4199 pe_filt_size <<= (u32)settings->pe_filt_num; 4200 break; 4201 default: 4202 return I40E_ERR_PARAM; 4203 } 4204 4205 switch (settings->pe_cntx_num) { 4206 case I40E_DMA_CNTX_SIZE_512: 4207 case I40E_DMA_CNTX_SIZE_1K: 4208 case I40E_DMA_CNTX_SIZE_2K: 4209 case I40E_DMA_CNTX_SIZE_4K: 4210 case I40E_DMA_CNTX_SIZE_8K: 4211 case I40E_DMA_CNTX_SIZE_16K: 4212 case I40E_DMA_CNTX_SIZE_32K: 4213 case I40E_DMA_CNTX_SIZE_64K: 4214 case I40E_DMA_CNTX_SIZE_128K: 4215 case I40E_DMA_CNTX_SIZE_256K: 4216 pe_cntx_size = I40E_DMA_CNTX_BASE_SIZE; 4217 pe_cntx_size <<= (u32)settings->pe_cntx_num; 4218 break; 4219 default: 4220 return I40E_ERR_PARAM; 4221 } 4222 4223 /* FCHSIZE + FCDSIZE should not be greater than PMFCOEFMAX */ 4224 val = rd32(hw, I40E_GLHMC_FCOEFMAX); 4225 fcoe_fmax = (val & I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_MASK) 4226 >> I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT; 4227 if (fcoe_filt_size + fcoe_cntx_size > fcoe_fmax) 4228 return I40E_ERR_INVALID_SIZE; 4229 4230 return 0; 4231 } 4232 4233 /** 4234 * i40e_set_filter_control 4235 * @hw: pointer to the hardware structure 4236 * @settings: Filter control settings 4237 * 4238 * Set the Queue Filters for PE/FCoE and enable filters required 4239 * for a single PF. It is expected that these settings are programmed 4240 * at the driver initialization time. 
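 *
 * A minimal initialization-time sketch (the field values are illustrative
 * and must pass i40e_validate_filter_settings()):
 *
 *	struct i40e_filter_control_settings settings = {};
 *
 *	settings.hash_lut_size = I40E_HASH_LUT_SIZE_512;
 *	settings.enable_fdir = true;
 *	settings.enable_ethtype = true;
 *	settings.enable_macvlan = true;
 *	ret = i40e_set_filter_control(hw, &settings);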
4241 **/ 4242 i40e_status i40e_set_filter_control(struct i40e_hw *hw, 4243 struct i40e_filter_control_settings *settings) 4244 { 4245 i40e_status ret = 0; 4246 u32 hash_lut_size = 0; 4247 u32 val; 4248 4249 if (!settings) 4250 return I40E_ERR_PARAM; 4251 4252 /* Validate the input settings */ 4253 ret = i40e_validate_filter_settings(hw, settings); 4254 if (ret) 4255 return ret; 4256 4257 /* Read the PF Queue Filter control register */ 4258 val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0); 4259 4260 /* Program required PE hash buckets for the PF */ 4261 val &= ~I40E_PFQF_CTL_0_PEHSIZE_MASK; 4262 val |= ((u32)settings->pe_filt_num << I40E_PFQF_CTL_0_PEHSIZE_SHIFT) & 4263 I40E_PFQF_CTL_0_PEHSIZE_MASK; 4264 /* Program required PE contexts for the PF */ 4265 val &= ~I40E_PFQF_CTL_0_PEDSIZE_MASK; 4266 val |= ((u32)settings->pe_cntx_num << I40E_PFQF_CTL_0_PEDSIZE_SHIFT) & 4267 I40E_PFQF_CTL_0_PEDSIZE_MASK; 4268 4269 /* Program required FCoE hash buckets for the PF */ 4270 val &= ~I40E_PFQF_CTL_0_PFFCHSIZE_MASK; 4271 val |= ((u32)settings->fcoe_filt_num << 4272 I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT) & 4273 I40E_PFQF_CTL_0_PFFCHSIZE_MASK; 4274 /* Program required FCoE DDP contexts for the PF */ 4275 val &= ~I40E_PFQF_CTL_0_PFFCDSIZE_MASK; 4276 val |= ((u32)settings->fcoe_cntx_num << 4277 I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT) & 4278 I40E_PFQF_CTL_0_PFFCDSIZE_MASK; 4279 4280 /* Program Hash LUT size for the PF */ 4281 val &= ~I40E_PFQF_CTL_0_HASHLUTSIZE_MASK; 4282 if (settings->hash_lut_size == I40E_HASH_LUT_SIZE_512) 4283 hash_lut_size = 1; 4284 val |= (hash_lut_size << I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT) & 4285 I40E_PFQF_CTL_0_HASHLUTSIZE_MASK; 4286 4287 /* Enable FDIR, Ethertype and MACVLAN filters for PF and VFs */ 4288 if (settings->enable_fdir) 4289 val |= I40E_PFQF_CTL_0_FD_ENA_MASK; 4290 if (settings->enable_ethtype) 4291 val |= I40E_PFQF_CTL_0_ETYPE_ENA_MASK; 4292 if (settings->enable_macvlan) 4293 val |= I40E_PFQF_CTL_0_MACVLAN_ENA_MASK; 4294 4295 i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, val); 4296 4297 return 0; 4298 } 4299 4300 /** 4301 * i40e_aq_add_rem_control_packet_filter - Add or Remove Control Packet Filter 4302 * @hw: pointer to the hw struct 4303 * @mac_addr: MAC address to use in the filter 4304 * @ethtype: Ethertype to use in the filter 4305 * @flags: Flags that needs to be applied to the filter 4306 * @vsi_seid: seid of the control VSI 4307 * @queue: VSI queue number to send the packet to 4308 * @is_add: Add control packet filter if True else remove 4309 * @stats: Structure to hold information on control filter counts 4310 * @cmd_details: pointer to command details structure or NULL 4311 * 4312 * This command will Add or Remove control packet filter for a control VSI. 4313 * In return it will update the total number of perfect filter count in 4314 * the stats member. 
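 *
 * i40e_add_filter_to_drop_tx_flow_control_frames() below is a small,
 * self-contained caller: it adds a Tx-side drop filter for the flow
 * control ethertype on the given VSI.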
4315 **/ 4316 i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw, 4317 u8 *mac_addr, u16 ethtype, u16 flags, 4318 u16 vsi_seid, u16 queue, bool is_add, 4319 struct i40e_control_filter_stats *stats, 4320 struct i40e_asq_cmd_details *cmd_details) 4321 { 4322 struct i40e_aq_desc desc; 4323 struct i40e_aqc_add_remove_control_packet_filter *cmd = 4324 (struct i40e_aqc_add_remove_control_packet_filter *) 4325 &desc.params.raw; 4326 struct i40e_aqc_add_remove_control_packet_filter_completion *resp = 4327 (struct i40e_aqc_add_remove_control_packet_filter_completion *) 4328 &desc.params.raw; 4329 i40e_status status; 4330 4331 if (vsi_seid == 0) 4332 return I40E_ERR_PARAM; 4333 4334 if (is_add) { 4335 i40e_fill_default_direct_cmd_desc(&desc, 4336 i40e_aqc_opc_add_control_packet_filter); 4337 cmd->queue = cpu_to_le16(queue); 4338 } else { 4339 i40e_fill_default_direct_cmd_desc(&desc, 4340 i40e_aqc_opc_remove_control_packet_filter); 4341 } 4342 4343 if (mac_addr) 4344 ether_addr_copy(cmd->mac, mac_addr); 4345 4346 cmd->etype = cpu_to_le16(ethtype); 4347 cmd->flags = cpu_to_le16(flags); 4348 cmd->seid = cpu_to_le16(vsi_seid); 4349 4350 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 4351 4352 if (!status && stats) { 4353 stats->mac_etype_used = le16_to_cpu(resp->mac_etype_used); 4354 stats->etype_used = le16_to_cpu(resp->etype_used); 4355 stats->mac_etype_free = le16_to_cpu(resp->mac_etype_free); 4356 stats->etype_free = le16_to_cpu(resp->etype_free); 4357 } 4358 4359 return status; 4360 } 4361 4362 /** 4363 * i40e_add_filter_to_drop_tx_flow_control_frames- filter to drop flow control 4364 * @hw: pointer to the hw struct 4365 * @seid: VSI seid to add ethertype filter from 4366 **/ 4367 void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw, 4368 u16 seid) 4369 { 4370 #define I40E_FLOW_CONTROL_ETHTYPE 0x8808 4371 u16 flag = I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC | 4372 I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP | 4373 I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX; 4374 u16 ethtype = I40E_FLOW_CONTROL_ETHTYPE; 4375 i40e_status status; 4376 4377 status = i40e_aq_add_rem_control_packet_filter(hw, NULL, ethtype, flag, 4378 seid, 0, true, NULL, 4379 NULL); 4380 if (status) 4381 hw_dbg(hw, "Ethtype Filter Add failed: Error pruning Tx flow control frames\n"); 4382 } 4383 4384 /** 4385 * i40e_aq_alternate_read 4386 * @hw: pointer to the hardware structure 4387 * @reg_addr0: address of first dword to be read 4388 * @reg_val0: pointer for data read from 'reg_addr0' 4389 * @reg_addr1: address of second dword to be read 4390 * @reg_val1: pointer for data read from 'reg_addr1' 4391 * 4392 * Read one or two dwords from alternate structure. Fields are indicated 4393 * by 'reg_addr0' and 'reg_addr1' register numbers. If 'reg_val1' pointer 4394 * is not passed then only register at 'reg_addr0' is read. 
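 *
 * i40e_read_bw_from_alt_ram() below uses this helper to fetch the per-PF
 * min/max bandwidth words in a single command:
 *
 *	status = i40e_aq_alternate_read(hw, max_bw_addr, max_bw,
 *					min_bw_addr, min_bw);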
4395 * 4396 **/ 4397 static i40e_status i40e_aq_alternate_read(struct i40e_hw *hw, 4398 u32 reg_addr0, u32 *reg_val0, 4399 u32 reg_addr1, u32 *reg_val1) 4400 { 4401 struct i40e_aq_desc desc; 4402 struct i40e_aqc_alternate_write *cmd_resp = 4403 (struct i40e_aqc_alternate_write *)&desc.params.raw; 4404 i40e_status status; 4405 4406 if (!reg_val0) 4407 return I40E_ERR_PARAM; 4408 4409 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_alternate_read); 4410 cmd_resp->address0 = cpu_to_le32(reg_addr0); 4411 cmd_resp->address1 = cpu_to_le32(reg_addr1); 4412 4413 status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL); 4414 4415 if (!status) { 4416 *reg_val0 = le32_to_cpu(cmd_resp->data0); 4417 4418 if (reg_val1) 4419 *reg_val1 = le32_to_cpu(cmd_resp->data1); 4420 } 4421 4422 return status; 4423 } 4424 4425 /** 4426 * i40e_aq_resume_port_tx 4427 * @hw: pointer to the hardware structure 4428 * @cmd_details: pointer to command details structure or NULL 4429 * 4430 * Resume port's Tx traffic 4431 **/ 4432 i40e_status i40e_aq_resume_port_tx(struct i40e_hw *hw, 4433 struct i40e_asq_cmd_details *cmd_details) 4434 { 4435 struct i40e_aq_desc desc; 4436 i40e_status status; 4437 4438 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_resume_port_tx); 4439 4440 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 4441 4442 return status; 4443 } 4444 4445 /** 4446 * i40e_set_pci_config_data - store PCI bus info 4447 * @hw: pointer to hardware structure 4448 * @link_status: the link status word from PCI config space 4449 * 4450 * Stores the PCI bus info (speed, width, type) within the i40e_hw structure 4451 **/ 4452 void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status) 4453 { 4454 hw->bus.type = i40e_bus_type_pci_express; 4455 4456 switch (link_status & PCI_EXP_LNKSTA_NLW) { 4457 case PCI_EXP_LNKSTA_NLW_X1: 4458 hw->bus.width = i40e_bus_width_pcie_x1; 4459 break; 4460 case PCI_EXP_LNKSTA_NLW_X2: 4461 hw->bus.width = i40e_bus_width_pcie_x2; 4462 break; 4463 case PCI_EXP_LNKSTA_NLW_X4: 4464 hw->bus.width = i40e_bus_width_pcie_x4; 4465 break; 4466 case PCI_EXP_LNKSTA_NLW_X8: 4467 hw->bus.width = i40e_bus_width_pcie_x8; 4468 break; 4469 default: 4470 hw->bus.width = i40e_bus_width_unknown; 4471 break; 4472 } 4473 4474 switch (link_status & PCI_EXP_LNKSTA_CLS) { 4475 case PCI_EXP_LNKSTA_CLS_2_5GB: 4476 hw->bus.speed = i40e_bus_speed_2500; 4477 break; 4478 case PCI_EXP_LNKSTA_CLS_5_0GB: 4479 hw->bus.speed = i40e_bus_speed_5000; 4480 break; 4481 case PCI_EXP_LNKSTA_CLS_8_0GB: 4482 hw->bus.speed = i40e_bus_speed_8000; 4483 break; 4484 default: 4485 hw->bus.speed = i40e_bus_speed_unknown; 4486 break; 4487 } 4488 } 4489 4490 /** 4491 * i40e_aq_debug_dump 4492 * @hw: pointer to the hardware structure 4493 * @cluster_id: specific cluster to dump 4494 * @table_id: table id within cluster 4495 * @start_index: index of line in the block to read 4496 * @buff_size: dump buffer size 4497 * @buff: dump buffer 4498 * @ret_buff_size: actual buffer size returned 4499 * @ret_next_table: next block to read 4500 * @ret_next_index: next index to read 4501 * @cmd_details: pointer to command details structure or NULL 4502 * 4503 * Dump internal FW/HW data for debug purposes. 
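 *
 * The next-table/next-index outputs let a caller walk the dump in chunks,
 * e.g. (illustrative sketch; buf is a caller-supplied buffer and error
 * handling is omitted):
 *
 *	u16 ret_size;
 *	u8 next_table;
 *	u32 next_index;
 *
 *	status = i40e_aq_debug_dump(hw, cluster_id, table_id, 0, sizeof(buf),
 *				    buf, &ret_size, &next_table, &next_index,
 *				    NULL);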
4504 * 4505 **/ 4506 i40e_status i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id, 4507 u8 table_id, u32 start_index, u16 buff_size, 4508 void *buff, u16 *ret_buff_size, 4509 u8 *ret_next_table, u32 *ret_next_index, 4510 struct i40e_asq_cmd_details *cmd_details) 4511 { 4512 struct i40e_aq_desc desc; 4513 struct i40e_aqc_debug_dump_internals *cmd = 4514 (struct i40e_aqc_debug_dump_internals *)&desc.params.raw; 4515 struct i40e_aqc_debug_dump_internals *resp = 4516 (struct i40e_aqc_debug_dump_internals *)&desc.params.raw; 4517 i40e_status status; 4518 4519 if (buff_size == 0 || !buff) 4520 return I40E_ERR_PARAM; 4521 4522 i40e_fill_default_direct_cmd_desc(&desc, 4523 i40e_aqc_opc_debug_dump_internals); 4524 /* Indirect Command */ 4525 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 4526 if (buff_size > I40E_AQ_LARGE_BUF) 4527 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 4528 4529 cmd->cluster_id = cluster_id; 4530 cmd->table_id = table_id; 4531 cmd->idx = cpu_to_le32(start_index); 4532 4533 desc.datalen = cpu_to_le16(buff_size); 4534 4535 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); 4536 if (!status) { 4537 if (ret_buff_size) 4538 *ret_buff_size = le16_to_cpu(desc.datalen); 4539 if (ret_next_table) 4540 *ret_next_table = resp->table_id; 4541 if (ret_next_index) 4542 *ret_next_index = le32_to_cpu(resp->idx); 4543 } 4544 4545 return status; 4546 } 4547 4548 /** 4549 * i40e_read_bw_from_alt_ram 4550 * @hw: pointer to the hardware structure 4551 * @max_bw: pointer for max_bw read 4552 * @min_bw: pointer for min_bw read 4553 * @min_valid: pointer for bool that is true if min_bw is a valid value 4554 * @max_valid: pointer for bool that is true if max_bw is a valid value 4555 * 4556 * Read bw from the alternate ram for the given pf 4557 **/ 4558 i40e_status i40e_read_bw_from_alt_ram(struct i40e_hw *hw, 4559 u32 *max_bw, u32 *min_bw, 4560 bool *min_valid, bool *max_valid) 4561 { 4562 i40e_status status; 4563 u32 max_bw_addr, min_bw_addr; 4564 4565 /* Calculate the address of the min/max bw registers */ 4566 max_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET + 4567 I40E_ALT_STRUCT_MAX_BW_OFFSET + 4568 (I40E_ALT_STRUCT_DWORDS_PER_PF * hw->pf_id); 4569 min_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET + 4570 I40E_ALT_STRUCT_MIN_BW_OFFSET + 4571 (I40E_ALT_STRUCT_DWORDS_PER_PF * hw->pf_id); 4572 4573 /* Read the bandwidths from alt ram */ 4574 status = i40e_aq_alternate_read(hw, max_bw_addr, max_bw, 4575 min_bw_addr, min_bw); 4576 4577 if (*min_bw & I40E_ALT_BW_VALID_MASK) 4578 *min_valid = true; 4579 else 4580 *min_valid = false; 4581 4582 if (*max_bw & I40E_ALT_BW_VALID_MASK) 4583 *max_valid = true; 4584 else 4585 *max_valid = false; 4586 4587 return status; 4588 } 4589 4590 /** 4591 * i40e_aq_configure_partition_bw 4592 * @hw: pointer to the hardware structure 4593 * @bw_data: Buffer holding valid pfs and bw limits 4594 * @cmd_details: pointer to command details 4595 * 4596 * Configure partitions guaranteed/max bw 4597 **/ 4598 i40e_status i40e_aq_configure_partition_bw(struct i40e_hw *hw, 4599 struct i40e_aqc_configure_partition_bw_data *bw_data, 4600 struct i40e_asq_cmd_details *cmd_details) 4601 { 4602 i40e_status status; 4603 struct i40e_aq_desc desc; 4604 u16 bwd_size = sizeof(*bw_data); 4605 4606 i40e_fill_default_direct_cmd_desc(&desc, 4607 i40e_aqc_opc_configure_partition_bw); 4608 4609 /* Indirect command */ 4610 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 4611 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD); 4612 4613 if (bwd_size > I40E_AQ_LARGE_BUF) 4614 
desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 4615 4616 desc.datalen = cpu_to_le16(bwd_size); 4617 4618 status = i40e_asq_send_command(hw, &desc, bw_data, bwd_size, 4619 cmd_details); 4620 4621 return status; 4622 } 4623 4624 /** 4625 * i40e_read_phy_register_clause22 4626 * @hw: pointer to the HW structure 4627 * @reg: register address in the page 4628 * @phy_addr: PHY address on MDIO interface 4629 * @value: PHY register value 4630 * 4631 * Reads specified PHY register value 4632 **/ 4633 i40e_status i40e_read_phy_register_clause22(struct i40e_hw *hw, 4634 u16 reg, u8 phy_addr, u16 *value) 4635 { 4636 i40e_status status = I40E_ERR_TIMEOUT; 4637 u8 port_num = (u8)hw->func_caps.mdio_port_num; 4638 u32 command = 0; 4639 u16 retry = 1000; 4640 4641 command = (reg << I40E_GLGEN_MSCA_DEVADD_SHIFT) | 4642 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | 4643 (I40E_MDIO_CLAUSE22_OPCODE_READ_MASK) | 4644 (I40E_MDIO_CLAUSE22_STCODE_MASK) | 4645 (I40E_GLGEN_MSCA_MDICMD_MASK); 4646 wr32(hw, I40E_GLGEN_MSCA(port_num), command); 4647 do { 4648 command = rd32(hw, I40E_GLGEN_MSCA(port_num)); 4649 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { 4650 status = 0; 4651 break; 4652 } 4653 udelay(10); 4654 retry--; 4655 } while (retry); 4656 4657 if (status) { 4658 i40e_debug(hw, I40E_DEBUG_PHY, 4659 "PHY: Can't write command to external PHY.\n"); 4660 } else { 4661 command = rd32(hw, I40E_GLGEN_MSRWD(port_num)); 4662 *value = (command & I40E_GLGEN_MSRWD_MDIRDDATA_MASK) >> 4663 I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT; 4664 } 4665 4666 return status; 4667 } 4668 4669 /** 4670 * i40e_write_phy_register_clause22 4671 * @hw: pointer to the HW structure 4672 * @reg: register address in the page 4673 * @phy_addr: PHY address on MDIO interface 4674 * @value: PHY register value 4675 * 4676 * Writes specified PHY register value 4677 **/ 4678 i40e_status i40e_write_phy_register_clause22(struct i40e_hw *hw, 4679 u16 reg, u8 phy_addr, u16 value) 4680 { 4681 i40e_status status = I40E_ERR_TIMEOUT; 4682 u8 port_num = (u8)hw->func_caps.mdio_port_num; 4683 u32 command = 0; 4684 u16 retry = 1000; 4685 4686 command = value << I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT; 4687 wr32(hw, I40E_GLGEN_MSRWD(port_num), command); 4688 4689 command = (reg << I40E_GLGEN_MSCA_DEVADD_SHIFT) | 4690 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | 4691 (I40E_MDIO_CLAUSE22_OPCODE_WRITE_MASK) | 4692 (I40E_MDIO_CLAUSE22_STCODE_MASK) | 4693 (I40E_GLGEN_MSCA_MDICMD_MASK); 4694 4695 wr32(hw, I40E_GLGEN_MSCA(port_num), command); 4696 do { 4697 command = rd32(hw, I40E_GLGEN_MSCA(port_num)); 4698 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { 4699 status = 0; 4700 break; 4701 } 4702 udelay(10); 4703 retry--; 4704 } while (retry); 4705 4706 return status; 4707 } 4708 4709 /** 4710 * i40e_read_phy_register_clause45 4711 * @hw: pointer to the HW structure 4712 * @page: registers page number 4713 * @reg: register address in the page 4714 * @phy_addr: PHY address on MDIO interface 4715 * @value: PHY register value 4716 * 4717 * Reads specified PHY register value 4718 **/ 4719 i40e_status i40e_read_phy_register_clause45(struct i40e_hw *hw, 4720 u8 page, u16 reg, u8 phy_addr, u16 *value) 4721 { 4722 i40e_status status = I40E_ERR_TIMEOUT; 4723 u32 command = 0; 4724 u16 retry = 1000; 4725 u8 port_num = hw->func_caps.mdio_port_num; 4726 4727 command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) | 4728 (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) | 4729 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | 4730 (I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK) | 4731 (I40E_MDIO_CLAUSE45_STCODE_MASK) | 4732 
(I40E_GLGEN_MSCA_MDICMD_MASK) | 4733 (I40E_GLGEN_MSCA_MDIINPROGEN_MASK); 4734 wr32(hw, I40E_GLGEN_MSCA(port_num), command); 4735 do { 4736 command = rd32(hw, I40E_GLGEN_MSCA(port_num)); 4737 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { 4738 status = 0; 4739 break; 4740 } 4741 usleep_range(10, 20); 4742 retry--; 4743 } while (retry); 4744 4745 if (status) { 4746 i40e_debug(hw, I40E_DEBUG_PHY, 4747 "PHY: Can't write command to external PHY.\n"); 4748 goto phy_read_end; 4749 } 4750 4751 command = (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) | 4752 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | 4753 (I40E_MDIO_CLAUSE45_OPCODE_READ_MASK) | 4754 (I40E_MDIO_CLAUSE45_STCODE_MASK) | 4755 (I40E_GLGEN_MSCA_MDICMD_MASK) | 4756 (I40E_GLGEN_MSCA_MDIINPROGEN_MASK); 4757 status = I40E_ERR_TIMEOUT; 4758 retry = 1000; 4759 wr32(hw, I40E_GLGEN_MSCA(port_num), command); 4760 do { 4761 command = rd32(hw, I40E_GLGEN_MSCA(port_num)); 4762 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { 4763 status = 0; 4764 break; 4765 } 4766 usleep_range(10, 20); 4767 retry--; 4768 } while (retry); 4769 4770 if (!status) { 4771 command = rd32(hw, I40E_GLGEN_MSRWD(port_num)); 4772 *value = (command & I40E_GLGEN_MSRWD_MDIRDDATA_MASK) >> 4773 I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT; 4774 } else { 4775 i40e_debug(hw, I40E_DEBUG_PHY, 4776 "PHY: Can't read register value from external PHY.\n"); 4777 } 4778 4779 phy_read_end: 4780 return status; 4781 } 4782 4783 /** 4784 * i40e_write_phy_register_clause45 4785 * @hw: pointer to the HW structure 4786 * @page: registers page number 4787 * @reg: register address in the page 4788 * @phy_addr: PHY address on MDIO interface 4789 * @value: PHY register value 4790 * 4791 * Writes value to specified PHY register 4792 **/ 4793 i40e_status i40e_write_phy_register_clause45(struct i40e_hw *hw, 4794 u8 page, u16 reg, u8 phy_addr, u16 value) 4795 { 4796 i40e_status status = I40E_ERR_TIMEOUT; 4797 u32 command = 0; 4798 u16 retry = 1000; 4799 u8 port_num = hw->func_caps.mdio_port_num; 4800 4801 command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) | 4802 (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) | 4803 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | 4804 (I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK) | 4805 (I40E_MDIO_CLAUSE45_STCODE_MASK) | 4806 (I40E_GLGEN_MSCA_MDICMD_MASK) | 4807 (I40E_GLGEN_MSCA_MDIINPROGEN_MASK); 4808 wr32(hw, I40E_GLGEN_MSCA(port_num), command); 4809 do { 4810 command = rd32(hw, I40E_GLGEN_MSCA(port_num)); 4811 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { 4812 status = 0; 4813 break; 4814 } 4815 usleep_range(10, 20); 4816 retry--; 4817 } while (retry); 4818 if (status) { 4819 i40e_debug(hw, I40E_DEBUG_PHY, 4820 "PHY: Can't write command to external PHY.\n"); 4821 goto phy_write_end; 4822 } 4823 4824 command = value << I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT; 4825 wr32(hw, I40E_GLGEN_MSRWD(port_num), command); 4826 4827 command = (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) | 4828 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | 4829 (I40E_MDIO_CLAUSE45_OPCODE_WRITE_MASK) | 4830 (I40E_MDIO_CLAUSE45_STCODE_MASK) | 4831 (I40E_GLGEN_MSCA_MDICMD_MASK) | 4832 (I40E_GLGEN_MSCA_MDIINPROGEN_MASK); 4833 status = I40E_ERR_TIMEOUT; 4834 retry = 1000; 4835 wr32(hw, I40E_GLGEN_MSCA(port_num), command); 4836 do { 4837 command = rd32(hw, I40E_GLGEN_MSCA(port_num)); 4838 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { 4839 status = 0; 4840 break; 4841 } 4842 usleep_range(10, 20); 4843 retry--; 4844 } while (retry); 4845 4846 phy_write_end: 4847 return status; 4848 } 4849 4850 /** 4851 * i40e_write_phy_register 4852 * @hw: pointer to the 
HW structure 4853 * @page: registers page number 4854 * @reg: register address in the page 4855 * @phy_addr: PHY address on MDIO interface 4856 * @value: PHY register value 4857 * 4858 * Writes value to specified PHY register 4859 **/ 4860 i40e_status i40e_write_phy_register(struct i40e_hw *hw, 4861 u8 page, u16 reg, u8 phy_addr, u16 value) 4862 { 4863 i40e_status status; 4864 4865 switch (hw->device_id) { 4866 case I40E_DEV_ID_1G_BASE_T_X722: 4867 status = i40e_write_phy_register_clause22(hw, reg, phy_addr, 4868 value); 4869 break; 4870 case I40E_DEV_ID_10G_BASE_T: 4871 case I40E_DEV_ID_10G_BASE_T4: 4872 case I40E_DEV_ID_10G_BASE_T_X722: 4873 case I40E_DEV_ID_25G_B: 4874 case I40E_DEV_ID_25G_SFP28: 4875 status = i40e_write_phy_register_clause45(hw, page, reg, 4876 phy_addr, value); 4877 break; 4878 default: 4879 status = I40E_ERR_UNKNOWN_PHY; 4880 break; 4881 } 4882 4883 return status; 4884 } 4885 4886 /** 4887 * i40e_read_phy_register 4888 * @hw: pointer to the HW structure 4889 * @page: registers page number 4890 * @reg: register address in the page 4891 * @phy_addr: PHY address on MDIO interface 4892 * @value: PHY register value 4893 * 4894 * Reads specified PHY register value 4895 **/ 4896 i40e_status i40e_read_phy_register(struct i40e_hw *hw, 4897 u8 page, u16 reg, u8 phy_addr, u16 *value) 4898 { 4899 i40e_status status; 4900 4901 switch (hw->device_id) { 4902 case I40E_DEV_ID_1G_BASE_T_X722: 4903 status = i40e_read_phy_register_clause22(hw, reg, phy_addr, 4904 value); 4905 break; 4906 case I40E_DEV_ID_10G_BASE_T: 4907 case I40E_DEV_ID_10G_BASE_T4: 4908 case I40E_DEV_ID_10G_BASE_T_BC: 4909 case I40E_DEV_ID_10G_BASE_T_X722: 4910 case I40E_DEV_ID_25G_B: 4911 case I40E_DEV_ID_25G_SFP28: 4912 status = i40e_read_phy_register_clause45(hw, page, reg, 4913 phy_addr, value); 4914 break; 4915 default: 4916 status = I40E_ERR_UNKNOWN_PHY; 4917 break; 4918 } 4919 4920 return status; 4921 } 4922 4923 /** 4924 * i40e_get_phy_address 4925 * @hw: pointer to the HW structure 4926 * @dev_num: PHY port num whose address we want 4927 * 4928 * Gets PHY address for current port 4929 **/ 4930 u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num) 4931 { 4932 u8 port_num = hw->func_caps.mdio_port_num; 4933 u32 reg_val = rd32(hw, I40E_GLGEN_MDIO_I2C_SEL(port_num)); 4934 4935 return (u8)(reg_val >> ((dev_num + 1) * 5)) & 0x1f; 4936 } 4937 4938 /** 4939 * i40e_blink_phy_link_led 4940 * @hw: pointer to the HW structure 4941 * @time: how long the LED will blink, in seconds 4942 * @interval: gap between LED on and off in msecs 4943 * 4944 * Blinks PHY link LED 4945 **/ 4946 i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw, 4947 u32 time, u32 interval) 4948 { 4949 i40e_status status = 0; 4950 u32 i; 4951 u16 led_ctl; 4952 u16 gpio_led_port; 4953 u16 led_reg; 4954 u16 led_addr = I40E_PHY_LED_PROV_REG_1; 4955 u8 phy_addr = 0; 4956 u8 port_num; 4957 4958 i = rd32(hw, I40E_PFGEN_PORTNUM); 4959 port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK); 4960 phy_addr = i40e_get_phy_address(hw, port_num); 4961 4962 for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++, 4963 led_addr++) { 4964 status = i40e_read_phy_register_clause45(hw, 4965 I40E_PHY_COM_REG_PAGE, 4966 led_addr, phy_addr, 4967 &led_reg); 4968 if (status) 4969 goto phy_blinking_end; 4970 led_ctl = led_reg; 4971 if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) { 4972 led_reg = 0; 4973 status = i40e_write_phy_register_clause45(hw, 4974 I40E_PHY_COM_REG_PAGE, 4975 led_addr, phy_addr, 4976 led_reg); 4977 if (status) 4978 goto phy_blinking_end; 4979 break; 4980 }
4981 } 4982 4983 if (time > 0 && interval > 0) { 4984 for (i = 0; i < time * 1000; i += interval) { 4985 status = i40e_read_phy_register_clause45(hw, 4986 I40E_PHY_COM_REG_PAGE, 4987 led_addr, phy_addr, &led_reg); 4988 if (status) 4989 goto restore_config; 4990 if (led_reg & I40E_PHY_LED_MANUAL_ON) 4991 led_reg = 0; 4992 else 4993 led_reg = I40E_PHY_LED_MANUAL_ON; 4994 status = i40e_write_phy_register_clause45(hw, 4995 I40E_PHY_COM_REG_PAGE, 4996 led_addr, phy_addr, led_reg); 4997 if (status) 4998 goto restore_config; 4999 msleep(interval); 5000 } 5001 } 5002 5003 restore_config: 5004 status = i40e_write_phy_register_clause45(hw, 5005 I40E_PHY_COM_REG_PAGE, 5006 led_addr, phy_addr, led_ctl); 5007 5008 phy_blinking_end: 5009 return status; 5010 } 5011 5012 /** 5013 * i40e_led_get_reg - read LED register 5014 * @hw: pointer to the HW structure 5015 * @led_addr: LED register address 5016 * @reg_val: read register value 5017 **/ 5018 static enum i40e_status_code i40e_led_get_reg(struct i40e_hw *hw, u16 led_addr, 5019 u32 *reg_val) 5020 { 5021 enum i40e_status_code status; 5022 u8 phy_addr = 0; 5023 u8 port_num; 5024 u32 i; 5025 5026 *reg_val = 0; 5027 if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) { 5028 status = 5029 i40e_aq_get_phy_register(hw, 5030 I40E_AQ_PHY_REG_ACCESS_EXTERNAL, 5031 I40E_PHY_COM_REG_PAGE, 5032 I40E_PHY_LED_PROV_REG_1, 5033 reg_val, NULL); 5034 } else { 5035 i = rd32(hw, I40E_PFGEN_PORTNUM); 5036 port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK); 5037 phy_addr = i40e_get_phy_address(hw, port_num); 5038 status = i40e_read_phy_register_clause45(hw, 5039 I40E_PHY_COM_REG_PAGE, 5040 led_addr, phy_addr, 5041 (u16 *)reg_val); 5042 } 5043 return status; 5044 } 5045 5046 /** 5047 * i40e_led_set_reg - write LED register 5048 * @hw: pointer to the HW structure 5049 * @led_addr: LED register address 5050 * @reg_val: register value to write 5051 **/ 5052 static enum i40e_status_code i40e_led_set_reg(struct i40e_hw *hw, u16 led_addr, 5053 u32 reg_val) 5054 { 5055 enum i40e_status_code status; 5056 u8 phy_addr = 0; 5057 u8 port_num; 5058 u32 i; 5059 5060 if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) { 5061 status = 5062 i40e_aq_set_phy_register(hw, 5063 I40E_AQ_PHY_REG_ACCESS_EXTERNAL, 5064 I40E_PHY_COM_REG_PAGE, 5065 I40E_PHY_LED_PROV_REG_1, 5066 reg_val, NULL); 5067 } else { 5068 i = rd32(hw, I40E_PFGEN_PORTNUM); 5069 port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK); 5070 phy_addr = i40e_get_phy_address(hw, port_num); 5071 status = i40e_write_phy_register_clause45(hw, 5072 I40E_PHY_COM_REG_PAGE, 5073 led_addr, phy_addr, 5074 (u16)reg_val); 5075 } 5076 5077 return status; 5078 } 5079 5080 /** 5081 * i40e_led_get_phy - return current on/off mode 5082 * @hw: pointer to the hw struct 5083 * @led_addr: address of led register to use 5084 * @val: original value of register to use 5085 * 5086 **/ 5087 i40e_status i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr, 5088 u16 *val) 5089 { 5090 i40e_status status = 0; 5091 u16 gpio_led_port; 5092 u8 phy_addr = 0; 5093 u16 reg_val; 5094 u16 temp_addr; 5095 u8 port_num; 5096 u32 i; 5097 u32 reg_val_aq; 5098 5099 if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) { 5100 status = 5101 i40e_aq_get_phy_register(hw, 5102 I40E_AQ_PHY_REG_ACCESS_EXTERNAL, 5103 I40E_PHY_COM_REG_PAGE, 5104 I40E_PHY_LED_PROV_REG_1, 5105 &reg_val_aq, NULL); 5106 if (status == I40E_SUCCESS) 5107 *val = (u16)reg_val_aq; 5108 return status; 5109 } 5110 temp_addr = I40E_PHY_LED_PROV_REG_1; 5111 i = rd32(hw, I40E_PFGEN_PORTNUM); 5112 port_num = (u8)(i &
I40E_PFGEN_PORTNUM_PORT_NUM_MASK); 5113 phy_addr = i40e_get_phy_address(hw, port_num); 5114 5115 for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++, 5116 temp_addr++) { 5117 status = i40e_read_phy_register_clause45(hw, 5118 I40E_PHY_COM_REG_PAGE, 5119 temp_addr, phy_addr, 5120 &reg_val); 5121 if (status) 5122 return status; 5123 *val = reg_val; 5124 if (reg_val & I40E_PHY_LED_LINK_MODE_MASK) { 5125 *led_addr = temp_addr; 5126 break; 5127 } 5128 } 5129 return status; 5130 } 5131 5132 /** 5133 * i40e_led_set_phy 5134 * @hw: pointer to the HW structure 5135 * @on: true to turn the LED on, false to turn it off 5136 * @led_addr: address of led register to use 5137 * @mode: original val plus bit for set or ignore 5138 * 5139 * Set LEDs on or off when controlled by the PHY 5140 * 5141 **/ 5142 i40e_status i40e_led_set_phy(struct i40e_hw *hw, bool on, 5143 u16 led_addr, u32 mode) 5144 { 5145 i40e_status status = 0; 5146 u32 led_ctl = 0; 5147 u32 led_reg = 0; 5148 5149 status = i40e_led_get_reg(hw, led_addr, &led_reg); 5150 if (status) 5151 return status; 5152 led_ctl = led_reg; 5153 if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) { 5154 led_reg = 0; 5155 status = i40e_led_set_reg(hw, led_addr, led_reg); 5156 if (status) 5157 return status; 5158 } 5159 status = i40e_led_get_reg(hw, led_addr, &led_reg); 5160 if (status) 5161 goto restore_config; 5162 if (on) 5163 led_reg = I40E_PHY_LED_MANUAL_ON; 5164 else 5165 led_reg = 0; 5166 5167 status = i40e_led_set_reg(hw, led_addr, led_reg); 5168 if (status) 5169 goto restore_config; 5170 if (mode & I40E_PHY_LED_MODE_ORIG) { 5171 led_ctl = (mode & I40E_PHY_LED_MODE_MASK); 5172 status = i40e_led_set_reg(hw, led_addr, led_ctl); 5173 } 5174 return status; 5175 5176 restore_config: 5177 status = i40e_led_set_reg(hw, led_addr, led_ctl); 5178 return status; 5179 } 5180 5181 /** 5182 * i40e_aq_rx_ctl_read_register - use FW to read from an Rx control register 5183 * @hw: pointer to the hw struct 5184 * @reg_addr: register address 5185 * @reg_val: ptr to register value 5186 * @cmd_details: pointer to command details structure or NULL 5187 * 5188 * Use the firmware to read the Rx control register, 5189 * especially useful if the Rx unit is under heavy pressure 5190 **/ 5191 i40e_status i40e_aq_rx_ctl_read_register(struct i40e_hw *hw, 5192 u32 reg_addr, u32 *reg_val, 5193 struct i40e_asq_cmd_details *cmd_details) 5194 { 5195 struct i40e_aq_desc desc; 5196 struct i40e_aqc_rx_ctl_reg_read_write *cmd_resp = 5197 (struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw; 5198 i40e_status status; 5199 5200 if (!reg_val) 5201 return I40E_ERR_PARAM; 5202 5203 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_rx_ctl_reg_read); 5204 5205 cmd_resp->address = cpu_to_le32(reg_addr); 5206 5207 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 5208 5209 if (status == 0) 5210 *reg_val = le32_to_cpu(cmd_resp->value); 5211 5212 return status; 5213 } 5214 5215 /** 5216 * i40e_read_rx_ctl - read from an Rx control register 5217 * @hw: pointer to the hw struct 5218 * @reg_addr: register address 5219 **/ 5220 u32 i40e_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr) 5221 { 5222 i40e_status status = 0; 5223 bool use_register; 5224 int retry = 5; 5225 u32 val = 0; 5226 5227 use_register = (((hw->aq.api_maj_ver == 1) && 5228 (hw->aq.api_min_ver < 5)) || 5229 (hw->mac.type == I40E_MAC_X722)); 5230 if (!use_register) { 5231 do_retry: 5232 status = i40e_aq_rx_ctl_read_register(hw, reg_addr, &val, NULL); 5233 if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) { 5234 usleep_range(1000, 2000); 5235
retry--; 5236 goto do_retry; 5237 } 5238 } 5239 5240 /* if the AQ access failed, try the old-fashioned way */ 5241 if (status || use_register) 5242 val = rd32(hw, reg_addr); 5243 5244 return val; 5245 } 5246 5247 /** 5248 * i40e_aq_rx_ctl_write_register 5249 * @hw: pointer to the hw struct 5250 * @reg_addr: register address 5251 * @reg_val: register value 5252 * @cmd_details: pointer to command details structure or NULL 5253 * 5254 * Use the firmware to write to an Rx control register, 5255 * especially useful if the Rx unit is under heavy pressure 5256 **/ 5257 i40e_status i40e_aq_rx_ctl_write_register(struct i40e_hw *hw, 5258 u32 reg_addr, u32 reg_val, 5259 struct i40e_asq_cmd_details *cmd_details) 5260 { 5261 struct i40e_aq_desc desc; 5262 struct i40e_aqc_rx_ctl_reg_read_write *cmd = 5263 (struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw; 5264 i40e_status status; 5265 5266 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_rx_ctl_reg_write); 5267 5268 cmd->address = cpu_to_le32(reg_addr); 5269 cmd->value = cpu_to_le32(reg_val); 5270 5271 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 5272 5273 return status; 5274 } 5275 5276 /** 5277 * i40e_write_rx_ctl - write to an Rx control register 5278 * @hw: pointer to the hw struct 5279 * @reg_addr: register address 5280 * @reg_val: register value 5281 **/ 5282 void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val) 5283 { 5284 i40e_status status = 0; 5285 bool use_register; 5286 int retry = 5; 5287 5288 use_register = (((hw->aq.api_maj_ver == 1) && 5289 (hw->aq.api_min_ver < 5)) || 5290 (hw->mac.type == I40E_MAC_X722)); 5291 if (!use_register) { 5292 do_retry: 5293 status = i40e_aq_rx_ctl_write_register(hw, reg_addr, 5294 reg_val, NULL); 5295 if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) { 5296 usleep_range(1000, 2000); 5297 retry--; 5298 goto do_retry; 5299 } 5300 } 5301 5302 /* if the AQ access failed, try the old-fashioned way */ 5303 if (status || use_register) 5304 wr32(hw, reg_addr, reg_val); 5305 } 5306 5307 /** 5308 * i40e_aq_set_phy_register 5309 * @hw: pointer to the hw struct 5310 * @phy_select: select which phy should be accessed 5311 * @dev_addr: PHY device address 5312 * @reg_addr: PHY register address 5313 * @reg_val: new register value 5314 * @cmd_details: pointer to command details structure or NULL 5315 * 5316 * Write the external PHY register. 5317 **/ 5318 i40e_status i40e_aq_set_phy_register(struct i40e_hw *hw, 5319 u8 phy_select, u8 dev_addr, 5320 u32 reg_addr, u32 reg_val, 5321 struct i40e_asq_cmd_details *cmd_details) 5322 { 5323 struct i40e_aq_desc desc; 5324 struct i40e_aqc_phy_register_access *cmd = 5325 (struct i40e_aqc_phy_register_access *)&desc.params.raw; 5326 i40e_status status; 5327 5328 i40e_fill_default_direct_cmd_desc(&desc, 5329 i40e_aqc_opc_set_phy_register); 5330 5331 cmd->phy_interface = phy_select; 5332 cmd->dev_address = dev_addr; 5333 cmd->reg_address = cpu_to_le32(reg_addr); 5334 cmd->reg_value = cpu_to_le32(reg_val); 5335 5336 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 5337 5338 return status; 5339 } 5340 5341 /** 5342 * i40e_aq_get_phy_register 5343 * @hw: pointer to the hw struct 5344 * @phy_select: select which phy should be accessed 5345 * @dev_addr: PHY device address 5346 * @reg_addr: PHY register address 5347 * @reg_val: read register value 5348 * @cmd_details: pointer to command details structure or NULL 5349 * 5350 * Read the external PHY register. 
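 *
 * i40e_led_get_reg() above shows a typical call, reading an external PHY
 * register over the AdminQ:
 *
 *	u32 reg_val;
 *
 *	status = i40e_aq_get_phy_register(hw, I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
 *					  I40E_PHY_COM_REG_PAGE,
 *					  I40E_PHY_LED_PROV_REG_1,
 *					  &reg_val, NULL);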
5351 **/ 5352 i40e_status i40e_aq_get_phy_register(struct i40e_hw *hw, 5353 u8 phy_select, u8 dev_addr, 5354 u32 reg_addr, u32 *reg_val, 5355 struct i40e_asq_cmd_details *cmd_details) 5356 { 5357 struct i40e_aq_desc desc; 5358 struct i40e_aqc_phy_register_access *cmd = 5359 (struct i40e_aqc_phy_register_access *)&desc.params.raw; 5360 i40e_status status; 5361 5362 i40e_fill_default_direct_cmd_desc(&desc, 5363 i40e_aqc_opc_get_phy_register); 5364 5365 cmd->phy_interface = phy_select; 5366 cmd->dev_address = dev_addr; 5367 cmd->reg_address = cpu_to_le32(reg_addr); 5368 5369 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 5370 if (!status) 5371 *reg_val = le32_to_cpu(cmd->reg_value); 5372 5373 return status; 5374 } 5375 5376 /** 5377 * i40e_aq_write_ddp - Write dynamic device personalization (ddp) 5378 * @hw: pointer to the hw struct 5379 * @buff: command buffer (size in bytes = buff_size) 5380 * @buff_size: buffer size in bytes 5381 * @track_id: package tracking id 5382 * @error_offset: returns error offset 5383 * @error_info: returns error information 5384 * @cmd_details: pointer to command details structure or NULL 5385 **/ 5386 enum 5387 i40e_status_code i40e_aq_write_ddp(struct i40e_hw *hw, void *buff, 5388 u16 buff_size, u32 track_id, 5389 u32 *error_offset, u32 *error_info, 5390 struct i40e_asq_cmd_details *cmd_details) 5391 { 5392 struct i40e_aq_desc desc; 5393 struct i40e_aqc_write_personalization_profile *cmd = 5394 (struct i40e_aqc_write_personalization_profile *) 5395 &desc.params.raw; 5396 struct i40e_aqc_write_ddp_resp *resp; 5397 i40e_status status; 5398 5399 i40e_fill_default_direct_cmd_desc(&desc, 5400 i40e_aqc_opc_write_personalization_profile); 5401 5402 desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD); 5403 if (buff_size > I40E_AQ_LARGE_BUF) 5404 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 5405 5406 desc.datalen = cpu_to_le16(buff_size); 5407 5408 cmd->profile_track_id = cpu_to_le32(track_id); 5409 5410 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); 5411 if (!status) { 5412 resp = (struct i40e_aqc_write_ddp_resp *)&desc.params.raw; 5413 if (error_offset) 5414 *error_offset = le32_to_cpu(resp->error_offset); 5415 if (error_info) 5416 *error_info = le32_to_cpu(resp->error_info); 5417 } 5418 5419 return status; 5420 } 5421 5422 /** 5423 * i40e_aq_get_ddp_list - Read dynamic device personalization (ddp) 5424 * @hw: pointer to the hw struct 5425 * @buff: command buffer (size in bytes = buff_size) 5426 * @buff_size: buffer size in bytes 5427 * @flags: AdminQ command flags 5428 * @cmd_details: pointer to command details structure or NULL 5429 **/ 5430 enum 5431 i40e_status_code i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff, 5432 u16 buff_size, u8 flags, 5433 struct i40e_asq_cmd_details *cmd_details) 5434 { 5435 struct i40e_aq_desc desc; 5436 struct i40e_aqc_get_applied_profiles *cmd = 5437 (struct i40e_aqc_get_applied_profiles *)&desc.params.raw; 5438 i40e_status status; 5439 5440 i40e_fill_default_direct_cmd_desc(&desc, 5441 i40e_aqc_opc_get_personalization_profile_list); 5442 5443 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 5444 if (buff_size > I40E_AQ_LARGE_BUF) 5445 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 5446 desc.datalen = cpu_to_le16(buff_size); 5447 5448 cmd->flags = flags; 5449 5450 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); 5451 5452 return status; 5453 } 5454 5455 /** 5456 * i40e_find_segment_in_package 5457 * @segment_type: the segment type to search for 
(i.e., SEGMENT_TYPE_I40E) 5458 * @pkg_hdr: pointer to the package header to be searched 5459 * 5460 * This function searches a package file for a particular segment type. On 5461 * success it returns a pointer to the segment header, otherwise it will 5462 * return NULL. 5463 **/ 5464 struct i40e_generic_seg_header * 5465 i40e_find_segment_in_package(u32 segment_type, 5466 struct i40e_package_header *pkg_hdr) 5467 { 5468 struct i40e_generic_seg_header *segment; 5469 u32 i; 5470 5471 /* Search all package segments for the requested segment type */ 5472 for (i = 0; i < pkg_hdr->segment_count; i++) { 5473 segment = 5474 (struct i40e_generic_seg_header *)((u8 *)pkg_hdr + 5475 pkg_hdr->segment_offset[i]); 5476 5477 if (segment->type == segment_type) 5478 return segment; 5479 } 5480 5481 return NULL; 5482 } 5483 5484 /* Get section table in profile */ 5485 #define I40E_SECTION_TABLE(profile, sec_tbl) \ 5486 do { \ 5487 struct i40e_profile_segment *p = (profile); \ 5488 u32 count; \ 5489 u32 *nvm; \ 5490 count = p->device_table_count; \ 5491 nvm = (u32 *)&p->device_table[count]; \ 5492 sec_tbl = (struct i40e_section_table *)&nvm[nvm[0] + 1]; \ 5493 } while (0) 5494 5495 /* Get section header in profile */ 5496 #define I40E_SECTION_HEADER(profile, offset) \ 5497 (struct i40e_profile_section_header *)((u8 *)(profile) + (offset)) 5498 5499 /** 5500 * i40e_find_section_in_profile 5501 * @section_type: the section type to search for (i.e., SECTION_TYPE_NOTE) 5502 * @profile: pointer to the i40e segment header to be searched 5503 * 5504 * This function searches i40e segment for a particular section type. On 5505 * success it returns a pointer to the section header, otherwise it will 5506 * return NULL. 5507 **/ 5508 struct i40e_profile_section_header * 5509 i40e_find_section_in_profile(u32 section_type, 5510 struct i40e_profile_segment *profile) 5511 { 5512 struct i40e_profile_section_header *sec; 5513 struct i40e_section_table *sec_tbl; 5514 u32 sec_off; 5515 u32 i; 5516 5517 if (profile->header.type != SEGMENT_TYPE_I40E) 5518 return NULL; 5519 5520 I40E_SECTION_TABLE(profile, sec_tbl); 5521 5522 for (i = 0; i < sec_tbl->section_count; i++) { 5523 sec_off = sec_tbl->section_offset[i]; 5524 sec = I40E_SECTION_HEADER(profile, sec_off); 5525 if (sec->section.type == section_type) 5526 return sec; 5527 } 5528 5529 return NULL; 5530 } 5531 5532 /** 5533 * i40e_ddp_exec_aq_section - Execute generic AQ for DDP 5534 * @hw: pointer to the hw struct 5535 * @aq: command buffer containing all data to execute AQ 5536 **/ 5537 static enum 5538 i40e_status_code i40e_ddp_exec_aq_section(struct i40e_hw *hw, 5539 struct i40e_profile_aq_section *aq) 5540 { 5541 i40e_status status; 5542 struct i40e_aq_desc desc; 5543 u8 *msg = NULL; 5544 u16 msglen; 5545 5546 i40e_fill_default_direct_cmd_desc(&desc, aq->opcode); 5547 desc.flags |= cpu_to_le16(aq->flags); 5548 memcpy(desc.params.raw, aq->param, sizeof(desc.params.raw)); 5549 5550 msglen = aq->datalen; 5551 if (msglen) { 5552 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | 5553 I40E_AQ_FLAG_RD)); 5554 if (msglen > I40E_AQ_LARGE_BUF) 5555 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 5556 desc.datalen = cpu_to_le16(msglen); 5557 msg = &aq->data[0]; 5558 } 5559 5560 status = i40e_asq_send_command(hw, &desc, msg, msglen, NULL); 5561 5562 if (status) { 5563 i40e_debug(hw, I40E_DEBUG_PACKAGE, 5564 "unable to exec DDP AQ opcode %u, error %d\n", 5565 aq->opcode, status); 5566 return status; 5567 } 5568 5569 /* copy returned desc to aq_buf */ 5570 memcpy(aq->param, 
desc.params.raw, sizeof(desc.params.raw)); 5571 5572 return 0; 5573 } 5574 5575 /** 5576 * i40e_validate_profile 5577 * @hw: pointer to the hardware structure 5578 * @profile: pointer to the profile segment of the package to be validated 5579 * @track_id: package tracking id 5580 * @rollback: flag if the profile is for rollback. 5581 * 5582 * Validates supported devices and profile's sections. 5583 */ 5584 static enum i40e_status_code 5585 i40e_validate_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile, 5586 u32 track_id, bool rollback) 5587 { 5588 struct i40e_profile_section_header *sec = NULL; 5589 i40e_status status = 0; 5590 struct i40e_section_table *sec_tbl; 5591 u32 vendor_dev_id; 5592 u32 dev_cnt; 5593 u32 sec_off; 5594 u32 i; 5595 5596 if (track_id == I40E_DDP_TRACKID_INVALID) { 5597 i40e_debug(hw, I40E_DEBUG_PACKAGE, "Invalid track_id\n"); 5598 return I40E_NOT_SUPPORTED; 5599 } 5600 5601 dev_cnt = profile->device_table_count; 5602 for (i = 0; i < dev_cnt; i++) { 5603 vendor_dev_id = profile->device_table[i].vendor_dev_id; 5604 if ((vendor_dev_id >> 16) == PCI_VENDOR_ID_INTEL && 5605 hw->device_id == (vendor_dev_id & 0xFFFF)) 5606 break; 5607 } 5608 if (dev_cnt && i == dev_cnt) { 5609 i40e_debug(hw, I40E_DEBUG_PACKAGE, 5610 "Device doesn't support DDP\n"); 5611 return I40E_ERR_DEVICE_NOT_SUPPORTED; 5612 } 5613 5614 I40E_SECTION_TABLE(profile, sec_tbl); 5615 5616 /* Validate sections types */ 5617 for (i = 0; i < sec_tbl->section_count; i++) { 5618 sec_off = sec_tbl->section_offset[i]; 5619 sec = I40E_SECTION_HEADER(profile, sec_off); 5620 if (rollback) { 5621 if (sec->section.type == SECTION_TYPE_MMIO || 5622 sec->section.type == SECTION_TYPE_AQ || 5623 sec->section.type == SECTION_TYPE_RB_AQ) { 5624 i40e_debug(hw, I40E_DEBUG_PACKAGE, 5625 "Not a roll-back package\n"); 5626 return I40E_NOT_SUPPORTED; 5627 } 5628 } else { 5629 if (sec->section.type == SECTION_TYPE_RB_AQ || 5630 sec->section.type == SECTION_TYPE_RB_MMIO) { 5631 i40e_debug(hw, I40E_DEBUG_PACKAGE, 5632 "Not an original package\n"); 5633 return I40E_NOT_SUPPORTED; 5634 } 5635 } 5636 } 5637 5638 return status; 5639 } 5640 5641 /** 5642 * i40e_write_profile 5643 * @hw: pointer to the hardware structure 5644 * @profile: pointer to the profile segment of the package to be downloaded 5645 * @track_id: package tracking id 5646 * 5647 * Handles the download of a complete package. 
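 *
 * Typical flow (a sketch; buffer management and error handling are up to
 * the caller, and pinfo_sec is a caller-provided buffer): locate the i40e
 * segment in the package, write it, then record the applied profile:
 *
 *	profile = (struct i40e_profile_segment *)
 *		  i40e_find_segment_in_package(SEGMENT_TYPE_I40E, pkg_hdr);
 *	status = i40e_write_profile(hw, profile, track_id);
 *	if (!status)
 *		status = i40e_add_pinfo_to_list(hw, profile, pinfo_sec,
 *						track_id);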
5648 */ 5649 enum i40e_status_code 5650 i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile, 5651 u32 track_id) 5652 { 5653 i40e_status status = 0; 5654 struct i40e_section_table *sec_tbl; 5655 struct i40e_profile_section_header *sec = NULL; 5656 struct i40e_profile_aq_section *ddp_aq; 5657 u32 section_size = 0; 5658 u32 offset = 0, info = 0; 5659 u32 sec_off; 5660 u32 i; 5661 5662 status = i40e_validate_profile(hw, profile, track_id, false); 5663 if (status) 5664 return status; 5665 5666 I40E_SECTION_TABLE(profile, sec_tbl); 5667 5668 for (i = 0; i < sec_tbl->section_count; i++) { 5669 sec_off = sec_tbl->section_offset[i]; 5670 sec = I40E_SECTION_HEADER(profile, sec_off); 5671 /* Process generic admin command */ 5672 if (sec->section.type == SECTION_TYPE_AQ) { 5673 ddp_aq = (struct i40e_profile_aq_section *)&sec[1]; 5674 status = i40e_ddp_exec_aq_section(hw, ddp_aq); 5675 if (status) { 5676 i40e_debug(hw, I40E_DEBUG_PACKAGE, 5677 "Failed to execute aq: section %d, opcode %u\n", 5678 i, ddp_aq->opcode); 5679 break; 5680 } 5681 sec->section.type = SECTION_TYPE_RB_AQ; 5682 } 5683 5684 /* Skip any non-mmio sections */ 5685 if (sec->section.type != SECTION_TYPE_MMIO) 5686 continue; 5687 5688 section_size = sec->section.size + 5689 sizeof(struct i40e_profile_section_header); 5690 5691 /* Write MMIO section */ 5692 status = i40e_aq_write_ddp(hw, (void *)sec, (u16)section_size, 5693 track_id, &offset, &info, NULL); 5694 if (status) { 5695 i40e_debug(hw, I40E_DEBUG_PACKAGE, 5696 "Failed to write profile: section %d, offset %d, info %d\n", 5697 i, offset, info); 5698 break; 5699 } 5700 } 5701 return status; 5702 } 5703 5704 /** 5705 * i40e_rollback_profile 5706 * @hw: pointer to the hardware structure 5707 * @profile: pointer to the profile segment of the package to be removed 5708 * @track_id: package tracking id 5709 * 5710 * Rolls back previously loaded package. 5711 */ 5712 enum i40e_status_code 5713 i40e_rollback_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile, 5714 u32 track_id) 5715 { 5716 struct i40e_profile_section_header *sec = NULL; 5717 i40e_status status = 0; 5718 struct i40e_section_table *sec_tbl; 5719 u32 offset = 0, info = 0; 5720 u32 section_size = 0; 5721 u32 sec_off; 5722 int i; 5723 5724 status = i40e_validate_profile(hw, profile, track_id, true); 5725 if (status) 5726 return status; 5727 5728 I40E_SECTION_TABLE(profile, sec_tbl); 5729 5730 /* For rollback write sections in reverse */ 5731 for (i = sec_tbl->section_count - 1; i >= 0; i--) { 5732 sec_off = sec_tbl->section_offset[i]; 5733 sec = I40E_SECTION_HEADER(profile, sec_off); 5734 5735 /* Skip any non-rollback sections */ 5736 if (sec->section.type != SECTION_TYPE_RB_MMIO) 5737 continue; 5738 5739 section_size = sec->section.size + 5740 sizeof(struct i40e_profile_section_header); 5741 5742 /* Write roll-back MMIO section */ 5743 status = i40e_aq_write_ddp(hw, (void *)sec, (u16)section_size, 5744 track_id, &offset, &info, NULL); 5745 if (status) { 5746 i40e_debug(hw, I40E_DEBUG_PACKAGE, 5747 "Failed to write profile: section %d, offset %d, info %d\n", 5748 i, offset, info); 5749 break; 5750 } 5751 } 5752 return status; 5753 } 5754 5755 /** 5756 * i40e_add_pinfo_to_list 5757 * @hw: pointer to the hardware structure 5758 * @profile: pointer to the profile segment of the package 5759 * @profile_info_sec: buffer for information section 5760 * @track_id: package tracking id 5761 * 5762 * Register a profile to the list of loaded profiles. 
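 *
 * @profile_info_sec must be at least sizeof(struct i40e_profile_section_header)
 * plus sizeof(struct i40e_profile_info) bytes long; that many bytes (the
 * section's data_end) are written to the firmware.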
5763 */ 5764 enum i40e_status_code 5765 i40e_add_pinfo_to_list(struct i40e_hw *hw, 5766 struct i40e_profile_segment *profile, 5767 u8 *profile_info_sec, u32 track_id) 5768 { 5769 i40e_status status = 0; 5770 struct i40e_profile_section_header *sec = NULL; 5771 struct i40e_profile_info *pinfo; 5772 u32 offset = 0, info = 0; 5773 5774 sec = (struct i40e_profile_section_header *)profile_info_sec; 5775 sec->tbl_size = 1; 5776 sec->data_end = sizeof(struct i40e_profile_section_header) + 5777 sizeof(struct i40e_profile_info); 5778 sec->section.type = SECTION_TYPE_INFO; 5779 sec->section.offset = sizeof(struct i40e_profile_section_header); 5780 sec->section.size = sizeof(struct i40e_profile_info); 5781 pinfo = (struct i40e_profile_info *)(profile_info_sec + 5782 sec->section.offset); 5783 pinfo->track_id = track_id; 5784 pinfo->version = profile->version; 5785 pinfo->op = I40E_DDP_ADD_TRACKID; 5786 memcpy(pinfo->name, profile->name, I40E_DDP_NAME_SIZE); 5787 5788 status = i40e_aq_write_ddp(hw, (void *)sec, sec->data_end, 5789 track_id, &offset, &info, NULL); 5790 5791 return status; 5792 } 5793 5794 /** 5795 * i40e_aq_add_cloud_filters 5796 * @hw: pointer to the hardware structure 5797 * @seid: VSI seid to add cloud filters to 5798 * @filters: Buffer which contains the filters to be added 5799 * @filter_count: number of filters contained in the buffer 5800 * 5801 * Set the cloud filters for a given VSI. The contents of the 5802 * i40e_aqc_cloud_filters_element_data are filled in by the caller 5803 * of the function. 5804 * 5805 **/ 5806 enum i40e_status_code 5807 i40e_aq_add_cloud_filters(struct i40e_hw *hw, u16 seid, 5808 struct i40e_aqc_cloud_filters_element_data *filters, 5809 u8 filter_count) 5810 { 5811 struct i40e_aq_desc desc; 5812 struct i40e_aqc_add_remove_cloud_filters *cmd = 5813 (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw; 5814 enum i40e_status_code status; 5815 u16 buff_len; 5816 5817 i40e_fill_default_direct_cmd_desc(&desc, 5818 i40e_aqc_opc_add_cloud_filters); 5819 5820 buff_len = filter_count * sizeof(*filters); 5821 desc.datalen = cpu_to_le16(buff_len); 5822 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); 5823 cmd->num_filters = filter_count; 5824 cmd->seid = cpu_to_le16(seid); 5825 5826 status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL); 5827 5828 return status; 5829 } 5830 5831 /** 5832 * i40e_aq_add_cloud_filters_bb 5833 * @hw: pointer to the hardware structure 5834 * @seid: VSI seid to add cloud filters to 5835 * @filters: Buffer which contains the filters in big buffer to be added 5836 * @filter_count: number of filters contained in the buffer 5837 * 5838 * Set the big buffer cloud filters for a given VSI. The contents of the 5839 * i40e_aqc_cloud_filters_element_bb are filled in by the caller of the 5840 * function.
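 *
 * Note that for Geneve filters the tenant ID in each element is shifted
 * left by one byte before the buffer is passed to firmware, so callers
 * should supply the unshifted VNI.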
5841 * 5842 **/ 5843 enum i40e_status_code 5844 i40e_aq_add_cloud_filters_bb(struct i40e_hw *hw, u16 seid, 5845 struct i40e_aqc_cloud_filters_element_bb *filters, 5846 u8 filter_count) 5847 { 5848 struct i40e_aq_desc desc; 5849 struct i40e_aqc_add_remove_cloud_filters *cmd = 5850 (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw; 5851 i40e_status status; 5852 u16 buff_len; 5853 int i; 5854 5855 i40e_fill_default_direct_cmd_desc(&desc, 5856 i40e_aqc_opc_add_cloud_filters); 5857 5858 buff_len = filter_count * sizeof(*filters); 5859 desc.datalen = cpu_to_le16(buff_len); 5860 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); 5861 cmd->num_filters = filter_count; 5862 cmd->seid = cpu_to_le16(seid); 5863 cmd->big_buffer_flag = I40E_AQC_ADD_CLOUD_CMD_BB; 5864 5865 for (i = 0; i < filter_count; i++) { 5866 u16 tnl_type; 5867 u32 ti; 5868 5869 tnl_type = (le16_to_cpu(filters[i].element.flags) & 5870 I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >> 5871 I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT; 5872 5873 /* Due to hardware eccentricities, the VNI for Geneve is shifted 5874 * one more byte further than normally used for Tenant ID in 5875 * other tunnel types. 5876 */ 5877 if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) { 5878 ti = le32_to_cpu(filters[i].element.tenant_id); 5879 filters[i].element.tenant_id = cpu_to_le32(ti << 8); 5880 } 5881 } 5882 5883 status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL); 5884 5885 return status; 5886 } 5887 5888 /** 5889 * i40e_aq_rem_cloud_filters 5890 * @hw: pointer to the hardware structure 5891 * @seid: VSI seid to remove cloud filters from 5892 * @filters: Buffer which contains the filters to be removed 5893 * @filter_count: number of filters contained in the buffer 5894 * 5895 * Remove the cloud filters for a given VSI. The contents of the 5896 * i40e_aqc_cloud_filters_element_data are filled in by the caller 5897 * of the function. 5898 * 5899 **/ 5900 enum i40e_status_code 5901 i40e_aq_rem_cloud_filters(struct i40e_hw *hw, u16 seid, 5902 struct i40e_aqc_cloud_filters_element_data *filters, 5903 u8 filter_count) 5904 { 5905 struct i40e_aq_desc desc; 5906 struct i40e_aqc_add_remove_cloud_filters *cmd = 5907 (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw; 5908 enum i40e_status_code status; 5909 u16 buff_len; 5910 5911 i40e_fill_default_direct_cmd_desc(&desc, 5912 i40e_aqc_opc_remove_cloud_filters); 5913 5914 buff_len = filter_count * sizeof(*filters); 5915 desc.datalen = cpu_to_le16(buff_len); 5916 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); 5917 cmd->num_filters = filter_count; 5918 cmd->seid = cpu_to_le16(seid); 5919 5920 status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL); 5921 5922 return status; 5923 } 5924 5925 /** 5926 * i40e_aq_rem_cloud_filters_bb 5927 * @hw: pointer to the hardware structure 5928 * @seid: VSI seid to remove cloud filters from 5929 * @filters: Buffer which contains the filters in big buffer to be removed 5930 * @filter_count: number of filters contained in the buffer 5931 * 5932 * Remove the big buffer cloud filters for a given VSI. The contents of the 5933 * i40e_aqc_cloud_filters_element_bb are filled in by the caller of the 5934 * function. 
5935 * 5936 **/ 5937 enum i40e_status_code 5938 i40e_aq_rem_cloud_filters_bb(struct i40e_hw *hw, u16 seid, 5939 struct i40e_aqc_cloud_filters_element_bb *filters, 5940 u8 filter_count) 5941 { 5942 struct i40e_aq_desc desc; 5943 struct i40e_aqc_add_remove_cloud_filters *cmd = 5944 (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw; 5945 i40e_status status; 5946 u16 buff_len; 5947 int i; 5948 5949 i40e_fill_default_direct_cmd_desc(&desc, 5950 i40e_aqc_opc_remove_cloud_filters); 5951 5952 buff_len = filter_count * sizeof(*filters); 5953 desc.datalen = cpu_to_le16(buff_len); 5954 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); 5955 cmd->num_filters = filter_count; 5956 cmd->seid = cpu_to_le16(seid); 5957 cmd->big_buffer_flag = I40E_AQC_ADD_CLOUD_CMD_BB; 5958 5959 for (i = 0; i < filter_count; i++) { 5960 u16 tnl_type; 5961 u32 ti; 5962 5963 tnl_type = (le16_to_cpu(filters[i].element.flags) & 5964 I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >> 5965 I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT; 5966 5967 /* Due to hardware eccentricities, the VNI for Geneve is shifted 5968 * one more byte further than normally used for Tenant ID in 5969 * other tunnel types. 5970 */ 5971 if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) { 5972 ti = le32_to_cpu(filters[i].element.tenant_id); 5973 filters[i].element.tenant_id = cpu_to_le32(ti << 8); 5974 } 5975 } 5976 5977 status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL); 5978 5979 return status; 5980 } 5981
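/* Illustrative sketch, not part of the driver: a helper that sets bits in
 * an Rx control register using the firmware-assisted accessors above. The
 * helper name, register offset and mask are placeholders chosen for the
 * example only.
 */
static inline void i40e_example_rx_ctl_set_bits(struct i40e_hw *hw,
						u32 reg_addr, u32 mask)
{
	/* i40e_read_rx_ctl() falls back to a direct MMIO read when the
	 * firmware path is unavailable, so the value is always usable here.
	 */
	u32 val = i40e_read_rx_ctl(hw, reg_addr);

	/* Write back through the same firmware-assisted path. */
	i40e_write_rx_ctl(hw, reg_addr, val | mask);
}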