// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2021 Intel Corporation. */

#include "i40e.h"
#include "i40e_type.h"
#include "i40e_adminq.h"
#include "i40e_prototype.h"
#include <linux/avf/virtchnl.h>

/**
 * i40e_set_mac_type - Sets MAC type
 * @hw: pointer to the HW structure
 *
 * This function sets the mac type of the adapter based on the
 * vendor ID and device ID stored in the hw structure.
 **/
i40e_status i40e_set_mac_type(struct i40e_hw *hw)
{
	i40e_status status = 0;

	if (hw->vendor_id == PCI_VENDOR_ID_INTEL) {
		switch (hw->device_id) {
		case I40E_DEV_ID_SFP_XL710:
		case I40E_DEV_ID_QEMU:
		case I40E_DEV_ID_KX_B:
		case I40E_DEV_ID_KX_C:
		case I40E_DEV_ID_QSFP_A:
		case I40E_DEV_ID_QSFP_B:
		case I40E_DEV_ID_QSFP_C:
		case I40E_DEV_ID_5G_BASE_T_BC:
		case I40E_DEV_ID_10G_BASE_T:
		case I40E_DEV_ID_10G_BASE_T4:
		case I40E_DEV_ID_10G_BASE_T_BC:
		case I40E_DEV_ID_10G_B:
		case I40E_DEV_ID_10G_SFP:
		case I40E_DEV_ID_20G_KR2:
		case I40E_DEV_ID_20G_KR2_A:
		case I40E_DEV_ID_25G_B:
		case I40E_DEV_ID_25G_SFP28:
		case I40E_DEV_ID_X710_N3000:
		case I40E_DEV_ID_XXV710_N3000:
			hw->mac.type = I40E_MAC_XL710;
			break;
		case I40E_DEV_ID_KX_X722:
		case I40E_DEV_ID_QSFP_X722:
		case I40E_DEV_ID_SFP_X722:
		case I40E_DEV_ID_1G_BASE_T_X722:
		case I40E_DEV_ID_10G_BASE_T_X722:
		case I40E_DEV_ID_SFP_I_X722:
			hw->mac.type = I40E_MAC_X722;
			break;
		default:
			hw->mac.type = I40E_MAC_GENERIC;
			break;
		}
	} else {
		status = I40E_ERR_DEVICE_NOT_SUPPORTED;
	}

	hw_dbg(hw, "i40e_set_mac_type found mac: %d, returns: %d\n",
	       hw->mac.type, status);
	return status;
}

/**
 * i40e_aq_str - convert AQ err code to a string
 * @hw: pointer to the HW structure
 * @aq_err: the AQ error code to convert
 **/
const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err)
{
	switch (aq_err) {
	case I40E_AQ_RC_OK:
		return "OK";
	case I40E_AQ_RC_EPERM:
		return "I40E_AQ_RC_EPERM";
	case I40E_AQ_RC_ENOENT:
		return "I40E_AQ_RC_ENOENT";
	case I40E_AQ_RC_ESRCH:
		return "I40E_AQ_RC_ESRCH";
	case I40E_AQ_RC_EINTR:
		return "I40E_AQ_RC_EINTR";
	case I40E_AQ_RC_EIO:
		return "I40E_AQ_RC_EIO";
	case I40E_AQ_RC_ENXIO:
		return "I40E_AQ_RC_ENXIO";
	case I40E_AQ_RC_E2BIG:
		return "I40E_AQ_RC_E2BIG";
	case I40E_AQ_RC_EAGAIN:
		return "I40E_AQ_RC_EAGAIN";
	case I40E_AQ_RC_ENOMEM:
		return "I40E_AQ_RC_ENOMEM";
	case I40E_AQ_RC_EACCES:
		return "I40E_AQ_RC_EACCES";
	case I40E_AQ_RC_EFAULT:
		return "I40E_AQ_RC_EFAULT";
	case I40E_AQ_RC_EBUSY:
		return "I40E_AQ_RC_EBUSY";
	case I40E_AQ_RC_EEXIST:
		return "I40E_AQ_RC_EEXIST";
	case I40E_AQ_RC_EINVAL:
		return "I40E_AQ_RC_EINVAL";
	case I40E_AQ_RC_ENOTTY:
		return "I40E_AQ_RC_ENOTTY";
	case I40E_AQ_RC_ENOSPC:
		return "I40E_AQ_RC_ENOSPC";
	case I40E_AQ_RC_ENOSYS:
		return "I40E_AQ_RC_ENOSYS";
	case I40E_AQ_RC_ERANGE:
		return "I40E_AQ_RC_ERANGE";
	case I40E_AQ_RC_EFLUSHED:
		return "I40E_AQ_RC_EFLUSHED";
	case I40E_AQ_RC_BAD_ADDR:
		return "I40E_AQ_RC_BAD_ADDR";
	case I40E_AQ_RC_EMODE:
		return "I40E_AQ_RC_EMODE";
	case I40E_AQ_RC_EFBIG:
		return "I40E_AQ_RC_EFBIG";
	}

	snprintf(hw->err_str, sizeof(hw->err_str), "%d", aq_err);
	return hw->err_str;
}

/**
 * i40e_stat_str - convert status err code to a string
 * @hw: pointer to the HW structure
 * @stat_err: the status error code to convert
 **/
const char *i40e_stat_str(struct i40e_hw *hw, i40e_status stat_err)
{
	switch (stat_err) {
	case 0:
		return "OK";
	case I40E_ERR_NVM:
		return "I40E_ERR_NVM";
	case I40E_ERR_NVM_CHECKSUM:
		return "I40E_ERR_NVM_CHECKSUM";
	case I40E_ERR_PHY:
		return "I40E_ERR_PHY";
	case I40E_ERR_CONFIG:
		return "I40E_ERR_CONFIG";
	case I40E_ERR_PARAM:
		return "I40E_ERR_PARAM";
	case I40E_ERR_MAC_TYPE:
		return "I40E_ERR_MAC_TYPE";
	case I40E_ERR_UNKNOWN_PHY:
		return "I40E_ERR_UNKNOWN_PHY";
	case I40E_ERR_LINK_SETUP:
		return "I40E_ERR_LINK_SETUP";
	case I40E_ERR_ADAPTER_STOPPED:
		return "I40E_ERR_ADAPTER_STOPPED";
	case I40E_ERR_INVALID_MAC_ADDR:
		return "I40E_ERR_INVALID_MAC_ADDR";
	case I40E_ERR_DEVICE_NOT_SUPPORTED:
		return "I40E_ERR_DEVICE_NOT_SUPPORTED";
	case I40E_ERR_PRIMARY_REQUESTS_PENDING:
		return "I40E_ERR_PRIMARY_REQUESTS_PENDING";
	case I40E_ERR_INVALID_LINK_SETTINGS:
		return "I40E_ERR_INVALID_LINK_SETTINGS";
	case I40E_ERR_AUTONEG_NOT_COMPLETE:
		return "I40E_ERR_AUTONEG_NOT_COMPLETE";
	case I40E_ERR_RESET_FAILED:
		return "I40E_ERR_RESET_FAILED";
	case I40E_ERR_SWFW_SYNC:
		return "I40E_ERR_SWFW_SYNC";
	case I40E_ERR_NO_AVAILABLE_VSI:
		return "I40E_ERR_NO_AVAILABLE_VSI";
	case I40E_ERR_NO_MEMORY:
		return "I40E_ERR_NO_MEMORY";
	case I40E_ERR_BAD_PTR:
		return "I40E_ERR_BAD_PTR";
	case I40E_ERR_RING_FULL:
		return "I40E_ERR_RING_FULL";
	case I40E_ERR_INVALID_PD_ID:
		return "I40E_ERR_INVALID_PD_ID";
	case I40E_ERR_INVALID_QP_ID:
		return "I40E_ERR_INVALID_QP_ID";
	case I40E_ERR_INVALID_CQ_ID:
		return "I40E_ERR_INVALID_CQ_ID";
	case I40E_ERR_INVALID_CEQ_ID:
		return "I40E_ERR_INVALID_CEQ_ID";
	case I40E_ERR_INVALID_AEQ_ID:
		return "I40E_ERR_INVALID_AEQ_ID";
	case I40E_ERR_INVALID_SIZE:
		return "I40E_ERR_INVALID_SIZE";
	case I40E_ERR_INVALID_ARP_INDEX:
		return "I40E_ERR_INVALID_ARP_INDEX";
	case I40E_ERR_INVALID_FPM_FUNC_ID:
		return "I40E_ERR_INVALID_FPM_FUNC_ID";
	case I40E_ERR_QP_INVALID_MSG_SIZE:
		return "I40E_ERR_QP_INVALID_MSG_SIZE";
	case I40E_ERR_QP_TOOMANY_WRS_POSTED:
		return "I40E_ERR_QP_TOOMANY_WRS_POSTED";
	case I40E_ERR_INVALID_FRAG_COUNT:
		return "I40E_ERR_INVALID_FRAG_COUNT";
	case I40E_ERR_QUEUE_EMPTY:
		return "I40E_ERR_QUEUE_EMPTY";
	case I40E_ERR_INVALID_ALIGNMENT:
		return "I40E_ERR_INVALID_ALIGNMENT";
	case I40E_ERR_FLUSHED_QUEUE:
		return "I40E_ERR_FLUSHED_QUEUE";
	case I40E_ERR_INVALID_PUSH_PAGE_INDEX:
		return "I40E_ERR_INVALID_PUSH_PAGE_INDEX";
	case I40E_ERR_INVALID_IMM_DATA_SIZE:
		return "I40E_ERR_INVALID_IMM_DATA_SIZE";
	case I40E_ERR_TIMEOUT:
		return "I40E_ERR_TIMEOUT";
	case I40E_ERR_OPCODE_MISMATCH:
		return "I40E_ERR_OPCODE_MISMATCH";
	case I40E_ERR_CQP_COMPL_ERROR:
		return "I40E_ERR_CQP_COMPL_ERROR";
	case I40E_ERR_INVALID_VF_ID:
		return "I40E_ERR_INVALID_VF_ID";
	case I40E_ERR_INVALID_HMCFN_ID:
		return "I40E_ERR_INVALID_HMCFN_ID";
	case I40E_ERR_BACKING_PAGE_ERROR:
		return "I40E_ERR_BACKING_PAGE_ERROR";
	case I40E_ERR_NO_PBLCHUNKS_AVAILABLE:
		return "I40E_ERR_NO_PBLCHUNKS_AVAILABLE";
	case I40E_ERR_INVALID_PBLE_INDEX:
		return "I40E_ERR_INVALID_PBLE_INDEX";
	case I40E_ERR_INVALID_SD_INDEX:
		return "I40E_ERR_INVALID_SD_INDEX";
	case I40E_ERR_INVALID_PAGE_DESC_INDEX:
		return "I40E_ERR_INVALID_PAGE_DESC_INDEX";
	case I40E_ERR_INVALID_SD_TYPE:
		return "I40E_ERR_INVALID_SD_TYPE";
	case I40E_ERR_MEMCPY_FAILED:
		return "I40E_ERR_MEMCPY_FAILED";
	case I40E_ERR_INVALID_HMC_OBJ_INDEX:
		return "I40E_ERR_INVALID_HMC_OBJ_INDEX";
	case I40E_ERR_INVALID_HMC_OBJ_COUNT:
		return "I40E_ERR_INVALID_HMC_OBJ_COUNT";
	case I40E_ERR_INVALID_SRQ_ARM_LIMIT:
		return "I40E_ERR_INVALID_SRQ_ARM_LIMIT";
	case I40E_ERR_SRQ_ENABLED:
		return "I40E_ERR_SRQ_ENABLED";
	case I40E_ERR_ADMIN_QUEUE_ERROR:
		return "I40E_ERR_ADMIN_QUEUE_ERROR";
	case I40E_ERR_ADMIN_QUEUE_TIMEOUT:
		return "I40E_ERR_ADMIN_QUEUE_TIMEOUT";
	case I40E_ERR_BUF_TOO_SHORT:
		return "I40E_ERR_BUF_TOO_SHORT";
	case I40E_ERR_ADMIN_QUEUE_FULL:
		return "I40E_ERR_ADMIN_QUEUE_FULL";
	case I40E_ERR_ADMIN_QUEUE_NO_WORK:
		return "I40E_ERR_ADMIN_QUEUE_NO_WORK";
	case I40E_ERR_BAD_IWARP_CQE:
		return "I40E_ERR_BAD_IWARP_CQE";
	case I40E_ERR_NVM_BLANK_MODE:
		return "I40E_ERR_NVM_BLANK_MODE";
	case I40E_ERR_NOT_IMPLEMENTED:
		return "I40E_ERR_NOT_IMPLEMENTED";
	case I40E_ERR_PE_DOORBELL_NOT_ENABLED:
		return "I40E_ERR_PE_DOORBELL_NOT_ENABLED";
	case I40E_ERR_DIAG_TEST_FAILED:
		return "I40E_ERR_DIAG_TEST_FAILED";
	case I40E_ERR_NOT_READY:
		return "I40E_ERR_NOT_READY";
	case I40E_NOT_SUPPORTED:
		return "I40E_NOT_SUPPORTED";
	case I40E_ERR_FIRMWARE_API_VERSION:
		return "I40E_ERR_FIRMWARE_API_VERSION";
	case I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR:
		return "I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR";
	}

	snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err);
	return hw->err_str;
}
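/*
 * Usage sketch (illustrative only, not part of the original file): the two
 * string helpers above are typically used together when logging a failed
 * AdminQ command, e.g.
 *
 *	status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
 *	if (status)
 *		hw_dbg(hw, "AQ command failed, err %s aq_err %s\n",
 *		       i40e_stat_str(hw, status),
 *		       i40e_aq_str(hw, hw->aq.asq_last_status));
 */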
/**
 * i40e_debug_aq
 * @hw: pointer to the hw struct
 * @mask: debug mask
 * @desc: pointer to admin queue descriptor
 * @buffer: pointer to command buffer
 * @buf_len: max length of buffer
 *
 * Dumps debug log about adminq command with descriptor contents.
 **/
void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
		   void *buffer, u16 buf_len)
{
	struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc;
	u32 effective_mask = hw->debug_mask & mask;
	char prefix[27];
	u16 len;
	u8 *buf = (u8 *)buffer;

	if (!effective_mask || !desc)
		return;

	len = le16_to_cpu(aq_desc->datalen);

	i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
		   "AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
		   le16_to_cpu(aq_desc->opcode),
		   le16_to_cpu(aq_desc->flags),
		   le16_to_cpu(aq_desc->datalen),
		   le16_to_cpu(aq_desc->retval));
	i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
		   "\tcookie (h,l) 0x%08X 0x%08X\n",
		   le32_to_cpu(aq_desc->cookie_high),
		   le32_to_cpu(aq_desc->cookie_low));
	i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
		   "\tparam (0,1) 0x%08X 0x%08X\n",
		   le32_to_cpu(aq_desc->params.internal.param0),
		   le32_to_cpu(aq_desc->params.internal.param1));
	i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
		   "\taddr (h,l) 0x%08X 0x%08X\n",
		   le32_to_cpu(aq_desc->params.external.addr_high),
		   le32_to_cpu(aq_desc->params.external.addr_low));

	if (buffer && buf_len != 0 && len != 0 &&
	    (effective_mask & I40E_DEBUG_AQ_DESC_BUFFER)) {
		i40e_debug(hw, mask, "AQ CMD Buffer:\n");
		if (buf_len < len)
			len = buf_len;

		snprintf(prefix, sizeof(prefix),
			 "i40e %02x:%02x.%x: \t0x",
			 hw->bus.bus_id,
			 hw->bus.device,
			 hw->bus.func);

		print_hex_dump(KERN_INFO, prefix, DUMP_PREFIX_OFFSET,
			       16, 1, buf, len, false);
	}
}

/**
 * i40e_check_asq_alive
 * @hw: pointer to the hw struct
 *
 * Returns true if Queue is enabled else false.
 **/
bool i40e_check_asq_alive(struct i40e_hw *hw)
{
	if (hw->aq.asq.len)
		return !!(rd32(hw, hw->aq.asq.len) &
			  I40E_PF_ATQLEN_ATQENABLE_MASK);
	else
		return false;
}

/**
 * i40e_aq_queue_shutdown
 * @hw: pointer to the hw struct
 * @unloading: is the driver unloading itself
 *
 * Tell the Firmware that we're shutting down the AdminQ and whether
 * or not the driver is unloading as well.
 **/
i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw,
				   bool unloading)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_queue_shutdown *cmd =
		(struct i40e_aqc_queue_shutdown *)&desc.params.raw;
	i40e_status status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_queue_shutdown);

	if (unloading)
		cmd->driver_unloading = cpu_to_le32(I40E_AQ_DRIVER_UNLOADING);
	status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);

	return status;
}

/**
 * i40e_aq_get_set_rss_lut
 * @hw: pointer to the hardware structure
 * @vsi_id: vsi fw index
 * @pf_lut: for PF table set true, for VSI table set false
 * @lut: pointer to the lut buffer provided by the caller
 * @lut_size: size of the lut buffer
 * @set: set true to set the table, false to get the table
 *
 * Internal function to get or set RSS look up table
 **/
static i40e_status i40e_aq_get_set_rss_lut(struct i40e_hw *hw,
					   u16 vsi_id, bool pf_lut,
					   u8 *lut, u16 lut_size,
					   bool set)
{
	i40e_status status;
	struct i40e_aq_desc desc;
	struct i40e_aqc_get_set_rss_lut *cmd_resp =
		(struct i40e_aqc_get_set_rss_lut *)&desc.params.raw;

	if (set)
		i40e_fill_default_direct_cmd_desc(&desc,
						  i40e_aqc_opc_set_rss_lut);
	else
		i40e_fill_default_direct_cmd_desc(&desc,
						  i40e_aqc_opc_get_rss_lut);

	/* Indirect command */
	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);

	cmd_resp->vsi_id =
			cpu_to_le16((u16)((vsi_id <<
					  I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT) &
					  I40E_AQC_SET_RSS_LUT_VSI_ID_MASK));
	cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_LUT_VSI_VALID);

	if (pf_lut)
		cmd_resp->flags |= cpu_to_le16((u16)
					((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF <<
					I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
					I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
	else
		cmd_resp->flags |= cpu_to_le16((u16)
					((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI <<
					I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
					I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));

	status = i40e_asq_send_command(hw, &desc, lut, lut_size, NULL);

	return status;
}

/**
 * i40e_aq_get_rss_lut
 * @hw: pointer to the hardware structure
 * @vsi_id: vsi fw index
 * @pf_lut: for PF table set true, for VSI table set false
 * @lut: pointer to the lut buffer provided by the caller
 * @lut_size: size of the lut buffer
 *
 * get the RSS lookup table, PF or VSI type
 **/
i40e_status i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 vsi_id,
				bool pf_lut, u8 *lut, u16 lut_size)
{
	return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size,
				       false);
}

/**
 * i40e_aq_set_rss_lut
 * @hw: pointer to the hardware structure
 * @vsi_id: vsi fw index
 * @pf_lut: for PF table set true, for VSI table set false
 * @lut: pointer to the lut buffer provided by the caller
 * @lut_size: size of the lut buffer
 *
 * set the RSS lookup table, PF or VSI type
 **/
i40e_status i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 vsi_id,
				bool pf_lut, u8 *lut, u16 lut_size)
{
	return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, true);
}

/**
 * i40e_aq_get_set_rss_key
 * @hw: pointer to the hw struct
 * @vsi_id: vsi fw index
 * @key: pointer to key info struct
 * @set: set true to set the key, false to get the key
 *
 * get the RSS key per VSI
 **/
static i40e_status i40e_aq_get_set_rss_key(struct i40e_hw *hw,
				      u16 vsi_id,
				      struct i40e_aqc_get_set_rss_key_data *key,
				      bool set)
{
	i40e_status status;
	struct i40e_aq_desc desc;
	struct i40e_aqc_get_set_rss_key *cmd_resp =
			(struct i40e_aqc_get_set_rss_key *)&desc.params.raw;
	u16 key_size = sizeof(struct i40e_aqc_get_set_rss_key_data);

	if (set)
		i40e_fill_default_direct_cmd_desc(&desc,
						  i40e_aqc_opc_set_rss_key);
	else
		i40e_fill_default_direct_cmd_desc(&desc,
						  i40e_aqc_opc_get_rss_key);

	/* Indirect command */
	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);

	cmd_resp->vsi_id =
			cpu_to_le16((u16)((vsi_id <<
					  I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) &
					  I40E_AQC_SET_RSS_KEY_VSI_ID_MASK));
	cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_KEY_VSI_VALID);

	status = i40e_asq_send_command(hw, &desc, key, key_size, NULL);

	return status;
}

/**
 * i40e_aq_get_rss_key
 * @hw: pointer to the hw struct
 * @vsi_id: vsi fw index
 * @key: pointer to key info struct
 *
 **/
i40e_status i40e_aq_get_rss_key(struct i40e_hw *hw,
				u16 vsi_id,
				struct i40e_aqc_get_set_rss_key_data *key)
{
	return i40e_aq_get_set_rss_key(hw, vsi_id, key, false);
}

/**
 * i40e_aq_set_rss_key
 * @hw: pointer to the hw struct
 * @vsi_id: vsi fw index
 * @key: pointer to key info struct
 *
 * set the RSS key per VSI
 **/
i40e_status i40e_aq_set_rss_key(struct i40e_hw *hw,
				u16 vsi_id,
				struct i40e_aqc_get_set_rss_key_data *key)
{
	return i40e_aq_get_set_rss_key(hw, vsi_id, key, true);
}
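/*
 * Usage sketch (illustrative only, not part of the original file): a caller
 * that wants to program RSS for a VSI would typically set the key first and
 * then the lookup table.  The LUT size below is an assumption made for the
 * example, not a value taken from this file.
 *
 *	struct i40e_aqc_get_set_rss_key_data key = {};
 *	u8 lut[128];	// hypothetical LUT size
 *	i40e_status status;
 *
 *	// fill key and lut as desired ...
 *	status = i40e_aq_set_rss_key(hw, vsi_id, &key);
 *	if (!status)
 *		status = i40e_aq_set_rss_lut(hw, vsi_id, false, lut,
 *					     sizeof(lut));
 */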
/* The i40e_ptype_lookup table is used to convert from the 8-bit ptype in the
 * hardware to a bit-field that can be used by SW to more easily determine the
 * packet type.
 *
 * Macros are used to shorten the table lines and make this table human
 * readable.
 *
 * We store the PTYPE in the top byte of the bit field - this is just so that
 * we can check that the table doesn't have a row missing, as the index into
 * the table should be the PTYPE.
 *
 * Typical work flow:
 *
 * IF NOT i40e_ptype_lookup[ptype].known
 * THEN
 *	Packet is unknown
 * ELSE IF i40e_ptype_lookup[ptype].outer_ip == I40E_RX_PTYPE_OUTER_IP
 *	Use the rest of the fields to look at the tunnels, inner protocols, etc
 * ELSE
 *	Use the enum i40e_rx_l2_ptype to decode the packet type
 * ENDIF
 */

/* macro to make the table lines short, use explicit indexing with [PTYPE] */
#define I40E_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\
	[PTYPE] = { \
		1, \
		I40E_RX_PTYPE_OUTER_##OUTER_IP, \
		I40E_RX_PTYPE_OUTER_##OUTER_IP_VER, \
		I40E_RX_PTYPE_##OUTER_FRAG, \
		I40E_RX_PTYPE_TUNNEL_##T, \
		I40E_RX_PTYPE_TUNNEL_END_##TE, \
		I40E_RX_PTYPE_##TEF, \
		I40E_RX_PTYPE_INNER_PROT_##I, \
		I40E_RX_PTYPE_PAYLOAD_LAYER_##PL }

#define I40E_PTT_UNUSED_ENTRY(PTYPE) [PTYPE] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 }

/* shorter macros make the table fit but are terse */
#define I40E_RX_PTYPE_NOF		I40E_RX_PTYPE_NOT_FRAG
#define I40E_RX_PTYPE_FRG		I40E_RX_PTYPE_FRAG
#define I40E_RX_PTYPE_INNER_PROT_TS	I40E_RX_PTYPE_INNER_PROT_TIMESYNC

/* Lookup table mapping in the 8-bit HW PTYPE to the bit field for decoding */
struct i40e_rx_ptype_decoded i40e_ptype_lookup[BIT(8)] = {
	/* L2 Packet types */
	I40E_PTT_UNUSED_ENTRY(0),
	I40E_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
	I40E_PTT(2, L2, NONE, NOF, NONE, NONE, NOF, TS, PAY2),
	I40E_PTT(3, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
	I40E_PTT_UNUSED_ENTRY(4),
	I40E_PTT_UNUSED_ENTRY(5),
	I40E_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
	I40E_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
	I40E_PTT_UNUSED_ENTRY(8),
	I40E_PTT_UNUSED_ENTRY(9),
	I40E_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
	I40E_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
	I40E_PTT(12, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(13, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(14, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(15, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(16, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(17, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(18, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(19, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(20, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(21, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),

	/* Non Tunneled IPv4 */
	I40E_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(25),
	I40E_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4),
	I40E_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4),
	I40E_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4),

	/* IPv4 --> IPv4 */
	I40E_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
	I40E_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
	I40E_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(32),
	I40E_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
	I40E_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
	I40E_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),

	/* IPv4 --> IPv6 */
	I40E_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
	I40E_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
	I40E_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(39),
	I40E_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
	I40E_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
	I40E_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),

	/* IPv4 --> GRE/NAT */
	I40E_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),

	/* IPv4 --> GRE/NAT --> IPv4 */
	I40E_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
	I40E_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
	I40E_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(47),
	I40E_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
	I40E_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
	I40E_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),

	/* IPv4 --> GRE/NAT --> IPv6 */
	I40E_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
	I40E_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
	I40E_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(54),
	I40E_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
	I40E_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
	I40E_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),

	/* IPv4 --> GRE/NAT --> MAC */
	I40E_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),

	/* IPv4 --> GRE/NAT --> MAC --> IPv4 */
	I40E_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
	I40E_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
	I40E_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(62),
	I40E_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
	I40E_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
	I40E_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),

	/* IPv4 --> GRE/NAT -> MAC --> IPv6 */
	I40E_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
	I40E_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
	I40E_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(69),
	I40E_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
	I40E_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
	I40E_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),

	/* IPv4 --> GRE/NAT --> MAC/VLAN */
	I40E_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),

	/* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */
	I40E_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
	I40E_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
	I40E_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(77),
	I40E_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
	I40E_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
	I40E_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),

	/* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */
	I40E_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
	I40E_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
	I40E_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(84),
	I40E_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
	I40E_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
	I40E_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),

	/* Non Tunneled IPv6 */
	I40E_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(91),
	I40E_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4),
	I40E_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4),
	I40E_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4),

	/* IPv6 --> IPv4 */
	I40E_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
	I40E_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
	I40E_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(98),
	I40E_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
	I40E_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
	I40E_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),

	/* IPv6 --> IPv6 */
	I40E_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
	I40E_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
	I40E_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(105),
	I40E_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
	I40E_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
	I40E_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),

	/* IPv6 --> GRE/NAT */
	I40E_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),

	/* IPv6 --> GRE/NAT -> IPv4 */
	I40E_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
	I40E_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
	I40E_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(113),
	I40E_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
	I40E_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
	I40E_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),

	/* IPv6 --> GRE/NAT -> IPv6 */
	I40E_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
	I40E_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
	I40E_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(120),
	I40E_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
	I40E_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
	I40E_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),

	/* IPv6 --> GRE/NAT -> MAC */
	I40E_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),

	/* IPv6 --> GRE/NAT -> MAC -> IPv4 */
	I40E_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
	I40E_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
	I40E_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(128),
	I40E_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
	I40E_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
	I40E_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),

	/* IPv6 --> GRE/NAT -> MAC -> IPv6 */
	I40E_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
	I40E_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
	I40E_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(135),
	I40E_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
	I40E_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
	I40E_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),

	/* IPv6 --> GRE/NAT -> MAC/VLAN */
	I40E_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),

	/* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */
	I40E_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
	I40E_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
	I40E_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(143),
	I40E_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
	I40E_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
	I40E_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),

	/* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */
	I40E_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
	I40E_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
	I40E_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(150),
	I40E_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
	I40E_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
	I40E_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),

	/* unused entries */
	[154 ... 255] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 }
};
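/*
 * Usage sketch (illustrative only, not part of the original file): decoding
 * a hardware ptype with the table above.  The .known and .outer_ip checks
 * follow the "Typical work flow" comment; the .inner_prot field name is an
 * assumption based on the I40E_PTT() initializer order.
 *
 *	struct i40e_rx_ptype_decoded decoded = i40e_ptype_lookup[ptype];
 *
 *	if (!decoded.known)
 *		return;				// unknown packet type
 *	if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
 *	    decoded.inner_prot == I40E_RX_PTYPE_INNER_PROT_TCP)
 *		...;				// e.g. ptype 26: IPv4/TCP
 */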
/**
 * i40e_init_shared_code - Initialize the shared code
 * @hw: pointer to hardware structure
 *
 * This assigns the MAC type and PHY code and inits the NVM.
 * Does not touch the hardware. This function must be called prior to any
 * other function in the shared code. The i40e_hw structure should be
 * memset to 0 prior to calling this function. The following fields in
 * hw structure should be filled in prior to calling this function:
 * hw_addr, back, device_id, vendor_id, subsystem_device_id,
 * subsystem_vendor_id, and revision_id
 **/
i40e_status i40e_init_shared_code(struct i40e_hw *hw)
{
	i40e_status status = 0;
	u32 port, ari, func_rid;

	i40e_set_mac_type(hw);

	switch (hw->mac.type) {
	case I40E_MAC_XL710:
	case I40E_MAC_X722:
		break;
	default:
		return I40E_ERR_DEVICE_NOT_SUPPORTED;
	}

	hw->phy.get_link_info = true;

	/* Determine port number and PF number */
	port = (rd32(hw, I40E_PFGEN_PORTNUM) & I40E_PFGEN_PORTNUM_PORT_NUM_MASK)
					   >> I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT;
	hw->port = (u8)port;
	ari = (rd32(hw, I40E_GLPCI_CAPSUP) & I40E_GLPCI_CAPSUP_ARI_EN_MASK) >>
						 I40E_GLPCI_CAPSUP_ARI_EN_SHIFT;
	func_rid = rd32(hw, I40E_PF_FUNC_RID);
	if (ari)
		hw->pf_id = (u8)(func_rid & 0xff);
	else
		hw->pf_id = (u8)(func_rid & 0x7);

	status = i40e_init_nvm(hw);
	return status;
}
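/*
 * Usage sketch (illustrative only, not part of the original file): the
 * kernel-doc above describes the calling contract; at probe time a caller
 * would do roughly the following before using any other shared-code helper.
 * The mapping and back-pointer names are hypothetical.
 *
 *	memset(hw, 0, sizeof(*hw));
 *	hw->hw_addr = ioremapped_bar0;		// hypothetical BAR0 mapping
 *	hw->back = pf;				// opaque driver back-pointer
 *	hw->vendor_id = pdev->vendor;
 *	hw->device_id = pdev->device;
 *	hw->subsystem_vendor_id = pdev->subsystem_vendor;
 *	hw->subsystem_device_id = pdev->subsystem_device;
 *	hw->revision_id = pdev->revision;
 *
 *	status = i40e_init_shared_code(hw);
 *	if (!status)
 *		status = i40e_get_mac_addr(hw, hw->mac.addr);
 */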
/**
 * i40e_aq_mac_address_read - Retrieve the MAC addresses
 * @hw: pointer to the hw struct
 * @flags: a return indicator of what addresses were added to the addr store
 * @addrs: the requestor's mac addr store
 * @cmd_details: pointer to command details structure or NULL
 **/
static i40e_status i40e_aq_mac_address_read(struct i40e_hw *hw,
					    u16 *flags,
					    struct i40e_aqc_mac_address_read_data *addrs,
					    struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_mac_address_read *cmd_data =
		(struct i40e_aqc_mac_address_read *)&desc.params.raw;
	i40e_status status;

	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_mac_address_read);
	desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF);

	status = i40e_asq_send_command(hw, &desc, addrs,
				       sizeof(*addrs), cmd_details);
	*flags = le16_to_cpu(cmd_data->command_flags);

	return status;
}

/**
 * i40e_aq_mac_address_write - Change the MAC addresses
 * @hw: pointer to the hw struct
 * @flags: indicates which MAC to be written
 * @mac_addr: address to write
 * @cmd_details: pointer to command details structure or NULL
 **/
i40e_status i40e_aq_mac_address_write(struct i40e_hw *hw,
				      u16 flags, u8 *mac_addr,
				      struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_mac_address_write *cmd_data =
		(struct i40e_aqc_mac_address_write *)&desc.params.raw;
	i40e_status status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_mac_address_write);
	cmd_data->command_flags = cpu_to_le16(flags);
	cmd_data->mac_sah = cpu_to_le16((u16)mac_addr[0] << 8 | mac_addr[1]);
	cmd_data->mac_sal = cpu_to_le32(((u32)mac_addr[2] << 24) |
					((u32)mac_addr[3] << 16) |
					((u32)mac_addr[4] << 8) |
					mac_addr[5]);

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	return status;
}

/**
 * i40e_get_mac_addr - get MAC address
 * @hw: pointer to the HW structure
 * @mac_addr: pointer to MAC address
 *
 * Reads the adapter's MAC address from register
 **/
i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
{
	struct i40e_aqc_mac_address_read_data addrs;
	i40e_status status;
	u16 flags = 0;

	status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);

	if (flags & I40E_AQC_LAN_ADDR_VALID)
		ether_addr_copy(mac_addr, addrs.pf_lan_mac);

	return status;
}

/**
 * i40e_get_port_mac_addr - get Port MAC address
 * @hw: pointer to the HW structure
 * @mac_addr: pointer to Port MAC address
 *
 * Reads the adapter's Port MAC address
 **/
i40e_status i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
{
	struct i40e_aqc_mac_address_read_data addrs;
	i40e_status status;
	u16 flags = 0;

	status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);
	if (status)
		return status;

	if (flags & I40E_AQC_PORT_ADDR_VALID)
		ether_addr_copy(mac_addr, addrs.port_mac);
	else
		status = I40E_ERR_INVALID_MAC_ADDR;

	return status;
}

/**
 * i40e_pre_tx_queue_cfg - pre tx queue configure
 * @hw: pointer to the HW structure
 * @queue: target PF queue index
 * @enable: state change request
 *
 * Handles hw requirement to indicate intention to enable
 * or disable target queue.
 **/
void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable)
{
	u32 abs_queue_idx = hw->func_caps.base_queue + queue;
	u32 reg_block = 0;
	u32 reg_val;

	if (abs_queue_idx >= 128) {
		reg_block = abs_queue_idx / 128;
		abs_queue_idx %= 128;
	}

	reg_val = rd32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block));
	reg_val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;
	reg_val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);

	if (enable)
		reg_val |= I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK;
	else
		reg_val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK;

	wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), reg_val);
}

/**
 * i40e_read_pba_string - Reads part number string from EEPROM
 * @hw: pointer to hardware structure
 * @pba_num: stores the part number string from the EEPROM
 * @pba_num_size: part number string buffer length
 *
 * Reads the part number string from the EEPROM.
 **/
i40e_status i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num,
				 u32 pba_num_size)
{
	i40e_status status = 0;
	u16 pba_word = 0;
	u16 pba_size = 0;
	u16 pba_ptr = 0;
	u16 i = 0;

	status = i40e_read_nvm_word(hw, I40E_SR_PBA_FLAGS, &pba_word);
	if (status || (pba_word != 0xFAFA)) {
		hw_dbg(hw, "Failed to read PBA flags or flag is invalid.\n");
		return status;
	}

	status = i40e_read_nvm_word(hw, I40E_SR_PBA_BLOCK_PTR, &pba_ptr);
	if (status) {
		hw_dbg(hw, "Failed to read PBA Block pointer.\n");
		return status;
	}

	status = i40e_read_nvm_word(hw, pba_ptr, &pba_size);
	if (status) {
		hw_dbg(hw, "Failed to read PBA Block size.\n");
		return status;
	}

	/* Subtract one to get PBA word count (PBA Size word is included in
	 * total size)
	 */
	pba_size--;
	if (pba_num_size < (((u32)pba_size * 2) + 1)) {
		hw_dbg(hw, "Buffer too small for PBA data.\n");
		return I40E_ERR_PARAM;
	}

	for (i = 0; i < pba_size; i++) {
		status = i40e_read_nvm_word(hw, (pba_ptr + 1) + i, &pba_word);
		if (status) {
			hw_dbg(hw, "Failed to read PBA Block word %d.\n", i);
			return status;
		}

		pba_num[(i * 2)] = (pba_word >> 8) & 0xFF;
		pba_num[(i * 2) + 1] = pba_word & 0xFF;
	}
	pba_num[(pba_size * 2)] = '\0';

	return status;
}
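/*
 * Usage sketch (illustrative only, not part of the original file): the caller
 * must provide a buffer of at least (2 * PBA word count + 1) bytes so the
 * terminating '\0' fits; the 32-byte size below is an assumption made for
 * the example.
 *
 *	u8 pba_str[32];
 *	i40e_status status;
 *
 *	status = i40e_read_pba_string(hw, pba_str, sizeof(pba_str));
 *	if (!status)
 *		hw_dbg(hw, "PBA: %s\n", pba_str);
 */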
/**
 * i40e_get_media_type - Gets media type
 * @hw: pointer to the hardware structure
 **/
static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
{
	enum i40e_media_type media;

	switch (hw->phy.link_info.phy_type) {
	case I40E_PHY_TYPE_10GBASE_SR:
	case I40E_PHY_TYPE_10GBASE_LR:
	case I40E_PHY_TYPE_1000BASE_SX:
	case I40E_PHY_TYPE_1000BASE_LX:
	case I40E_PHY_TYPE_40GBASE_SR4:
	case I40E_PHY_TYPE_40GBASE_LR4:
	case I40E_PHY_TYPE_25GBASE_LR:
	case I40E_PHY_TYPE_25GBASE_SR:
		media = I40E_MEDIA_TYPE_FIBER;
		break;
	case I40E_PHY_TYPE_100BASE_TX:
	case I40E_PHY_TYPE_1000BASE_T:
	case I40E_PHY_TYPE_2_5GBASE_T_LINK_STATUS:
	case I40E_PHY_TYPE_5GBASE_T_LINK_STATUS:
	case I40E_PHY_TYPE_10GBASE_T:
		media = I40E_MEDIA_TYPE_BASET;
		break;
	case I40E_PHY_TYPE_10GBASE_CR1_CU:
	case I40E_PHY_TYPE_40GBASE_CR4_CU:
	case I40E_PHY_TYPE_10GBASE_CR1:
	case I40E_PHY_TYPE_40GBASE_CR4:
	case I40E_PHY_TYPE_10GBASE_SFPP_CU:
	case I40E_PHY_TYPE_40GBASE_AOC:
	case I40E_PHY_TYPE_10GBASE_AOC:
	case I40E_PHY_TYPE_25GBASE_CR:
	case I40E_PHY_TYPE_25GBASE_AOC:
	case I40E_PHY_TYPE_25GBASE_ACC:
		media = I40E_MEDIA_TYPE_DA;
		break;
	case I40E_PHY_TYPE_1000BASE_KX:
	case I40E_PHY_TYPE_10GBASE_KX4:
	case I40E_PHY_TYPE_10GBASE_KR:
	case I40E_PHY_TYPE_40GBASE_KR4:
	case I40E_PHY_TYPE_20GBASE_KR2:
	case I40E_PHY_TYPE_25GBASE_KR:
		media = I40E_MEDIA_TYPE_BACKPLANE;
		break;
	case I40E_PHY_TYPE_SGMII:
	case I40E_PHY_TYPE_XAUI:
	case I40E_PHY_TYPE_XFI:
	case I40E_PHY_TYPE_XLAUI:
	case I40E_PHY_TYPE_XLPPI:
	default:
		media = I40E_MEDIA_TYPE_UNKNOWN;
		break;
	}

	return media;
}

/**
 * i40e_poll_globr - Poll for Global Reset completion
 * @hw: pointer to the hardware structure
 * @retry_limit: how many times to retry before failure
 **/
static i40e_status i40e_poll_globr(struct i40e_hw *hw,
				   u32 retry_limit)
{
	u32 cnt, reg = 0;

	for (cnt = 0; cnt < retry_limit; cnt++) {
		reg = rd32(hw, I40E_GLGEN_RSTAT);
		if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
			return 0;
		msleep(100);
	}

	hw_dbg(hw, "Global reset failed.\n");
	hw_dbg(hw, "I40E_GLGEN_RSTAT = 0x%x\n", reg);

	return I40E_ERR_RESET_FAILED;
}

#define I40E_PF_RESET_WAIT_COUNT_A0	200
#define I40E_PF_RESET_WAIT_COUNT	200
/**
 * i40e_pf_reset - Reset the PF
 * @hw: pointer to the hardware structure
 *
 * Assuming someone else has triggered a global reset,
 * assure the global reset is complete and then reset the PF
 **/
i40e_status i40e_pf_reset(struct i40e_hw *hw)
{
	u32 cnt = 0;
	u32 cnt1 = 0;
	u32 reg = 0;
	u32 grst_del;

	/* Poll for Global Reset steady state in case of recent GRST.
	 * The grst delay value is in 100ms units, and we'll wait a
	 * couple counts longer to be sure we don't just miss the end.
	 */
	grst_del = (rd32(hw, I40E_GLGEN_RSTCTL) &
		    I40E_GLGEN_RSTCTL_GRSTDEL_MASK) >>
		    I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;

	/* It can take up to 15 secs for GRST steady state.
	 * Bump it to 16 secs max to be safe.
	 */
	grst_del = grst_del * 20;

	for (cnt = 0; cnt < grst_del; cnt++) {
		reg = rd32(hw, I40E_GLGEN_RSTAT);
		if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
			break;
		msleep(100);
	}
	if (reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
		hw_dbg(hw, "Global reset polling failed to complete.\n");
		return I40E_ERR_RESET_FAILED;
	}

	/* Now Wait for the FW to be ready */
	for (cnt1 = 0; cnt1 < I40E_PF_RESET_WAIT_COUNT; cnt1++) {
		reg = rd32(hw, I40E_GLNVM_ULD);
		reg &= (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
			I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK);
		if (reg == (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
			    I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK)) {
			hw_dbg(hw, "Core and Global modules ready %d\n", cnt1);
			break;
		}
		usleep_range(10000, 20000);
	}
	if (!(reg & (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
		     I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))) {
		hw_dbg(hw, "wait for FW Reset complete timedout\n");
		hw_dbg(hw, "I40E_GLNVM_ULD = 0x%x\n", reg);
		return I40E_ERR_RESET_FAILED;
	}

	/* If there was a Global Reset in progress when we got here,
	 * we don't need to do the PF Reset
	 */
	if (!cnt) {
		u32 reg2 = 0;
		if (hw->revision_id == 0)
			cnt = I40E_PF_RESET_WAIT_COUNT_A0;
		else
			cnt = I40E_PF_RESET_WAIT_COUNT;
		reg = rd32(hw, I40E_PFGEN_CTRL);
		wr32(hw, I40E_PFGEN_CTRL,
		     (reg | I40E_PFGEN_CTRL_PFSWR_MASK));
		for (; cnt; cnt--) {
			reg = rd32(hw, I40E_PFGEN_CTRL);
			if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK))
				break;
			reg2 = rd32(hw, I40E_GLGEN_RSTAT);
			if (reg2 & I40E_GLGEN_RSTAT_DEVSTATE_MASK)
				break;
			usleep_range(1000, 2000);
		}
		if (reg2 & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
			if (i40e_poll_globr(hw, grst_del))
				return I40E_ERR_RESET_FAILED;
		} else if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) {
			hw_dbg(hw, "PF reset polling failed to complete.\n");
			return I40E_ERR_RESET_FAILED;
		}
	}

	i40e_clear_pxe_mode(hw);

	return 0;
}

/**
 * i40e_clear_hw - clear out any left over hw state
 * @hw: pointer to the hw struct
 *
 * Clear queues and interrupts, typically called at init time,
 * but after the capabilities have been found so we know how many
 * queues and msix vectors have been allocated.
 **/
void i40e_clear_hw(struct i40e_hw *hw)
{
	u32 num_queues, base_queue;
	u32 num_pf_int;
	u32 num_vf_int;
	u32 num_vfs;
	u32 i, j;
	u32 val;
	u32 eol = 0x7ff;

	/* get number of interrupts, queues, and VFs */
	val = rd32(hw, I40E_GLPCI_CNF2);
	num_pf_int = (val & I40E_GLPCI_CNF2_MSI_X_PF_N_MASK) >>
		     I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT;
	num_vf_int = (val & I40E_GLPCI_CNF2_MSI_X_VF_N_MASK) >>
		     I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT;

	val = rd32(hw, I40E_PFLAN_QALLOC);
	base_queue = (val & I40E_PFLAN_QALLOC_FIRSTQ_MASK) >>
		     I40E_PFLAN_QALLOC_FIRSTQ_SHIFT;
	j = (val & I40E_PFLAN_QALLOC_LASTQ_MASK) >>
	    I40E_PFLAN_QALLOC_LASTQ_SHIFT;
	if (val & I40E_PFLAN_QALLOC_VALID_MASK)
		num_queues = (j - base_queue) + 1;
	else
		num_queues = 0;

	val = rd32(hw, I40E_PF_VT_PFALLOC);
	i = (val & I40E_PF_VT_PFALLOC_FIRSTVF_MASK) >>
	    I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT;
	j = (val & I40E_PF_VT_PFALLOC_LASTVF_MASK) >>
	    I40E_PF_VT_PFALLOC_LASTVF_SHIFT;
	if (val & I40E_PF_VT_PFALLOC_VALID_MASK)
		num_vfs = (j - i) + 1;
	else
		num_vfs = 0;

	/* stop all the interrupts */
	wr32(hw, I40E_PFINT_ICR0_ENA, 0);
	val = 0x3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
	for (i = 0; i < num_pf_int - 2; i++)
		wr32(hw, I40E_PFINT_DYN_CTLN(i), val);

	/* Set the FIRSTQ_INDX field to 0x7FF in PFINT_LNKLSTx */
	val = eol << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
	wr32(hw, I40E_PFINT_LNKLST0, val);
	for (i = 0; i < num_pf_int - 2; i++)
		wr32(hw, I40E_PFINT_LNKLSTN(i), val);
	val = eol << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT;
	for (i = 0; i < num_vfs; i++)
		wr32(hw, I40E_VPINT_LNKLST0(i), val);
	for (i = 0; i < num_vf_int - 2; i++)
		wr32(hw, I40E_VPINT_LNKLSTN(i), val);

	/* warn the HW of the coming Tx disables */
	for (i = 0; i < num_queues; i++) {
		u32 abs_queue_idx = base_queue + i;
		u32 reg_block = 0;

		if (abs_queue_idx >= 128) {
			reg_block = abs_queue_idx / 128;
			abs_queue_idx %= 128;
		}

		val = rd32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block));
		val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;
		val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
		val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK;

		wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), val);
	}
	udelay(400);

	/* stop all the queues */
	for (i = 0; i < num_queues; i++) {
		wr32(hw, I40E_QINT_TQCTL(i), 0);
		wr32(hw, I40E_QTX_ENA(i), 0);
		wr32(hw, I40E_QINT_RQCTL(i), 0);
		wr32(hw, I40E_QRX_ENA(i), 0);
	}

	/* short wait for all queue disables to settle */
	udelay(50);
}

/**
 * i40e_clear_pxe_mode - clear pxe operations mode
 * @hw: pointer to the hw struct
 *
 * Make sure all PXE mode settings are cleared, including things
 * like descriptor fetch/write-back mode.
 **/
void i40e_clear_pxe_mode(struct i40e_hw *hw)
{
	u32 reg;

	if (i40e_check_asq_alive(hw))
		i40e_aq_clear_pxe_mode(hw, NULL);

	/* Clear single descriptor fetch/write-back mode */
	reg = rd32(hw, I40E_GLLAN_RCTL_0);

	if (hw->revision_id == 0) {
		/* As a work around clear PXE_MODE instead of setting it */
		wr32(hw, I40E_GLLAN_RCTL_0, (reg & (~I40E_GLLAN_RCTL_0_PXE_MODE_MASK)));
	} else {
		wr32(hw, I40E_GLLAN_RCTL_0, (reg | I40E_GLLAN_RCTL_0_PXE_MODE_MASK));
	}
}

/**
 * i40e_led_is_mine - helper to find matching led
 * @hw: pointer to the hw struct
 * @idx: index into GPIO registers
 *
 * returns: 0 if no match, otherwise the value of the GPIO_CTL register
 */
static u32 i40e_led_is_mine(struct i40e_hw *hw, int idx)
{
	u32 gpio_val = 0;
	u32 port;

	if (!I40E_IS_X710TL_DEVICE(hw->device_id) &&
	    !hw->func_caps.led[idx])
		return 0;
	gpio_val = rd32(hw, I40E_GLGEN_GPIO_CTL(idx));
	port = (gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK) >>
		I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT;

	/* if PRT_NUM_NA is 1 then this LED is not port specific, OR
	 * if it is not our port then ignore
	 */
	if ((gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_MASK) ||
	    (port != hw->port))
		return 0;

	return gpio_val;
}

#define I40E_FW_LED		BIT(4)
#define I40E_LED_MODE_VALID	(I40E_GLGEN_GPIO_CTL_LED_MODE_MASK >> \
				 I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT)

#define I40E_LED0		22

#define I40E_PIN_FUNC_SDP	0x0
#define I40E_PIN_FUNC_LED	0x1

/**
 * i40e_led_get - return current on/off mode
 * @hw: pointer to the hw struct
 *
 * The value returned is the 'mode' field as defined in the
 * GPIO register definitions: 0x0 = off, 0xf = on, and other
 * values are variations of possible behaviors relating to
 * blink, link, and wire.
 **/
u32 i40e_led_get(struct i40e_hw *hw)
{
	u32 mode = 0;
	int i;

	/* as per the documentation GPIO 22-29 are the LED
	 * GPIO pins named LED0..LED7
	 */
	for (i = I40E_LED0; i <= I40E_GLGEN_GPIO_CTL_MAX_INDEX; i++) {
		u32 gpio_val = i40e_led_is_mine(hw, i);

		if (!gpio_val)
			continue;

		mode = (gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK) >>
			I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT;
		break;
	}

	return mode;
}
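/*
 * Usage sketch (illustrative only, not part of the original file): a typical
 * "identify adapter" sequence saves the current mode, turns the LED on with
 * blink, and later restores the saved mode with blink disabled, as the note
 * in the i40e_led_set() kernel-doc below requires.
 *
 *	u32 orig_mode = i40e_led_get(hw);
 *
 *	i40e_led_set(hw, 0xf, true);		// LED on, blinking
 *	// ... identify period ...
 *	i40e_led_set(hw, orig_mode, false);	// restore, blink off
 */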
/**
 * i40e_led_set - set new on/off mode
 * @hw: pointer to the hw struct
 * @mode: 0=off, 0xf=on (else see manual for mode details)
 * @blink: true if the LED should blink when on, false if steady
 *
 * if this function is used to turn on the blink it should
 * be used to disable the blink when restoring the original state.
 **/
void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
{
	int i;

	if (mode & ~I40E_LED_MODE_VALID) {
		hw_dbg(hw, "invalid mode passed in %X\n", mode);
		return;
	}

	/* as per the documentation GPIO 22-29 are the LED
	 * GPIO pins named LED0..LED7
	 */
	for (i = I40E_LED0; i <= I40E_GLGEN_GPIO_CTL_MAX_INDEX; i++) {
		u32 gpio_val = i40e_led_is_mine(hw, i);

		if (!gpio_val)
			continue;

		if (I40E_IS_X710TL_DEVICE(hw->device_id)) {
			u32 pin_func = 0;

			if (mode & I40E_FW_LED)
				pin_func = I40E_PIN_FUNC_SDP;
			else
				pin_func = I40E_PIN_FUNC_LED;

			gpio_val &= ~I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK;
			gpio_val |= ((pin_func <<
				     I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT) &
				     I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK);
		}
		gpio_val &= ~I40E_GLGEN_GPIO_CTL_LED_MODE_MASK;
		/* this & is a bit of paranoia, but serves as a range check */
		gpio_val |= ((mode << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) &
			     I40E_GLGEN_GPIO_CTL_LED_MODE_MASK);

		if (blink)
			gpio_val |= BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
		else
			gpio_val &= ~BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);

		wr32(hw, I40E_GLGEN_GPIO_CTL(i), gpio_val);
		break;
	}
}

/* Admin command wrappers */

/**
 * i40e_aq_get_phy_capabilities
 * @hw: pointer to the hw struct
 * @abilities: structure for PHY capabilities to be filled
 * @qualified_modules: report Qualified Modules
 * @report_init: report init capabilities (active are default)
 * @cmd_details: pointer to command details structure or NULL
 *
 * Returns the various PHY abilities supported on the Port.
 **/
i40e_status i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
					 bool qualified_modules, bool report_init,
					 struct i40e_aq_get_phy_abilities_resp *abilities,
					 struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	i40e_status status;
	u16 abilities_size = sizeof(struct i40e_aq_get_phy_abilities_resp);
	u16 max_delay = I40E_MAX_PHY_TIMEOUT, total_delay = 0;

	if (!abilities)
		return I40E_ERR_PARAM;

	do {
		i40e_fill_default_direct_cmd_desc(&desc,
						  i40e_aqc_opc_get_phy_abilities);

		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
		if (abilities_size > I40E_AQ_LARGE_BUF)
			desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);

		if (qualified_modules)
			desc.params.external.param0 |=
			cpu_to_le32(I40E_AQ_PHY_REPORT_QUALIFIED_MODULES);

		if (report_init)
			desc.params.external.param0 |=
			cpu_to_le32(I40E_AQ_PHY_REPORT_INITIAL_VALUES);

		status = i40e_asq_send_command(hw, &desc, abilities,
					       abilities_size, cmd_details);

		switch (hw->aq.asq_last_status) {
		case I40E_AQ_RC_EIO:
			status = I40E_ERR_UNKNOWN_PHY;
			break;
		case I40E_AQ_RC_EAGAIN:
			usleep_range(1000, 2000);
			total_delay++;
			status = I40E_ERR_TIMEOUT;
			break;
		/* also covers I40E_AQ_RC_OK */
		default:
			break;
		}

	} while ((hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN) &&
		 (total_delay < max_delay));

	if (status)
		return status;

	if (report_init) {
		if (hw->mac.type == I40E_MAC_XL710 &&
		    hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
		    hw->aq.api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710) {
			status = i40e_aq_get_link_info(hw, true, NULL, NULL);
		} else {
			hw->phy.phy_types = le32_to_cpu(abilities->phy_type);
			hw->phy.phy_types |=
					((u64)abilities->phy_type_ext << 32);
		}
	}

	return status;
}

/**
 * i40e_aq_set_phy_config
 * @hw: pointer to the hw struct
 * @config: structure with PHY configuration to be set
 * @cmd_details: pointer to command details structure or NULL
 *
 * Set the various PHY configuration parameters
 * supported on the Port. One or more of the Set PHY config parameters may be
 * ignored in an MFP mode as the PF may not have the privilege to set some
 * of the PHY Config parameters. This status will be indicated by the
 * command response.
 **/
enum i40e_status_code i40e_aq_set_phy_config(struct i40e_hw *hw,
				struct i40e_aq_set_phy_config *config,
				struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aq_set_phy_config *cmd =
		(struct i40e_aq_set_phy_config *)&desc.params.raw;
	enum i40e_status_code status;

	if (!config)
		return I40E_ERR_PARAM;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_set_phy_config);

	*cmd = *config;

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	return status;
}

static noinline_for_stack enum i40e_status_code
i40e_set_fc_status(struct i40e_hw *hw,
		   struct i40e_aq_get_phy_abilities_resp *abilities,
		   bool atomic_restart)
{
	struct i40e_aq_set_phy_config config;
	enum i40e_fc_mode fc_mode = hw->fc.requested_mode;
	u8 pause_mask = 0x0;

	switch (fc_mode) {
	case I40E_FC_FULL:
		pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_TX;
		pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_RX;
		break;
	case I40E_FC_RX_PAUSE:
		pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_RX;
		break;
	case I40E_FC_TX_PAUSE:
		pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_TX;
		break;
	default:
		break;
	}

	memset(&config, 0, sizeof(struct i40e_aq_set_phy_config));
	/* clear the old pause settings */
	config.abilities = abilities->abilities & ~(I40E_AQ_PHY_FLAG_PAUSE_TX) &
			   ~(I40E_AQ_PHY_FLAG_PAUSE_RX);
	/* set the new abilities */
	config.abilities |= pause_mask;
	/* If the abilities have changed, then set the new config */
	if (config.abilities == abilities->abilities)
		return 0;

	/* Auto restart link so settings take effect */
	if (atomic_restart)
		config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
	/* Copy over all the old settings */
	config.phy_type = abilities->phy_type;
	config.phy_type_ext = abilities->phy_type_ext;
	config.link_speed = abilities->link_speed;
	config.eee_capability = abilities->eee_capability;
	config.eeer = abilities->eeer_val;
	config.low_power_ctrl = abilities->d3_lpan;
	config.fec_config = abilities->fec_cfg_curr_mod_ext_info &
			    I40E_AQ_PHY_FEC_CONFIG_MASK;

	return i40e_aq_set_phy_config(hw, &config, NULL);
}
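/*
 * Usage sketch (illustrative only, not part of the original file): a caller
 * picks the requested flow control mode and lets i40e_set_fc() (below) apply
 * it, checking the aq_failures bits to see which step failed.
 *
 *	u8 aq_failures = 0;
 *	enum i40e_status_code status;
 *
 *	hw->fc.requested_mode = I40E_FC_FULL;
 *	status = i40e_set_fc(hw, &aq_failures, true);
 *	if (aq_failures & I40E_SET_FC_AQ_FAIL_GET)
 *		...;	// get_phy_capabilities failed
 *	if (aq_failures & I40E_SET_FC_AQ_FAIL_SET)
 *		...;	// set_phy_config failed
 *	if (aq_failures & I40E_SET_FC_AQ_FAIL_UPDATE)
 *		...;	// update_link_info failed
 */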
/**
 * i40e_set_fc
 * @hw: pointer to the hw struct
 * @aq_failures: buffer to return AdminQ failure information
 * @atomic_restart: whether to enable atomic link restart
 *
 * Set the requested flow control mode using set_phy_config.
 **/
enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
				  bool atomic_restart)
{
	struct i40e_aq_get_phy_abilities_resp abilities;
	enum i40e_status_code status;

	*aq_failures = 0x0;

	/* Get the current phy config */
	status = i40e_aq_get_phy_capabilities(hw, false, false, &abilities,
					      NULL);
	if (status) {
		*aq_failures |= I40E_SET_FC_AQ_FAIL_GET;
		return status;
	}

	status = i40e_set_fc_status(hw, &abilities, atomic_restart);
	if (status)
		*aq_failures |= I40E_SET_FC_AQ_FAIL_SET;

	/* Update the link info */
	status = i40e_update_link_info(hw);
	if (status) {
		/* Wait a little bit (on 40G cards it sometimes takes a really
		 * long time for link to come back from the atomic reset)
		 * and try once more
		 */
		msleep(1000);
		status = i40e_update_link_info(hw);
	}
	if (status)
		*aq_failures |= I40E_SET_FC_AQ_FAIL_UPDATE;

	return status;
}

/**
 * i40e_aq_clear_pxe_mode
 * @hw: pointer to the hw struct
 * @cmd_details: pointer to command details structure or NULL
 *
 * Tell the firmware that the driver is taking over from PXE
 **/
i40e_status i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
				   struct i40e_asq_cmd_details *cmd_details)
{
	i40e_status status;
	struct i40e_aq_desc desc;
	struct i40e_aqc_clear_pxe *cmd =
		(struct i40e_aqc_clear_pxe *)&desc.params.raw;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_clear_pxe_mode);

	cmd->rx_cnt = 0x2;

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	wr32(hw, I40E_GLLAN_RCTL_0, 0x1);

	return status;
}

/**
 * i40e_aq_set_link_restart_an
 * @hw: pointer to the hw struct
 * @enable_link: if true: enable link, if false: disable link
 * @cmd_details: pointer to command details structure or NULL
 *
 * Sets up the link and restarts the Auto-Negotiation over the link.
 **/
i40e_status i40e_aq_set_link_restart_an(struct i40e_hw *hw,
					bool enable_link,
					struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_set_link_restart_an *cmd =
		(struct i40e_aqc_set_link_restart_an *)&desc.params.raw;
	i40e_status status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_set_link_restart_an);

	cmd->command = I40E_AQ_PHY_RESTART_AN;
	if (enable_link)
		cmd->command |= I40E_AQ_PHY_LINK_ENABLE;
	else
		cmd->command &= ~I40E_AQ_PHY_LINK_ENABLE;

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	return status;
}

/**
 * i40e_aq_get_link_info
 * @hw: pointer to the hw struct
 * @enable_lse: enable/disable LinkStatusEvent reporting
 * @link: pointer to link status structure - optional
 * @cmd_details: pointer to command details structure or NULL
 *
 * Returns the link status of the adapter.
1715 **/ 1716 i40e_status i40e_aq_get_link_info(struct i40e_hw *hw, 1717 bool enable_lse, struct i40e_link_status *link, 1718 struct i40e_asq_cmd_details *cmd_details) 1719 { 1720 struct i40e_aq_desc desc; 1721 struct i40e_aqc_get_link_status *resp = 1722 (struct i40e_aqc_get_link_status *)&desc.params.raw; 1723 struct i40e_link_status *hw_link_info = &hw->phy.link_info; 1724 i40e_status status; 1725 bool tx_pause, rx_pause; 1726 u16 command_flags; 1727 1728 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status); 1729 1730 if (enable_lse) 1731 command_flags = I40E_AQ_LSE_ENABLE; 1732 else 1733 command_flags = I40E_AQ_LSE_DISABLE; 1734 resp->command_flags = cpu_to_le16(command_flags); 1735 1736 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 1737 1738 if (status) 1739 goto aq_get_link_info_exit; 1740 1741 /* save off old link status information */ 1742 hw->phy.link_info_old = *hw_link_info; 1743 1744 /* update link status */ 1745 hw_link_info->phy_type = (enum i40e_aq_phy_type)resp->phy_type; 1746 hw->phy.media_type = i40e_get_media_type(hw); 1747 hw_link_info->link_speed = (enum i40e_aq_link_speed)resp->link_speed; 1748 hw_link_info->link_info = resp->link_info; 1749 hw_link_info->an_info = resp->an_info; 1750 hw_link_info->fec_info = resp->config & (I40E_AQ_CONFIG_FEC_KR_ENA | 1751 I40E_AQ_CONFIG_FEC_RS_ENA); 1752 hw_link_info->ext_info = resp->ext_info; 1753 hw_link_info->loopback = resp->loopback & I40E_AQ_LOOPBACK_MASK; 1754 hw_link_info->max_frame_size = le16_to_cpu(resp->max_frame_size); 1755 hw_link_info->pacing = resp->config & I40E_AQ_CONFIG_PACING_MASK; 1756 1757 /* update fc info */ 1758 tx_pause = !!(resp->an_info & I40E_AQ_LINK_PAUSE_TX); 1759 rx_pause = !!(resp->an_info & I40E_AQ_LINK_PAUSE_RX); 1760 if (tx_pause & rx_pause) 1761 hw->fc.current_mode = I40E_FC_FULL; 1762 else if (tx_pause) 1763 hw->fc.current_mode = I40E_FC_TX_PAUSE; 1764 else if (rx_pause) 1765 hw->fc.current_mode = I40E_FC_RX_PAUSE; 1766 else 1767 hw->fc.current_mode = I40E_FC_NONE; 1768 1769 if (resp->config & I40E_AQ_CONFIG_CRC_ENA) 1770 hw_link_info->crc_enable = true; 1771 else 1772 hw_link_info->crc_enable = false; 1773 1774 if (resp->command_flags & cpu_to_le16(I40E_AQ_LSE_IS_ENABLED)) 1775 hw_link_info->lse_enable = true; 1776 else 1777 hw_link_info->lse_enable = false; 1778 1779 if ((hw->mac.type == I40E_MAC_XL710) && 1780 (hw->aq.fw_maj_ver < 4 || (hw->aq.fw_maj_ver == 4 && 1781 hw->aq.fw_min_ver < 40)) && hw_link_info->phy_type == 0xE) 1782 hw_link_info->phy_type = I40E_PHY_TYPE_10GBASE_SFPP_CU; 1783 1784 if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE && 1785 hw->mac.type != I40E_MAC_X722) { 1786 __le32 tmp; 1787 1788 memcpy(&tmp, resp->link_type, sizeof(tmp)); 1789 hw->phy.phy_types = le32_to_cpu(tmp); 1790 hw->phy.phy_types |= ((u64)resp->link_type_ext << 32); 1791 } 1792 1793 /* save link status information */ 1794 if (link) 1795 *link = *hw_link_info; 1796 1797 /* flag cleared so helper functions don't call AQ again */ 1798 hw->phy.get_link_info = false; 1799 1800 aq_get_link_info_exit: 1801 return status; 1802 } 1803 1804 /** 1805 * i40e_aq_set_phy_int_mask 1806 * @hw: pointer to the hw struct 1807 * @mask: interrupt mask to be set 1808 * @cmd_details: pointer to command details structure or NULL 1809 * 1810 * Set link interrupt mask. 
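 *
 * A possible caller sketch; the I40E_AQ_EVENT_* names are assumed to come
 * from the shared AdminQ definitions rather than this file:
 *
 *	u16 mask = I40E_AQ_EVENT_LINK_UPDOWN | I40E_AQ_EVENT_MODULE_QUAL_FAIL;
 *
 *	if (i40e_aq_set_phy_int_mask(hw, mask, NULL))
 *		hw_dbg(hw, "failed to set PHY event mask\n");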
1811 **/ 1812 i40e_status i40e_aq_set_phy_int_mask(struct i40e_hw *hw, 1813 u16 mask, 1814 struct i40e_asq_cmd_details *cmd_details) 1815 { 1816 struct i40e_aq_desc desc; 1817 struct i40e_aqc_set_phy_int_mask *cmd = 1818 (struct i40e_aqc_set_phy_int_mask *)&desc.params.raw; 1819 i40e_status status; 1820 1821 i40e_fill_default_direct_cmd_desc(&desc, 1822 i40e_aqc_opc_set_phy_int_mask); 1823 1824 cmd->event_mask = cpu_to_le16(mask); 1825 1826 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 1827 1828 return status; 1829 } 1830 1831 /** 1832 * i40e_aq_set_phy_debug 1833 * @hw: pointer to the hw struct 1834 * @cmd_flags: debug command flags 1835 * @cmd_details: pointer to command details structure or NULL 1836 * 1837 * Reset the external PHY. 1838 **/ 1839 i40e_status i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags, 1840 struct i40e_asq_cmd_details *cmd_details) 1841 { 1842 struct i40e_aq_desc desc; 1843 struct i40e_aqc_set_phy_debug *cmd = 1844 (struct i40e_aqc_set_phy_debug *)&desc.params.raw; 1845 i40e_status status; 1846 1847 i40e_fill_default_direct_cmd_desc(&desc, 1848 i40e_aqc_opc_set_phy_debug); 1849 1850 cmd->command_flags = cmd_flags; 1851 1852 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 1853 1854 return status; 1855 } 1856 1857 /** 1858 * i40e_is_aq_api_ver_ge 1859 * @aq: pointer to AdminQ info containing HW API version to compare 1860 * @maj: API major value 1861 * @min: API minor value 1862 * 1863 * Assert whether current HW API version is greater/equal than provided. 1864 **/ 1865 static bool i40e_is_aq_api_ver_ge(struct i40e_adminq_info *aq, u16 maj, 1866 u16 min) 1867 { 1868 return (aq->api_maj_ver > maj || 1869 (aq->api_maj_ver == maj && aq->api_min_ver >= min)); 1870 } 1871 1872 /** 1873 * i40e_aq_add_vsi 1874 * @hw: pointer to the hw struct 1875 * @vsi_ctx: pointer to a vsi context struct 1876 * @cmd_details: pointer to command details structure or NULL 1877 * 1878 * Add a VSI context to the hardware. 
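 *
 * Caller sketch (illustrative; the uplink SEID and the VSI type/connection
 * constants are assumed from the AdminQ definitions):
 *
 *	struct i40e_vsi_context ctx = {};
 *
 *	ctx.uplink_seid = uplink_seid;
 *	ctx.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
 *	ctx.flags = I40E_AQ_VSI_TYPE_PF;
 *	if (!i40e_aq_add_vsi(hw, &ctx, NULL))
 *		hw_dbg(hw, "added VSI %d (seid %d)\n",
 *		       ctx.vsi_number, ctx.seid);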
1879 **/ 1880 i40e_status i40e_aq_add_vsi(struct i40e_hw *hw, 1881 struct i40e_vsi_context *vsi_ctx, 1882 struct i40e_asq_cmd_details *cmd_details) 1883 { 1884 struct i40e_aq_desc desc; 1885 struct i40e_aqc_add_get_update_vsi *cmd = 1886 (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw; 1887 struct i40e_aqc_add_get_update_vsi_completion *resp = 1888 (struct i40e_aqc_add_get_update_vsi_completion *) 1889 &desc.params.raw; 1890 i40e_status status; 1891 1892 i40e_fill_default_direct_cmd_desc(&desc, 1893 i40e_aqc_opc_add_vsi); 1894 1895 cmd->uplink_seid = cpu_to_le16(vsi_ctx->uplink_seid); 1896 cmd->connection_type = vsi_ctx->connection_type; 1897 cmd->vf_id = vsi_ctx->vf_num; 1898 cmd->vsi_flags = cpu_to_le16(vsi_ctx->flags); 1899 1900 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); 1901 1902 status = i40e_asq_send_command_atomic(hw, &desc, &vsi_ctx->info, 1903 sizeof(vsi_ctx->info), 1904 cmd_details, true); 1905 1906 if (status) 1907 goto aq_add_vsi_exit; 1908 1909 vsi_ctx->seid = le16_to_cpu(resp->seid); 1910 vsi_ctx->vsi_number = le16_to_cpu(resp->vsi_number); 1911 vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used); 1912 vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free); 1913 1914 aq_add_vsi_exit: 1915 return status; 1916 } 1917 1918 /** 1919 * i40e_aq_set_default_vsi 1920 * @hw: pointer to the hw struct 1921 * @seid: vsi number 1922 * @cmd_details: pointer to command details structure or NULL 1923 **/ 1924 i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw, 1925 u16 seid, 1926 struct i40e_asq_cmd_details *cmd_details) 1927 { 1928 struct i40e_aq_desc desc; 1929 struct i40e_aqc_set_vsi_promiscuous_modes *cmd = 1930 (struct i40e_aqc_set_vsi_promiscuous_modes *) 1931 &desc.params.raw; 1932 i40e_status status; 1933 1934 i40e_fill_default_direct_cmd_desc(&desc, 1935 i40e_aqc_opc_set_vsi_promiscuous_modes); 1936 1937 cmd->promiscuous_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT); 1938 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT); 1939 cmd->seid = cpu_to_le16(seid); 1940 1941 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 1942 1943 return status; 1944 } 1945 1946 /** 1947 * i40e_aq_clear_default_vsi 1948 * @hw: pointer to the hw struct 1949 * @seid: vsi number 1950 * @cmd_details: pointer to command details structure or NULL 1951 **/ 1952 i40e_status i40e_aq_clear_default_vsi(struct i40e_hw *hw, 1953 u16 seid, 1954 struct i40e_asq_cmd_details *cmd_details) 1955 { 1956 struct i40e_aq_desc desc; 1957 struct i40e_aqc_set_vsi_promiscuous_modes *cmd = 1958 (struct i40e_aqc_set_vsi_promiscuous_modes *) 1959 &desc.params.raw; 1960 i40e_status status; 1961 1962 i40e_fill_default_direct_cmd_desc(&desc, 1963 i40e_aqc_opc_set_vsi_promiscuous_modes); 1964 1965 cmd->promiscuous_flags = cpu_to_le16(0); 1966 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT); 1967 cmd->seid = cpu_to_le16(seid); 1968 1969 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 1970 1971 return status; 1972 } 1973 1974 /** 1975 * i40e_aq_set_vsi_unicast_promiscuous 1976 * @hw: pointer to the hw struct 1977 * @seid: vsi number 1978 * @set: set unicast promiscuous enable/disable 1979 * @cmd_details: pointer to command details structure or NULL 1980 * @rx_only_promisc: flag to decide if egress traffic gets mirrored in promisc 1981 **/ 1982 i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw, 1983 u16 seid, bool set, 1984 struct i40e_asq_cmd_details *cmd_details, 1985 bool rx_only_promisc) 1986 { 1987 struct i40e_aq_desc desc; 1988 
struct i40e_aqc_set_vsi_promiscuous_modes *cmd = 1989 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; 1990 i40e_status status; 1991 u16 flags = 0; 1992 1993 i40e_fill_default_direct_cmd_desc(&desc, 1994 i40e_aqc_opc_set_vsi_promiscuous_modes); 1995 1996 if (set) { 1997 flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST; 1998 if (rx_only_promisc && i40e_is_aq_api_ver_ge(&hw->aq, 1, 5)) 1999 flags |= I40E_AQC_SET_VSI_PROMISC_RX_ONLY; 2000 } 2001 2002 cmd->promiscuous_flags = cpu_to_le16(flags); 2003 2004 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST); 2005 if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5)) 2006 cmd->valid_flags |= 2007 cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_RX_ONLY); 2008 2009 cmd->seid = cpu_to_le16(seid); 2010 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2011 2012 return status; 2013 } 2014 2015 /** 2016 * i40e_aq_set_vsi_multicast_promiscuous 2017 * @hw: pointer to the hw struct 2018 * @seid: vsi number 2019 * @set: set multicast promiscuous enable/disable 2020 * @cmd_details: pointer to command details structure or NULL 2021 **/ 2022 i40e_status i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw, 2023 u16 seid, bool set, struct i40e_asq_cmd_details *cmd_details) 2024 { 2025 struct i40e_aq_desc desc; 2026 struct i40e_aqc_set_vsi_promiscuous_modes *cmd = 2027 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; 2028 i40e_status status; 2029 u16 flags = 0; 2030 2031 i40e_fill_default_direct_cmd_desc(&desc, 2032 i40e_aqc_opc_set_vsi_promiscuous_modes); 2033 2034 if (set) 2035 flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST; 2036 2037 cmd->promiscuous_flags = cpu_to_le16(flags); 2038 2039 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_MULTICAST); 2040 2041 cmd->seid = cpu_to_le16(seid); 2042 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2043 2044 return status; 2045 } 2046 2047 /** 2048 * i40e_aq_set_vsi_mc_promisc_on_vlan 2049 * @hw: pointer to the hw struct 2050 * @seid: vsi number 2051 * @enable: set MAC L2 layer unicast promiscuous enable/disable for a given VLAN 2052 * @vid: The VLAN tag filter - capture any multicast packet with this VLAN tag 2053 * @cmd_details: pointer to command details structure or NULL 2054 **/ 2055 enum i40e_status_code i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw, 2056 u16 seid, bool enable, 2057 u16 vid, 2058 struct i40e_asq_cmd_details *cmd_details) 2059 { 2060 struct i40e_aq_desc desc; 2061 struct i40e_aqc_set_vsi_promiscuous_modes *cmd = 2062 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; 2063 enum i40e_status_code status; 2064 u16 flags = 0; 2065 2066 i40e_fill_default_direct_cmd_desc(&desc, 2067 i40e_aqc_opc_set_vsi_promiscuous_modes); 2068 2069 if (enable) 2070 flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST; 2071 2072 cmd->promiscuous_flags = cpu_to_le16(flags); 2073 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_MULTICAST); 2074 cmd->seid = cpu_to_le16(seid); 2075 cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID); 2076 2077 status = i40e_asq_send_command_atomic(hw, &desc, NULL, 0, 2078 cmd_details, true); 2079 2080 return status; 2081 } 2082 2083 /** 2084 * i40e_aq_set_vsi_uc_promisc_on_vlan 2085 * @hw: pointer to the hw struct 2086 * @seid: vsi number 2087 * @enable: set MAC L2 layer unicast promiscuous enable/disable for a given VLAN 2088 * @vid: The VLAN tag filter - capture any unicast packet with this VLAN tag 2089 * @cmd_details: pointer to command details structure or NULL 2090 **/ 2091 enum i40e_status_code 
i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw, 2092 u16 seid, bool enable, 2093 u16 vid, 2094 struct i40e_asq_cmd_details *cmd_details) 2095 { 2096 struct i40e_aq_desc desc; 2097 struct i40e_aqc_set_vsi_promiscuous_modes *cmd = 2098 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; 2099 enum i40e_status_code status; 2100 u16 flags = 0; 2101 2102 i40e_fill_default_direct_cmd_desc(&desc, 2103 i40e_aqc_opc_set_vsi_promiscuous_modes); 2104 2105 if (enable) { 2106 flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST; 2107 if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5)) 2108 flags |= I40E_AQC_SET_VSI_PROMISC_RX_ONLY; 2109 } 2110 2111 cmd->promiscuous_flags = cpu_to_le16(flags); 2112 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST); 2113 if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5)) 2114 cmd->valid_flags |= 2115 cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_RX_ONLY); 2116 cmd->seid = cpu_to_le16(seid); 2117 cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID); 2118 2119 status = i40e_asq_send_command_atomic(hw, &desc, NULL, 0, 2120 cmd_details, true); 2121 2122 return status; 2123 } 2124 2125 /** 2126 * i40e_aq_set_vsi_bc_promisc_on_vlan 2127 * @hw: pointer to the hw struct 2128 * @seid: vsi number 2129 * @enable: set broadcast promiscuous enable/disable for a given VLAN 2130 * @vid: The VLAN tag filter - capture any broadcast packet with this VLAN tag 2131 * @cmd_details: pointer to command details structure or NULL 2132 **/ 2133 i40e_status i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw, 2134 u16 seid, bool enable, u16 vid, 2135 struct i40e_asq_cmd_details *cmd_details) 2136 { 2137 struct i40e_aq_desc desc; 2138 struct i40e_aqc_set_vsi_promiscuous_modes *cmd = 2139 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; 2140 i40e_status status; 2141 u16 flags = 0; 2142 2143 i40e_fill_default_direct_cmd_desc(&desc, 2144 i40e_aqc_opc_set_vsi_promiscuous_modes); 2145 2146 if (enable) 2147 flags |= I40E_AQC_SET_VSI_PROMISC_BROADCAST; 2148 2149 cmd->promiscuous_flags = cpu_to_le16(flags); 2150 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST); 2151 cmd->seid = cpu_to_le16(seid); 2152 cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID); 2153 2154 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2155 2156 return status; 2157 } 2158 2159 /** 2160 * i40e_aq_set_vsi_broadcast 2161 * @hw: pointer to the hw struct 2162 * @seid: vsi number 2163 * @set_filter: true to set filter, false to clear filter 2164 * @cmd_details: pointer to command details structure or NULL 2165 * 2166 * Set or clear the broadcast promiscuous flag (filter) for a given VSI. 
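 *
 * For example (sketch only), enabling broadcast on a VSI and reporting the
 * AdminQ error string on failure might look like:
 *
 *	if (i40e_aq_set_vsi_broadcast(hw, vsi_seid, true, NULL))
 *		hw_dbg(hw, "set broadcast failed, aq_err %s\n",
 *		       i40e_aq_str(hw, hw->aq.asq_last_status));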
2167 **/ 2168 i40e_status i40e_aq_set_vsi_broadcast(struct i40e_hw *hw, 2169 u16 seid, bool set_filter, 2170 struct i40e_asq_cmd_details *cmd_details) 2171 { 2172 struct i40e_aq_desc desc; 2173 struct i40e_aqc_set_vsi_promiscuous_modes *cmd = 2174 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; 2175 i40e_status status; 2176 2177 i40e_fill_default_direct_cmd_desc(&desc, 2178 i40e_aqc_opc_set_vsi_promiscuous_modes); 2179 2180 if (set_filter) 2181 cmd->promiscuous_flags 2182 |= cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST); 2183 else 2184 cmd->promiscuous_flags 2185 &= cpu_to_le16(~I40E_AQC_SET_VSI_PROMISC_BROADCAST); 2186 2187 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST); 2188 cmd->seid = cpu_to_le16(seid); 2189 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2190 2191 return status; 2192 } 2193 2194 /** 2195 * i40e_aq_set_vsi_vlan_promisc - control the VLAN promiscuous setting 2196 * @hw: pointer to the hw struct 2197 * @seid: vsi number 2198 * @enable: set MAC L2 layer unicast promiscuous enable/disable for a given VLAN 2199 * @cmd_details: pointer to command details structure or NULL 2200 **/ 2201 i40e_status i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw, 2202 u16 seid, bool enable, 2203 struct i40e_asq_cmd_details *cmd_details) 2204 { 2205 struct i40e_aq_desc desc; 2206 struct i40e_aqc_set_vsi_promiscuous_modes *cmd = 2207 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; 2208 i40e_status status; 2209 u16 flags = 0; 2210 2211 i40e_fill_default_direct_cmd_desc(&desc, 2212 i40e_aqc_opc_set_vsi_promiscuous_modes); 2213 if (enable) 2214 flags |= I40E_AQC_SET_VSI_PROMISC_VLAN; 2215 2216 cmd->promiscuous_flags = cpu_to_le16(flags); 2217 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_VLAN); 2218 cmd->seid = cpu_to_le16(seid); 2219 2220 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2221 2222 return status; 2223 } 2224 2225 /** 2226 * i40e_aq_get_vsi_params - get VSI configuration info 2227 * @hw: pointer to the hw struct 2228 * @vsi_ctx: pointer to a vsi context struct 2229 * @cmd_details: pointer to command details structure or NULL 2230 **/ 2231 i40e_status i40e_aq_get_vsi_params(struct i40e_hw *hw, 2232 struct i40e_vsi_context *vsi_ctx, 2233 struct i40e_asq_cmd_details *cmd_details) 2234 { 2235 struct i40e_aq_desc desc; 2236 struct i40e_aqc_add_get_update_vsi *cmd = 2237 (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw; 2238 struct i40e_aqc_add_get_update_vsi_completion *resp = 2239 (struct i40e_aqc_add_get_update_vsi_completion *) 2240 &desc.params.raw; 2241 i40e_status status; 2242 2243 i40e_fill_default_direct_cmd_desc(&desc, 2244 i40e_aqc_opc_get_vsi_parameters); 2245 2246 cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid); 2247 2248 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 2249 2250 status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info, 2251 sizeof(vsi_ctx->info), NULL); 2252 2253 if (status) 2254 goto aq_get_vsi_params_exit; 2255 2256 vsi_ctx->seid = le16_to_cpu(resp->seid); 2257 vsi_ctx->vsi_number = le16_to_cpu(resp->vsi_number); 2258 vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used); 2259 vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free); 2260 2261 aq_get_vsi_params_exit: 2262 return status; 2263 } 2264 2265 /** 2266 * i40e_aq_update_vsi_params 2267 * @hw: pointer to the hw struct 2268 * @vsi_ctx: pointer to a vsi context struct 2269 * @cmd_details: pointer to command details structure or NULL 2270 * 2271 * Update a VSI context. 
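 *
 * The usual pattern is read-modify-write of the VSI context (sketch; the
 * VLAN section flag is assumed from the AdminQ definitions):
 *
 *	struct i40e_vsi_context ctx = {};
 *
 *	ctx.seid = vsi_seid;
 *	if (!i40e_aq_get_vsi_params(hw, &ctx, NULL)) {
 *		ctx.info.valid_sections |=
 *			cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
 *		i40e_aq_update_vsi_params(hw, &ctx, NULL);
 *	}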
2272 **/ 2273 i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw, 2274 struct i40e_vsi_context *vsi_ctx, 2275 struct i40e_asq_cmd_details *cmd_details) 2276 { 2277 struct i40e_aq_desc desc; 2278 struct i40e_aqc_add_get_update_vsi *cmd = 2279 (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw; 2280 struct i40e_aqc_add_get_update_vsi_completion *resp = 2281 (struct i40e_aqc_add_get_update_vsi_completion *) 2282 &desc.params.raw; 2283 i40e_status status; 2284 2285 i40e_fill_default_direct_cmd_desc(&desc, 2286 i40e_aqc_opc_update_vsi_parameters); 2287 cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid); 2288 2289 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); 2290 2291 status = i40e_asq_send_command_atomic(hw, &desc, &vsi_ctx->info, 2292 sizeof(vsi_ctx->info), 2293 cmd_details, true); 2294 2295 vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used); 2296 vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free); 2297 2298 return status; 2299 } 2300 2301 /** 2302 * i40e_aq_get_switch_config 2303 * @hw: pointer to the hardware structure 2304 * @buf: pointer to the result buffer 2305 * @buf_size: length of input buffer 2306 * @start_seid: seid to start for the report, 0 == beginning 2307 * @cmd_details: pointer to command details structure or NULL 2308 * 2309 * Fill the buf with switch configuration returned from AdminQ command 2310 **/ 2311 i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw, 2312 struct i40e_aqc_get_switch_config_resp *buf, 2313 u16 buf_size, u16 *start_seid, 2314 struct i40e_asq_cmd_details *cmd_details) 2315 { 2316 struct i40e_aq_desc desc; 2317 struct i40e_aqc_switch_seid *scfg = 2318 (struct i40e_aqc_switch_seid *)&desc.params.raw; 2319 i40e_status status; 2320 2321 i40e_fill_default_direct_cmd_desc(&desc, 2322 i40e_aqc_opc_get_switch_config); 2323 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 2324 if (buf_size > I40E_AQ_LARGE_BUF) 2325 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 2326 scfg->seid = cpu_to_le16(*start_seid); 2327 2328 status = i40e_asq_send_command(hw, &desc, buf, buf_size, cmd_details); 2329 *start_seid = le16_to_cpu(scfg->seid); 2330 2331 return status; 2332 } 2333 2334 /** 2335 * i40e_aq_set_switch_config 2336 * @hw: pointer to the hardware structure 2337 * @flags: bit flag values to set 2338 * @mode: cloud filter mode 2339 * @valid_flags: which bit flags to set 2340 * @mode: cloud filter mode 2341 * @cmd_details: pointer to command details structure or NULL 2342 * 2343 * Set switch configuration bits 2344 **/ 2345 enum i40e_status_code i40e_aq_set_switch_config(struct i40e_hw *hw, 2346 u16 flags, 2347 u16 valid_flags, u8 mode, 2348 struct i40e_asq_cmd_details *cmd_details) 2349 { 2350 struct i40e_aq_desc desc; 2351 struct i40e_aqc_set_switch_config *scfg = 2352 (struct i40e_aqc_set_switch_config *)&desc.params.raw; 2353 enum i40e_status_code status; 2354 2355 i40e_fill_default_direct_cmd_desc(&desc, 2356 i40e_aqc_opc_set_switch_config); 2357 scfg->flags = cpu_to_le16(flags); 2358 scfg->valid_flags = cpu_to_le16(valid_flags); 2359 scfg->mode = mode; 2360 if (hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) { 2361 scfg->switch_tag = cpu_to_le16(hw->switch_tag); 2362 scfg->first_tag = cpu_to_le16(hw->first_tag); 2363 scfg->second_tag = cpu_to_le16(hw->second_tag); 2364 } 2365 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2366 2367 return status; 2368 } 2369 2370 /** 2371 * i40e_aq_get_firmware_version 2372 * @hw: pointer to the hw struct 2373 * @fw_major_version: firmware major version 2374 * 
@fw_minor_version: firmware minor version 2375 * @fw_build: firmware build number 2376 * @api_major_version: major queue version 2377 * @api_minor_version: minor queue version 2378 * @cmd_details: pointer to command details structure or NULL 2379 * 2380 * Get the firmware version from the admin queue commands 2381 **/ 2382 i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw, 2383 u16 *fw_major_version, u16 *fw_minor_version, 2384 u32 *fw_build, 2385 u16 *api_major_version, u16 *api_minor_version, 2386 struct i40e_asq_cmd_details *cmd_details) 2387 { 2388 struct i40e_aq_desc desc; 2389 struct i40e_aqc_get_version *resp = 2390 (struct i40e_aqc_get_version *)&desc.params.raw; 2391 i40e_status status; 2392 2393 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_version); 2394 2395 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2396 2397 if (!status) { 2398 if (fw_major_version) 2399 *fw_major_version = le16_to_cpu(resp->fw_major); 2400 if (fw_minor_version) 2401 *fw_minor_version = le16_to_cpu(resp->fw_minor); 2402 if (fw_build) 2403 *fw_build = le32_to_cpu(resp->fw_build); 2404 if (api_major_version) 2405 *api_major_version = le16_to_cpu(resp->api_major); 2406 if (api_minor_version) 2407 *api_minor_version = le16_to_cpu(resp->api_minor); 2408 } 2409 2410 return status; 2411 } 2412 2413 /** 2414 * i40e_aq_send_driver_version 2415 * @hw: pointer to the hw struct 2416 * @dv: driver's major, minor version 2417 * @cmd_details: pointer to command details structure or NULL 2418 * 2419 * Send the driver version to the firmware 2420 **/ 2421 i40e_status i40e_aq_send_driver_version(struct i40e_hw *hw, 2422 struct i40e_driver_version *dv, 2423 struct i40e_asq_cmd_details *cmd_details) 2424 { 2425 struct i40e_aq_desc desc; 2426 struct i40e_aqc_driver_version *cmd = 2427 (struct i40e_aqc_driver_version *)&desc.params.raw; 2428 i40e_status status; 2429 u16 len; 2430 2431 if (dv == NULL) 2432 return I40E_ERR_PARAM; 2433 2434 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_driver_version); 2435 2436 desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD); 2437 cmd->driver_major_ver = dv->major_version; 2438 cmd->driver_minor_ver = dv->minor_version; 2439 cmd->driver_build_ver = dv->build_version; 2440 cmd->driver_subbuild_ver = dv->subbuild_version; 2441 2442 len = 0; 2443 while (len < sizeof(dv->driver_string) && 2444 (dv->driver_string[len] < 0x80) && 2445 dv->driver_string[len]) 2446 len++; 2447 status = i40e_asq_send_command(hw, &desc, dv->driver_string, 2448 len, cmd_details); 2449 2450 return status; 2451 } 2452 2453 /** 2454 * i40e_get_link_status - get status of the HW network link 2455 * @hw: pointer to the hw struct 2456 * @link_up: pointer to bool (true/false = linkup/linkdown) 2457 * 2458 * Variable link_up true if link is up, false if link is down. 
2459 * The variable link_up is invalid if returned value of status != 0 2460 * 2461 * Side effect: LinkStatusEvent reporting becomes enabled 2462 **/ 2463 i40e_status i40e_get_link_status(struct i40e_hw *hw, bool *link_up) 2464 { 2465 i40e_status status = 0; 2466 2467 if (hw->phy.get_link_info) { 2468 status = i40e_update_link_info(hw); 2469 2470 if (status) 2471 i40e_debug(hw, I40E_DEBUG_LINK, "get link failed: status %d\n", 2472 status); 2473 } 2474 2475 *link_up = hw->phy.link_info.link_info & I40E_AQ_LINK_UP; 2476 2477 return status; 2478 } 2479 2480 /** 2481 * i40e_update_link_info - update status of the HW network link 2482 * @hw: pointer to the hw struct 2483 **/ 2484 noinline_for_stack i40e_status i40e_update_link_info(struct i40e_hw *hw) 2485 { 2486 struct i40e_aq_get_phy_abilities_resp abilities; 2487 i40e_status status = 0; 2488 2489 status = i40e_aq_get_link_info(hw, true, NULL, NULL); 2490 if (status) 2491 return status; 2492 2493 /* extra checking needed to ensure link info to user is timely */ 2494 if ((hw->phy.link_info.link_info & I40E_AQ_MEDIA_AVAILABLE) && 2495 ((hw->phy.link_info.link_info & I40E_AQ_LINK_UP) || 2496 !(hw->phy.link_info_old.link_info & I40E_AQ_LINK_UP))) { 2497 status = i40e_aq_get_phy_capabilities(hw, false, false, 2498 &abilities, NULL); 2499 if (status) 2500 return status; 2501 2502 if (abilities.fec_cfg_curr_mod_ext_info & 2503 I40E_AQ_ENABLE_FEC_AUTO) 2504 hw->phy.link_info.req_fec_info = 2505 (I40E_AQ_REQUEST_FEC_KR | 2506 I40E_AQ_REQUEST_FEC_RS); 2507 else 2508 hw->phy.link_info.req_fec_info = 2509 abilities.fec_cfg_curr_mod_ext_info & 2510 (I40E_AQ_REQUEST_FEC_KR | 2511 I40E_AQ_REQUEST_FEC_RS); 2512 2513 memcpy(hw->phy.link_info.module_type, &abilities.module_type, 2514 sizeof(hw->phy.link_info.module_type)); 2515 } 2516 2517 return status; 2518 } 2519 2520 /** 2521 * i40e_aq_add_veb - Insert a VEB between the VSI and the MAC 2522 * @hw: pointer to the hw struct 2523 * @uplink_seid: the MAC or other gizmo SEID 2524 * @downlink_seid: the VSI SEID 2525 * @enabled_tc: bitmap of TCs to be enabled 2526 * @default_port: true for default port VSI, false for control port 2527 * @veb_seid: pointer to where to put the resulting VEB SEID 2528 * @enable_stats: true to turn on VEB stats 2529 * @cmd_details: pointer to command details structure or NULL 2530 * 2531 * This asks the FW to add a VEB between the uplink and downlink 2532 * elements. If the uplink SEID is 0, this will be a floating VEB. 
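 *
 * Caller sketch (illustrative; SEIDs and the TC map are assumed caller
 * context):
 *
 *	u16 veb_seid = 0;
 *
 *	if (!i40e_aq_add_veb(hw, uplink_seid, vsi_seid, enabled_tc,
 *			     true, &veb_seid, true, NULL))
 *		hw_dbg(hw, "added VEB, seid %d\n", veb_seid);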
2533 **/ 2534 i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid, 2535 u16 downlink_seid, u8 enabled_tc, 2536 bool default_port, u16 *veb_seid, 2537 bool enable_stats, 2538 struct i40e_asq_cmd_details *cmd_details) 2539 { 2540 struct i40e_aq_desc desc; 2541 struct i40e_aqc_add_veb *cmd = 2542 (struct i40e_aqc_add_veb *)&desc.params.raw; 2543 struct i40e_aqc_add_veb_completion *resp = 2544 (struct i40e_aqc_add_veb_completion *)&desc.params.raw; 2545 i40e_status status; 2546 u16 veb_flags = 0; 2547 2548 /* SEIDs need to either both be set or both be 0 for floating VEB */ 2549 if (!!uplink_seid != !!downlink_seid) 2550 return I40E_ERR_PARAM; 2551 2552 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_veb); 2553 2554 cmd->uplink_seid = cpu_to_le16(uplink_seid); 2555 cmd->downlink_seid = cpu_to_le16(downlink_seid); 2556 cmd->enable_tcs = enabled_tc; 2557 if (!uplink_seid) 2558 veb_flags |= I40E_AQC_ADD_VEB_FLOATING; 2559 if (default_port) 2560 veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT; 2561 else 2562 veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DATA; 2563 2564 /* reverse logic here: set the bitflag to disable the stats */ 2565 if (!enable_stats) 2566 veb_flags |= I40E_AQC_ADD_VEB_ENABLE_DISABLE_STATS; 2567 2568 cmd->veb_flags = cpu_to_le16(veb_flags); 2569 2570 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2571 2572 if (!status && veb_seid) 2573 *veb_seid = le16_to_cpu(resp->veb_seid); 2574 2575 return status; 2576 } 2577 2578 /** 2579 * i40e_aq_get_veb_parameters - Retrieve VEB parameters 2580 * @hw: pointer to the hw struct 2581 * @veb_seid: the SEID of the VEB to query 2582 * @switch_id: the uplink switch id 2583 * @floating: set to true if the VEB is floating 2584 * @statistic_index: index of the stats counter block for this VEB 2585 * @vebs_used: number of VEB's used by function 2586 * @vebs_free: total VEB's not reserved by any function 2587 * @cmd_details: pointer to command details structure or NULL 2588 * 2589 * This retrieves the parameters for a particular VEB, specified by 2590 * uplink_seid, and returns them to the caller. 
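 *
 * Query sketch (illustrative only):
 *
 *	u16 switch_id, stat_idx, vebs_used, vebs_free;
 *	bool floating;
 *
 *	if (!i40e_aq_get_veb_parameters(hw, veb_seid, &switch_id, &floating,
 *					&stat_idx, &vebs_used, &vebs_free,
 *					NULL))
 *		hw_dbg(hw, "VEB %d floating=%d\n", veb_seid, floating);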
2591 **/ 2592 i40e_status i40e_aq_get_veb_parameters(struct i40e_hw *hw, 2593 u16 veb_seid, u16 *switch_id, 2594 bool *floating, u16 *statistic_index, 2595 u16 *vebs_used, u16 *vebs_free, 2596 struct i40e_asq_cmd_details *cmd_details) 2597 { 2598 struct i40e_aq_desc desc; 2599 struct i40e_aqc_get_veb_parameters_completion *cmd_resp = 2600 (struct i40e_aqc_get_veb_parameters_completion *) 2601 &desc.params.raw; 2602 i40e_status status; 2603 2604 if (veb_seid == 0) 2605 return I40E_ERR_PARAM; 2606 2607 i40e_fill_default_direct_cmd_desc(&desc, 2608 i40e_aqc_opc_get_veb_parameters); 2609 cmd_resp->seid = cpu_to_le16(veb_seid); 2610 2611 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2612 if (status) 2613 goto get_veb_exit; 2614 2615 if (switch_id) 2616 *switch_id = le16_to_cpu(cmd_resp->switch_id); 2617 if (statistic_index) 2618 *statistic_index = le16_to_cpu(cmd_resp->statistic_index); 2619 if (vebs_used) 2620 *vebs_used = le16_to_cpu(cmd_resp->vebs_used); 2621 if (vebs_free) 2622 *vebs_free = le16_to_cpu(cmd_resp->vebs_free); 2623 if (floating) { 2624 u16 flags = le16_to_cpu(cmd_resp->veb_flags); 2625 2626 if (flags & I40E_AQC_ADD_VEB_FLOATING) 2627 *floating = true; 2628 else 2629 *floating = false; 2630 } 2631 2632 get_veb_exit: 2633 return status; 2634 } 2635 2636 /** 2637 * i40e_prepare_add_macvlan 2638 * @mv_list: list of macvlans to be added 2639 * @desc: pointer to AQ descriptor structure 2640 * @count: length of the list 2641 * @seid: VSI for the mac address 2642 * 2643 * Internal helper function that prepares the add macvlan request 2644 * and returns the buffer size. 2645 **/ 2646 static u16 2647 i40e_prepare_add_macvlan(struct i40e_aqc_add_macvlan_element_data *mv_list, 2648 struct i40e_aq_desc *desc, u16 count, u16 seid) 2649 { 2650 struct i40e_aqc_macvlan *cmd = 2651 (struct i40e_aqc_macvlan *)&desc->params.raw; 2652 u16 buf_size; 2653 int i; 2654 2655 buf_size = count * sizeof(*mv_list); 2656 2657 /* prep the rest of the request */ 2658 i40e_fill_default_direct_cmd_desc(desc, i40e_aqc_opc_add_macvlan); 2659 cmd->num_addresses = cpu_to_le16(count); 2660 cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid); 2661 cmd->seid[1] = 0; 2662 cmd->seid[2] = 0; 2663 2664 for (i = 0; i < count; i++) 2665 if (is_multicast_ether_addr(mv_list[i].mac_addr)) 2666 mv_list[i].flags |= 2667 cpu_to_le16(I40E_AQC_MACVLAN_ADD_USE_SHARED_MAC); 2668 2669 desc->flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); 2670 if (buf_size > I40E_AQ_LARGE_BUF) 2671 desc->flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 2672 2673 return buf_size; 2674 } 2675 2676 /** 2677 * i40e_aq_add_macvlan 2678 * @hw: pointer to the hw struct 2679 * @seid: VSI for the mac address 2680 * @mv_list: list of macvlans to be added 2681 * @count: length of the list 2682 * @cmd_details: pointer to command details structure or NULL 2683 * 2684 * Add MAC/VLAN addresses to the HW filtering 2685 **/ 2686 i40e_status 2687 i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid, 2688 struct i40e_aqc_add_macvlan_element_data *mv_list, 2689 u16 count, struct i40e_asq_cmd_details *cmd_details) 2690 { 2691 struct i40e_aq_desc desc; 2692 u16 buf_size; 2693 2694 if (count == 0 || !mv_list || !hw) 2695 return I40E_ERR_PARAM; 2696 2697 buf_size = i40e_prepare_add_macvlan(mv_list, &desc, count, seid); 2698 2699 return i40e_asq_send_command_atomic(hw, &desc, mv_list, buf_size, 2700 cmd_details, true); 2701 } 2702 2703 /** 2704 * i40e_aq_add_macvlan_v2 2705 * @hw: pointer to the hw struct 2706 * @seid: VSI for the mac 
address 2707 * @mv_list: list of macvlans to be added 2708 * @count: length of the list 2709 * @cmd_details: pointer to command details structure or NULL 2710 * @aq_status: pointer to Admin Queue status return value 2711 * 2712 * Add MAC/VLAN addresses to the HW filtering. 2713 * The _v2 version returns the last Admin Queue status in aq_status 2714 * to avoid race conditions in access to hw->aq.asq_last_status. 2715 * It also calls _v2 versions of asq_send_command functions to 2716 * get the aq_status on the stack. 2717 **/ 2718 i40e_status 2719 i40e_aq_add_macvlan_v2(struct i40e_hw *hw, u16 seid, 2720 struct i40e_aqc_add_macvlan_element_data *mv_list, 2721 u16 count, struct i40e_asq_cmd_details *cmd_details, 2722 enum i40e_admin_queue_err *aq_status) 2723 { 2724 struct i40e_aq_desc desc; 2725 u16 buf_size; 2726 2727 if (count == 0 || !mv_list || !hw) 2728 return I40E_ERR_PARAM; 2729 2730 buf_size = i40e_prepare_add_macvlan(mv_list, &desc, count, seid); 2731 2732 return i40e_asq_send_command_atomic_v2(hw, &desc, mv_list, buf_size, 2733 cmd_details, true, aq_status); 2734 } 2735 2736 /** 2737 * i40e_aq_remove_macvlan 2738 * @hw: pointer to the hw struct 2739 * @seid: VSI for the mac address 2740 * @mv_list: list of macvlans to be removed 2741 * @count: length of the list 2742 * @cmd_details: pointer to command details structure or NULL 2743 * 2744 * Remove MAC/VLAN addresses from the HW filtering 2745 **/ 2746 i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid, 2747 struct i40e_aqc_remove_macvlan_element_data *mv_list, 2748 u16 count, struct i40e_asq_cmd_details *cmd_details) 2749 { 2750 struct i40e_aq_desc desc; 2751 struct i40e_aqc_macvlan *cmd = 2752 (struct i40e_aqc_macvlan *)&desc.params.raw; 2753 i40e_status status; 2754 u16 buf_size; 2755 2756 if (count == 0 || !mv_list || !hw) 2757 return I40E_ERR_PARAM; 2758 2759 buf_size = count * sizeof(*mv_list); 2760 2761 /* prep the rest of the request */ 2762 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_macvlan); 2763 cmd->num_addresses = cpu_to_le16(count); 2764 cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid); 2765 cmd->seid[1] = 0; 2766 cmd->seid[2] = 0; 2767 2768 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); 2769 if (buf_size > I40E_AQ_LARGE_BUF) 2770 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 2771 2772 status = i40e_asq_send_command_atomic(hw, &desc, mv_list, buf_size, 2773 cmd_details, true); 2774 2775 return status; 2776 } 2777 2778 /** 2779 * i40e_aq_remove_macvlan_v2 2780 * @hw: pointer to the hw struct 2781 * @seid: VSI for the mac address 2782 * @mv_list: list of macvlans to be removed 2783 * @count: length of the list 2784 * @cmd_details: pointer to command details structure or NULL 2785 * @aq_status: pointer to Admin Queue status return value 2786 * 2787 * Remove MAC/VLAN addresses from the HW filtering. 2788 * The _v2 version returns the last Admin Queue status in aq_status 2789 * to avoid race conditions in access to hw->aq.asq_last_status. 2790 * It also calls _v2 versions of asq_send_command functions to 2791 * get the aq_status on the stack. 
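 *
 * Removal sketch (illustrative; "macaddr" and "vsi_seid" are assumed caller
 * context, and the perfect-match flag comes from the AdminQ definitions):
 *
 *	struct i40e_aqc_remove_macvlan_element_data el = {};
 *	enum i40e_admin_queue_err aq_status;
 *
 *	ether_addr_copy(el.mac_addr, macaddr);
 *	el.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
 *	if (i40e_aq_remove_macvlan_v2(hw, vsi_seid, &el, 1, NULL, &aq_status))
 *		hw_dbg(hw, "remove MAC failed, aq_err %s\n",
 *		       i40e_aq_str(hw, aq_status));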
2792 **/ 2793 i40e_status 2794 i40e_aq_remove_macvlan_v2(struct i40e_hw *hw, u16 seid, 2795 struct i40e_aqc_remove_macvlan_element_data *mv_list, 2796 u16 count, struct i40e_asq_cmd_details *cmd_details, 2797 enum i40e_admin_queue_err *aq_status) 2798 { 2799 struct i40e_aqc_macvlan *cmd; 2800 struct i40e_aq_desc desc; 2801 u16 buf_size; 2802 2803 if (count == 0 || !mv_list || !hw) 2804 return I40E_ERR_PARAM; 2805 2806 buf_size = count * sizeof(*mv_list); 2807 2808 /* prep the rest of the request */ 2809 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_macvlan); 2810 cmd = (struct i40e_aqc_macvlan *)&desc.params.raw; 2811 cmd->num_addresses = cpu_to_le16(count); 2812 cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid); 2813 cmd->seid[1] = 0; 2814 cmd->seid[2] = 0; 2815 2816 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); 2817 if (buf_size > I40E_AQ_LARGE_BUF) 2818 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 2819 2820 return i40e_asq_send_command_atomic_v2(hw, &desc, mv_list, buf_size, 2821 cmd_details, true, aq_status); 2822 } 2823 2824 /** 2825 * i40e_mirrorrule_op - Internal helper function to add/delete mirror rule 2826 * @hw: pointer to the hw struct 2827 * @opcode: AQ opcode for add or delete mirror rule 2828 * @sw_seid: Switch SEID (to which rule refers) 2829 * @rule_type: Rule Type (ingress/egress/VLAN) 2830 * @id: Destination VSI SEID or Rule ID 2831 * @count: length of the list 2832 * @mr_list: list of mirrored VSI SEIDs or VLAN IDs 2833 * @cmd_details: pointer to command details structure or NULL 2834 * @rule_id: Rule ID returned from FW 2835 * @rules_used: Number of rules used in internal switch 2836 * @rules_free: Number of rules free in internal switch 2837 * 2838 * Add/Delete a mirror rule to a specific switch. 
Mirror rules are supported for 2839 * VEBs/VEPA elements only 2840 **/ 2841 static i40e_status i40e_mirrorrule_op(struct i40e_hw *hw, 2842 u16 opcode, u16 sw_seid, u16 rule_type, u16 id, 2843 u16 count, __le16 *mr_list, 2844 struct i40e_asq_cmd_details *cmd_details, 2845 u16 *rule_id, u16 *rules_used, u16 *rules_free) 2846 { 2847 struct i40e_aq_desc desc; 2848 struct i40e_aqc_add_delete_mirror_rule *cmd = 2849 (struct i40e_aqc_add_delete_mirror_rule *)&desc.params.raw; 2850 struct i40e_aqc_add_delete_mirror_rule_completion *resp = 2851 (struct i40e_aqc_add_delete_mirror_rule_completion *)&desc.params.raw; 2852 i40e_status status; 2853 u16 buf_size; 2854 2855 buf_size = count * sizeof(*mr_list); 2856 2857 /* prep the rest of the request */ 2858 i40e_fill_default_direct_cmd_desc(&desc, opcode); 2859 cmd->seid = cpu_to_le16(sw_seid); 2860 cmd->rule_type = cpu_to_le16(rule_type & 2861 I40E_AQC_MIRROR_RULE_TYPE_MASK); 2862 cmd->num_entries = cpu_to_le16(count); 2863 /* Dest VSI for add, rule_id for delete */ 2864 cmd->destination = cpu_to_le16(id); 2865 if (mr_list) { 2866 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | 2867 I40E_AQ_FLAG_RD)); 2868 if (buf_size > I40E_AQ_LARGE_BUF) 2869 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 2870 } 2871 2872 status = i40e_asq_send_command(hw, &desc, mr_list, buf_size, 2873 cmd_details); 2874 if (!status || 2875 hw->aq.asq_last_status == I40E_AQ_RC_ENOSPC) { 2876 if (rule_id) 2877 *rule_id = le16_to_cpu(resp->rule_id); 2878 if (rules_used) 2879 *rules_used = le16_to_cpu(resp->mirror_rules_used); 2880 if (rules_free) 2881 *rules_free = le16_to_cpu(resp->mirror_rules_free); 2882 } 2883 return status; 2884 } 2885 2886 /** 2887 * i40e_aq_add_mirrorrule - add a mirror rule 2888 * @hw: pointer to the hw struct 2889 * @sw_seid: Switch SEID (to which rule refers) 2890 * @rule_type: Rule Type (ingress/egress/VLAN) 2891 * @dest_vsi: SEID of VSI to which packets will be mirrored 2892 * @count: length of the list 2893 * @mr_list: list of mirrored VSI SEIDs or VLAN IDs 2894 * @cmd_details: pointer to command details structure or NULL 2895 * @rule_id: Rule ID returned from FW 2896 * @rules_used: Number of rules used in internal switch 2897 * @rules_free: Number of rules free in internal switch 2898 * 2899 * Add mirror rule. Mirror rules are supported for VEBs or VEPA elements only 2900 **/ 2901 i40e_status i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid, 2902 u16 rule_type, u16 dest_vsi, u16 count, __le16 *mr_list, 2903 struct i40e_asq_cmd_details *cmd_details, 2904 u16 *rule_id, u16 *rules_used, u16 *rules_free) 2905 { 2906 if (!(rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS || 2907 rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS)) { 2908 if (count == 0 || !mr_list) 2909 return I40E_ERR_PARAM; 2910 } 2911 2912 return i40e_mirrorrule_op(hw, i40e_aqc_opc_add_mirror_rule, sw_seid, 2913 rule_type, dest_vsi, count, mr_list, 2914 cmd_details, rule_id, rules_used, rules_free); 2915 } 2916 2917 /** 2918 * i40e_aq_delete_mirrorrule - delete a mirror rule 2919 * @hw: pointer to the hw struct 2920 * @sw_seid: Switch SEID (to which rule refers) 2921 * @rule_type: Rule Type (ingress/egress/VLAN) 2922 * @count: length of the list 2923 * @rule_id: Rule ID that is returned in the receive desc as part of 2924 * add_mirrorrule. 
2925 * @mr_list: list of mirrored VLAN IDs to be removed 2926 * @cmd_details: pointer to command details structure or NULL 2927 * @rules_used: Number of rules used in internal switch 2928 * @rules_free: Number of rules free in internal switch 2929 * 2930 * Delete a mirror rule. Mirror rules are supported for VEBs/VEPA elements only 2931 **/ 2932 i40e_status i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid, 2933 u16 rule_type, u16 rule_id, u16 count, __le16 *mr_list, 2934 struct i40e_asq_cmd_details *cmd_details, 2935 u16 *rules_used, u16 *rules_free) 2936 { 2937 /* Rule ID has to be valid except rule_type: INGRESS VLAN mirroring */ 2938 if (rule_type == I40E_AQC_MIRROR_RULE_TYPE_VLAN) { 2939 /* count and mr_list shall be valid for rule_type INGRESS VLAN 2940 * mirroring. For other rule_type, count and rule_type should 2941 * not matter. 2942 */ 2943 if (count == 0 || !mr_list) 2944 return I40E_ERR_PARAM; 2945 } 2946 2947 return i40e_mirrorrule_op(hw, i40e_aqc_opc_delete_mirror_rule, sw_seid, 2948 rule_type, rule_id, count, mr_list, 2949 cmd_details, NULL, rules_used, rules_free); 2950 } 2951 2952 /** 2953 * i40e_aq_send_msg_to_vf 2954 * @hw: pointer to the hardware structure 2955 * @vfid: VF id to send msg 2956 * @v_opcode: opcodes for VF-PF communication 2957 * @v_retval: return error code 2958 * @msg: pointer to the msg buffer 2959 * @msglen: msg length 2960 * @cmd_details: pointer to command details 2961 * 2962 * send msg to vf 2963 **/ 2964 i40e_status i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid, 2965 u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen, 2966 struct i40e_asq_cmd_details *cmd_details) 2967 { 2968 struct i40e_aq_desc desc; 2969 struct i40e_aqc_pf_vf_message *cmd = 2970 (struct i40e_aqc_pf_vf_message *)&desc.params.raw; 2971 i40e_status status; 2972 2973 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_send_msg_to_vf); 2974 cmd->id = cpu_to_le32(vfid); 2975 desc.cookie_high = cpu_to_le32(v_opcode); 2976 desc.cookie_low = cpu_to_le32(v_retval); 2977 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_SI); 2978 if (msglen) { 2979 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | 2980 I40E_AQ_FLAG_RD)); 2981 if (msglen > I40E_AQ_LARGE_BUF) 2982 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 2983 desc.datalen = cpu_to_le16(msglen); 2984 } 2985 status = i40e_asq_send_command(hw, &desc, msg, msglen, cmd_details); 2986 2987 return status; 2988 } 2989 2990 /** 2991 * i40e_aq_debug_read_register 2992 * @hw: pointer to the hw struct 2993 * @reg_addr: register address 2994 * @reg_val: register value 2995 * @cmd_details: pointer to command details structure or NULL 2996 * 2997 * Read the register using the admin queue commands 2998 **/ 2999 i40e_status i40e_aq_debug_read_register(struct i40e_hw *hw, 3000 u32 reg_addr, u64 *reg_val, 3001 struct i40e_asq_cmd_details *cmd_details) 3002 { 3003 struct i40e_aq_desc desc; 3004 struct i40e_aqc_debug_reg_read_write *cmd_resp = 3005 (struct i40e_aqc_debug_reg_read_write *)&desc.params.raw; 3006 i40e_status status; 3007 3008 if (reg_val == NULL) 3009 return I40E_ERR_PARAM; 3010 3011 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_read_reg); 3012 3013 cmd_resp->address = cpu_to_le32(reg_addr); 3014 3015 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3016 3017 if (!status) { 3018 *reg_val = ((u64)le32_to_cpu(cmd_resp->value_high) << 32) | 3019 (u64)le32_to_cpu(cmd_resp->value_low); 3020 } 3021 3022 return status; 3023 } 3024 3025 /** 3026 * i40e_aq_debug_write_register 3027 * @hw: pointer to the hw struct 
3028 * @reg_addr: register address 3029 * @reg_val: register value 3030 * @cmd_details: pointer to command details structure or NULL 3031 * 3032 * Write to a register using the admin queue commands 3033 **/ 3034 i40e_status i40e_aq_debug_write_register(struct i40e_hw *hw, 3035 u32 reg_addr, u64 reg_val, 3036 struct i40e_asq_cmd_details *cmd_details) 3037 { 3038 struct i40e_aq_desc desc; 3039 struct i40e_aqc_debug_reg_read_write *cmd = 3040 (struct i40e_aqc_debug_reg_read_write *)&desc.params.raw; 3041 i40e_status status; 3042 3043 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_write_reg); 3044 3045 cmd->address = cpu_to_le32(reg_addr); 3046 cmd->value_high = cpu_to_le32((u32)(reg_val >> 32)); 3047 cmd->value_low = cpu_to_le32((u32)(reg_val & 0xFFFFFFFF)); 3048 3049 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3050 3051 return status; 3052 } 3053 3054 /** 3055 * i40e_aq_request_resource 3056 * @hw: pointer to the hw struct 3057 * @resource: resource id 3058 * @access: access type 3059 * @sdp_number: resource number 3060 * @timeout: the maximum time in ms that the driver may hold the resource 3061 * @cmd_details: pointer to command details structure or NULL 3062 * 3063 * requests common resource using the admin queue commands 3064 **/ 3065 i40e_status i40e_aq_request_resource(struct i40e_hw *hw, 3066 enum i40e_aq_resources_ids resource, 3067 enum i40e_aq_resource_access_type access, 3068 u8 sdp_number, u64 *timeout, 3069 struct i40e_asq_cmd_details *cmd_details) 3070 { 3071 struct i40e_aq_desc desc; 3072 struct i40e_aqc_request_resource *cmd_resp = 3073 (struct i40e_aqc_request_resource *)&desc.params.raw; 3074 i40e_status status; 3075 3076 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_request_resource); 3077 3078 cmd_resp->resource_id = cpu_to_le16(resource); 3079 cmd_resp->access_type = cpu_to_le16(access); 3080 cmd_resp->resource_number = cpu_to_le32(sdp_number); 3081 3082 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3083 /* The completion specifies the maximum time in ms that the driver 3084 * may hold the resource in the Timeout field. 3085 * If the resource is held by someone else, the command completes with 3086 * busy return value and the timeout field indicates the maximum time 3087 * the current owner of the resource has to free it. 
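 *
 * A typical caller (sketch; the NVM resource/access IDs are assumed from
 * the shared type definitions) retries once the busy window expires:
 *
 *	u64 timeout = 0;
 *
 *	if (i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID,
 *				     I40E_RESOURCE_READ, 0, &timeout,
 *				     NULL) &&
 *	    hw->aq.asq_last_status == I40E_AQ_RC_EBUSY)
 *		msleep(timeout);	/* then issue the request again */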
3088 */ 3089 if (!status || hw->aq.asq_last_status == I40E_AQ_RC_EBUSY) 3090 *timeout = le32_to_cpu(cmd_resp->timeout); 3091 3092 return status; 3093 } 3094 3095 /** 3096 * i40e_aq_release_resource 3097 * @hw: pointer to the hw struct 3098 * @resource: resource id 3099 * @sdp_number: resource number 3100 * @cmd_details: pointer to command details structure or NULL 3101 * 3102 * release common resource using the admin queue commands 3103 **/ 3104 i40e_status i40e_aq_release_resource(struct i40e_hw *hw, 3105 enum i40e_aq_resources_ids resource, 3106 u8 sdp_number, 3107 struct i40e_asq_cmd_details *cmd_details) 3108 { 3109 struct i40e_aq_desc desc; 3110 struct i40e_aqc_request_resource *cmd = 3111 (struct i40e_aqc_request_resource *)&desc.params.raw; 3112 i40e_status status; 3113 3114 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_release_resource); 3115 3116 cmd->resource_id = cpu_to_le16(resource); 3117 cmd->resource_number = cpu_to_le32(sdp_number); 3118 3119 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3120 3121 return status; 3122 } 3123 3124 /** 3125 * i40e_aq_read_nvm 3126 * @hw: pointer to the hw struct 3127 * @module_pointer: module pointer location in words from the NVM beginning 3128 * @offset: byte offset from the module beginning 3129 * @length: length of the section to be read (in bytes from the offset) 3130 * @data: command buffer (size [bytes] = length) 3131 * @last_command: tells if this is the last command in a series 3132 * @cmd_details: pointer to command details structure or NULL 3133 * 3134 * Read the NVM using the admin queue commands 3135 **/ 3136 i40e_status i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer, 3137 u32 offset, u16 length, void *data, 3138 bool last_command, 3139 struct i40e_asq_cmd_details *cmd_details) 3140 { 3141 struct i40e_aq_desc desc; 3142 struct i40e_aqc_nvm_update *cmd = 3143 (struct i40e_aqc_nvm_update *)&desc.params.raw; 3144 i40e_status status; 3145 3146 /* In offset the highest byte must be zeroed. */ 3147 if (offset & 0xFF000000) { 3148 status = I40E_ERR_PARAM; 3149 goto i40e_aq_read_nvm_exit; 3150 } 3151 3152 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_read); 3153 3154 /* If this is the last command in a series, set the proper flag. 
*/ 3155 if (last_command) 3156 cmd->command_flags |= I40E_AQ_NVM_LAST_CMD; 3157 cmd->module_pointer = module_pointer; 3158 cmd->offset = cpu_to_le32(offset); 3159 cmd->length = cpu_to_le16(length); 3160 3161 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 3162 if (length > I40E_AQ_LARGE_BUF) 3163 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 3164 3165 status = i40e_asq_send_command(hw, &desc, data, length, cmd_details); 3166 3167 i40e_aq_read_nvm_exit: 3168 return status; 3169 } 3170 3171 /** 3172 * i40e_aq_erase_nvm 3173 * @hw: pointer to the hw struct 3174 * @module_pointer: module pointer location in words from the NVM beginning 3175 * @offset: offset in the module (expressed in 4 KB from module's beginning) 3176 * @length: length of the section to be erased (expressed in 4 KB) 3177 * @last_command: tells if this is the last command in a series 3178 * @cmd_details: pointer to command details structure or NULL 3179 * 3180 * Erase the NVM sector using the admin queue commands 3181 **/ 3182 i40e_status i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer, 3183 u32 offset, u16 length, bool last_command, 3184 struct i40e_asq_cmd_details *cmd_details) 3185 { 3186 struct i40e_aq_desc desc; 3187 struct i40e_aqc_nvm_update *cmd = 3188 (struct i40e_aqc_nvm_update *)&desc.params.raw; 3189 i40e_status status; 3190 3191 /* In offset the highest byte must be zeroed. */ 3192 if (offset & 0xFF000000) { 3193 status = I40E_ERR_PARAM; 3194 goto i40e_aq_erase_nvm_exit; 3195 } 3196 3197 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_erase); 3198 3199 /* If this is the last command in a series, set the proper flag. */ 3200 if (last_command) 3201 cmd->command_flags |= I40E_AQ_NVM_LAST_CMD; 3202 cmd->module_pointer = module_pointer; 3203 cmd->offset = cpu_to_le32(offset); 3204 cmd->length = cpu_to_le16(length); 3205 3206 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3207 3208 i40e_aq_erase_nvm_exit: 3209 return status; 3210 } 3211 3212 /** 3213 * i40e_parse_discover_capabilities 3214 * @hw: pointer to the hw struct 3215 * @buff: pointer to a buffer containing device/function capability records 3216 * @cap_count: number of capability records in the list 3217 * @list_type_opc: type of capabilities list to parse 3218 * 3219 * Parse the device/function capabilities list. 
3220 **/ 3221 static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff, 3222 u32 cap_count, 3223 enum i40e_admin_queue_opc list_type_opc) 3224 { 3225 struct i40e_aqc_list_capabilities_element_resp *cap; 3226 u32 valid_functions, num_functions; 3227 u32 number, logical_id, phys_id; 3228 struct i40e_hw_capabilities *p; 3229 u16 id, ocp_cfg_word0; 3230 i40e_status status; 3231 u8 major_rev; 3232 u32 i = 0; 3233 3234 cap = (struct i40e_aqc_list_capabilities_element_resp *) buff; 3235 3236 if (list_type_opc == i40e_aqc_opc_list_dev_capabilities) 3237 p = &hw->dev_caps; 3238 else if (list_type_opc == i40e_aqc_opc_list_func_capabilities) 3239 p = &hw->func_caps; 3240 else 3241 return; 3242 3243 for (i = 0; i < cap_count; i++, cap++) { 3244 id = le16_to_cpu(cap->id); 3245 number = le32_to_cpu(cap->number); 3246 logical_id = le32_to_cpu(cap->logical_id); 3247 phys_id = le32_to_cpu(cap->phys_id); 3248 major_rev = cap->major_rev; 3249 3250 switch (id) { 3251 case I40E_AQ_CAP_ID_SWITCH_MODE: 3252 p->switch_mode = number; 3253 break; 3254 case I40E_AQ_CAP_ID_MNG_MODE: 3255 p->management_mode = number; 3256 if (major_rev > 1) { 3257 p->mng_protocols_over_mctp = logical_id; 3258 i40e_debug(hw, I40E_DEBUG_INIT, 3259 "HW Capability: Protocols over MCTP = %d\n", 3260 p->mng_protocols_over_mctp); 3261 } else { 3262 p->mng_protocols_over_mctp = 0; 3263 } 3264 break; 3265 case I40E_AQ_CAP_ID_NPAR_ACTIVE: 3266 p->npar_enable = number; 3267 break; 3268 case I40E_AQ_CAP_ID_OS2BMC_CAP: 3269 p->os2bmc = number; 3270 break; 3271 case I40E_AQ_CAP_ID_FUNCTIONS_VALID: 3272 p->valid_functions = number; 3273 break; 3274 case I40E_AQ_CAP_ID_SRIOV: 3275 if (number == 1) 3276 p->sr_iov_1_1 = true; 3277 break; 3278 case I40E_AQ_CAP_ID_VF: 3279 p->num_vfs = number; 3280 p->vf_base_id = logical_id; 3281 break; 3282 case I40E_AQ_CAP_ID_VMDQ: 3283 if (number == 1) 3284 p->vmdq = true; 3285 break; 3286 case I40E_AQ_CAP_ID_8021QBG: 3287 if (number == 1) 3288 p->evb_802_1_qbg = true; 3289 break; 3290 case I40E_AQ_CAP_ID_8021QBR: 3291 if (number == 1) 3292 p->evb_802_1_qbh = true; 3293 break; 3294 case I40E_AQ_CAP_ID_VSI: 3295 p->num_vsis = number; 3296 break; 3297 case I40E_AQ_CAP_ID_DCB: 3298 if (number == 1) { 3299 p->dcb = true; 3300 p->enabled_tcmap = logical_id; 3301 p->maxtc = phys_id; 3302 } 3303 break; 3304 case I40E_AQ_CAP_ID_FCOE: 3305 if (number == 1) 3306 p->fcoe = true; 3307 break; 3308 case I40E_AQ_CAP_ID_ISCSI: 3309 if (number == 1) 3310 p->iscsi = true; 3311 break; 3312 case I40E_AQ_CAP_ID_RSS: 3313 p->rss = true; 3314 p->rss_table_size = number; 3315 p->rss_table_entry_width = logical_id; 3316 break; 3317 case I40E_AQ_CAP_ID_RXQ: 3318 p->num_rx_qp = number; 3319 p->base_queue = phys_id; 3320 break; 3321 case I40E_AQ_CAP_ID_TXQ: 3322 p->num_tx_qp = number; 3323 p->base_queue = phys_id; 3324 break; 3325 case I40E_AQ_CAP_ID_MSIX: 3326 p->num_msix_vectors = number; 3327 i40e_debug(hw, I40E_DEBUG_INIT, 3328 "HW Capability: MSIX vector count = %d\n", 3329 p->num_msix_vectors); 3330 break; 3331 case I40E_AQ_CAP_ID_VF_MSIX: 3332 p->num_msix_vectors_vf = number; 3333 break; 3334 case I40E_AQ_CAP_ID_FLEX10: 3335 if (major_rev == 1) { 3336 if (number == 1) { 3337 p->flex10_enable = true; 3338 p->flex10_capable = true; 3339 } 3340 } else { 3341 /* Capability revision >= 2 */ 3342 if (number & 1) 3343 p->flex10_enable = true; 3344 if (number & 2) 3345 p->flex10_capable = true; 3346 } 3347 p->flex10_mode = logical_id; 3348 p->flex10_status = phys_id; 3349 break; 3350 case I40E_AQ_CAP_ID_CEM: 3351 if (number == 
1) 3352 p->mgmt_cem = true; 3353 break; 3354 case I40E_AQ_CAP_ID_IWARP: 3355 if (number == 1) 3356 p->iwarp = true; 3357 break; 3358 case I40E_AQ_CAP_ID_LED: 3359 if (phys_id < I40E_HW_CAP_MAX_GPIO) 3360 p->led[phys_id] = true; 3361 break; 3362 case I40E_AQ_CAP_ID_SDP: 3363 if (phys_id < I40E_HW_CAP_MAX_GPIO) 3364 p->sdp[phys_id] = true; 3365 break; 3366 case I40E_AQ_CAP_ID_MDIO: 3367 if (number == 1) { 3368 p->mdio_port_num = phys_id; 3369 p->mdio_port_mode = logical_id; 3370 } 3371 break; 3372 case I40E_AQ_CAP_ID_1588: 3373 if (number == 1) 3374 p->ieee_1588 = true; 3375 break; 3376 case I40E_AQ_CAP_ID_FLOW_DIRECTOR: 3377 p->fd = true; 3378 p->fd_filters_guaranteed = number; 3379 p->fd_filters_best_effort = logical_id; 3380 break; 3381 case I40E_AQ_CAP_ID_WSR_PROT: 3382 p->wr_csr_prot = (u64)number; 3383 p->wr_csr_prot |= (u64)logical_id << 32; 3384 break; 3385 case I40E_AQ_CAP_ID_NVM_MGMT: 3386 if (number & I40E_NVM_MGMT_SEC_REV_DISABLED) 3387 p->sec_rev_disabled = true; 3388 if (number & I40E_NVM_MGMT_UPDATE_DISABLED) 3389 p->update_disabled = true; 3390 break; 3391 default: 3392 break; 3393 } 3394 } 3395 3396 if (p->fcoe) 3397 i40e_debug(hw, I40E_DEBUG_ALL, "device is FCoE capable\n"); 3398 3399 /* Software override ensuring FCoE is disabled if npar or mfp 3400 * mode because it is not supported in these modes. 3401 */ 3402 if (p->npar_enable || p->flex10_enable) 3403 p->fcoe = false; 3404 3405 /* count the enabled ports (aka the "not disabled" ports) */ 3406 hw->num_ports = 0; 3407 for (i = 0; i < 4; i++) { 3408 u32 port_cfg_reg = I40E_PRTGEN_CNF + (4 * i); 3409 u64 port_cfg = 0; 3410 3411 /* use AQ read to get the physical register offset instead 3412 * of the port relative offset 3413 */ 3414 i40e_aq_debug_read_register(hw, port_cfg_reg, &port_cfg, NULL); 3415 if (!(port_cfg & I40E_PRTGEN_CNF_PORT_DIS_MASK)) 3416 hw->num_ports++; 3417 } 3418 3419 /* OCP cards case: if a mezz is removed the Ethernet port is at 3420 * disabled state in PRTGEN_CNF register. Additional NVM read is 3421 * needed in order to check if we are dealing with OCP card. 3422 * Those cards have 4 PFs at minimum, so using PRTGEN_CNF for counting 3423 * physical ports results in wrong partition id calculation and thus 3424 * not supporting WoL. 
3425 */ 3426 if (hw->mac.type == I40E_MAC_X722) { 3427 if (!i40e_acquire_nvm(hw, I40E_RESOURCE_READ)) { 3428 status = i40e_aq_read_nvm(hw, I40E_SR_EMP_MODULE_PTR, 3429 2 * I40E_SR_OCP_CFG_WORD0, 3430 sizeof(ocp_cfg_word0), 3431 &ocp_cfg_word0, true, NULL); 3432 if (!status && 3433 (ocp_cfg_word0 & I40E_SR_OCP_ENABLED)) 3434 hw->num_ports = 4; 3435 i40e_release_nvm(hw); 3436 } 3437 } 3438 3439 valid_functions = p->valid_functions; 3440 num_functions = 0; 3441 while (valid_functions) { 3442 if (valid_functions & 1) 3443 num_functions++; 3444 valid_functions >>= 1; 3445 } 3446 3447 /* partition id is 1-based, and functions are evenly spread 3448 * across the ports as partitions 3449 */ 3450 if (hw->num_ports != 0) { 3451 hw->partition_id = (hw->pf_id / hw->num_ports) + 1; 3452 hw->num_partitions = num_functions / hw->num_ports; 3453 } 3454 3455 /* additional HW specific goodies that might 3456 * someday be HW version specific 3457 */ 3458 p->rx_buf_chain_len = I40E_MAX_CHAINED_RX_BUFFERS; 3459 } 3460 3461 /** 3462 * i40e_aq_discover_capabilities 3463 * @hw: pointer to the hw struct 3464 * @buff: a virtual buffer to hold the capabilities 3465 * @buff_size: Size of the virtual buffer 3466 * @data_size: Size of the returned data, or buff size needed if AQ err==ENOMEM 3467 * @list_type_opc: capabilities type to discover - pass in the command opcode 3468 * @cmd_details: pointer to command details structure or NULL 3469 * 3470 * Get the device capabilities descriptions from the firmware 3471 **/ 3472 i40e_status i40e_aq_discover_capabilities(struct i40e_hw *hw, 3473 void *buff, u16 buff_size, u16 *data_size, 3474 enum i40e_admin_queue_opc list_type_opc, 3475 struct i40e_asq_cmd_details *cmd_details) 3476 { 3477 struct i40e_aqc_list_capabilites *cmd; 3478 struct i40e_aq_desc desc; 3479 i40e_status status = 0; 3480 3481 cmd = (struct i40e_aqc_list_capabilites *)&desc.params.raw; 3482 3483 if (list_type_opc != i40e_aqc_opc_list_func_capabilities && 3484 list_type_opc != i40e_aqc_opc_list_dev_capabilities) { 3485 status = I40E_ERR_PARAM; 3486 goto exit; 3487 } 3488 3489 i40e_fill_default_direct_cmd_desc(&desc, list_type_opc); 3490 3491 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 3492 if (buff_size > I40E_AQ_LARGE_BUF) 3493 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 3494 3495 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); 3496 *data_size = le16_to_cpu(desc.datalen); 3497 3498 if (status) 3499 goto exit; 3500 3501 i40e_parse_discover_capabilities(hw, buff, le32_to_cpu(cmd->count), 3502 list_type_opc); 3503 3504 exit: 3505 return status; 3506 } 3507 3508 /** 3509 * i40e_aq_update_nvm 3510 * @hw: pointer to the hw struct 3511 * @module_pointer: module pointer location in words from the NVM beginning 3512 * @offset: byte offset from the module beginning 3513 * @length: length of the section to be written (in bytes from the offset) 3514 * @data: command buffer (size [bytes] = length) 3515 * @last_command: tells if this is the last command in a series 3516 * @preservation_flags: Preservation mode flags 3517 * @cmd_details: pointer to command details structure or NULL 3518 * 3519 * Update the NVM using the admin queue commands 3520 **/ 3521 i40e_status i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer, 3522 u32 offset, u16 length, void *data, 3523 bool last_command, u8 preservation_flags, 3524 struct i40e_asq_cmd_details *cmd_details) 3525 { 3526 struct i40e_aq_desc desc; 3527 struct i40e_aqc_nvm_update *cmd = 3528 (struct i40e_aqc_nvm_update *)&desc.params.raw; 
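	/* Note: the preservation flags supplied by the caller are only folded
	 * into command_flags further below when the MAC is an X722; other MAC
	 * types ignore @preservation_flags.
	 */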
3529 i40e_status status; 3530 3531 /* In offset the highest byte must be zeroed. */ 3532 if (offset & 0xFF000000) { 3533 status = I40E_ERR_PARAM; 3534 goto i40e_aq_update_nvm_exit; 3535 } 3536 3537 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_update); 3538 3539 /* If this is the last command in a series, set the proper flag. */ 3540 if (last_command) 3541 cmd->command_flags |= I40E_AQ_NVM_LAST_CMD; 3542 if (hw->mac.type == I40E_MAC_X722) { 3543 if (preservation_flags == I40E_NVM_PRESERVATION_FLAGS_SELECTED) 3544 cmd->command_flags |= 3545 (I40E_AQ_NVM_PRESERVATION_FLAGS_SELECTED << 3546 I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT); 3547 else if (preservation_flags == I40E_NVM_PRESERVATION_FLAGS_ALL) 3548 cmd->command_flags |= 3549 (I40E_AQ_NVM_PRESERVATION_FLAGS_ALL << 3550 I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT); 3551 } 3552 cmd->module_pointer = module_pointer; 3553 cmd->offset = cpu_to_le32(offset); 3554 cmd->length = cpu_to_le16(length); 3555 3556 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); 3557 if (length > I40E_AQ_LARGE_BUF) 3558 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 3559 3560 status = i40e_asq_send_command(hw, &desc, data, length, cmd_details); 3561 3562 i40e_aq_update_nvm_exit: 3563 return status; 3564 } 3565 3566 /** 3567 * i40e_aq_rearrange_nvm 3568 * @hw: pointer to the hw struct 3569 * @rearrange_nvm: defines direction of rearrangement 3570 * @cmd_details: pointer to command details structure or NULL 3571 * 3572 * Rearrange NVM structure, available only for transition FW 3573 **/ 3574 i40e_status i40e_aq_rearrange_nvm(struct i40e_hw *hw, 3575 u8 rearrange_nvm, 3576 struct i40e_asq_cmd_details *cmd_details) 3577 { 3578 struct i40e_aqc_nvm_update *cmd; 3579 i40e_status status; 3580 struct i40e_aq_desc desc; 3581 3582 cmd = (struct i40e_aqc_nvm_update *)&desc.params.raw; 3583 3584 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_update); 3585 3586 rearrange_nvm &= (I40E_AQ_NVM_REARRANGE_TO_FLAT | 3587 I40E_AQ_NVM_REARRANGE_TO_STRUCT); 3588 3589 if (!rearrange_nvm) { 3590 status = I40E_ERR_PARAM; 3591 goto i40e_aq_rearrange_nvm_exit; 3592 } 3593 3594 cmd->command_flags |= rearrange_nvm; 3595 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3596 3597 i40e_aq_rearrange_nvm_exit: 3598 return status; 3599 } 3600 3601 /** 3602 * i40e_aq_get_lldp_mib 3603 * @hw: pointer to the hw struct 3604 * @bridge_type: type of bridge requested 3605 * @mib_type: Local, Remote or both Local and Remote MIBs 3606 * @buff: pointer to a user supplied buffer to store the MIB block 3607 * @buff_size: size of the buffer (in bytes) 3608 * @local_len : length of the returned Local LLDP MIB 3609 * @remote_len: length of the returned Remote LLDP MIB 3610 * @cmd_details: pointer to command details structure or NULL 3611 * 3612 * Requests the complete LLDP MIB (entire packet). 
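 *
 * A minimal caller sketch (illustrative only: the buff and status variables
 * are assumptions, and the two constants are expected to come from
 * i40e_adminq_cmd.h):
 *
 *	u16 local_len = 0, remote_len = 0;
 *
 *	status = i40e_aq_get_lldp_mib(hw, I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
 *				      I40E_AQ_LLDP_MIB_LOCAL, buff, sizeof(buff),
 *				      &local_len, &remote_len, NULL);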
3613 **/ 3614 i40e_status i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type, 3615 u8 mib_type, void *buff, u16 buff_size, 3616 u16 *local_len, u16 *remote_len, 3617 struct i40e_asq_cmd_details *cmd_details) 3618 { 3619 struct i40e_aq_desc desc; 3620 struct i40e_aqc_lldp_get_mib *cmd = 3621 (struct i40e_aqc_lldp_get_mib *)&desc.params.raw; 3622 struct i40e_aqc_lldp_get_mib *resp = 3623 (struct i40e_aqc_lldp_get_mib *)&desc.params.raw; 3624 i40e_status status; 3625 3626 if (buff_size == 0 || !buff) 3627 return I40E_ERR_PARAM; 3628 3629 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_get_mib); 3630 /* Indirect Command */ 3631 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 3632 3633 cmd->type = mib_type & I40E_AQ_LLDP_MIB_TYPE_MASK; 3634 cmd->type |= ((bridge_type << I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) & 3635 I40E_AQ_LLDP_BRIDGE_TYPE_MASK); 3636 3637 desc.datalen = cpu_to_le16(buff_size); 3638 3639 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 3640 if (buff_size > I40E_AQ_LARGE_BUF) 3641 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 3642 3643 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); 3644 if (!status) { 3645 if (local_len != NULL) 3646 *local_len = le16_to_cpu(resp->local_len); 3647 if (remote_len != NULL) 3648 *remote_len = le16_to_cpu(resp->remote_len); 3649 } 3650 3651 return status; 3652 } 3653 3654 /** 3655 * i40e_aq_set_lldp_mib - Set the LLDP MIB 3656 * @hw: pointer to the hw struct 3657 * @mib_type: Local, Remote or both Local and Remote MIBs 3658 * @buff: pointer to a user supplied buffer to store the MIB block 3659 * @buff_size: size of the buffer (in bytes) 3660 * @cmd_details: pointer to command details structure or NULL 3661 * 3662 * Set the LLDP MIB. 3663 **/ 3664 enum i40e_status_code 3665 i40e_aq_set_lldp_mib(struct i40e_hw *hw, 3666 u8 mib_type, void *buff, u16 buff_size, 3667 struct i40e_asq_cmd_details *cmd_details) 3668 { 3669 struct i40e_aqc_lldp_set_local_mib *cmd; 3670 enum i40e_status_code status; 3671 struct i40e_aq_desc desc; 3672 3673 cmd = (struct i40e_aqc_lldp_set_local_mib *)&desc.params.raw; 3674 if (buff_size == 0 || !buff) 3675 return I40E_ERR_PARAM; 3676 3677 i40e_fill_default_direct_cmd_desc(&desc, 3678 i40e_aqc_opc_lldp_set_local_mib); 3679 /* Indirect Command */ 3680 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); 3681 if (buff_size > I40E_AQ_LARGE_BUF) 3682 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 3683 desc.datalen = cpu_to_le16(buff_size); 3684 3685 cmd->type = mib_type; 3686 cmd->length = cpu_to_le16(buff_size); 3687 cmd->address_high = cpu_to_le32(upper_32_bits((uintptr_t)buff)); 3688 cmd->address_low = cpu_to_le32(lower_32_bits((uintptr_t)buff)); 3689 3690 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); 3691 return status; 3692 } 3693 3694 /** 3695 * i40e_aq_cfg_lldp_mib_change_event 3696 * @hw: pointer to the hw struct 3697 * @enable_update: Enable or Disable event posting 3698 * @cmd_details: pointer to command details structure or NULL 3699 * 3700 * Enable or Disable posting of an event on ARQ when LLDP MIB 3701 * associated with the interface changes 3702 **/ 3703 i40e_status i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw, 3704 bool enable_update, 3705 struct i40e_asq_cmd_details *cmd_details) 3706 { 3707 struct i40e_aq_desc desc; 3708 struct i40e_aqc_lldp_update_mib *cmd = 3709 (struct i40e_aqc_lldp_update_mib *)&desc.params.raw; 3710 i40e_status status; 3711 3712 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_update_mib); 
3713 3714 if (!enable_update) 3715 cmd->command |= I40E_AQ_LLDP_MIB_UPDATE_DISABLE; 3716 3717 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3718 3719 return status; 3720 } 3721 3722 /** 3723 * i40e_aq_restore_lldp 3724 * @hw: pointer to the hw struct 3725 * @setting: pointer to factory setting variable or NULL 3726 * @restore: True if factory settings should be restored 3727 * @cmd_details: pointer to command details structure or NULL 3728 * 3729 * Restore LLDP Agent factory settings if @restore set to True. In other case 3730 * only returns factory setting in AQ response. 3731 **/ 3732 enum i40e_status_code 3733 i40e_aq_restore_lldp(struct i40e_hw *hw, u8 *setting, bool restore, 3734 struct i40e_asq_cmd_details *cmd_details) 3735 { 3736 struct i40e_aq_desc desc; 3737 struct i40e_aqc_lldp_restore *cmd = 3738 (struct i40e_aqc_lldp_restore *)&desc.params.raw; 3739 i40e_status status; 3740 3741 if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT)) { 3742 i40e_debug(hw, I40E_DEBUG_ALL, 3743 "Restore LLDP not supported by current FW version.\n"); 3744 return I40E_ERR_DEVICE_NOT_SUPPORTED; 3745 } 3746 3747 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_restore); 3748 3749 if (restore) 3750 cmd->command |= I40E_AQ_LLDP_AGENT_RESTORE; 3751 3752 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3753 3754 if (setting) 3755 *setting = cmd->command & 1; 3756 3757 return status; 3758 } 3759 3760 /** 3761 * i40e_aq_stop_lldp 3762 * @hw: pointer to the hw struct 3763 * @shutdown_agent: True if LLDP Agent needs to be Shutdown 3764 * @persist: True if stop of LLDP should be persistent across power cycles 3765 * @cmd_details: pointer to command details structure or NULL 3766 * 3767 * Stop or Shutdown the embedded LLDP Agent 3768 **/ 3769 i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent, 3770 bool persist, 3771 struct i40e_asq_cmd_details *cmd_details) 3772 { 3773 struct i40e_aq_desc desc; 3774 struct i40e_aqc_lldp_stop *cmd = 3775 (struct i40e_aqc_lldp_stop *)&desc.params.raw; 3776 i40e_status status; 3777 3778 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_stop); 3779 3780 if (shutdown_agent) 3781 cmd->command |= I40E_AQ_LLDP_AGENT_SHUTDOWN; 3782 3783 if (persist) { 3784 if (hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT) 3785 cmd->command |= I40E_AQ_LLDP_AGENT_STOP_PERSIST; 3786 else 3787 i40e_debug(hw, I40E_DEBUG_ALL, 3788 "Persistent Stop LLDP not supported by current FW version.\n"); 3789 } 3790 3791 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3792 3793 return status; 3794 } 3795 3796 /** 3797 * i40e_aq_start_lldp 3798 * @hw: pointer to the hw struct 3799 * @persist: True if start of LLDP should be persistent across power cycles 3800 * @cmd_details: pointer to command details structure or NULL 3801 * 3802 * Start the embedded LLDP Agent on all ports. 
3803 **/ 3804 i40e_status i40e_aq_start_lldp(struct i40e_hw *hw, bool persist, 3805 struct i40e_asq_cmd_details *cmd_details) 3806 { 3807 struct i40e_aq_desc desc; 3808 struct i40e_aqc_lldp_start *cmd = 3809 (struct i40e_aqc_lldp_start *)&desc.params.raw; 3810 i40e_status status; 3811 3812 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_start); 3813 3814 cmd->command = I40E_AQ_LLDP_AGENT_START; 3815 3816 if (persist) { 3817 if (hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT) 3818 cmd->command |= I40E_AQ_LLDP_AGENT_START_PERSIST; 3819 else 3820 i40e_debug(hw, I40E_DEBUG_ALL, 3821 "Persistent Start LLDP not supported by current FW version.\n"); 3822 } 3823 3824 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3825 3826 return status; 3827 } 3828 3829 /** 3830 * i40e_aq_set_dcb_parameters 3831 * @hw: pointer to the hw struct 3832 * @cmd_details: pointer to command details structure or NULL 3833 * @dcb_enable: True if DCB configuration needs to be applied 3834 * 3835 **/ 3836 enum i40e_status_code 3837 i40e_aq_set_dcb_parameters(struct i40e_hw *hw, bool dcb_enable, 3838 struct i40e_asq_cmd_details *cmd_details) 3839 { 3840 struct i40e_aq_desc desc; 3841 struct i40e_aqc_set_dcb_parameters *cmd = 3842 (struct i40e_aqc_set_dcb_parameters *)&desc.params.raw; 3843 i40e_status status; 3844 3845 if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE)) 3846 return I40E_ERR_DEVICE_NOT_SUPPORTED; 3847 3848 i40e_fill_default_direct_cmd_desc(&desc, 3849 i40e_aqc_opc_set_dcb_parameters); 3850 3851 if (dcb_enable) { 3852 cmd->valid_flags = I40E_DCB_VALID; 3853 cmd->command = I40E_AQ_DCB_SET_AGENT; 3854 } 3855 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3856 3857 return status; 3858 } 3859 3860 /** 3861 * i40e_aq_get_cee_dcb_config 3862 * @hw: pointer to the hw struct 3863 * @buff: response buffer that stores CEE operational configuration 3864 * @buff_size: size of the buffer passed 3865 * @cmd_details: pointer to command details structure or NULL 3866 * 3867 * Get CEE DCBX mode operational configuration from firmware 3868 **/ 3869 i40e_status i40e_aq_get_cee_dcb_config(struct i40e_hw *hw, 3870 void *buff, u16 buff_size, 3871 struct i40e_asq_cmd_details *cmd_details) 3872 { 3873 struct i40e_aq_desc desc; 3874 i40e_status status; 3875 3876 if (buff_size == 0 || !buff) 3877 return I40E_ERR_PARAM; 3878 3879 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_cee_dcb_cfg); 3880 3881 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 3882 status = i40e_asq_send_command(hw, &desc, (void *)buff, buff_size, 3883 cmd_details); 3884 3885 return status; 3886 } 3887 3888 /** 3889 * i40e_aq_add_udp_tunnel 3890 * @hw: pointer to the hw struct 3891 * @udp_port: the UDP port to add in Host byte order 3892 * @protocol_index: protocol index type 3893 * @filter_index: pointer to filter index 3894 * @cmd_details: pointer to command details structure or NULL 3895 * 3896 * Note: Firmware expects the udp_port value to be in Little Endian format, 3897 * and this function will call cpu_to_le16 to convert from Host byte order to 3898 * Little Endian order. 
3899 **/ 3900 i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw, 3901 u16 udp_port, u8 protocol_index, 3902 u8 *filter_index, 3903 struct i40e_asq_cmd_details *cmd_details) 3904 { 3905 struct i40e_aq_desc desc; 3906 struct i40e_aqc_add_udp_tunnel *cmd = 3907 (struct i40e_aqc_add_udp_tunnel *)&desc.params.raw; 3908 struct i40e_aqc_del_udp_tunnel_completion *resp = 3909 (struct i40e_aqc_del_udp_tunnel_completion *)&desc.params.raw; 3910 i40e_status status; 3911 3912 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_udp_tunnel); 3913 3914 cmd->udp_port = cpu_to_le16(udp_port); 3915 cmd->protocol_type = protocol_index; 3916 3917 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3918 3919 if (!status && filter_index) 3920 *filter_index = resp->index; 3921 3922 return status; 3923 } 3924 3925 /** 3926 * i40e_aq_del_udp_tunnel 3927 * @hw: pointer to the hw struct 3928 * @index: filter index 3929 * @cmd_details: pointer to command details structure or NULL 3930 **/ 3931 i40e_status i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index, 3932 struct i40e_asq_cmd_details *cmd_details) 3933 { 3934 struct i40e_aq_desc desc; 3935 struct i40e_aqc_remove_udp_tunnel *cmd = 3936 (struct i40e_aqc_remove_udp_tunnel *)&desc.params.raw; 3937 i40e_status status; 3938 3939 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_del_udp_tunnel); 3940 3941 cmd->index = index; 3942 3943 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3944 3945 return status; 3946 } 3947 3948 /** 3949 * i40e_aq_delete_element - Delete switch element 3950 * @hw: pointer to the hw struct 3951 * @seid: the SEID to delete from the switch 3952 * @cmd_details: pointer to command details structure or NULL 3953 * 3954 * This deletes a switch element from the switch. 3955 **/ 3956 i40e_status i40e_aq_delete_element(struct i40e_hw *hw, u16 seid, 3957 struct i40e_asq_cmd_details *cmd_details) 3958 { 3959 struct i40e_aq_desc desc; 3960 struct i40e_aqc_switch_seid *cmd = 3961 (struct i40e_aqc_switch_seid *)&desc.params.raw; 3962 i40e_status status; 3963 3964 if (seid == 0) 3965 return I40E_ERR_PARAM; 3966 3967 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_delete_element); 3968 3969 cmd->seid = cpu_to_le16(seid); 3970 3971 status = i40e_asq_send_command_atomic(hw, &desc, NULL, 0, 3972 cmd_details, true); 3973 3974 return status; 3975 } 3976 3977 /** 3978 * i40e_aq_dcb_updated - DCB Updated Command 3979 * @hw: pointer to the hw struct 3980 * @cmd_details: pointer to command details structure or NULL 3981 * 3982 * EMP will return when the shared RPB settings have been 3983 * recomputed and modified. The retval field in the descriptor 3984 * will be set to 0 when RPB is modified. 
3985 **/ 3986 i40e_status i40e_aq_dcb_updated(struct i40e_hw *hw, 3987 struct i40e_asq_cmd_details *cmd_details) 3988 { 3989 struct i40e_aq_desc desc; 3990 i40e_status status; 3991 3992 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_dcb_updated); 3993 3994 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3995 3996 return status; 3997 } 3998 3999 /** 4000 * i40e_aq_tx_sched_cmd - generic Tx scheduler AQ command handler 4001 * @hw: pointer to the hw struct 4002 * @seid: seid for the physical port/switching component/vsi 4003 * @buff: Indirect buffer to hold data parameters and response 4004 * @buff_size: Indirect buffer size 4005 * @opcode: Tx scheduler AQ command opcode 4006 * @cmd_details: pointer to command details structure or NULL 4007 * 4008 * Generic command handler for Tx scheduler AQ commands 4009 **/ 4010 static i40e_status i40e_aq_tx_sched_cmd(struct i40e_hw *hw, u16 seid, 4011 void *buff, u16 buff_size, 4012 enum i40e_admin_queue_opc opcode, 4013 struct i40e_asq_cmd_details *cmd_details) 4014 { 4015 struct i40e_aq_desc desc; 4016 struct i40e_aqc_tx_sched_ind *cmd = 4017 (struct i40e_aqc_tx_sched_ind *)&desc.params.raw; 4018 i40e_status status; 4019 bool cmd_param_flag = false; 4020 4021 switch (opcode) { 4022 case i40e_aqc_opc_configure_vsi_ets_sla_bw_limit: 4023 case i40e_aqc_opc_configure_vsi_tc_bw: 4024 case i40e_aqc_opc_enable_switching_comp_ets: 4025 case i40e_aqc_opc_modify_switching_comp_ets: 4026 case i40e_aqc_opc_disable_switching_comp_ets: 4027 case i40e_aqc_opc_configure_switching_comp_ets_bw_limit: 4028 case i40e_aqc_opc_configure_switching_comp_bw_config: 4029 cmd_param_flag = true; 4030 break; 4031 case i40e_aqc_opc_query_vsi_bw_config: 4032 case i40e_aqc_opc_query_vsi_ets_sla_config: 4033 case i40e_aqc_opc_query_switching_comp_ets_config: 4034 case i40e_aqc_opc_query_port_ets_config: 4035 case i40e_aqc_opc_query_switching_comp_bw_config: 4036 cmd_param_flag = false; 4037 break; 4038 default: 4039 return I40E_ERR_PARAM; 4040 } 4041 4042 i40e_fill_default_direct_cmd_desc(&desc, opcode); 4043 4044 /* Indirect command */ 4045 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 4046 if (cmd_param_flag) 4047 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD); 4048 if (buff_size > I40E_AQ_LARGE_BUF) 4049 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 4050 4051 desc.datalen = cpu_to_le16(buff_size); 4052 4053 cmd->vsi_seid = cpu_to_le16(seid); 4054 4055 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); 4056 4057 return status; 4058 } 4059 4060 /** 4061 * i40e_aq_config_vsi_bw_limit - Configure VSI BW Limit 4062 * @hw: pointer to the hw struct 4063 * @seid: VSI seid 4064 * @credit: BW limit credits (0 = disabled) 4065 * @max_credit: Max BW limit credits 4066 * @cmd_details: pointer to command details structure or NULL 4067 **/ 4068 i40e_status i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw, 4069 u16 seid, u16 credit, u8 max_credit, 4070 struct i40e_asq_cmd_details *cmd_details) 4071 { 4072 struct i40e_aq_desc desc; 4073 struct i40e_aqc_configure_vsi_bw_limit *cmd = 4074 (struct i40e_aqc_configure_vsi_bw_limit *)&desc.params.raw; 4075 i40e_status status; 4076 4077 i40e_fill_default_direct_cmd_desc(&desc, 4078 i40e_aqc_opc_configure_vsi_bw_limit); 4079 4080 cmd->vsi_seid = cpu_to_le16(seid); 4081 cmd->credit = cpu_to_le16(credit); 4082 cmd->max_credit = max_credit; 4083 4084 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 4085 4086 return status; 4087 } 4088 4089 /** 4090 * i40e_aq_config_vsi_tc_bw - Config VSI BW 
Allocation per TC 4091 * @hw: pointer to the hw struct 4092 * @seid: VSI seid 4093 * @bw_data: Buffer holding enabled TCs, relative TC BW limit/credits 4094 * @cmd_details: pointer to command details structure or NULL 4095 **/ 4096 i40e_status i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw, 4097 u16 seid, 4098 struct i40e_aqc_configure_vsi_tc_bw_data *bw_data, 4099 struct i40e_asq_cmd_details *cmd_details) 4100 { 4101 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), 4102 i40e_aqc_opc_configure_vsi_tc_bw, 4103 cmd_details); 4104 } 4105 4106 /** 4107 * i40e_aq_config_switch_comp_ets - Enable/Disable/Modify ETS on the port 4108 * @hw: pointer to the hw struct 4109 * @seid: seid of the switching component connected to Physical Port 4110 * @ets_data: Buffer holding ETS parameters 4111 * @opcode: Tx scheduler AQ command opcode 4112 * @cmd_details: pointer to command details structure or NULL 4113 **/ 4114 i40e_status i40e_aq_config_switch_comp_ets(struct i40e_hw *hw, 4115 u16 seid, 4116 struct i40e_aqc_configure_switching_comp_ets_data *ets_data, 4117 enum i40e_admin_queue_opc opcode, 4118 struct i40e_asq_cmd_details *cmd_details) 4119 { 4120 return i40e_aq_tx_sched_cmd(hw, seid, (void *)ets_data, 4121 sizeof(*ets_data), opcode, cmd_details); 4122 } 4123 4124 /** 4125 * i40e_aq_config_switch_comp_bw_config - Config Switch comp BW Alloc per TC 4126 * @hw: pointer to the hw struct 4127 * @seid: seid of the switching component 4128 * @bw_data: Buffer holding enabled TCs, relative/absolute TC BW limit/credits 4129 * @cmd_details: pointer to command details structure or NULL 4130 **/ 4131 i40e_status i40e_aq_config_switch_comp_bw_config(struct i40e_hw *hw, 4132 u16 seid, 4133 struct i40e_aqc_configure_switching_comp_bw_config_data *bw_data, 4134 struct i40e_asq_cmd_details *cmd_details) 4135 { 4136 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), 4137 i40e_aqc_opc_configure_switching_comp_bw_config, 4138 cmd_details); 4139 } 4140 4141 /** 4142 * i40e_aq_query_vsi_bw_config - Query VSI BW configuration 4143 * @hw: pointer to the hw struct 4144 * @seid: seid of the VSI 4145 * @bw_data: Buffer to hold VSI BW configuration 4146 * @cmd_details: pointer to command details structure or NULL 4147 **/ 4148 i40e_status i40e_aq_query_vsi_bw_config(struct i40e_hw *hw, 4149 u16 seid, 4150 struct i40e_aqc_query_vsi_bw_config_resp *bw_data, 4151 struct i40e_asq_cmd_details *cmd_details) 4152 { 4153 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), 4154 i40e_aqc_opc_query_vsi_bw_config, 4155 cmd_details); 4156 } 4157 4158 /** 4159 * i40e_aq_query_vsi_ets_sla_config - Query VSI BW configuration per TC 4160 * @hw: pointer to the hw struct 4161 * @seid: seid of the VSI 4162 * @bw_data: Buffer to hold VSI BW configuration per TC 4163 * @cmd_details: pointer to command details structure or NULL 4164 **/ 4165 i40e_status i40e_aq_query_vsi_ets_sla_config(struct i40e_hw *hw, 4166 u16 seid, 4167 struct i40e_aqc_query_vsi_ets_sla_config_resp *bw_data, 4168 struct i40e_asq_cmd_details *cmd_details) 4169 { 4170 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), 4171 i40e_aqc_opc_query_vsi_ets_sla_config, 4172 cmd_details); 4173 } 4174 4175 /** 4176 * i40e_aq_query_switch_comp_ets_config - Query Switch comp BW config per TC 4177 * @hw: pointer to the hw struct 4178 * @seid: seid of the switching component 4179 * @bw_data: Buffer to hold switching component's per TC BW config 4180 * @cmd_details: pointer to command details structure or NULL 
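 *
 * Like the other query wrappers here, this delegates to i40e_aq_tx_sched_cmd(),
 * which treats @bw_data as a response-only indirect buffer (no RD flag is set).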
4181 **/ 4182 i40e_status i40e_aq_query_switch_comp_ets_config(struct i40e_hw *hw, 4183 u16 seid, 4184 struct i40e_aqc_query_switching_comp_ets_config_resp *bw_data, 4185 struct i40e_asq_cmd_details *cmd_details) 4186 { 4187 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), 4188 i40e_aqc_opc_query_switching_comp_ets_config, 4189 cmd_details); 4190 } 4191 4192 /** 4193 * i40e_aq_query_port_ets_config - Query Physical Port ETS configuration 4194 * @hw: pointer to the hw struct 4195 * @seid: seid of the VSI or switching component connected to Physical Port 4196 * @bw_data: Buffer to hold current ETS configuration for the Physical Port 4197 * @cmd_details: pointer to command details structure or NULL 4198 **/ 4199 i40e_status i40e_aq_query_port_ets_config(struct i40e_hw *hw, 4200 u16 seid, 4201 struct i40e_aqc_query_port_ets_config_resp *bw_data, 4202 struct i40e_asq_cmd_details *cmd_details) 4203 { 4204 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), 4205 i40e_aqc_opc_query_port_ets_config, 4206 cmd_details); 4207 } 4208 4209 /** 4210 * i40e_aq_query_switch_comp_bw_config - Query Switch comp BW configuration 4211 * @hw: pointer to the hw struct 4212 * @seid: seid of the switching component 4213 * @bw_data: Buffer to hold switching component's BW configuration 4214 * @cmd_details: pointer to command details structure or NULL 4215 **/ 4216 i40e_status i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw, 4217 u16 seid, 4218 struct i40e_aqc_query_switching_comp_bw_config_resp *bw_data, 4219 struct i40e_asq_cmd_details *cmd_details) 4220 { 4221 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), 4222 i40e_aqc_opc_query_switching_comp_bw_config, 4223 cmd_details); 4224 } 4225 4226 /** 4227 * i40e_validate_filter_settings 4228 * @hw: pointer to the hardware structure 4229 * @settings: Filter control settings 4230 * 4231 * Check and validate the filter control settings passed. 4232 * The function checks for the valid filter/context sizes being 4233 * passed for FCoE and PE. 4234 * 4235 * Returns 0 if the values passed are valid and within 4236 * range else returns an error. 
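 *
 * The size enums encode left-shift counts: for example, the function computes
 * fcoe_filt_size = I40E_HASH_FILTER_BASE_SIZE << settings->fcoe_filt_num and
 * then checks the combined FCoE filter + context size against PMFCOEFMAX.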
4237 **/ 4238 static i40e_status i40e_validate_filter_settings(struct i40e_hw *hw, 4239 struct i40e_filter_control_settings *settings) 4240 { 4241 u32 fcoe_cntx_size, fcoe_filt_size; 4242 u32 fcoe_fmax; 4243 u32 val; 4244 4245 /* Validate FCoE settings passed */ 4246 switch (settings->fcoe_filt_num) { 4247 case I40E_HASH_FILTER_SIZE_1K: 4248 case I40E_HASH_FILTER_SIZE_2K: 4249 case I40E_HASH_FILTER_SIZE_4K: 4250 case I40E_HASH_FILTER_SIZE_8K: 4251 case I40E_HASH_FILTER_SIZE_16K: 4252 case I40E_HASH_FILTER_SIZE_32K: 4253 fcoe_filt_size = I40E_HASH_FILTER_BASE_SIZE; 4254 fcoe_filt_size <<= (u32)settings->fcoe_filt_num; 4255 break; 4256 default: 4257 return I40E_ERR_PARAM; 4258 } 4259 4260 switch (settings->fcoe_cntx_num) { 4261 case I40E_DMA_CNTX_SIZE_512: 4262 case I40E_DMA_CNTX_SIZE_1K: 4263 case I40E_DMA_CNTX_SIZE_2K: 4264 case I40E_DMA_CNTX_SIZE_4K: 4265 fcoe_cntx_size = I40E_DMA_CNTX_BASE_SIZE; 4266 fcoe_cntx_size <<= (u32)settings->fcoe_cntx_num; 4267 break; 4268 default: 4269 return I40E_ERR_PARAM; 4270 } 4271 4272 /* Validate PE settings passed */ 4273 switch (settings->pe_filt_num) { 4274 case I40E_HASH_FILTER_SIZE_1K: 4275 case I40E_HASH_FILTER_SIZE_2K: 4276 case I40E_HASH_FILTER_SIZE_4K: 4277 case I40E_HASH_FILTER_SIZE_8K: 4278 case I40E_HASH_FILTER_SIZE_16K: 4279 case I40E_HASH_FILTER_SIZE_32K: 4280 case I40E_HASH_FILTER_SIZE_64K: 4281 case I40E_HASH_FILTER_SIZE_128K: 4282 case I40E_HASH_FILTER_SIZE_256K: 4283 case I40E_HASH_FILTER_SIZE_512K: 4284 case I40E_HASH_FILTER_SIZE_1M: 4285 break; 4286 default: 4287 return I40E_ERR_PARAM; 4288 } 4289 4290 switch (settings->pe_cntx_num) { 4291 case I40E_DMA_CNTX_SIZE_512: 4292 case I40E_DMA_CNTX_SIZE_1K: 4293 case I40E_DMA_CNTX_SIZE_2K: 4294 case I40E_DMA_CNTX_SIZE_4K: 4295 case I40E_DMA_CNTX_SIZE_8K: 4296 case I40E_DMA_CNTX_SIZE_16K: 4297 case I40E_DMA_CNTX_SIZE_32K: 4298 case I40E_DMA_CNTX_SIZE_64K: 4299 case I40E_DMA_CNTX_SIZE_128K: 4300 case I40E_DMA_CNTX_SIZE_256K: 4301 break; 4302 default: 4303 return I40E_ERR_PARAM; 4304 } 4305 4306 /* FCHSIZE + FCDSIZE should not be greater than PMFCOEFMAX */ 4307 val = rd32(hw, I40E_GLHMC_FCOEFMAX); 4308 fcoe_fmax = (val & I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_MASK) 4309 >> I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT; 4310 if (fcoe_filt_size + fcoe_cntx_size > fcoe_fmax) 4311 return I40E_ERR_INVALID_SIZE; 4312 4313 return 0; 4314 } 4315 4316 /** 4317 * i40e_set_filter_control 4318 * @hw: pointer to the hardware structure 4319 * @settings: Filter control settings 4320 * 4321 * Set the Queue Filters for PE/FCoE and enable filters required 4322 * for a single PF. It is expected that these settings are programmed 4323 * at the driver initialization time. 
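 *
 * A hypothetical init-time sketch (the chosen field values are illustrative
 * only; the remaining size fields must still carry values accepted by
 * i40e_validate_filter_settings()):
 *
 *	struct i40e_filter_control_settings settings = {};
 *
 *	settings.hash_lut_size = I40E_HASH_LUT_SIZE_512;
 *	settings.enable_fdir = true;
 *	settings.enable_ethtype = true;
 *	settings.enable_macvlan = true;
 *	ret = i40e_set_filter_control(hw, &settings);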
4324 **/ 4325 i40e_status i40e_set_filter_control(struct i40e_hw *hw, 4326 struct i40e_filter_control_settings *settings) 4327 { 4328 i40e_status ret = 0; 4329 u32 hash_lut_size = 0; 4330 u32 val; 4331 4332 if (!settings) 4333 return I40E_ERR_PARAM; 4334 4335 /* Validate the input settings */ 4336 ret = i40e_validate_filter_settings(hw, settings); 4337 if (ret) 4338 return ret; 4339 4340 /* Read the PF Queue Filter control register */ 4341 val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0); 4342 4343 /* Program required PE hash buckets for the PF */ 4344 val &= ~I40E_PFQF_CTL_0_PEHSIZE_MASK; 4345 val |= ((u32)settings->pe_filt_num << I40E_PFQF_CTL_0_PEHSIZE_SHIFT) & 4346 I40E_PFQF_CTL_0_PEHSIZE_MASK; 4347 /* Program required PE contexts for the PF */ 4348 val &= ~I40E_PFQF_CTL_0_PEDSIZE_MASK; 4349 val |= ((u32)settings->pe_cntx_num << I40E_PFQF_CTL_0_PEDSIZE_SHIFT) & 4350 I40E_PFQF_CTL_0_PEDSIZE_MASK; 4351 4352 /* Program required FCoE hash buckets for the PF */ 4353 val &= ~I40E_PFQF_CTL_0_PFFCHSIZE_MASK; 4354 val |= ((u32)settings->fcoe_filt_num << 4355 I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT) & 4356 I40E_PFQF_CTL_0_PFFCHSIZE_MASK; 4357 /* Program required FCoE DDP contexts for the PF */ 4358 val &= ~I40E_PFQF_CTL_0_PFFCDSIZE_MASK; 4359 val |= ((u32)settings->fcoe_cntx_num << 4360 I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT) & 4361 I40E_PFQF_CTL_0_PFFCDSIZE_MASK; 4362 4363 /* Program Hash LUT size for the PF */ 4364 val &= ~I40E_PFQF_CTL_0_HASHLUTSIZE_MASK; 4365 if (settings->hash_lut_size == I40E_HASH_LUT_SIZE_512) 4366 hash_lut_size = 1; 4367 val |= (hash_lut_size << I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT) & 4368 I40E_PFQF_CTL_0_HASHLUTSIZE_MASK; 4369 4370 /* Enable FDIR, Ethertype and MACVLAN filters for PF and VFs */ 4371 if (settings->enable_fdir) 4372 val |= I40E_PFQF_CTL_0_FD_ENA_MASK; 4373 if (settings->enable_ethtype) 4374 val |= I40E_PFQF_CTL_0_ETYPE_ENA_MASK; 4375 if (settings->enable_macvlan) 4376 val |= I40E_PFQF_CTL_0_MACVLAN_ENA_MASK; 4377 4378 i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, val); 4379 4380 return 0; 4381 } 4382 4383 /** 4384 * i40e_aq_add_rem_control_packet_filter - Add or Remove Control Packet Filter 4385 * @hw: pointer to the hw struct 4386 * @mac_addr: MAC address to use in the filter 4387 * @ethtype: Ethertype to use in the filter 4388 * @flags: Flags that needs to be applied to the filter 4389 * @vsi_seid: seid of the control VSI 4390 * @queue: VSI queue number to send the packet to 4391 * @is_add: Add control packet filter if True else remove 4392 * @stats: Structure to hold information on control filter counts 4393 * @cmd_details: pointer to command details structure or NULL 4394 * 4395 * This command will Add or Remove control packet filter for a control VSI. 4396 * In return it will update the total number of perfect filter count in 4397 * the stats member. 
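 *
 * For an in-tree example of a caller, see
 * i40e_add_filter_to_drop_tx_flow_control_frames() just below, which installs
 * a Tx-side drop filter for the flow control ethertype.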
4398 **/ 4399 i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw, 4400 u8 *mac_addr, u16 ethtype, u16 flags, 4401 u16 vsi_seid, u16 queue, bool is_add, 4402 struct i40e_control_filter_stats *stats, 4403 struct i40e_asq_cmd_details *cmd_details) 4404 { 4405 struct i40e_aq_desc desc; 4406 struct i40e_aqc_add_remove_control_packet_filter *cmd = 4407 (struct i40e_aqc_add_remove_control_packet_filter *) 4408 &desc.params.raw; 4409 struct i40e_aqc_add_remove_control_packet_filter_completion *resp = 4410 (struct i40e_aqc_add_remove_control_packet_filter_completion *) 4411 &desc.params.raw; 4412 i40e_status status; 4413 4414 if (vsi_seid == 0) 4415 return I40E_ERR_PARAM; 4416 4417 if (is_add) { 4418 i40e_fill_default_direct_cmd_desc(&desc, 4419 i40e_aqc_opc_add_control_packet_filter); 4420 cmd->queue = cpu_to_le16(queue); 4421 } else { 4422 i40e_fill_default_direct_cmd_desc(&desc, 4423 i40e_aqc_opc_remove_control_packet_filter); 4424 } 4425 4426 if (mac_addr) 4427 ether_addr_copy(cmd->mac, mac_addr); 4428 4429 cmd->etype = cpu_to_le16(ethtype); 4430 cmd->flags = cpu_to_le16(flags); 4431 cmd->seid = cpu_to_le16(vsi_seid); 4432 4433 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 4434 4435 if (!status && stats) { 4436 stats->mac_etype_used = le16_to_cpu(resp->mac_etype_used); 4437 stats->etype_used = le16_to_cpu(resp->etype_used); 4438 stats->mac_etype_free = le16_to_cpu(resp->mac_etype_free); 4439 stats->etype_free = le16_to_cpu(resp->etype_free); 4440 } 4441 4442 return status; 4443 } 4444 4445 /** 4446 * i40e_add_filter_to_drop_tx_flow_control_frames- filter to drop flow control 4447 * @hw: pointer to the hw struct 4448 * @seid: VSI seid to add ethertype filter from 4449 **/ 4450 void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw, 4451 u16 seid) 4452 { 4453 #define I40E_FLOW_CONTROL_ETHTYPE 0x8808 4454 u16 flag = I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC | 4455 I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP | 4456 I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX; 4457 u16 ethtype = I40E_FLOW_CONTROL_ETHTYPE; 4458 i40e_status status; 4459 4460 status = i40e_aq_add_rem_control_packet_filter(hw, NULL, ethtype, flag, 4461 seid, 0, true, NULL, 4462 NULL); 4463 if (status) 4464 hw_dbg(hw, "Ethtype Filter Add failed: Error pruning Tx flow control frames\n"); 4465 } 4466 4467 /** 4468 * i40e_aq_alternate_read 4469 * @hw: pointer to the hardware structure 4470 * @reg_addr0: address of first dword to be read 4471 * @reg_val0: pointer for data read from 'reg_addr0' 4472 * @reg_addr1: address of second dword to be read 4473 * @reg_val1: pointer for data read from 'reg_addr1' 4474 * 4475 * Read one or two dwords from alternate structure. Fields are indicated 4476 * by 'reg_addr0' and 'reg_addr1' register numbers. If 'reg_val1' pointer 4477 * is not passed then only register at 'reg_addr0' is read. 
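 *
 * Within this file the helper is used by i40e_read_bw_from_alt_ram() to fetch
 * the per-PF min/max bandwidth words in a single command.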
4478 * 4479 **/ 4480 static i40e_status i40e_aq_alternate_read(struct i40e_hw *hw, 4481 u32 reg_addr0, u32 *reg_val0, 4482 u32 reg_addr1, u32 *reg_val1) 4483 { 4484 struct i40e_aq_desc desc; 4485 struct i40e_aqc_alternate_write *cmd_resp = 4486 (struct i40e_aqc_alternate_write *)&desc.params.raw; 4487 i40e_status status; 4488 4489 if (!reg_val0) 4490 return I40E_ERR_PARAM; 4491 4492 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_alternate_read); 4493 cmd_resp->address0 = cpu_to_le32(reg_addr0); 4494 cmd_resp->address1 = cpu_to_le32(reg_addr1); 4495 4496 status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL); 4497 4498 if (!status) { 4499 *reg_val0 = le32_to_cpu(cmd_resp->data0); 4500 4501 if (reg_val1) 4502 *reg_val1 = le32_to_cpu(cmd_resp->data1); 4503 } 4504 4505 return status; 4506 } 4507 4508 /** 4509 * i40e_aq_suspend_port_tx 4510 * @hw: pointer to the hardware structure 4511 * @seid: port seid 4512 * @cmd_details: pointer to command details structure or NULL 4513 * 4514 * Suspend port's Tx traffic 4515 **/ 4516 i40e_status i40e_aq_suspend_port_tx(struct i40e_hw *hw, u16 seid, 4517 struct i40e_asq_cmd_details *cmd_details) 4518 { 4519 struct i40e_aqc_tx_sched_ind *cmd; 4520 struct i40e_aq_desc desc; 4521 i40e_status status; 4522 4523 cmd = (struct i40e_aqc_tx_sched_ind *)&desc.params.raw; 4524 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_suspend_port_tx); 4525 cmd->vsi_seid = cpu_to_le16(seid); 4526 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 4527 4528 return status; 4529 } 4530 4531 /** 4532 * i40e_aq_resume_port_tx 4533 * @hw: pointer to the hardware structure 4534 * @cmd_details: pointer to command details structure or NULL 4535 * 4536 * Resume port's Tx traffic 4537 **/ 4538 i40e_status i40e_aq_resume_port_tx(struct i40e_hw *hw, 4539 struct i40e_asq_cmd_details *cmd_details) 4540 { 4541 struct i40e_aq_desc desc; 4542 i40e_status status; 4543 4544 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_resume_port_tx); 4545 4546 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 4547 4548 return status; 4549 } 4550 4551 /** 4552 * i40e_set_pci_config_data - store PCI bus info 4553 * @hw: pointer to hardware structure 4554 * @link_status: the link status word from PCI config space 4555 * 4556 * Stores the PCI bus info (speed, width, type) within the i40e_hw structure 4557 **/ 4558 void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status) 4559 { 4560 hw->bus.type = i40e_bus_type_pci_express; 4561 4562 switch (link_status & PCI_EXP_LNKSTA_NLW) { 4563 case PCI_EXP_LNKSTA_NLW_X1: 4564 hw->bus.width = i40e_bus_width_pcie_x1; 4565 break; 4566 case PCI_EXP_LNKSTA_NLW_X2: 4567 hw->bus.width = i40e_bus_width_pcie_x2; 4568 break; 4569 case PCI_EXP_LNKSTA_NLW_X4: 4570 hw->bus.width = i40e_bus_width_pcie_x4; 4571 break; 4572 case PCI_EXP_LNKSTA_NLW_X8: 4573 hw->bus.width = i40e_bus_width_pcie_x8; 4574 break; 4575 default: 4576 hw->bus.width = i40e_bus_width_unknown; 4577 break; 4578 } 4579 4580 switch (link_status & PCI_EXP_LNKSTA_CLS) { 4581 case PCI_EXP_LNKSTA_CLS_2_5GB: 4582 hw->bus.speed = i40e_bus_speed_2500; 4583 break; 4584 case PCI_EXP_LNKSTA_CLS_5_0GB: 4585 hw->bus.speed = i40e_bus_speed_5000; 4586 break; 4587 case PCI_EXP_LNKSTA_CLS_8_0GB: 4588 hw->bus.speed = i40e_bus_speed_8000; 4589 break; 4590 default: 4591 hw->bus.speed = i40e_bus_speed_unknown; 4592 break; 4593 } 4594 } 4595 4596 /** 4597 * i40e_aq_debug_dump 4598 * @hw: pointer to the hardware structure 4599 * @cluster_id: specific cluster to dump 4600 * @table_id: 
table id within cluster 4601 * @start_index: index of line in the block to read 4602 * @buff_size: dump buffer size 4603 * @buff: dump buffer 4604 * @ret_buff_size: actual buffer size returned 4605 * @ret_next_table: next block to read 4606 * @ret_next_index: next index to read 4607 * @cmd_details: pointer to command details structure or NULL 4608 * 4609 * Dump internal FW/HW data for debug purposes. 4610 * 4611 **/ 4612 i40e_status i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id, 4613 u8 table_id, u32 start_index, u16 buff_size, 4614 void *buff, u16 *ret_buff_size, 4615 u8 *ret_next_table, u32 *ret_next_index, 4616 struct i40e_asq_cmd_details *cmd_details) 4617 { 4618 struct i40e_aq_desc desc; 4619 struct i40e_aqc_debug_dump_internals *cmd = 4620 (struct i40e_aqc_debug_dump_internals *)&desc.params.raw; 4621 struct i40e_aqc_debug_dump_internals *resp = 4622 (struct i40e_aqc_debug_dump_internals *)&desc.params.raw; 4623 i40e_status status; 4624 4625 if (buff_size == 0 || !buff) 4626 return I40E_ERR_PARAM; 4627 4628 i40e_fill_default_direct_cmd_desc(&desc, 4629 i40e_aqc_opc_debug_dump_internals); 4630 /* Indirect Command */ 4631 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 4632 if (buff_size > I40E_AQ_LARGE_BUF) 4633 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 4634 4635 cmd->cluster_id = cluster_id; 4636 cmd->table_id = table_id; 4637 cmd->idx = cpu_to_le32(start_index); 4638 4639 desc.datalen = cpu_to_le16(buff_size); 4640 4641 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); 4642 if (!status) { 4643 if (ret_buff_size) 4644 *ret_buff_size = le16_to_cpu(desc.datalen); 4645 if (ret_next_table) 4646 *ret_next_table = resp->table_id; 4647 if (ret_next_index) 4648 *ret_next_index = le32_to_cpu(resp->idx); 4649 } 4650 4651 return status; 4652 } 4653 4654 /** 4655 * i40e_read_bw_from_alt_ram 4656 * @hw: pointer to the hardware structure 4657 * @max_bw: pointer for max_bw read 4658 * @min_bw: pointer for min_bw read 4659 * @min_valid: pointer for bool that is true if min_bw is a valid value 4660 * @max_valid: pointer for bool that is true if max_bw is a valid value 4661 * 4662 * Read bw from the alternate ram for the given pf 4663 **/ 4664 i40e_status i40e_read_bw_from_alt_ram(struct i40e_hw *hw, 4665 u32 *max_bw, u32 *min_bw, 4666 bool *min_valid, bool *max_valid) 4667 { 4668 i40e_status status; 4669 u32 max_bw_addr, min_bw_addr; 4670 4671 /* Calculate the address of the min/max bw registers */ 4672 max_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET + 4673 I40E_ALT_STRUCT_MAX_BW_OFFSET + 4674 (I40E_ALT_STRUCT_DWORDS_PER_PF * hw->pf_id); 4675 min_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET + 4676 I40E_ALT_STRUCT_MIN_BW_OFFSET + 4677 (I40E_ALT_STRUCT_DWORDS_PER_PF * hw->pf_id); 4678 4679 /* Read the bandwidths from alt ram */ 4680 status = i40e_aq_alternate_read(hw, max_bw_addr, max_bw, 4681 min_bw_addr, min_bw); 4682 4683 if (*min_bw & I40E_ALT_BW_VALID_MASK) 4684 *min_valid = true; 4685 else 4686 *min_valid = false; 4687 4688 if (*max_bw & I40E_ALT_BW_VALID_MASK) 4689 *max_valid = true; 4690 else 4691 *max_valid = false; 4692 4693 return status; 4694 } 4695 4696 /** 4697 * i40e_aq_configure_partition_bw 4698 * @hw: pointer to the hardware structure 4699 * @bw_data: Buffer holding valid pfs and bw limits 4700 * @cmd_details: pointer to command details 4701 * 4702 * Configure partitions guaranteed/max bw 4703 **/ 4704 i40e_status i40e_aq_configure_partition_bw(struct i40e_hw *hw, 4705 struct i40e_aqc_configure_partition_bw_data *bw_data, 4706 struct 
i40e_asq_cmd_details *cmd_details) 4707 { 4708 i40e_status status; 4709 struct i40e_aq_desc desc; 4710 u16 bwd_size = sizeof(*bw_data); 4711 4712 i40e_fill_default_direct_cmd_desc(&desc, 4713 i40e_aqc_opc_configure_partition_bw); 4714 4715 /* Indirect command */ 4716 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 4717 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD); 4718 4719 if (bwd_size > I40E_AQ_LARGE_BUF) 4720 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 4721 4722 desc.datalen = cpu_to_le16(bwd_size); 4723 4724 status = i40e_asq_send_command(hw, &desc, bw_data, bwd_size, 4725 cmd_details); 4726 4727 return status; 4728 } 4729 4730 /** 4731 * i40e_read_phy_register_clause22 4732 * @hw: pointer to the HW structure 4733 * @reg: register address in the page 4734 * @phy_addr: PHY address on MDIO interface 4735 * @value: PHY register value 4736 * 4737 * Reads specified PHY register value 4738 **/ 4739 i40e_status i40e_read_phy_register_clause22(struct i40e_hw *hw, 4740 u16 reg, u8 phy_addr, u16 *value) 4741 { 4742 i40e_status status = I40E_ERR_TIMEOUT; 4743 u8 port_num = (u8)hw->func_caps.mdio_port_num; 4744 u32 command = 0; 4745 u16 retry = 1000; 4746 4747 command = (reg << I40E_GLGEN_MSCA_DEVADD_SHIFT) | 4748 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | 4749 (I40E_MDIO_CLAUSE22_OPCODE_READ_MASK) | 4750 (I40E_MDIO_CLAUSE22_STCODE_MASK) | 4751 (I40E_GLGEN_MSCA_MDICMD_MASK); 4752 wr32(hw, I40E_GLGEN_MSCA(port_num), command); 4753 do { 4754 command = rd32(hw, I40E_GLGEN_MSCA(port_num)); 4755 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { 4756 status = 0; 4757 break; 4758 } 4759 udelay(10); 4760 retry--; 4761 } while (retry); 4762 4763 if (status) { 4764 i40e_debug(hw, I40E_DEBUG_PHY, 4765 "PHY: Can't write command to external PHY.\n"); 4766 } else { 4767 command = rd32(hw, I40E_GLGEN_MSRWD(port_num)); 4768 *value = (command & I40E_GLGEN_MSRWD_MDIRDDATA_MASK) >> 4769 I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT; 4770 } 4771 4772 return status; 4773 } 4774 4775 /** 4776 * i40e_write_phy_register_clause22 4777 * @hw: pointer to the HW structure 4778 * @reg: register address in the page 4779 * @phy_addr: PHY address on MDIO interface 4780 * @value: PHY register value 4781 * 4782 * Writes specified PHY register value 4783 **/ 4784 i40e_status i40e_write_phy_register_clause22(struct i40e_hw *hw, 4785 u16 reg, u8 phy_addr, u16 value) 4786 { 4787 i40e_status status = I40E_ERR_TIMEOUT; 4788 u8 port_num = (u8)hw->func_caps.mdio_port_num; 4789 u32 command = 0; 4790 u16 retry = 1000; 4791 4792 command = value << I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT; 4793 wr32(hw, I40E_GLGEN_MSRWD(port_num), command); 4794 4795 command = (reg << I40E_GLGEN_MSCA_DEVADD_SHIFT) | 4796 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | 4797 (I40E_MDIO_CLAUSE22_OPCODE_WRITE_MASK) | 4798 (I40E_MDIO_CLAUSE22_STCODE_MASK) | 4799 (I40E_GLGEN_MSCA_MDICMD_MASK); 4800 4801 wr32(hw, I40E_GLGEN_MSCA(port_num), command); 4802 do { 4803 command = rd32(hw, I40E_GLGEN_MSCA(port_num)); 4804 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { 4805 status = 0; 4806 break; 4807 } 4808 udelay(10); 4809 retry--; 4810 } while (retry); 4811 4812 return status; 4813 } 4814 4815 /** 4816 * i40e_read_phy_register_clause45 4817 * @hw: pointer to the HW structure 4818 * @page: registers page number 4819 * @reg: register address in the page 4820 * @phy_addr: PHY address on MDIO interface 4821 * @value: PHY register value 4822 * 4823 * Reads specified PHY register value 4824 **/ 4825 i40e_status i40e_read_phy_register_clause45(struct i40e_hw *hw, 4826 u8 page, u16 reg, u8 
phy_addr, u16 *value) 4827 { 4828 i40e_status status = I40E_ERR_TIMEOUT; 4829 u32 command = 0; 4830 u16 retry = 1000; 4831 u8 port_num = hw->func_caps.mdio_port_num; 4832 4833 command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) | 4834 (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) | 4835 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | 4836 (I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK) | 4837 (I40E_MDIO_CLAUSE45_STCODE_MASK) | 4838 (I40E_GLGEN_MSCA_MDICMD_MASK) | 4839 (I40E_GLGEN_MSCA_MDIINPROGEN_MASK); 4840 wr32(hw, I40E_GLGEN_MSCA(port_num), command); 4841 do { 4842 command = rd32(hw, I40E_GLGEN_MSCA(port_num)); 4843 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { 4844 status = 0; 4845 break; 4846 } 4847 usleep_range(10, 20); 4848 retry--; 4849 } while (retry); 4850 4851 if (status) { 4852 i40e_debug(hw, I40E_DEBUG_PHY, 4853 "PHY: Can't write command to external PHY.\n"); 4854 goto phy_read_end; 4855 } 4856 4857 command = (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) | 4858 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | 4859 (I40E_MDIO_CLAUSE45_OPCODE_READ_MASK) | 4860 (I40E_MDIO_CLAUSE45_STCODE_MASK) | 4861 (I40E_GLGEN_MSCA_MDICMD_MASK) | 4862 (I40E_GLGEN_MSCA_MDIINPROGEN_MASK); 4863 status = I40E_ERR_TIMEOUT; 4864 retry = 1000; 4865 wr32(hw, I40E_GLGEN_MSCA(port_num), command); 4866 do { 4867 command = rd32(hw, I40E_GLGEN_MSCA(port_num)); 4868 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { 4869 status = 0; 4870 break; 4871 } 4872 usleep_range(10, 20); 4873 retry--; 4874 } while (retry); 4875 4876 if (!status) { 4877 command = rd32(hw, I40E_GLGEN_MSRWD(port_num)); 4878 *value = (command & I40E_GLGEN_MSRWD_MDIRDDATA_MASK) >> 4879 I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT; 4880 } else { 4881 i40e_debug(hw, I40E_DEBUG_PHY, 4882 "PHY: Can't read register value from external PHY.\n"); 4883 } 4884 4885 phy_read_end: 4886 return status; 4887 } 4888 4889 /** 4890 * i40e_write_phy_register_clause45 4891 * @hw: pointer to the HW structure 4892 * @page: registers page number 4893 * @reg: register address in the page 4894 * @phy_addr: PHY address on MDIO interface 4895 * @value: PHY register value 4896 * 4897 * Writes value to specified PHY register 4898 **/ 4899 i40e_status i40e_write_phy_register_clause45(struct i40e_hw *hw, 4900 u8 page, u16 reg, u8 phy_addr, u16 value) 4901 { 4902 i40e_status status = I40E_ERR_TIMEOUT; 4903 u32 command = 0; 4904 u16 retry = 1000; 4905 u8 port_num = hw->func_caps.mdio_port_num; 4906 4907 command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) | 4908 (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) | 4909 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | 4910 (I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK) | 4911 (I40E_MDIO_CLAUSE45_STCODE_MASK) | 4912 (I40E_GLGEN_MSCA_MDICMD_MASK) | 4913 (I40E_GLGEN_MSCA_MDIINPROGEN_MASK); 4914 wr32(hw, I40E_GLGEN_MSCA(port_num), command); 4915 do { 4916 command = rd32(hw, I40E_GLGEN_MSCA(port_num)); 4917 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { 4918 status = 0; 4919 break; 4920 } 4921 usleep_range(10, 20); 4922 retry--; 4923 } while (retry); 4924 if (status) { 4925 i40e_debug(hw, I40E_DEBUG_PHY, 4926 "PHY: Can't write command to external PHY.\n"); 4927 goto phy_write_end; 4928 } 4929 4930 command = value << I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT; 4931 wr32(hw, I40E_GLGEN_MSRWD(port_num), command); 4932 4933 command = (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) | 4934 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | 4935 (I40E_MDIO_CLAUSE45_OPCODE_WRITE_MASK) | 4936 (I40E_MDIO_CLAUSE45_STCODE_MASK) | 4937 (I40E_GLGEN_MSCA_MDICMD_MASK) | 4938 (I40E_GLGEN_MSCA_MDIINPROGEN_MASK); 4939 status = 
I40E_ERR_TIMEOUT; 4940 retry = 1000; 4941 wr32(hw, I40E_GLGEN_MSCA(port_num), command); 4942 do { 4943 command = rd32(hw, I40E_GLGEN_MSCA(port_num)); 4944 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { 4945 status = 0; 4946 break; 4947 } 4948 usleep_range(10, 20); 4949 retry--; 4950 } while (retry); 4951 4952 phy_write_end: 4953 return status; 4954 } 4955 4956 /** 4957 * i40e_write_phy_register 4958 * @hw: pointer to the HW structure 4959 * @page: registers page number 4960 * @reg: register address in the page 4961 * @phy_addr: PHY address on MDIO interface 4962 * @value: PHY register value 4963 * 4964 * Writes value to specified PHY register 4965 **/ 4966 i40e_status i40e_write_phy_register(struct i40e_hw *hw, 4967 u8 page, u16 reg, u8 phy_addr, u16 value) 4968 { 4969 i40e_status status; 4970 4971 switch (hw->device_id) { 4972 case I40E_DEV_ID_1G_BASE_T_X722: 4973 status = i40e_write_phy_register_clause22(hw, reg, phy_addr, 4974 value); 4975 break; 4976 case I40E_DEV_ID_5G_BASE_T_BC: 4977 case I40E_DEV_ID_10G_BASE_T: 4978 case I40E_DEV_ID_10G_BASE_T4: 4979 case I40E_DEV_ID_10G_BASE_T_BC: 4980 case I40E_DEV_ID_10G_BASE_T_X722: 4981 case I40E_DEV_ID_25G_B: 4982 case I40E_DEV_ID_25G_SFP28: 4983 status = i40e_write_phy_register_clause45(hw, page, reg, 4984 phy_addr, value); 4985 break; 4986 default: 4987 status = I40E_ERR_UNKNOWN_PHY; 4988 break; 4989 } 4990 4991 return status; 4992 } 4993 4994 /** 4995 * i40e_read_phy_register 4996 * @hw: pointer to the HW structure 4997 * @page: registers page number 4998 * @reg: register address in the page 4999 * @phy_addr: PHY address on MDIO interface 5000 * @value: PHY register value 5001 * 5002 * Reads specified PHY register value 5003 **/ 5004 i40e_status i40e_read_phy_register(struct i40e_hw *hw, 5005 u8 page, u16 reg, u8 phy_addr, u16 *value) 5006 { 5007 i40e_status status; 5008 5009 switch (hw->device_id) { 5010 case I40E_DEV_ID_1G_BASE_T_X722: 5011 status = i40e_read_phy_register_clause22(hw, reg, phy_addr, 5012 value); 5013 break; 5014 case I40E_DEV_ID_5G_BASE_T_BC: 5015 case I40E_DEV_ID_10G_BASE_T: 5016 case I40E_DEV_ID_10G_BASE_T4: 5017 case I40E_DEV_ID_10G_BASE_T_BC: 5018 case I40E_DEV_ID_10G_BASE_T_X722: 5019 case I40E_DEV_ID_25G_B: 5020 case I40E_DEV_ID_25G_SFP28: 5021 status = i40e_read_phy_register_clause45(hw, page, reg, 5022 phy_addr, value); 5023 break; 5024 default: 5025 status = I40E_ERR_UNKNOWN_PHY; 5026 break; 5027 } 5028 5029 return status; 5030 } 5031 5032 /** 5033 * i40e_get_phy_address 5034 * @hw: pointer to the HW structure 5035 * @dev_num: PHY port num that address we want 5036 * 5037 * Gets PHY address for current port 5038 **/ 5039 u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num) 5040 { 5041 u8 port_num = hw->func_caps.mdio_port_num; 5042 u32 reg_val = rd32(hw, I40E_GLGEN_MDIO_I2C_SEL(port_num)); 5043 5044 return (u8)(reg_val >> ((dev_num + 1) * 5)) & 0x1f; 5045 } 5046 5047 /** 5048 * i40e_blink_phy_link_led 5049 * @hw: pointer to the HW structure 5050 * @time: time how long led will blinks in secs 5051 * @interval: gap between LED on and off in msecs 5052 * 5053 * Blinks PHY link LED 5054 **/ 5055 i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw, 5056 u32 time, u32 interval) 5057 { 5058 i40e_status status = 0; 5059 u32 i; 5060 u16 led_ctl; 5061 u16 gpio_led_port; 5062 u16 led_reg; 5063 u16 led_addr = I40E_PHY_LED_PROV_REG_1; 5064 u8 phy_addr = 0; 5065 u8 port_num; 5066 5067 i = rd32(hw, I40E_PFGEN_PORTNUM); 5068 port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK); 5069 phy_addr = i40e_get_phy_address(hw, 
port_num); 5070 5071 for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++, 5072 led_addr++) { 5073 status = i40e_read_phy_register_clause45(hw, 5074 I40E_PHY_COM_REG_PAGE, 5075 led_addr, phy_addr, 5076 &led_reg); 5077 if (status) 5078 goto phy_blinking_end; 5079 led_ctl = led_reg; 5080 if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) { 5081 led_reg = 0; 5082 status = i40e_write_phy_register_clause45(hw, 5083 I40E_PHY_COM_REG_PAGE, 5084 led_addr, phy_addr, 5085 led_reg); 5086 if (status) 5087 goto phy_blinking_end; 5088 break; 5089 } 5090 } 5091 5092 if (time > 0 && interval > 0) { 5093 for (i = 0; i < time * 1000; i += interval) { 5094 status = i40e_read_phy_register_clause45(hw, 5095 I40E_PHY_COM_REG_PAGE, 5096 led_addr, phy_addr, &led_reg); 5097 if (status) 5098 goto restore_config; 5099 if (led_reg & I40E_PHY_LED_MANUAL_ON) 5100 led_reg = 0; 5101 else 5102 led_reg = I40E_PHY_LED_MANUAL_ON; 5103 status = i40e_write_phy_register_clause45(hw, 5104 I40E_PHY_COM_REG_PAGE, 5105 led_addr, phy_addr, led_reg); 5106 if (status) 5107 goto restore_config; 5108 msleep(interval); 5109 } 5110 } 5111 5112 restore_config: 5113 status = i40e_write_phy_register_clause45(hw, 5114 I40E_PHY_COM_REG_PAGE, 5115 led_addr, phy_addr, led_ctl); 5116 5117 phy_blinking_end: 5118 return status; 5119 } 5120 5121 /** 5122 * i40e_led_get_reg - read LED register 5123 * @hw: pointer to the HW structure 5124 * @led_addr: LED register address 5125 * @reg_val: read register value 5126 **/ 5127 static enum i40e_status_code i40e_led_get_reg(struct i40e_hw *hw, u16 led_addr, 5128 u32 *reg_val) 5129 { 5130 enum i40e_status_code status; 5131 u8 phy_addr = 0; 5132 u8 port_num; 5133 u32 i; 5134 5135 *reg_val = 0; 5136 if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) { 5137 status = 5138 i40e_aq_get_phy_register(hw, 5139 I40E_AQ_PHY_REG_ACCESS_EXTERNAL, 5140 I40E_PHY_COM_REG_PAGE, true, 5141 I40E_PHY_LED_PROV_REG_1, 5142 reg_val, NULL); 5143 } else { 5144 i = rd32(hw, I40E_PFGEN_PORTNUM); 5145 port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK); 5146 phy_addr = i40e_get_phy_address(hw, port_num); 5147 status = i40e_read_phy_register_clause45(hw, 5148 I40E_PHY_COM_REG_PAGE, 5149 led_addr, phy_addr, 5150 (u16 *)reg_val); 5151 } 5152 return status; 5153 } 5154 5155 /** 5156 * i40e_led_set_reg - write LED register 5157 * @hw: pointer to the HW structure 5158 * @led_addr: LED register address 5159 * @reg_val: register value to write 5160 **/ 5161 static enum i40e_status_code i40e_led_set_reg(struct i40e_hw *hw, u16 led_addr, 5162 u32 reg_val) 5163 { 5164 enum i40e_status_code status; 5165 u8 phy_addr = 0; 5166 u8 port_num; 5167 u32 i; 5168 5169 if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) { 5170 status = 5171 i40e_aq_set_phy_register(hw, 5172 I40E_AQ_PHY_REG_ACCESS_EXTERNAL, 5173 I40E_PHY_COM_REG_PAGE, true, 5174 I40E_PHY_LED_PROV_REG_1, 5175 reg_val, NULL); 5176 } else { 5177 i = rd32(hw, I40E_PFGEN_PORTNUM); 5178 port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK); 5179 phy_addr = i40e_get_phy_address(hw, port_num); 5180 status = i40e_write_phy_register_clause45(hw, 5181 I40E_PHY_COM_REG_PAGE, 5182 led_addr, phy_addr, 5183 (u16)reg_val); 5184 } 5185 5186 return status; 5187 } 5188 5189 /** 5190 * i40e_led_get_phy - return current on/off mode 5191 * @hw: pointer to the hw struct 5192 * @led_addr: address of led register to use 5193 * @val: original value of register to use 5194 * 5195 **/ 5196 i40e_status i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr, 5197 u16 *val) 5198 { 5199 i40e_status status = 0; 5200 u16 
gpio_led_port;
	u8 phy_addr = 0;
	u16 reg_val;
	u16 temp_addr;
	u8 port_num;
	u32 i;
	u32 reg_val_aq;

	if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
		status =
		      i40e_aq_get_phy_register(hw,
					       I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
					       I40E_PHY_COM_REG_PAGE, true,
					       I40E_PHY_LED_PROV_REG_1,
					       &reg_val_aq, NULL);
		if (status == I40E_SUCCESS)
			*val = (u16)reg_val_aq;
		return status;
	}
	temp_addr = I40E_PHY_LED_PROV_REG_1;
	i = rd32(hw, I40E_PFGEN_PORTNUM);
	port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
	phy_addr = i40e_get_phy_address(hw, port_num);

	for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++,
	     temp_addr++) {
		status = i40e_read_phy_register_clause45(hw,
							 I40E_PHY_COM_REG_PAGE,
							 temp_addr, phy_addr,
							 &reg_val);
		if (status)
			return status;
		*val = reg_val;
		if (reg_val & I40E_PHY_LED_LINK_MODE_MASK) {
			*led_addr = temp_addr;
			break;
		}
	}
	return status;
}

/**
 * i40e_led_set_phy
 * @hw: pointer to the HW structure
 * @on: true to turn the LED on, false to turn it off
 * @led_addr: address of the LED register to use
 * @mode: original value plus a bit for set or ignore
 *
 * Sets the LED on or off when it is controlled by the PHY
 *
 **/
i40e_status i40e_led_set_phy(struct i40e_hw *hw, bool on,
			     u16 led_addr, u32 mode)
{
	i40e_status status = 0;
	u32 led_ctl = 0;
	u32 led_reg = 0;

	status = i40e_led_get_reg(hw, led_addr, &led_reg);
	if (status)
		return status;
	led_ctl = led_reg;
	if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) {
		led_reg = 0;
		status = i40e_led_set_reg(hw, led_addr, led_reg);
		if (status)
			return status;
	}
	status = i40e_led_get_reg(hw, led_addr, &led_reg);
	if (status)
		goto restore_config;
	if (on)
		led_reg = I40E_PHY_LED_MANUAL_ON;
	else
		led_reg = 0;

	status = i40e_led_set_reg(hw, led_addr, led_reg);
	if (status)
		goto restore_config;
	if (mode & I40E_PHY_LED_MODE_ORIG) {
		led_ctl = (mode & I40E_PHY_LED_MODE_MASK);
		status = i40e_led_set_reg(hw, led_addr, led_ctl);
	}
	return status;

restore_config:
	status = i40e_led_set_reg(hw, led_addr, led_ctl);
	return status;
}

/**
 * i40e_aq_rx_ctl_read_register - use FW to read from an Rx control register
 * @hw: pointer to the hw struct
 * @reg_addr: register address
 * @reg_val: ptr to register value
 * @cmd_details: pointer to command details structure or NULL
 *
 * Use the firmware to read the Rx control register,
 * especially useful if the Rx unit is under heavy pressure
 **/
i40e_status i40e_aq_rx_ctl_read_register(struct i40e_hw *hw,
				u32 reg_addr, u32 *reg_val,
				struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_rx_ctl_reg_read_write *cmd_resp =
		(struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw;
	i40e_status status;

	if (!reg_val)
		return I40E_ERR_PARAM;

	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_rx_ctl_reg_read);

	cmd_resp->address = cpu_to_le32(reg_addr);

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	if (status == 0)
		*reg_val = le32_to_cpu(cmd_resp->value);

	return status;
}

/**
 * i40e_read_rx_ctl - read from an Rx control register
 * @hw: pointer to the hw
struct 5327 * @reg_addr: register address 5328 **/ 5329 u32 i40e_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr) 5330 { 5331 i40e_status status = 0; 5332 bool use_register; 5333 int retry = 5; 5334 u32 val = 0; 5335 5336 use_register = (((hw->aq.api_maj_ver == 1) && 5337 (hw->aq.api_min_ver < 5)) || 5338 (hw->mac.type == I40E_MAC_X722)); 5339 if (!use_register) { 5340 do_retry: 5341 status = i40e_aq_rx_ctl_read_register(hw, reg_addr, &val, NULL); 5342 if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) { 5343 usleep_range(1000, 2000); 5344 retry--; 5345 goto do_retry; 5346 } 5347 } 5348 5349 /* if the AQ access failed, try the old-fashioned way */ 5350 if (status || use_register) 5351 val = rd32(hw, reg_addr); 5352 5353 return val; 5354 } 5355 5356 /** 5357 * i40e_aq_rx_ctl_write_register 5358 * @hw: pointer to the hw struct 5359 * @reg_addr: register address 5360 * @reg_val: register value 5361 * @cmd_details: pointer to command details structure or NULL 5362 * 5363 * Use the firmware to write to an Rx control register, 5364 * especially useful if the Rx unit is under heavy pressure 5365 **/ 5366 i40e_status i40e_aq_rx_ctl_write_register(struct i40e_hw *hw, 5367 u32 reg_addr, u32 reg_val, 5368 struct i40e_asq_cmd_details *cmd_details) 5369 { 5370 struct i40e_aq_desc desc; 5371 struct i40e_aqc_rx_ctl_reg_read_write *cmd = 5372 (struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw; 5373 i40e_status status; 5374 5375 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_rx_ctl_reg_write); 5376 5377 cmd->address = cpu_to_le32(reg_addr); 5378 cmd->value = cpu_to_le32(reg_val); 5379 5380 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 5381 5382 return status; 5383 } 5384 5385 /** 5386 * i40e_write_rx_ctl - write to an Rx control register 5387 * @hw: pointer to the hw struct 5388 * @reg_addr: register address 5389 * @reg_val: register value 5390 **/ 5391 void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val) 5392 { 5393 i40e_status status = 0; 5394 bool use_register; 5395 int retry = 5; 5396 5397 use_register = (((hw->aq.api_maj_ver == 1) && 5398 (hw->aq.api_min_ver < 5)) || 5399 (hw->mac.type == I40E_MAC_X722)); 5400 if (!use_register) { 5401 do_retry: 5402 status = i40e_aq_rx_ctl_write_register(hw, reg_addr, 5403 reg_val, NULL); 5404 if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) { 5405 usleep_range(1000, 2000); 5406 retry--; 5407 goto do_retry; 5408 } 5409 } 5410 5411 /* if the AQ access failed, try the old-fashioned way */ 5412 if (status || use_register) 5413 wr32(hw, reg_addr, reg_val); 5414 } 5415 5416 /** 5417 * i40e_mdio_if_number_selection - MDIO I/F number selection 5418 * @hw: pointer to the hw struct 5419 * @set_mdio: use MDIO I/F number specified by mdio_num 5420 * @mdio_num: MDIO I/F number 5421 * @cmd: pointer to PHY Register command structure 5422 **/ 5423 static void i40e_mdio_if_number_selection(struct i40e_hw *hw, bool set_mdio, 5424 u8 mdio_num, 5425 struct i40e_aqc_phy_register_access *cmd) 5426 { 5427 if (set_mdio && cmd->phy_interface == I40E_AQ_PHY_REG_ACCESS_EXTERNAL) { 5428 if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_EXTENDED) 5429 cmd->cmd_flags |= 5430 I40E_AQ_PHY_REG_ACCESS_SET_MDIO_IF_NUMBER | 5431 ((mdio_num << 5432 I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_SHIFT) & 5433 I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_MASK); 5434 else 5435 i40e_debug(hw, I40E_DEBUG_PHY, 5436 "MDIO I/F number selection not supported by current FW version.\n"); 5437 } 5438 } 5439 5440 /** 5441 * i40e_aq_set_phy_register_ext 5442 * @hw: pointer to 
the hw struct
 * @phy_select: select which phy should be accessed
 * @dev_addr: PHY device address
 * @page_change: flag to indicate if the phy page should be updated
 * @set_mdio: use MDIO I/F number specified by mdio_num
 * @mdio_num: MDIO I/F number
 * @reg_addr: PHY register address
 * @reg_val: new register value
 * @cmd_details: pointer to command details structure or NULL
 *
 * Write the external PHY register.
 * NOTE: In common cases the MDIO I/F number should not be changed, which is
 * why you may use the simple wrapper i40e_aq_set_phy_register.
 **/
enum i40e_status_code i40e_aq_set_phy_register_ext(struct i40e_hw *hw,
			     u8 phy_select, u8 dev_addr, bool page_change,
			     bool set_mdio, u8 mdio_num,
			     u32 reg_addr, u32 reg_val,
			     struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_phy_register_access *cmd =
		(struct i40e_aqc_phy_register_access *)&desc.params.raw;
	i40e_status status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_set_phy_register);

	cmd->phy_interface = phy_select;
	cmd->dev_address = dev_addr;
	cmd->reg_address = cpu_to_le32(reg_addr);
	cmd->reg_value = cpu_to_le32(reg_val);

	i40e_mdio_if_number_selection(hw, set_mdio, mdio_num, cmd);

	if (!page_change)
		cmd->cmd_flags = I40E_AQ_PHY_REG_ACCESS_DONT_CHANGE_QSFP_PAGE;

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	return status;
}

/**
 * i40e_aq_get_phy_register_ext
 * @hw: pointer to the hw struct
 * @phy_select: select which phy should be accessed
 * @dev_addr: PHY device address
 * @page_change: flag to indicate if the phy page should be updated
 * @set_mdio: use MDIO I/F number specified by mdio_num
 * @mdio_num: MDIO I/F number
 * @reg_addr: PHY register address
 * @reg_val: read register value
 * @cmd_details: pointer to command details structure or NULL
 *
 * Read the external PHY register.
 * NOTE: In common cases the MDIO I/F number should not be changed, which is
 * why you may use the simple wrapper i40e_aq_get_phy_register.
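 *
 * Illustrative sketch only, not code taken from this driver: when the MDIO
 * I/F number is left untouched, the call reduces to set_mdio = false and
 * mdio_num = 0, e.g. for reading an external PHY LED provisioning register:
 *
 *	u32 val;
 *	i40e_status status;
 *
 *	status = i40e_aq_get_phy_register_ext(hw,
 *					      I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
 *					      I40E_PHY_COM_REG_PAGE, true,
 *					      false, 0,
 *					      I40E_PHY_LED_PROV_REG_1,
 *					      &val, NULL);
 *
 * which is the form the simple i40e_aq_get_phy_register wrapper is intended
 * to provide.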
5500 **/ 5501 enum i40e_status_code i40e_aq_get_phy_register_ext(struct i40e_hw *hw, 5502 u8 phy_select, u8 dev_addr, bool page_change, 5503 bool set_mdio, u8 mdio_num, 5504 u32 reg_addr, u32 *reg_val, 5505 struct i40e_asq_cmd_details *cmd_details) 5506 { 5507 struct i40e_aq_desc desc; 5508 struct i40e_aqc_phy_register_access *cmd = 5509 (struct i40e_aqc_phy_register_access *)&desc.params.raw; 5510 i40e_status status; 5511 5512 i40e_fill_default_direct_cmd_desc(&desc, 5513 i40e_aqc_opc_get_phy_register); 5514 5515 cmd->phy_interface = phy_select; 5516 cmd->dev_address = dev_addr; 5517 cmd->reg_address = cpu_to_le32(reg_addr); 5518 5519 i40e_mdio_if_number_selection(hw, set_mdio, mdio_num, cmd); 5520 5521 if (!page_change) 5522 cmd->cmd_flags = I40E_AQ_PHY_REG_ACCESS_DONT_CHANGE_QSFP_PAGE; 5523 5524 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 5525 if (!status) 5526 *reg_val = le32_to_cpu(cmd->reg_value); 5527 5528 return status; 5529 } 5530 5531 /** 5532 * i40e_aq_write_ddp - Write dynamic device personalization (ddp) 5533 * @hw: pointer to the hw struct 5534 * @buff: command buffer (size in bytes = buff_size) 5535 * @buff_size: buffer size in bytes 5536 * @track_id: package tracking id 5537 * @error_offset: returns error offset 5538 * @error_info: returns error information 5539 * @cmd_details: pointer to command details structure or NULL 5540 **/ 5541 enum 5542 i40e_status_code i40e_aq_write_ddp(struct i40e_hw *hw, void *buff, 5543 u16 buff_size, u32 track_id, 5544 u32 *error_offset, u32 *error_info, 5545 struct i40e_asq_cmd_details *cmd_details) 5546 { 5547 struct i40e_aq_desc desc; 5548 struct i40e_aqc_write_personalization_profile *cmd = 5549 (struct i40e_aqc_write_personalization_profile *) 5550 &desc.params.raw; 5551 struct i40e_aqc_write_ddp_resp *resp; 5552 i40e_status status; 5553 5554 i40e_fill_default_direct_cmd_desc(&desc, 5555 i40e_aqc_opc_write_personalization_profile); 5556 5557 desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD); 5558 if (buff_size > I40E_AQ_LARGE_BUF) 5559 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 5560 5561 desc.datalen = cpu_to_le16(buff_size); 5562 5563 cmd->profile_track_id = cpu_to_le32(track_id); 5564 5565 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); 5566 if (!status) { 5567 resp = (struct i40e_aqc_write_ddp_resp *)&desc.params.raw; 5568 if (error_offset) 5569 *error_offset = le32_to_cpu(resp->error_offset); 5570 if (error_info) 5571 *error_info = le32_to_cpu(resp->error_info); 5572 } 5573 5574 return status; 5575 } 5576 5577 /** 5578 * i40e_aq_get_ddp_list - Read dynamic device personalization (ddp) 5579 * @hw: pointer to the hw struct 5580 * @buff: command buffer (size in bytes = buff_size) 5581 * @buff_size: buffer size in bytes 5582 * @flags: AdminQ command flags 5583 * @cmd_details: pointer to command details structure or NULL 5584 **/ 5585 enum 5586 i40e_status_code i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff, 5587 u16 buff_size, u8 flags, 5588 struct i40e_asq_cmd_details *cmd_details) 5589 { 5590 struct i40e_aq_desc desc; 5591 struct i40e_aqc_get_applied_profiles *cmd = 5592 (struct i40e_aqc_get_applied_profiles *)&desc.params.raw; 5593 i40e_status status; 5594 5595 i40e_fill_default_direct_cmd_desc(&desc, 5596 i40e_aqc_opc_get_personalization_profile_list); 5597 5598 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 5599 if (buff_size > I40E_AQ_LARGE_BUF) 5600 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 5601 desc.datalen = cpu_to_le16(buff_size); 5602 5603 cmd->flags 
= flags; 5604 5605 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); 5606 5607 return status; 5608 } 5609 5610 /** 5611 * i40e_find_segment_in_package 5612 * @segment_type: the segment type to search for (i.e., SEGMENT_TYPE_I40E) 5613 * @pkg_hdr: pointer to the package header to be searched 5614 * 5615 * This function searches a package file for a particular segment type. On 5616 * success it returns a pointer to the segment header, otherwise it will 5617 * return NULL. 5618 **/ 5619 struct i40e_generic_seg_header * 5620 i40e_find_segment_in_package(u32 segment_type, 5621 struct i40e_package_header *pkg_hdr) 5622 { 5623 struct i40e_generic_seg_header *segment; 5624 u32 i; 5625 5626 /* Search all package segments for the requested segment type */ 5627 for (i = 0; i < pkg_hdr->segment_count; i++) { 5628 segment = 5629 (struct i40e_generic_seg_header *)((u8 *)pkg_hdr + 5630 pkg_hdr->segment_offset[i]); 5631 5632 if (segment->type == segment_type) 5633 return segment; 5634 } 5635 5636 return NULL; 5637 } 5638 5639 /* Get section table in profile */ 5640 #define I40E_SECTION_TABLE(profile, sec_tbl) \ 5641 do { \ 5642 struct i40e_profile_segment *p = (profile); \ 5643 u32 count; \ 5644 u32 *nvm; \ 5645 count = p->device_table_count; \ 5646 nvm = (u32 *)&p->device_table[count]; \ 5647 sec_tbl = (struct i40e_section_table *)&nvm[nvm[0] + 1]; \ 5648 } while (0) 5649 5650 /* Get section header in profile */ 5651 #define I40E_SECTION_HEADER(profile, offset) \ 5652 (struct i40e_profile_section_header *)((u8 *)(profile) + (offset)) 5653 5654 /** 5655 * i40e_find_section_in_profile 5656 * @section_type: the section type to search for (i.e., SECTION_TYPE_NOTE) 5657 * @profile: pointer to the i40e segment header to be searched 5658 * 5659 * This function searches i40e segment for a particular section type. On 5660 * success it returns a pointer to the section header, otherwise it will 5661 * return NULL. 
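 *
 * Illustrative sketch (not code from this driver; pinfo_sec and profile are
 * placeholder names for caller-owned state): the profile information section
 * of a SEGMENT_TYPE_I40E segment could be looked up with:
 *
 *	struct i40e_profile_section_header *pinfo_sec;
 *
 *	pinfo_sec = i40e_find_section_in_profile(SECTION_TYPE_INFO, profile);
 *	if (!pinfo_sec)
 *		return I40E_NOT_SUPPORTED;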
5662 **/ 5663 struct i40e_profile_section_header * 5664 i40e_find_section_in_profile(u32 section_type, 5665 struct i40e_profile_segment *profile) 5666 { 5667 struct i40e_profile_section_header *sec; 5668 struct i40e_section_table *sec_tbl; 5669 u32 sec_off; 5670 u32 i; 5671 5672 if (profile->header.type != SEGMENT_TYPE_I40E) 5673 return NULL; 5674 5675 I40E_SECTION_TABLE(profile, sec_tbl); 5676 5677 for (i = 0; i < sec_tbl->section_count; i++) { 5678 sec_off = sec_tbl->section_offset[i]; 5679 sec = I40E_SECTION_HEADER(profile, sec_off); 5680 if (sec->section.type == section_type) 5681 return sec; 5682 } 5683 5684 return NULL; 5685 } 5686 5687 /** 5688 * i40e_ddp_exec_aq_section - Execute generic AQ for DDP 5689 * @hw: pointer to the hw struct 5690 * @aq: command buffer containing all data to execute AQ 5691 **/ 5692 static enum 5693 i40e_status_code i40e_ddp_exec_aq_section(struct i40e_hw *hw, 5694 struct i40e_profile_aq_section *aq) 5695 { 5696 i40e_status status; 5697 struct i40e_aq_desc desc; 5698 u8 *msg = NULL; 5699 u16 msglen; 5700 5701 i40e_fill_default_direct_cmd_desc(&desc, aq->opcode); 5702 desc.flags |= cpu_to_le16(aq->flags); 5703 memcpy(desc.params.raw, aq->param, sizeof(desc.params.raw)); 5704 5705 msglen = aq->datalen; 5706 if (msglen) { 5707 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | 5708 I40E_AQ_FLAG_RD)); 5709 if (msglen > I40E_AQ_LARGE_BUF) 5710 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 5711 desc.datalen = cpu_to_le16(msglen); 5712 msg = &aq->data[0]; 5713 } 5714 5715 status = i40e_asq_send_command(hw, &desc, msg, msglen, NULL); 5716 5717 if (status) { 5718 i40e_debug(hw, I40E_DEBUG_PACKAGE, 5719 "unable to exec DDP AQ opcode %u, error %d\n", 5720 aq->opcode, status); 5721 return status; 5722 } 5723 5724 /* copy returned desc to aq_buf */ 5725 memcpy(aq->param, desc.params.raw, sizeof(desc.params.raw)); 5726 5727 return 0; 5728 } 5729 5730 /** 5731 * i40e_validate_profile 5732 * @hw: pointer to the hardware structure 5733 * @profile: pointer to the profile segment of the package to be validated 5734 * @track_id: package tracking id 5735 * @rollback: flag if the profile is for rollback. 5736 * 5737 * Validates supported devices and profile's sections. 
5738 */ 5739 static enum i40e_status_code 5740 i40e_validate_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile, 5741 u32 track_id, bool rollback) 5742 { 5743 struct i40e_profile_section_header *sec = NULL; 5744 i40e_status status = 0; 5745 struct i40e_section_table *sec_tbl; 5746 u32 vendor_dev_id; 5747 u32 dev_cnt; 5748 u32 sec_off; 5749 u32 i; 5750 5751 if (track_id == I40E_DDP_TRACKID_INVALID) { 5752 i40e_debug(hw, I40E_DEBUG_PACKAGE, "Invalid track_id\n"); 5753 return I40E_NOT_SUPPORTED; 5754 } 5755 5756 dev_cnt = profile->device_table_count; 5757 for (i = 0; i < dev_cnt; i++) { 5758 vendor_dev_id = profile->device_table[i].vendor_dev_id; 5759 if ((vendor_dev_id >> 16) == PCI_VENDOR_ID_INTEL && 5760 hw->device_id == (vendor_dev_id & 0xFFFF)) 5761 break; 5762 } 5763 if (dev_cnt && i == dev_cnt) { 5764 i40e_debug(hw, I40E_DEBUG_PACKAGE, 5765 "Device doesn't support DDP\n"); 5766 return I40E_ERR_DEVICE_NOT_SUPPORTED; 5767 } 5768 5769 I40E_SECTION_TABLE(profile, sec_tbl); 5770 5771 /* Validate sections types */ 5772 for (i = 0; i < sec_tbl->section_count; i++) { 5773 sec_off = sec_tbl->section_offset[i]; 5774 sec = I40E_SECTION_HEADER(profile, sec_off); 5775 if (rollback) { 5776 if (sec->section.type == SECTION_TYPE_MMIO || 5777 sec->section.type == SECTION_TYPE_AQ || 5778 sec->section.type == SECTION_TYPE_RB_AQ) { 5779 i40e_debug(hw, I40E_DEBUG_PACKAGE, 5780 "Not a roll-back package\n"); 5781 return I40E_NOT_SUPPORTED; 5782 } 5783 } else { 5784 if (sec->section.type == SECTION_TYPE_RB_AQ || 5785 sec->section.type == SECTION_TYPE_RB_MMIO) { 5786 i40e_debug(hw, I40E_DEBUG_PACKAGE, 5787 "Not an original package\n"); 5788 return I40E_NOT_SUPPORTED; 5789 } 5790 } 5791 } 5792 5793 return status; 5794 } 5795 5796 /** 5797 * i40e_write_profile 5798 * @hw: pointer to the hardware structure 5799 * @profile: pointer to the profile segment of the package to be downloaded 5800 * @track_id: package tracking id 5801 * 5802 * Handles the download of a complete package. 
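 *
 * A typical load sequence, sketched for illustration only (pkg_hdr, track_id
 * and status are assumed to be provided by the caller):
 *
 *	struct i40e_profile_segment *profile;
 *
 *	profile = (struct i40e_profile_segment *)
 *		i40e_find_segment_in_package(SEGMENT_TYPE_I40E, pkg_hdr);
 *	if (profile)
 *		status = i40e_write_profile(hw, profile, track_id);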
5803 */ 5804 enum i40e_status_code 5805 i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile, 5806 u32 track_id) 5807 { 5808 i40e_status status = 0; 5809 struct i40e_section_table *sec_tbl; 5810 struct i40e_profile_section_header *sec = NULL; 5811 struct i40e_profile_aq_section *ddp_aq; 5812 u32 section_size = 0; 5813 u32 offset = 0, info = 0; 5814 u32 sec_off; 5815 u32 i; 5816 5817 status = i40e_validate_profile(hw, profile, track_id, false); 5818 if (status) 5819 return status; 5820 5821 I40E_SECTION_TABLE(profile, sec_tbl); 5822 5823 for (i = 0; i < sec_tbl->section_count; i++) { 5824 sec_off = sec_tbl->section_offset[i]; 5825 sec = I40E_SECTION_HEADER(profile, sec_off); 5826 /* Process generic admin command */ 5827 if (sec->section.type == SECTION_TYPE_AQ) { 5828 ddp_aq = (struct i40e_profile_aq_section *)&sec[1]; 5829 status = i40e_ddp_exec_aq_section(hw, ddp_aq); 5830 if (status) { 5831 i40e_debug(hw, I40E_DEBUG_PACKAGE, 5832 "Failed to execute aq: section %d, opcode %u\n", 5833 i, ddp_aq->opcode); 5834 break; 5835 } 5836 sec->section.type = SECTION_TYPE_RB_AQ; 5837 } 5838 5839 /* Skip any non-mmio sections */ 5840 if (sec->section.type != SECTION_TYPE_MMIO) 5841 continue; 5842 5843 section_size = sec->section.size + 5844 sizeof(struct i40e_profile_section_header); 5845 5846 /* Write MMIO section */ 5847 status = i40e_aq_write_ddp(hw, (void *)sec, (u16)section_size, 5848 track_id, &offset, &info, NULL); 5849 if (status) { 5850 i40e_debug(hw, I40E_DEBUG_PACKAGE, 5851 "Failed to write profile: section %d, offset %d, info %d\n", 5852 i, offset, info); 5853 break; 5854 } 5855 } 5856 return status; 5857 } 5858 5859 /** 5860 * i40e_rollback_profile 5861 * @hw: pointer to the hardware structure 5862 * @profile: pointer to the profile segment of the package to be removed 5863 * @track_id: package tracking id 5864 * 5865 * Rolls back previously loaded package. 5866 */ 5867 enum i40e_status_code 5868 i40e_rollback_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile, 5869 u32 track_id) 5870 { 5871 struct i40e_profile_section_header *sec = NULL; 5872 i40e_status status = 0; 5873 struct i40e_section_table *sec_tbl; 5874 u32 offset = 0, info = 0; 5875 u32 section_size = 0; 5876 u32 sec_off; 5877 int i; 5878 5879 status = i40e_validate_profile(hw, profile, track_id, true); 5880 if (status) 5881 return status; 5882 5883 I40E_SECTION_TABLE(profile, sec_tbl); 5884 5885 /* For rollback write sections in reverse */ 5886 for (i = sec_tbl->section_count - 1; i >= 0; i--) { 5887 sec_off = sec_tbl->section_offset[i]; 5888 sec = I40E_SECTION_HEADER(profile, sec_off); 5889 5890 /* Skip any non-rollback sections */ 5891 if (sec->section.type != SECTION_TYPE_RB_MMIO) 5892 continue; 5893 5894 section_size = sec->section.size + 5895 sizeof(struct i40e_profile_section_header); 5896 5897 /* Write roll-back MMIO section */ 5898 status = i40e_aq_write_ddp(hw, (void *)sec, (u16)section_size, 5899 track_id, &offset, &info, NULL); 5900 if (status) { 5901 i40e_debug(hw, I40E_DEBUG_PACKAGE, 5902 "Failed to write profile: section %d, offset %d, info %d\n", 5903 i, offset, info); 5904 break; 5905 } 5906 } 5907 return status; 5908 } 5909 5910 /** 5911 * i40e_add_pinfo_to_list 5912 * @hw: pointer to the hardware structure 5913 * @profile: pointer to the profile segment of the package 5914 * @profile_info_sec: buffer for information section 5915 * @track_id: package tracking id 5916 * 5917 * Register a profile to the list of loaded profiles. 
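 *
 * The caller owns @profile_info_sec, which must be large enough for one
 * section header plus one profile info record, as the body below fills in.
 * A hedged allocation sketch (assuming a sleeping context; sec_buf, profile,
 * track_id and status are placeholder names):
 *
 *	u8 *sec_buf;
 *
 *	sec_buf = kzalloc(sizeof(struct i40e_profile_section_header) +
 *			  sizeof(struct i40e_profile_info), GFP_KERNEL);
 *	if (!sec_buf)
 *		return I40E_ERR_NO_MEMORY;
 *	status = i40e_add_pinfo_to_list(hw, profile, sec_buf, track_id);
 *	kfree(sec_buf);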
5918 */ 5919 enum i40e_status_code 5920 i40e_add_pinfo_to_list(struct i40e_hw *hw, 5921 struct i40e_profile_segment *profile, 5922 u8 *profile_info_sec, u32 track_id) 5923 { 5924 i40e_status status = 0; 5925 struct i40e_profile_section_header *sec = NULL; 5926 struct i40e_profile_info *pinfo; 5927 u32 offset = 0, info = 0; 5928 5929 sec = (struct i40e_profile_section_header *)profile_info_sec; 5930 sec->tbl_size = 1; 5931 sec->data_end = sizeof(struct i40e_profile_section_header) + 5932 sizeof(struct i40e_profile_info); 5933 sec->section.type = SECTION_TYPE_INFO; 5934 sec->section.offset = sizeof(struct i40e_profile_section_header); 5935 sec->section.size = sizeof(struct i40e_profile_info); 5936 pinfo = (struct i40e_profile_info *)(profile_info_sec + 5937 sec->section.offset); 5938 pinfo->track_id = track_id; 5939 pinfo->version = profile->version; 5940 pinfo->op = I40E_DDP_ADD_TRACKID; 5941 memcpy(pinfo->name, profile->name, I40E_DDP_NAME_SIZE); 5942 5943 status = i40e_aq_write_ddp(hw, (void *)sec, sec->data_end, 5944 track_id, &offset, &info, NULL); 5945 5946 return status; 5947 } 5948 5949 /** 5950 * i40e_aq_add_cloud_filters 5951 * @hw: pointer to the hardware structure 5952 * @seid: VSI seid to add cloud filters from 5953 * @filters: Buffer which contains the filters to be added 5954 * @filter_count: number of filters contained in the buffer 5955 * 5956 * Set the cloud filters for a given VSI. The contents of the 5957 * i40e_aqc_cloud_filters_element_data are filled in by the caller 5958 * of the function. 5959 * 5960 **/ 5961 enum i40e_status_code 5962 i40e_aq_add_cloud_filters(struct i40e_hw *hw, u16 seid, 5963 struct i40e_aqc_cloud_filters_element_data *filters, 5964 u8 filter_count) 5965 { 5966 struct i40e_aq_desc desc; 5967 struct i40e_aqc_add_remove_cloud_filters *cmd = 5968 (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw; 5969 enum i40e_status_code status; 5970 u16 buff_len; 5971 5972 i40e_fill_default_direct_cmd_desc(&desc, 5973 i40e_aqc_opc_add_cloud_filters); 5974 5975 buff_len = filter_count * sizeof(*filters); 5976 desc.datalen = cpu_to_le16(buff_len); 5977 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); 5978 cmd->num_filters = filter_count; 5979 cmd->seid = cpu_to_le16(seid); 5980 5981 status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL); 5982 5983 return status; 5984 } 5985 5986 /** 5987 * i40e_aq_add_cloud_filters_bb 5988 * @hw: pointer to the hardware structure 5989 * @seid: VSI seid to add cloud filters from 5990 * @filters: Buffer which contains the filters in big buffer to be added 5991 * @filter_count: number of filters contained in the buffer 5992 * 5993 * Set the big buffer cloud filters for a given VSI. The contents of the 5994 * i40e_aqc_cloud_filters_element_bb are filled in by the caller of the 5995 * function. 
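 *
 * Caller sketch, for illustration only: after filling filter.element (and
 * filter.general_fields where the big-buffer format needs them), a single
 * filter could be added, with vsi_seid standing for the caller's VSI SEID:
 *
 *	struct i40e_aqc_cloud_filters_element_bb filter = {};
 *
 *	status = i40e_aq_add_cloud_filters_bb(hw, vsi_seid, &filter, 1);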
5996 * 5997 **/ 5998 enum i40e_status_code 5999 i40e_aq_add_cloud_filters_bb(struct i40e_hw *hw, u16 seid, 6000 struct i40e_aqc_cloud_filters_element_bb *filters, 6001 u8 filter_count) 6002 { 6003 struct i40e_aq_desc desc; 6004 struct i40e_aqc_add_remove_cloud_filters *cmd = 6005 (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw; 6006 i40e_status status; 6007 u16 buff_len; 6008 int i; 6009 6010 i40e_fill_default_direct_cmd_desc(&desc, 6011 i40e_aqc_opc_add_cloud_filters); 6012 6013 buff_len = filter_count * sizeof(*filters); 6014 desc.datalen = cpu_to_le16(buff_len); 6015 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); 6016 cmd->num_filters = filter_count; 6017 cmd->seid = cpu_to_le16(seid); 6018 cmd->big_buffer_flag = I40E_AQC_ADD_CLOUD_CMD_BB; 6019 6020 for (i = 0; i < filter_count; i++) { 6021 u16 tnl_type; 6022 u32 ti; 6023 6024 tnl_type = (le16_to_cpu(filters[i].element.flags) & 6025 I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >> 6026 I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT; 6027 6028 /* Due to hardware eccentricities, the VNI for Geneve is shifted 6029 * one more byte further than normally used for Tenant ID in 6030 * other tunnel types. 6031 */ 6032 if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) { 6033 ti = le32_to_cpu(filters[i].element.tenant_id); 6034 filters[i].element.tenant_id = cpu_to_le32(ti << 8); 6035 } 6036 } 6037 6038 status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL); 6039 6040 return status; 6041 } 6042 6043 /** 6044 * i40e_aq_rem_cloud_filters 6045 * @hw: pointer to the hardware structure 6046 * @seid: VSI seid to remove cloud filters from 6047 * @filters: Buffer which contains the filters to be removed 6048 * @filter_count: number of filters contained in the buffer 6049 * 6050 * Remove the cloud filters for a given VSI. The contents of the 6051 * i40e_aqc_cloud_filters_element_data are filled in by the caller 6052 * of the function. 6053 * 6054 **/ 6055 enum i40e_status_code 6056 i40e_aq_rem_cloud_filters(struct i40e_hw *hw, u16 seid, 6057 struct i40e_aqc_cloud_filters_element_data *filters, 6058 u8 filter_count) 6059 { 6060 struct i40e_aq_desc desc; 6061 struct i40e_aqc_add_remove_cloud_filters *cmd = 6062 (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw; 6063 enum i40e_status_code status; 6064 u16 buff_len; 6065 6066 i40e_fill_default_direct_cmd_desc(&desc, 6067 i40e_aqc_opc_remove_cloud_filters); 6068 6069 buff_len = filter_count * sizeof(*filters); 6070 desc.datalen = cpu_to_le16(buff_len); 6071 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); 6072 cmd->num_filters = filter_count; 6073 cmd->seid = cpu_to_le16(seid); 6074 6075 status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL); 6076 6077 return status; 6078 } 6079 6080 /** 6081 * i40e_aq_rem_cloud_filters_bb 6082 * @hw: pointer to the hardware structure 6083 * @seid: VSI seid to remove cloud filters from 6084 * @filters: Buffer which contains the filters in big buffer to be removed 6085 * @filter_count: number of filters contained in the buffer 6086 * 6087 * Remove the big buffer cloud filters for a given VSI. The contents of the 6088 * i40e_aqc_cloud_filters_element_bb are filled in by the caller of the 6089 * function. 
6090 * 6091 **/ 6092 enum i40e_status_code 6093 i40e_aq_rem_cloud_filters_bb(struct i40e_hw *hw, u16 seid, 6094 struct i40e_aqc_cloud_filters_element_bb *filters, 6095 u8 filter_count) 6096 { 6097 struct i40e_aq_desc desc; 6098 struct i40e_aqc_add_remove_cloud_filters *cmd = 6099 (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw; 6100 i40e_status status; 6101 u16 buff_len; 6102 int i; 6103 6104 i40e_fill_default_direct_cmd_desc(&desc, 6105 i40e_aqc_opc_remove_cloud_filters); 6106 6107 buff_len = filter_count * sizeof(*filters); 6108 desc.datalen = cpu_to_le16(buff_len); 6109 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); 6110 cmd->num_filters = filter_count; 6111 cmd->seid = cpu_to_le16(seid); 6112 cmd->big_buffer_flag = I40E_AQC_ADD_CLOUD_CMD_BB; 6113 6114 for (i = 0; i < filter_count; i++) { 6115 u16 tnl_type; 6116 u32 ti; 6117 6118 tnl_type = (le16_to_cpu(filters[i].element.flags) & 6119 I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >> 6120 I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT; 6121 6122 /* Due to hardware eccentricities, the VNI for Geneve is shifted 6123 * one more byte further than normally used for Tenant ID in 6124 * other tunnel types. 6125 */ 6126 if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) { 6127 ti = le32_to_cpu(filters[i].element.tenant_id); 6128 filters[i].element.tenant_id = cpu_to_le32(ti << 8); 6129 } 6130 } 6131 6132 status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL); 6133 6134 return status; 6135 } 6136
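
/*
 * Illustrative note, not part of the upstream driver: a profile applied with
 * i40e_write_profile() above can later be undone by locating the same
 * segment in the package and handing it to i40e_rollback_profile(); pkg_hdr,
 * track_id and status below are placeholders for caller-owned state.
 *
 *	struct i40e_profile_segment *profile;
 *
 *	profile = (struct i40e_profile_segment *)
 *		i40e_find_segment_in_package(SEGMENT_TYPE_I40E, pkg_hdr);
 *	if (profile)
 *		status = i40e_rollback_profile(hw, profile, track_id);
 */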